metadata
dict | text
stringlengths 60
3.49M
|
---|---|
{
"source": "2series/Project-Apps",
"score": 4
} |
#### File: counter_terrorism_with_ML/terror_ai/decision_tree_ml.py
```python
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
# Function to split the dataset
def splitdataset(balance_data):
# Seperating the target variable
X = balance_data.iloc[:, 0:-1]
Y = balance_data.iloc[:, -1]
# Spliting the dataset into train and test
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size = 0.2, random_state = 2107)
return X_train, X_test, y_train, y_test
# Function to perform training with giniIndex.
def train_using_gini(X_train, X_test, y_train):
# Creating the classifier object
clf_gini = DecisionTreeClassifier(criterion = "gini",
random_state = 2107)
# Performing training
clf_gini.fit(X_train, y_train)
return clf_gini
# Function to perform training with entropy.
def train_using_entropy(X_train, y_train):
# Decision tree with entropy
clf_entropy = DecisionTreeClassifier(
criterion = "entropy", random_state = 2107)
# Performing training
clf_entropy.fit(X_train, y_train)
return clf_entropy
# Function to make predictions
def prediction(X_test, clf_object):
# Predicton on test with giniIndex
y_pred = clf_object.predict(X_test)
return y_pred
# Function to calculate accuracy
def cal_accuracy(y_test, y_pred):
return accuracy_score(y_test,y_pred)*100
# Driver code
def decision_main(df, test_df):
X = df.iloc[:, 0:-1]
y = df.iloc[:, -1]
X_train, X_test, y_train, y_test = splitdataset(df)
#clf_gini = train_using_gini(X_train, y_train)
clf_entropy = train_using_entropy(X_train, y_train)
# Prediction using entropy
y_pred_entropy = prediction(X_test, clf_entropy)
accuracy = cal_accuracy(y_test, y_pred_entropy)
y_pred = prediction(test_df, clf_entropy)
return y_pred, accuracy
``` |
{
"source": "2series/self_analysis",
"score": 3
} |
#### File: self_analysis/Build_API/app.py
```python
from flask import Flask, request, redirect, url_for, flash, jsonify
import numpy as np
import pickle as p
import pandas as pd
import json
app = Flask(__name__)
@app.route('/api/', methods=['POST'])
def makecalc():
j_data = request.get_json()
prediction = np.array2string(model.predict(j_data))
return jsonify(prediction)
if __name__ == '__main__':
modelfile = 'models/final_prediction.pickle'
model = p.load(open(modelfile, 'rb'))
app.run(debug=True,host='0.0.0.0')
```
#### File: self_analysis/Cars/identity-block.py
```python
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.initializers import glorot_uniform
from custom_layers.scale_layer import Scale
def identity_block(X, f, filters, stage, block):
"""
Implementation of the identity block as defined in Figure 3
Arguments:
X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
f -- integer, specifying the shape of the middle CONV's window for the main path
filters -- python list of integers, defining the number of filters in the CONV layers of the main path
stage -- integer, used to name the layers, depending on their position in the network
block -- string/character, used to name the layers, depending on their position in the network
Returns:
X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
"""
# defining name basis
eps = 1.1e-5
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
scale_name_base = 'scale' + str(stage) + block + '_branch'
# Retrieve Filters
F1, F2, F3 = filters
# Save the input value. You'll need this later to add back to the main path.
X_shortcut = X
# First component of main path
X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', use_bias=False, name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(epsilon = eps, axis = 3, name = bn_name_base + '2a')(X)
X = Scale(axis = 3, name = scale_name_base + '2a')(X)
X = Activation('relu')(X)
# Second component of main path
X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', use_bias=False, name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(epsilon = eps, axis = 3, name = bn_name_base + '2b')(X)
X = Scale(axis = 3, name = scale_name_base + '2b')(X)
X = Activation('relu')(X)
# Third component of main path
X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', use_bias=False, name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(epsilon = eps, axis = 3, name = bn_name_base + '2c')(X)
X = Scale(axis = 3, name = scale_name_base + '2c')(X)
# Final step: Add shortcut value to main path, and pass it through a RELU activation
X = Add()([X,X_shortcut])
X = Activation('relu')(X)
return X
``` |
{
"source": "2Shirt/WizardK",
"score": 3
} |
#### File: scripts/wk/exe.py
```python
import json
import logging
import os
import re
import subprocess
import time
from threading import Thread
from queue import Queue, Empty
import psutil
# STATIC VARIABLES
LOG = logging.getLogger(__name__)
# Classes
class NonBlockingStreamReader():
"""Class to allow non-blocking reads from a stream."""
# pylint: disable=too-few-public-methods
# Credits:
## https://gist.github.com/EyalAr/7915597
## https://stackoverflow.com/a/4896288
def __init__(self, stream):
self.stream = stream
self.queue = Queue()
def populate_queue(stream, queue):
"""Collect lines from stream and put them in queue."""
while not stream.closed:
try:
line = stream.read(1)
except ValueError:
# Assuming the stream was closed
line = None
if line:
queue.put(line)
self.thread = start_thread(
populate_queue,
args=(self.stream, self.queue),
)
def stop(self):
"""Stop reading from input stream."""
self.stream.close()
def read(self, timeout=None):
"""Read from queue if possible, returns item from queue."""
try:
return self.queue.get(block=timeout is not None, timeout=timeout)
except Empty:
return None
def save_to_file(self, proc, out_path):
"""Continuously save output to file while proc is running."""
LOG.debug('Saving process %s output to %s', proc, out_path)
while proc.poll() is None:
out = b''
out_bytes = b''
while out is not None:
out = self.read(0.1)
if out:
out_bytes += out
with open(out_path, 'a', encoding='utf-8') as _f:
_f.write(out_bytes.decode('utf-8', errors='ignore'))
# Close stream to prevent 100% CPU usage
self.stream.close()
# Functions
def build_cmd_kwargs(cmd, minimized=False, pipe=True, shell=False, **kwargs):
"""Build kwargs for use by subprocess functions, returns dict.
Specifically subprocess.run() and subprocess.Popen().
NOTE: If no encoding specified then UTF-8 will be used.
"""
LOG.debug(
'cmd: %s, minimized: %s, pipe: %s, shell: %s, kwargs: %s',
cmd, minimized, pipe, shell, kwargs,
)
cmd_kwargs = {
'args': cmd,
'shell': shell,
}
# Strip sudo if appropriate
if cmd[0] == 'sudo':
if os.name == 'posix' and os.geteuid() == 0: # pylint: disable=no-member
cmd.pop(0)
# Add additional kwargs if applicable
for key in 'check cwd encoding errors stderr stdin stdout'.split():
if key in kwargs:
cmd_kwargs[key] = kwargs[key]
# Default to UTF-8 encoding
if not ('encoding' in cmd_kwargs or 'errors' in cmd_kwargs):
cmd_kwargs['encoding'] = 'utf-8'
cmd_kwargs['errors'] = 'ignore'
# Start minimized
if minimized:
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = 6
cmd_kwargs['startupinfo'] = startupinfo
# Pipe output
if pipe:
cmd_kwargs['stderr'] = subprocess.PIPE
cmd_kwargs['stdout'] = subprocess.PIPE
# Done
LOG.debug('cmd_kwargs: %s', cmd_kwargs)
return cmd_kwargs
def get_json_from_command(cmd, check=True, encoding='utf-8', errors='ignore'):
"""Capture JSON content from cmd output, returns dict.
If the data can't be decoded then either an exception is raised
or an empty dict is returned depending on errors.
"""
LOG.debug('Loading JSON data from cmd: %s', cmd)
json_data = {}
try:
proc = run_program(cmd, check=check, encoding=encoding, errors=errors)
json_data = json.loads(proc.stdout)
except (subprocess.CalledProcessError, json.decoder.JSONDecodeError):
if errors != 'ignore':
raise
return json_data
def get_procs(name, exact=True, try_again=True):
"""Get process object(s) based on name, returns list of proc objects."""
LOG.debug('name: %s, exact: %s', name, exact)
processes = []
regex = f'^{name}$' if exact else name
# Iterate over all processes
for proc in psutil.process_iter():
if re.search(regex, proc.name(), re.IGNORECASE):
processes.append(proc)
# Try again?
if not processes and try_again:
time.sleep(1)
processes = get_procs(name, exact, try_again=False)
# Done
return processes
def kill_procs(name, exact=True, force=False, timeout=30):
"""Kill all processes matching name (case-insensitively).
NOTE: Under Posix systems this will send SIGINT to allow processes
to gracefully exit.
If force is True then it will wait until timeout specified and then
send SIGKILL to any processes still alive.
"""
LOG.debug(
'name: %s, exact: %s, force: %s, timeout: %s',
name, exact, force, timeout,
)
target_procs = get_procs(name, exact=exact)
for proc in target_procs:
proc.terminate()
# Force kill if necesary
if force:
results = psutil.wait_procs(target_procs, timeout=timeout)
for proc in results[1]: # Alive processes
proc.kill()
def popen_program(cmd, minimized=False, pipe=False, shell=False, **kwargs):
"""Run program and return a subprocess.Popen object."""
LOG.debug(
'cmd: %s, minimized: %s, pipe: %s, shell: %s',
cmd, minimized, pipe, shell,
)
LOG.debug('kwargs: %s', kwargs)
cmd_kwargs = build_cmd_kwargs(
cmd,
minimized=minimized,
pipe=pipe,
shell=shell,
**kwargs)
try:
# pylint: disable=consider-using-with
proc = subprocess.Popen(**cmd_kwargs)
except FileNotFoundError:
LOG.error('Command not found: %s', cmd)
raise
LOG.debug('proc: %s', proc)
# Done
return proc
def run_program(cmd, check=True, pipe=True, shell=False, **kwargs):
# pylint: disable=subprocess-run-check
"""Run program and return a subprocess.CompletedProcess object."""
LOG.debug(
'cmd: %s, check: %s, pipe: %s, shell: %s',
cmd, check, pipe, shell,
)
LOG.debug('kwargs: %s', kwargs)
cmd_kwargs = build_cmd_kwargs(
cmd,
check=check,
pipe=pipe,
shell=shell,
**kwargs)
try:
proc = subprocess.run(**cmd_kwargs)
except FileNotFoundError:
LOG.error('Command not found: %s', cmd)
raise
LOG.debug('proc: %s', proc)
# Done
return proc
def start_thread(function, args=None, daemon=True):
"""Run function as thread in background, returns Thread object."""
LOG.debug(
'Starting background thread for function: %s, args: %s, daemon: %s',
function, args, daemon,
)
args = args if args else []
thread = Thread(target=function, args=args, daemon=daemon)
thread.start()
return thread
def stop_process(proc, graceful=True):
"""Stop process.
NOTES: proc should be a subprocess.Popen obj.
If graceful is True then a SIGTERM is sent before SIGKILL.
"""
# Graceful exit
if graceful:
if os.name == 'posix' and os.geteuid() != 0: # pylint: disable=no-member
run_program(['sudo', 'kill', str(proc.pid)], check=False)
else:
proc.terminate()
time.sleep(2)
# Force exit
if os.name == 'posix' and os.geteuid() != 0: # pylint: disable=no-member
run_program(['sudo', 'kill', '-9', str(proc.pid)], check=False)
else:
proc.kill()
def wait_for_procs(name, exact=True, timeout=None):
"""Wait for all process matching name."""
LOG.debug('name: %s, exact: %s, timeout: %s', name, exact, timeout)
target_procs = get_procs(name, exact=exact)
procs = psutil.wait_procs(target_procs, timeout=timeout)
# Raise exception if necessary
if procs[1]: # Alive processes
raise psutil.TimeoutExpired(name=name, seconds=timeout)
if __name__ == '__main__':
print("This file is not meant to be called directly.")
```
#### File: wk/hw/ddrescue.py
```python
import atexit
import datetime
import json
import logging
import math
import os
import pathlib
import plistlib
import re
import shutil
import subprocess
import time
from collections import OrderedDict
from docopt import docopt
import psutil
import pytz
from wk import cfg, debug, exe, io, log, net, std, tmux
from wk.cfg.ddrescue import DDRESCUE_SETTINGS
from wk.hw import obj as hw_obj
# STATIC VARIABLES
DOCSTRING = f'''{cfg.main.KIT_NAME_FULL}: ddrescue TUI
Usage:
ddrescue-tui
ddrescue-tui [options] (clone|image) [<source> [<destination>]]
ddrescue-tui (-h | --help)
Options:
-h --help Show this page
-s --dry-run Print commands to be used instead of running them
--force-local-map Skip mounting shares and save map to local drive
--start-fresh Ignore previous runs and start new recovery
'''
DETECT_DRIVES_NOTICE = '''
This option will force the drive controllers to rescan for devices.
The method used is not 100% reliable and may cause issues. If you see
any script errors or crashes after running this option then please
restart the computer and try again.
'''
CLONE_SETTINGS = {
'Source': None,
'Destination': None,
'Create Boot Partition': False,
'First Run': True,
'Needs Format': False,
'Table Type': None,
'Partition Mapping': [
# (5, 1) ## Clone source partition #5 to destination partition #1
],
}
if std.PLATFORM == 'Darwin':
DDRESCUE_SETTINGS['Default']['--idirect'] = {'Selected': False, 'Hidden': True}
DDRESCUE_SETTINGS['Default']['--odirect'] = {'Selected': False, 'Hidden': True}
DDRESCUE_LOG_REGEX = re.compile(
r'^\s*(?P<key>\S+):\s+'
r'(?P<size>\d+)\s+'
r'(?P<unit>[PTGMKB]i?B?)'
r'.*\(\s*(?P<percent>\d+\.?\d*)%\)$',
re.IGNORECASE,
)
REGEX_REMAINING_TIME = re.compile(
r'remaining time:'
r'\s*((?P<days>\d+)d)?'
r'\s*((?P<hours>\d+)h)?'
r'\s*((?P<minutes>\d+)m)?'
r'\s*((?P<seconds>\d+)s)?'
r'\s*(?P<na>n/a)?',
re.IGNORECASE
)
LOG = logging.getLogger(__name__)
MENU_ACTIONS = (
'Start',
f'Change settings {std.color_string("(experts only)", "YELLOW")}',
f'Detect drives {std.color_string("(experts only)", "YELLOW")}',
'Quit')
MENU_TOGGLES = {
'Auto continue (if recovery % over threshold)': True,
'Retry (mark non-rescued sectors "non-tried")': False,
}
PANE_RATIOS = (
12, # SMART
22, # ddrescue progress
4, # Journal (kernel messages)
)
PLATFORM = std.PLATFORM
RECOMMENDED_FSTYPES = re.compile(r'^(ext[234]|ntfs|xfs)$')
if PLATFORM == 'Darwin':
RECOMMENDED_FSTYPES = re.compile(r'^(apfs|hfs.?)$')
RECOMMENDED_MAP_FSTYPES = re.compile(
r'^(apfs|cifs|ext[234]|hfs.?|ntfs|smbfs|vfat|xfs)$'
)
SETTING_PRESETS = (
'Default',
'Fast',
'Safe',
)
STATUS_COLORS = {
'Passed': 'GREEN',
'Aborted': 'YELLOW',
'Skipped': 'YELLOW',
'Working': 'YELLOW',
'ERROR': 'RED',
}
TIMEZONE = pytz.timezone(cfg.main.LINUX_TIME_ZONE)
# Classes
class BlockPair():
"""Object for tracking source to dest recovery data."""
def __init__(self, source, destination, model, working_dir):
"""Initialize BlockPair()
NOTE: source should be a wk.hw.obj.Disk() object
and destination should be a pathlib.Path() object.
"""
self.source = source.path
self.destination = destination
self.map_data = {}
self.map_path = None
self.size = source.details['size']
self.status = OrderedDict({
'read': 'Pending',
'trim': 'Pending',
'scrape': 'Pending',
})
# Set map file
# e.g. '(Clone|Image)_Model[_p#]_Size[_Label].map'
map_name = model if model else 'None'
if source.details['bus'] == 'Image':
map_name = 'Image'
if source.details['parent']:
part_num = re.sub(r"^.*?(\d+)$", r"\1", source.path.name)
map_name += f'_p{part_num}'
size_str = std.bytes_to_string(
size=source.details["size"],
use_binary=False,
)
map_name += f'_{size_str.replace(" ", "")}'
if source.details.get('label', ''):
map_name += f'_{source.details["label"]}'
map_name = map_name.replace(' ', '_')
map_name = map_name.replace('/', '_')
if destination.is_dir():
# Imaging
self.map_path = pathlib.Path(f'{destination}/Image_{map_name}.map')
self.destination = self.map_path.with_suffix('.dd')
self.destination.touch()
else:
# Cloning
self.map_path = pathlib.Path(f'{working_dir}/Clone_{map_name}.map')
self.map_path.touch()
# Set initial status
self.set_initial_status()
def get_error_size(self):
"""Get error size in bytes, returns int."""
return self.size - self.get_rescued_size()
def get_percent_recovered(self):
"""Get percent rescued from map_data, returns float."""
return 100 * self.map_data.get('rescued', 0) / self.size
def get_rescued_size(self):
"""Get rescued size using map data.
NOTE: Returns 0 if no map data is available.
"""
self.load_map_data()
return self.map_data.get('rescued', 0)
def load_map_data(self):
"""Load map data from file.
NOTE: If the file is missing it is assumed that recovery hasn't
started yet so default values will be returned instead.
"""
data = {'full recovery': False, 'pass completed': False}
# Get output from ddrescuelog
cmd = [
'ddrescuelog',
'--binary-prefixes',
'--show-status',
f'--size={self.size}',
self.map_path,
]
proc = exe.run_program(cmd, check=False)
# Parse output
for line in proc.stdout.splitlines():
_r = DDRESCUE_LOG_REGEX.search(line)
if _r:
if _r.group('key') == 'rescued' and _r.group('percent') == '100':
# Fix rounding errors from ddrescuelog output
data['rescued'] = self.size
else:
data[_r.group('key')] = std.string_to_bytes(
f'{_r.group("size")} {_r.group("unit")}',
)
data['pass completed'] = 'current status: finished' in line.lower()
# Check if 100% done (only if map is present and non-zero size
# NOTE: ddrescuelog returns 0 (i.e. 100% done) for empty files
if self.map_path.exists() and self.map_path.stat().st_size != 0:
cmd = [
'ddrescuelog',
'--done-status',
f'--size={self.size}',
self.map_path,
]
proc = exe.run_program(cmd, check=False)
data['full recovery'] = proc.returncode == 0
# Done
self.map_data.update(data)
def pass_complete(self, pass_name):
"""Check if pass_num is complete based on map data, returns bool."""
complete = False
pending_size = 0
# Check map data
if self.map_data.get('full recovery', False):
complete = True
elif 'non-tried' not in self.map_data:
# Assuming recovery has not been attempted yet
complete = False
else:
# Check that current and previous passes are complete
pending_size = self.map_data['non-tried']
if pass_name in ('trim', 'scrape'):
pending_size += self.map_data['non-trimmed']
if pass_name == 'scrape':
pending_size += self.map_data['non-scraped']
if pending_size == 0:
complete = True
# Done
return complete
def safety_check(self):
"""Run safety check and abort if necessary."""
dest_size = -1
if self.destination.exists():
dest_obj = hw_obj.Disk(self.destination)
dest_size = dest_obj.details['size']
del dest_obj
# Check destination size if cloning
if not self.destination.is_file() and dest_size < self.size:
std.print_error(f'Invalid destination: {self.destination}')
raise std.GenericAbort()
def set_initial_status(self):
"""Read map data and set initial statuses."""
self.load_map_data()
percent = self.get_percent_recovered()
for name in self.status.keys():
if self.pass_complete(name):
self.status[name] = percent
else:
# Stop checking
if percent > 0:
self.status[name] = percent
break
def skip_pass(self, pass_name):
"""Mark pass as skipped if applicable."""
if self.status[pass_name] == 'Pending':
self.status[pass_name] = 'Skipped'
def update_progress(self, pass_name):
"""Update progress via map data."""
self.load_map_data()
# Update status
percent = self.get_percent_recovered()
if percent > 0:
self.status[pass_name] = percent
# Mark future passes as skipped if applicable
if percent == 100:
if pass_name == 'read':
self.status['trim'] = 'Skipped'
if pass_name in ('read', 'trim'):
self.status['scrape'] = 'Skipped'
class State():
# pylint: disable=too-many-public-methods
"""Object for tracking hardware diagnostic data."""
def __init__(self):
self.block_pairs = []
self.destination = None
self.log_dir = None
self.mode = None
self.panes = {}
self.source = None
self.working_dir = None
# Start a background process to maintain layout
self._init_tmux()
exe.start_thread(self._fix_tmux_layout_loop)
def _add_block_pair(self, source, destination):
"""Add BlockPair object and run safety checks."""
self.block_pairs.append(
BlockPair(
source=source,
destination=destination,
model=self.source.details['model'],
working_dir=self.working_dir,
))
def _get_clone_settings_path(self):
"""get Clone settings file path, returns pathlib.Path obj."""
description = self.source.details['model']
if not description:
description = self.source.path.name
return pathlib.Path(f'{self.working_dir}/Clone_{description}.json')
def _fix_tmux_layout(self, forced=True):
"""Fix tmux layout based on cfg.ddrescue.TMUX_LAYOUT."""
layout = cfg.ddrescue.TMUX_LAYOUT
needs_fixed = tmux.layout_needs_fixed(self.panes, layout)
# Main layout fix
try:
tmux.fix_layout(self.panes, layout, forced=forced)
except RuntimeError:
# Assuming self.panes changed while running
pass
# Source/Destination
if forced or needs_fixed:
self.update_top_panes()
# Return if Progress pane not present
if 'Progress' not in self.panes:
return
# SMART/Journal
if forced or needs_fixed:
height = tmux.get_pane_size(self.panes['Progress'])[1] - 2
p_ratios = [int((x/sum(PANE_RATIOS)) * height) for x in PANE_RATIOS]
if 'SMART' in self.panes:
tmux.resize_pane(self.panes['SMART'], height=p_ratios[0])
tmux.resize_pane(height=p_ratios[1])
if 'Journal' in self.panes:
tmux.resize_pane(self.panes['Journal'], height=p_ratios[2])
def _fix_tmux_layout_loop(self):
"""Fix tmux layout on a loop.
NOTE: This should be called as a thread.
"""
while True:
self._fix_tmux_layout(forced=False)
std.sleep(1)
def _init_tmux(self):
"""Initialize tmux layout."""
tmux.kill_all_panes()
# Source (placeholder)
self.panes['Source'] = tmux.split_window(
behind=True,
lines=2,
text=' ',
vertical=True,
)
# Started
self.panes['Started'] = tmux.split_window(
lines=cfg.ddrescue.TMUX_SIDE_WIDTH,
target_id=self.panes['Source'],
text=std.color_string(
['Started', time.strftime("%Y-%m-%d %H:%M %Z")],
['BLUE', None],
sep='\n',
),
)
# Source / Dest
self.update_top_panes()
def _load_settings(self, discard_unused_settings=False):
"""Load settings from previous run, returns dict."""
settings = {}
settings_file = self._get_clone_settings_path()
# Try loading JSON data
if settings_file.exists():
with open(settings_file, 'r', encoding='utf-8') as _f:
try:
settings = json.loads(_f.read())
except (OSError, json.JSONDecodeError) as err:
LOG.error('Failed to load clone settings')
std.print_error('Invalid clone settings detected.')
raise std.GenericAbort() from err
# Check settings
if settings:
if settings['First Run'] and discard_unused_settings:
# Previous run aborted before starting recovery, discard settings
settings = {}
else:
bail = False
for key in ('model', 'serial'):
if settings['Source'][key] != self.source.details[key]:
std.print_error(f"Clone settings don't match source {key}")
bail = True
if settings['Destination'][key] != self.destination.details[key]:
std.print_error(f"Clone settings don't match destination {key}")
bail = True
if bail:
raise std.GenericAbort()
# Update settings
if not settings:
settings = CLONE_SETTINGS.copy()
if not settings['Source']:
settings['Source'] = {
'model': self.source.details['model'],
'serial': self.source.details['serial'],
}
if not settings['Destination']:
settings['Destination'] = {
'model': self.destination.details['model'],
'serial': self.destination.details['serial'],
}
# Done
return settings
def _save_settings(self, settings):
"""Save settings for future runs."""
settings_file = self._get_clone_settings_path()
# Try saving JSON data
try:
with open(settings_file, 'w', encoding='utf-8') as _f:
json.dump(settings, _f)
except OSError as err:
std.print_error('Failed to save clone settings')
raise std.GenericAbort() from err
def add_clone_block_pairs(self):
"""Add device to device block pairs and set settings if necessary."""
source_sep = get_partition_separator(self.source.path.name)
dest_sep = get_partition_separator(self.destination.path.name)
settings = {}
source_parts = []
# Clone settings
settings = self._load_settings(discard_unused_settings=True)
# Add pairs
if settings['Partition Mapping']:
# Resume previous run, load pairs from settings file
for part_map in settings['Partition Mapping']:
bp_source = hw_obj.Disk(
f'{self.source.path}{source_sep}{part_map[0]}',
)
bp_dest = pathlib.Path(
f'{self.destination.path}{dest_sep}{part_map[1]}',
)
self._add_block_pair(bp_source, bp_dest)
else:
source_parts = select_disk_parts('Clone', self.source)
if self.source.path.samefile(source_parts[0].path):
# Whole disk (or single partition via args), skip settings
bp_dest = self.destination.path
self._add_block_pair(self.source, bp_dest)
else:
# New run, use new settings file
settings['Needs Format'] = True
offset = 0
user_choice = std.choice(
['G', 'M', 'S'],
'Format clone using GPT, MBR, or match Source type?',
)
if user_choice == 'G':
settings['Table Type'] = 'GPT'
elif user_choice == 'M':
settings['Table Type'] = 'MBR'
else:
# Match source type
settings['Table Type'] = get_table_type(self.source)
if std.ask('Create an empty Windows boot partition on the clone?'):
settings['Create Boot Partition'] = True
offset = 2 if settings['Table Type'] == 'GPT' else 1
# Add pairs
for dest_num, part in enumerate(source_parts):
dest_num += offset + 1
bp_dest = pathlib.Path(
f'{self.destination.path}{dest_sep}{dest_num}',
)
self._add_block_pair(part, bp_dest)
# Add to settings file
source_num = re.sub(r'^.*?(\d+)$', r'\1', part.path.name)
settings['Partition Mapping'].append([source_num, dest_num])
# Save settings
self._save_settings(settings)
# Done
return source_parts
def add_image_block_pairs(self, source_parts):
"""Add device to image file block pairs."""
for part in source_parts:
bp_dest = self.destination
self._add_block_pair(part, bp_dest)
def confirm_selections(self, prompt, source_parts=None):
"""Show selection details and prompt for confirmation."""
report = []
# Source
report.append(std.color_string('Source', 'GREEN'))
report.extend(build_object_report(self.source))
report.append(' ')
# Destination
report.append(std.color_string('Destination', 'GREEN'))
if self.mode == 'Clone':
report[-1] += std.color_string(' (ALL DATA WILL BE DELETED)', 'RED')
report.extend(build_object_report(self.destination))
report.append(' ')
# Show deletion warning if necessary
# NOTE: The check for block_pairs is to limit this section
# to the second confirmation
if self.mode == 'Clone' and self.block_pairs:
report.append(std.color_string('WARNING', 'YELLOW'))
report.append(
'All data will be deleted from the destination listed above.',
)
report.append(
std.color_string(
['This is irreversible and will lead to', 'DATA LOSS.'],
['YELLOW', 'RED'],
),
)
report.append(' ')
# Block pairs
if self.block_pairs:
report.extend(
build_block_pair_report(
self.block_pairs,
self._load_settings() if self.mode == 'Clone' else {},
),
)
report.append(' ')
# Map dir
if self.working_dir:
report.append(std.color_string('Map Save Directory', 'GREEN'))
report.append(f'{self.working_dir}/')
report.append(' ')
if not fstype_is_ok(self.working_dir, map_dir=True):
report.append(
std.color_string(
'Map file(s) are being saved to a non-recommended filesystem.',
'YELLOW',
),
)
report.append(
std.color_string(
['This is strongly discouraged and may lead to', 'DATA LOSS'],
[None, 'RED'],
),
)
report.append(' ')
# Source part(s) selected
if source_parts:
report.append(std.color_string('Source Part(s) selected', 'GREEN'))
if self.source.path.samefile(source_parts[0].path):
report.append('Whole Disk')
else:
report.append(std.color_string(f'{"NAME":<9} SIZE', 'BLUE'))
for part in source_parts:
report.append(
f'{part.path.name:<9} '
f'{std.bytes_to_string(part.details["size"], use_binary=False)}'
)
report.append(' ')
# Prompt user
std.clear_screen()
std.print_report(report)
if not std.ask(prompt):
raise std.GenericAbort()
def generate_report(self):
"""Generate report of overall and per block_pair results, returns list."""
report = []
# Header
report.append(f'{self.mode.title()} Results:')
report.append(' ')
report.append(f'Source: {self.source.description}')
if self.mode == 'Clone':
report.append(f'Destination: {self.destination.description}')
else:
report.append(f'Destination: {self.destination}/')
# Overall
report.append(' ')
error_size = self.get_error_size()
error_size_str = std.bytes_to_string(error_size, decimals=2)
if error_size > 0:
error_size_str = std.color_string(error_size_str, 'YELLOW')
percent = self.get_percent_recovered()
percent = format_status_string(percent, width=0)
report.append(f'Overall rescued: {percent}, error size: {error_size_str}')
# Block-Pairs
if len(self.block_pairs) > 1:
report.append(' ')
for pair in self.block_pairs:
error_size = pair.get_error_size()
error_size_str = std.bytes_to_string(error_size, decimals=2)
if error_size > 0:
error_size_str = std.color_string(error_size_str, 'YELLOW')
pair_size = std.bytes_to_string(pair.size, decimals=2)
percent = pair.get_percent_recovered()
percent = format_status_string(percent, width=0)
report.append(
f'{pair.source.name} ({pair_size}) '
f'rescued: {percent}, '
f'error size: {error_size_str}'
)
# Done
return report
def get_error_size(self):
"""Get total error size from block_pairs in bytes, returns int."""
return self.get_total_size() - self.get_rescued_size()
def get_percent_recovered(self):
"""Get total percent rescued from block_pairs, returns float."""
return 100 * self.get_rescued_size() / self.get_total_size()
def get_rescued_size(self):
"""Get total rescued size from all block pairs, returns int."""
return sum(pair.get_rescued_size() for pair in self.block_pairs)
def get_total_size(self):
"""Get total size of all block_pairs in bytes, returns int."""
return sum(pair.size for pair in self.block_pairs)
def init_recovery(self, docopt_args):
# pylint: disable=too-many-branches
"""Select source/dest and set env."""
std.clear_screen()
source_parts = []
# Set log
self.log_dir = log.format_log_path()
self.log_dir = pathlib.Path(
f'{self.log_dir.parent}/'
f'ddrescue-TUI_{time.strftime("%Y-%m-%d_%H%M%S%z")}/'
)
log.update_log_path(
dest_dir=self.log_dir,
dest_name='main',
keep_history=True,
timestamp=False,
)
# Set mode
self.mode = set_mode(docopt_args)
# Image mode is broken..
# TODO: Fix image mode
# Definitely for Linux, maybe for macOS
if self.mode == 'Image':
std.print_error("I'm sorry but image mode is currently broken...")
std.abort()
# Select source
self.source = get_object(docopt_args['<source>'])
if not self.source:
self.source = select_disk('Source')
self.update_top_panes()
# Select destination
self.destination = get_object(docopt_args['<destination>'])
if not self.destination:
if self.mode == 'Clone':
self.destination = select_disk('Destination', self.source)
elif self.mode == 'Image':
self.destination = select_path('Destination')
self.update_top_panes()
# Confirmation #1
self.confirm_selections(
prompt='Are these selections correct?',
source_parts=source_parts,
)
# Update panes
self.panes['Progress'] = tmux.split_window(
lines=cfg.ddrescue.TMUX_SIDE_WIDTH,
watch_file=f'{self.log_dir}/progress.out',
)
self.update_progress_pane('Idle')
# Set working dir
self.working_dir = get_working_dir(
self.mode,
self.destination,
force_local=docopt_args['--force-local-map'],
)
# Start fresh if requested
if docopt_args['--start-fresh']:
clean_working_dir(self.working_dir)
# Add block pairs
if self.mode == 'Clone':
source_parts = self.add_clone_block_pairs()
else:
source_parts = select_disk_parts(self.mode, self.source)
self.add_image_block_pairs(source_parts)
# Safety Checks #1
if self.mode == 'Clone':
self.safety_check_destination()
self.safety_check_size()
# Confirmation #2
self.update_progress_pane('Idle')
self.confirm_selections('Start recovery?')
# Unmount source and/or destination under macOS
if PLATFORM == 'Darwin':
for disk in (self.source, self.destination):
cmd = ['diskutil', 'unmountDisk', disk.path]
try:
exe.run_program(cmd)
except subprocess.CalledProcessError:
std.print_error('Failed to unmount source and/or destination')
std.abort()
# Prep destination
if self.mode == 'Clone':
self.prep_destination(source_parts, dry_run=docopt_args['--dry-run'])
# Safety Checks #2
if not docopt_args['--dry-run']:
for pair in self.block_pairs:
pair.safety_check()
def mark_started(self):
"""Edit clone settings, if applicable, to mark recovery as started."""
# Skip if not cloning
if self.mode != 'Clone':
return
# Skip if not using settings
# i.e. Cloning whole disk (or single partition via args)
if self.source.path.samefile(self.block_pairs[0].source):
return
# Update settings
settings = self._load_settings()
if settings.get('First Run', False):
settings['First Run'] = False
self._save_settings(settings)
def pass_above_threshold(self, pass_name):
"""Check if all block_pairs meet the pass threshold, returns bool."""
threshold = cfg.ddrescue.AUTO_PASS_THRESHOLDS[pass_name]
return all(
p.get_percent_recovered() >= threshold for p in self.block_pairs
)
def pass_complete(self, pass_name):
"""Check if all block_pairs completed pass_name, returns bool."""
return all(p.pass_complete(pass_name) for p in self.block_pairs)
def prep_destination(self, source_parts, dry_run=True):
"""Prep destination as necessary."""
# TODO: Split into Linux and macOS
# logical sector size is not easily found under macOS
# It might be easier to rewrite this section using macOS tools
dest_prefix = str(self.destination.path)
dest_prefix += get_partition_separator(self.destination.path.name)
esp_type = 'C12A7328-F81F-11D2-BA4B-00A0C93EC93B'
msr_type = 'E3C9E316-0B5C-4DB8-817D-F92DF00215AE'
part_num = 0
sfdisk_script = []
settings = self._load_settings()
# Bail early
if not settings['Needs Format']:
return
# Add partition table settings
if settings['Table Type'] == 'GPT':
sfdisk_script.append('label: gpt')
else:
sfdisk_script.append('label: dos')
sfdisk_script.append('unit: sectors')
sfdisk_script.append('')
# Add boot partition if requested
if settings['Create Boot Partition']:
if settings['Table Type'] == 'GPT':
part_num += 1
sfdisk_script.append(
build_sfdisk_partition_line(
table_type='GPT',
dev_path=f'{dest_prefix}{part_num}',
size='384MiB',
details={'parttype': esp_type, 'partlabel': 'EFI System'},
),
)
part_num += 1
sfdisk_script.append(
build_sfdisk_partition_line(
table_type=settings['Table Type'],
dev_path=f'{dest_prefix}{part_num}',
size='16MiB',
details={'parttype': msr_type, 'partlabel': 'Microsoft Reserved'},
),
)
elif settings['Table Type'] == 'MBR':
part_num += 1
sfdisk_script.append(
build_sfdisk_partition_line(
table_type='MBR',
dev_path=f'{dest_prefix}{part_num}',
size='100MiB',
details={'parttype': '0x7', 'partlabel': 'System Reserved'},
),
)
# Add selected partition(s)
for part in source_parts:
num_sectors = part.details['size'] / self.destination.details['log-sec']
num_sectors = math.ceil(num_sectors)
part_num += 1
sfdisk_script.append(
build_sfdisk_partition_line(
table_type=settings['Table Type'],
dev_path=f'{dest_prefix}{part_num}',
size=num_sectors,
details=part.details,
),
)
# Save sfdisk script
script_path = (
f'{self.working_dir}/'
f'sfdisk_{self.destination.path.name}.script'
)
with open(script_path, 'w', encoding='utf-8') as _f:
_f.write('\n'.join(sfdisk_script))
# Skip real format for dry runs
if dry_run:
LOG.info('Dry run, refusing to format destination')
return
# Format disk
LOG.warning('Formatting destination: %s', self.destination.path)
with open(script_path, 'r', encoding='utf-8') as _f:
proc = exe.run_program(
cmd=['sudo', 'sfdisk', self.destination.path],
stdin=_f,
check=False,
)
if proc.returncode != 0:
std.print_error('Error(s) encoundtered while formatting destination')
raise std.GenericAbort()
# Update settings
settings['Needs Format'] = False
self._save_settings(settings)
def retry_all_passes(self):
"""Prep block_pairs for a retry recovery attempt."""
bad_statuses = ('*', '/', '-')
LOG.warning('Updating block_pairs for retry')
# Update all block_pairs
for pair in self.block_pairs:
map_data = []
# Reset status strings
for name in pair.status.keys():
pair.status[name] = 'Pending'
# Mark all non-trimmed, non-scraped, and bad areas as non-tried
with open(pair.map_path, 'r', encoding='utf-8') as _f:
for line in _f.readlines():
line = line.strip()
if line.startswith('0x') and line.endswith(bad_statuses):
line = f'{line[:-1]}?'
map_data.append(line)
# Save updated map
with open(pair.map_path, 'w', encoding='utf-8') as _f:
_f.write('\n'.join(map_data))
# Reinitialize status
pair.set_initial_status()
def safety_check_destination(self):
"""Run safety checks for destination and abort if necessary."""
try:
self.destination.safety_checks()
except hw_obj.CriticalHardwareError as err:
std.print_error(
f'Critical error(s) detected for: {self.destination.path}',
)
raise std.GenericAbort() from err
def safety_check_size(self):
"""Run size safety check and abort if necessary."""
required_size = sum(pair.size for pair in self.block_pairs)
settings = self._load_settings() if self.mode == 'Clone' else {}
# Increase required_size if necessary
if self.mode == 'Clone' and settings.get('Needs Format', False):
if settings['Table Type'] == 'GPT':
# Below is the size calculation for the GPT
# 1 LBA for the protective MBR
# 33 LBAs each for the primary and backup GPT tables
# Source: https://en.wikipedia.org/wiki/GUID_Partition_Table
required_size += (1 + 33 + 33) * self.destination.details['phy-sec']
if settings['Create Boot Partition']:
# 384MiB EFI System Partition and a 16MiB MS Reserved partition
required_size += (384 + 16) * 1024**2
else:
# MBR only requires one LBA but adding a full 4096 bytes anyway
required_size += 4096
if settings['Create Boot Partition']:
# 100MiB System Reserved partition
required_size += 100 * 1024**2
# Reduce required_size if necessary
if self.mode == 'Image':
for pair in self.block_pairs:
if pair.destination.exists():
# NOTE: This uses the "max space" of the destination
# i.e. not the apparent size which is smaller for sparse files
# While this can result in an out-of-space error it's better
# than nothing.
required_size -= pair.destination.stat().st_size
# Check destination size
if self.mode == 'Clone':
destination_size = self.destination.details['size']
error_msg = 'A larger destination disk is required'
else:
# NOTE: Adding an extra 5% here to better ensure it will fit
destination_size = psutil.disk_usage(self.destination).free
destination_size *= 1.05
error_msg = 'Not enough free space on the destination'
if required_size > destination_size:
std.print_error(error_msg)
raise std.GenericAbort()
def save_debug_reports(self):
"""Save debug reports to disk."""
LOG.info('Saving debug reports')
debug_dir = pathlib.Path(f'{self.log_dir}/debug')
if not debug_dir.exists():
debug_dir.mkdir()
# State (self)
std.save_pickles({'state': self}, debug_dir)
with open(f'{debug_dir}/state.report', 'a', encoding='utf-8') as _f:
_f.write('[Debug report]\n')
_f.write('\n'.join(debug.generate_object_report(self)))
_f.write('\n')
# Block pairs
for _bp in self.block_pairs:
with open(
f'{debug_dir}/block_pairs.report', 'a', encoding='utf-8') as _f:
_f.write('[Debug report]\n')
_f.write('\n'.join(debug.generate_object_report(_bp)))
_f.write('\n')
def skip_pass(self, pass_name):
"""Mark block_pairs as skipped if applicable."""
for pair in self.block_pairs:
if pair.status[pass_name] == 'Pending':
pair.status[pass_name] = 'Skipped'
def update_progress_pane(self, overall_status):
"""Update progress pane."""
report = []
separator = '─────────────────────'
width = cfg.ddrescue.TMUX_SIDE_WIDTH
# Status
report.append(std.color_string(f'{"Status":^{width}}', 'BLUE'))
if 'NEEDS ATTENTION' in overall_status:
report.append(
std.color_string(f'{overall_status:^{width}}', 'YELLOW_BLINK'),
)
else:
report.append(f'{overall_status:^{width}}')
report.append(separator)
# Overall progress
if self.block_pairs:
total_rescued = self.get_rescued_size()
percent = self.get_percent_recovered()
report.append(std.color_string('Overall Progress', 'BLUE'))
report.append(
f'Rescued: {format_status_string(percent, width=width-9)}',
)
report.append(
std.color_string(
[f'{std.bytes_to_string(total_rescued, decimals=2):>{width}}'],
[get_percent_color(percent)],
),
)
report.append(separator)
# Block pair progress
for pair in self.block_pairs:
report.append(std.color_string(pair.source, 'BLUE'))
for name, status in pair.status.items():
name = name.title()
report.append(
f'{name}{format_status_string(status, width=width-len(name))}',
)
report.append(' ')
# EToC
if overall_status in ('Active', 'NEEDS ATTENTION'):
etoc = get_etoc()
report.append(separator)
report.append(std.color_string('Estimated Pass Finish', 'BLUE'))
if overall_status == 'NEEDS ATTENTION' or etoc == 'N/A':
report.append(std.color_string('N/A', 'YELLOW'))
else:
report.append(etoc)
# Write to progress file
out_path = pathlib.Path(f'{self.log_dir}/progress.out')
with open(out_path, 'w', encoding='utf-8') as _f:
_f.write('\n'.join(report))
def update_top_panes(self):
"""(Re)create top source/destination panes."""
source_exists = True
dest_exists = True
width = tmux.get_pane_size()[0]
width = int(width / 2) - 1
def _format_string(obj, width):
"""Format source/dest string using obj and width, returns str."""
string = ''
# Build base string
if isinstance(obj, hw_obj.Disk):
string = f'{obj.path} {obj.description}'
elif obj.is_dir():
string = f'{obj}/'
elif obj.is_file():
size_str = std.bytes_to_string(
obj.stat().st_size,
decimals=0,
use_binary=False)
string = f'{obj.name} {size_str}'
# Adjust for width
if len(string) > width:
if hasattr(obj, 'is_dir') and obj.is_dir():
string = f'...{string[-width+3:]}'
else:
string = f'{string[:width-3]}...'
# Done
return string
# Check source/dest existance
if self.source:
source_exists = self.source.path.exists()
if self.destination:
if isinstance(self.destination, hw_obj.Disk):
dest_exists = self.destination.path.exists()
else:
dest_exists = self.destination.exists()
# Kill destination pane
if 'Destination' in self.panes:
tmux.kill_pane(self.panes.pop('Destination'))
# Source
source_str = ' '
if self.source:
source_str = _format_string(self.source, width)
tmux.respawn_pane(
self.panes['Source'],
text=std.color_string(
['Source', '' if source_exists else ' (Missing)', '\n', source_str],
['BLUE', 'RED', None, None],
sep='',
),
)
# Destination
dest_str = ''
if self.destination:
dest_str = _format_string(self.destination, width)
self.panes['Destination'] = tmux.split_window(
percent=50,
vertical=False,
target_id=self.panes['Source'],
text=std.color_string(
['Destination', '' if dest_exists else ' (Missing)', '\n', dest_str],
['BLUE', 'RED', None, None],
sep='',
),
)
# Functions
def build_block_pair_report(block_pairs, settings):
"""Build block pair report, returns list."""
report = []
notes = []
if block_pairs:
report.append(std.color_string('Block Pairs', 'GREEN'))
else:
# Bail early
return report
# Show block pair mapping
if settings and settings['Create Boot Partition']:
if settings['Table Type'] == 'GPT':
report.append(f'{" —— ":<9} --> EFI System Partition')
report.append(f'{" —— ":<9} --> Microsoft Reserved Partition')
elif settings['Table Type'] == 'MBR':
report.append(f'{" —— ":<9} --> System Reserved')
for pair in block_pairs:
report.append(f'{pair.source.name:<9} --> {pair.destination.name}')
# Show resume messages as necessary
if settings:
if not settings['First Run']:
notes.append(
std.color_string(
['NOTE:', 'Clone settings loaded from previous run.'],
['BLUE', None],
),
)
if settings['Needs Format'] and settings['Table Type']:
msg = f'Destination will be formatted using {settings["Table Type"]}'
notes.append(
std.color_string(
['NOTE:', msg],
['BLUE', None],
),
)
if any(pair.get_rescued_size() > 0 for pair in block_pairs):
notes.append(
std.color_string(
['NOTE:', 'Resume data loaded from map file(s).'],
['BLUE', None],
),
)
# Add notes to report
if notes:
report.append(' ')
report.extend(notes)
# Done
return report
def build_ddrescue_cmd(block_pair, pass_name, settings):
"""Build ddrescue cmd using passed details, returns list."""
cmd = ['sudo', 'ddrescue']
if (block_pair.destination.is_block_device()
or block_pair.destination.is_char_device()):
cmd.append('--force')
if pass_name == 'read':
cmd.extend(['--no-trim', '--no-scrape'])
elif pass_name == 'trim':
# Allow trimming
cmd.append('--no-scrape')
elif pass_name == 'scrape':
# Allow trimming and scraping
pass
cmd.extend(settings)
cmd.append(f'--size={block_pair.size}')
if PLATFORM == 'Darwin':
# Use Raw disks if possible
for dev in (block_pair.source, block_pair.destination):
raw_dev = pathlib.Path(dev.with_name(f'r{dev.name}'))
if raw_dev.exists():
cmd.append(raw_dev)
else:
cmd.append(dev)
else:
cmd.append(block_pair.source)
cmd.append(block_pair.destination)
cmd.append(block_pair.map_path)
# Done
LOG.debug('ddrescue cmd: %s', cmd)
return cmd
def build_directory_report(path):
"""Build directory report, returns list."""
path = f'{path}/'
report = []
# Get details
if PLATFORM == 'Linux':
cmd = [
'findmnt',
'--output', 'SIZE,AVAIL,USED,FSTYPE,OPTIONS',
'--target', path,
]
proc = exe.run_program(cmd)
width = len(path) + 1
for line in proc.stdout.splitlines():
line = line.replace('\n', '')
if 'FSTYPE' in line:
line = std.color_string(f'{"PATH":<{width}}{line}', 'BLUE')
else:
line = f'{path:<{width}}{line}'
report.append(line)
else:
report.append(std.color_string('PATH', 'BLUE'))
report.append(str(path))
# Done
return report
def build_disk_report(dev):
"""Build device report, returns list."""
children = dev.details.get('children', [])
report = []
# Get widths
widths = {
'fstype': max(6, len(str(dev.details.get('fstype', '')))),
'label': max(5, len(str(dev.details.get('label', '')))),
'name': max(4, len(dev.path.name)),
}
for child in children:
widths['fstype'] = max(widths['fstype'], len(str(child['fstype'])))
widths['label'] = max(widths['label'], len(str(child['label'])))
widths['name'] = max(
widths['name'],
len(child['name'].replace('/dev/', '')),
)
widths = {k: v+1 for k, v in widths.items()}
# Disk details
report.append(f'{dev.path.name} {dev.description}')
report.append(' ')
dev_fstype = dev.details.get('fstype', '')
dev_label = dev.details.get('label', '')
dev_name = dev.path.name
dev_size = std.bytes_to_string(dev.details["size"], use_binary=False)
# Partition details
report.append(
std.color_string(
(
f'{"NAME":<{widths["name"]}}'
f'{" " if children else ""}'
f'{"SIZE":<7}'
f'{"FSTYPE":<{widths["fstype"]}}'
f'{"LABEL":<{widths["label"]}}'
),
'BLUE',
),
)
report.append(
f'{dev_name if dev_name else "":<{widths["name"]}}'
f'{" " if children else ""}'
f'{dev_size:>6} '
f'{dev_fstype if dev_fstype else "":<{widths["fstype"]}}'
f'{dev_label if dev_label else "":<{widths["label"]}}'
)
for child in children:
fstype = child['fstype']
label = child['label']
name = child['name'].replace('/dev/', '')
size = std.bytes_to_string(child["size"], use_binary=False)
report.append(
f'{name if name else "":<{widths["name"]}}'
f'{size:>6} '
f'{fstype if fstype else "":<{widths["fstype"]}}'
f'{label if label else "":<{widths["label"]}}'
)
# Indent children
if len(children) > 1:
report = [
*report[:4],
*[f'├─{line}' for line in report[4:-1]],
f'└─{report[-1]}',
]
elif len(children) == 1:
report[-1] = f'└─{report[-1]}'
# Done
return report
def build_main_menu():
"""Build main menu, returns wk.std.Menu."""
menu = std.Menu(title=std.color_string('ddrescue TUI: Main Menu', 'GREEN'))
menu.separator = ' '
# Add actions, options, etc
for action in MENU_ACTIONS:
if not (PLATFORM == 'Darwin' and 'Detect drives' in action):
menu.add_action(action)
for toggle, selected in MENU_TOGGLES.items():
menu.add_toggle(toggle, {'Selected': selected})
# Done
return menu
def build_object_report(obj):
"""Build object report, returns list."""
report = []
# Get details based on object given
if hasattr(obj, 'is_dir') and obj.is_dir():
# Directory report
report = build_directory_report(obj)
else:
# Device report
report = build_disk_report(obj)
# Done
return report
def build_settings_menu(silent=True):
"""Build settings menu, returns wk.std.Menu."""
title_text = [
std.color_string('ddrescue TUI: Expert Settings', 'GREEN'),
' ',
std.color_string(
['These settings can cause', 'MAJOR DAMAGE', 'to drives'],
['YELLOW', 'RED', 'YELLOW'],
),
'Please read the manual before making changes',
]
menu = std.Menu(title='\n'.join(title_text))
menu.separator = ' '
preset = 'Default'
if not silent:
# Ask which preset to use
print(f'Available ddrescue presets: {" / ".join(SETTING_PRESETS)}')
preset = std.choice(SETTING_PRESETS, 'Please select a preset:')
# Fix selection
for _p in SETTING_PRESETS:
if _p.startswith(preset):
preset = _p
# Add default settings
menu.add_action('Load Preset')
menu.add_action('Main Menu')
for name, details in DDRESCUE_SETTINGS['Default'].items():
menu.add_option(name, details.copy())
# Update settings using preset
if preset != 'Default':
for name, details in DDRESCUE_SETTINGS[preset].items():
menu.options[name].update(details.copy())
# Done
return menu
def build_sfdisk_partition_line(table_type, dev_path, size, details):
"""Build sfdisk partition line using passed details, returns str."""
line = f'{dev_path} : size={size}'
dest_type = ''
source_filesystem = str(details.get('fstype', '')).upper()
source_table_type = ''
source_type = details.get('parttype', '')
# Set dest type
if re.match(r'^0x\w+$', source_type):
# Both source and dest are MBR
source_table_type = 'MBR'
if table_type == 'MBR':
dest_type = source_type.replace('0x', '').lower()
elif re.match(r'^\w{8}-\w{4}-\w{4}-\w{4}-\w{12}$', source_type):
# Source is a GPT type
source_table_type = 'GPT'
if table_type == 'GPT':
dest_type = source_type.upper()
if not dest_type:
# Assuming changing table types, set based on FS
if source_filesystem in cfg.ddrescue.PARTITION_TYPES.get(table_type, {}):
dest_type = cfg.ddrescue.PARTITION_TYPES[table_type][source_filesystem]
line += f', type={dest_type}'
# Safety Check
if not dest_type:
std.print_error(f'Failed to determine partition type for: {dev_path}')
raise std.GenericAbort()
# Add extra details
if details.get('partlabel', ''):
line += f', name="{details["partlabel"]}"'
if details.get('partuuid', '') and source_table_type == table_type:
# Only add UUID if source/dest table types match
line += f', uuid={details["partuuid"].upper()}'
# Done
return line
def check_destination_health(destination):
"""Check destination health, returns str."""
result = ''
# Bail early
if not isinstance(destination, hw_obj.Disk):
# Return empty string
return result
# Run safety checks
try:
destination.safety_checks()
except hw_obj.CriticalHardwareError:
result = 'Critical hardware error detected on destination'
except hw_obj.SMARTSelfTestInProgressError:
result = 'SMART self-test in progress on destination'
except hw_obj.SMARTNotSupportedError:
pass
# Done
return result
def clean_working_dir(working_dir):
"""Clean working directory to ensure a fresh recovery session.
NOTE: Data from previous sessions will be preserved
in a backup directory.
"""
backup_dir = pathlib.Path(f'{working_dir}/prev')
backup_dir = io.non_clobber_path(backup_dir)
backup_dir.mkdir()
# Move settings, maps, etc to backup_dir
for entry in os.scandir(working_dir):
if entry.name.endswith(('.dd', '.json', '.map')):
new_path = f'{backup_dir}/{entry.name}'
new_path = io.non_clobber_path(new_path)
shutil.move(entry.path, new_path)
def format_status_string(status, width):
"""Format colored status string, returns str."""
color = None
percent = -1
status_str = str(status)
# Check if status is percentage
try:
percent = float(status_str)
except ValueError:
# Assuming status is text
pass
# Format status
if percent >= 0:
# Percentage
color = get_percent_color(percent)
status_str = f'{percent:{width-2}.2f} %'
if '100.00' in status_str and percent < 100:
# Always round down to 99.99%
LOG.warning('Rounding down to 99.99 from %s', percent)
status_str = f'{"99.99 %":>{width}}'
else:
# Text
color = STATUS_COLORS.get(status_str, None)
status_str = f'{status_str:>{width}}'
# Add color if necessary
if color:
status_str = std.color_string(status_str, color)
# Done
return status_str
def fstype_is_ok(path, map_dir=False):
"""Check if filesystem type is acceptable, returns bool."""
is_ok = False
fstype = None
# Get fstype
if PLATFORM == 'Darwin':
# Check all parent dirs until a mountpoint is found
test_path = pathlib.Path(path)
while test_path:
fstype = get_fstype_macos(test_path)
if fstype != 'UNKNOWN':
break
fstype = None
test_path = test_path.parent
elif PLATFORM == 'Linux':
cmd = [
'findmnt',
'--noheadings',
'--output', 'FSTYPE',
'--target', path,
]
proc = exe.run_program(cmd, check=False)
fstype = proc.stdout
fstype = fstype.strip().lower()
# Check fstype
if map_dir:
is_ok = RECOMMENDED_MAP_FSTYPES.match(fstype)
else:
is_ok = RECOMMENDED_FSTYPES.match(fstype)
# Done
return is_ok
def get_ddrescue_settings(settings_menu):
"""Get ddrescue settings from menu selections, returns list."""
settings = []
# Check menu selections
for name, details in settings_menu.options.items():
if details['Selected']:
if 'Value' in details:
settings.append(f'{name}={details["Value"]}')
else:
settings.append(name)
# Done
return settings
def get_etoc():
"""Get EToC from ddrescue output, returns str."""
delta = None
delta_dict = {}
etoc = 'Unknown'
now = datetime.datetime.now(tz=TIMEZONE)
output = tmux.capture_pane()
# Search for EToC delta
matches = re.findall(r'remaining time:.*$', output, re.MULTILINE)
if matches:
match = REGEX_REMAINING_TIME.search(matches[-1])
if match.group('na'):
etoc = 'N/A'
else:
for key in ('days', 'hours', 'minutes', 'seconds'):
delta_dict[key] = match.group(key)
delta_dict = {k: int(v) if v else 0 for k, v in delta_dict.items()}
delta = datetime.timedelta(**delta_dict)
# Calc EToC if delta found
if delta:
etoc_datetime = now + delta
etoc = etoc_datetime.strftime('%Y-%m-%d %H:%M %Z')
# Done
return etoc
def get_fstype_macos(path):
"""Get fstype for path under macOS, returns str."""
fstype = 'UNKNOWN'
proc = exe.run_program(['mount'], check=False)
# Bail early
if proc.returncode:
return fstype
# Parse output
match = re.search(rf'{path} \((\w+)', proc.stdout)
if match:
fstype = match.group(1)
# Done
return fstype
def get_object(path):
"""Get object based on path, returns obj."""
obj = None
# Bail early
if not path:
return obj
# Check path
path = pathlib.Path(path).resolve()
if path.is_block_device() or path.is_char_device():
obj = hw_obj.Disk(path)
# Child/Parent check
parent = obj.details['parent']
if parent:
std.print_warning(f'"{obj.path}" is a child device')
if std.ask(f'Use parent device "{parent}" instead?'):
obj = hw_obj.Disk(parent)
elif path.is_dir():
obj = path
elif path.is_file():
# Assuming file is a raw image, mounting
loop_path = mount_raw_image(path)
obj = hw_obj.Disk(loop_path)
# Abort if obj not set
if not obj:
std.print_error(f'Invalid source/dest path: {path}')
raise std.GenericAbort()
# Done
return obj
def get_partition_separator(name):
"""Get partition separator based on device name, returns str."""
separator = ''
if re.search(r'(loop|mmc|nvme)', name, re.IGNORECASE):
separator = 'p'
return separator
def get_percent_color(percent):
"""Get color based on percentage, returns str."""
color = None
if percent > 100:
color = 'PURPLE'
elif percent >= 99:
color = 'GREEN'
elif percent >= 90:
color = 'YELLOW'
elif percent > 0:
color = 'RED'
# Done
return color
def get_table_type(disk):
"""Get disk partition table type, returns str.
NOTE: If resulting table type is not GPT or MBR
then an exception is raised.
"""
table_type = str(disk.details.get('pttype', '')).upper()
table_type = table_type.replace('DOS', 'MBR')
# Check type
if table_type not in ('GPT', 'MBR'):
std.print_error(f'Unsupported partition table type: {table_type}')
raise std.GenericAbort()
# Done
return table_type
def get_working_dir(mode, destination, force_local=False):
"""Get working directory using mode and destination, returns path."""
ticket_id = None
working_dir = None
# Set ticket ID
while ticket_id is None:
ticket_id = std.input_text(
prompt='Please enter ticket ID:',
allow_empty_response=False,
)
ticket_id = ticket_id.replace(' ', '_')
if not re.match(r'^\d+', ticket_id):
ticket_id = None
# Use preferred path if possible
if mode == 'Image':
try:
path = pathlib.Path(destination).resolve()
except TypeError as err:
std.print_error(f'Invalid destination: {destination}')
raise std.GenericAbort() from err
if path.exists() and fstype_is_ok(path, map_dir=False):
working_dir = path
elif mode == 'Clone' and not force_local:
std.print_info('Mounting backup shares...')
net.mount_backup_shares(read_write=True)
for server in cfg.net.BACKUP_SERVERS:
path = pathlib.Path(
f'/{"Volumes" if PLATFORM == "Darwin" else "Backups"}/{server}',
)
if path.exists() and fstype_is_ok(path, map_dir=True):
# Acceptable path found
working_dir = path
break
# Default to current dir if necessary
if not working_dir:
LOG.error('Failed to set preferred working directory')
working_dir = pathlib.Path(os.getcwd())
# Set subdir using ticket ID
if mode == 'Clone':
working_dir = working_dir.joinpath(ticket_id)
# Create directory
working_dir.mkdir(parents=True, exist_ok=True)
os.chdir(working_dir)
# Done
LOG.info('Set working directory to: %s', working_dir)
return working_dir
def is_missing_source_or_destination(state):
"""Check if source or destination dissapeared, returns bool."""
missing = False
items = {
'Source': state.source,
'Destination': state.destination,
}
# Check items
for name, item in items.items():
if not item:
continue
if hasattr(item, 'path'):
if not item.path.exists():
missing = True
std.print_error(f'{name} disappeared')
elif hasattr(item, 'exists'):
if not item.exists():
missing = True
std.print_error(f'{name} disappeared')
else:
LOG.error('Unknown %s type: %s', name, item)
# Update top panes
state.update_top_panes()
# Done
return missing
def source_or_destination_changed(state):
"""Verify the source and destination objects are still valid."""
changed = False
# Compare objects
for obj in (state.source, state.destination):
if not obj:
changed = True
elif hasattr(obj, 'exists'):
# Assuming dest path
changed = changed or not obj.exists()
elif isinstance(obj, hw_obj.Disk):
compare_dev = hw_obj.Disk(obj.path)
for key in ('model', 'serial'):
changed = changed or obj.details[key] != compare_dev.details[key]
# Update top panes
state.update_top_panes()
# Done
if changed:
std.print_error('Source and/or Destination changed')
return changed
def main():
# pylint: disable=too-many-branches
"""Main function for ddrescue TUI."""
args = docopt(DOCSTRING)
log.update_log_path(dest_name='ddrescue-TUI', timestamp=True)
# Check if running inside tmux
if 'TMUX' not in os.environ:
LOG.error('tmux session not found')
raise RuntimeError('tmux session not found')
# Init
atexit.register(tmux.kill_all_panes)
main_menu = build_main_menu()
settings_menu = build_settings_menu()
state = State()
try:
state.init_recovery(args)
except (FileNotFoundError, std.GenericAbort):
is_missing_source_or_destination(state)
std.abort()
# Show menu
while True:
selection = main_menu.advanced_select()
# Change settings
if 'Change settings' in selection[0]:
while True:
selection = settings_menu.settings_select()
if 'Load Preset' in selection:
# Rebuild settings menu using preset
settings_menu = build_settings_menu(silent=False)
else:
break
# Detect drives
if 'Detect drives' in selection[0]:
std.clear_screen()
std.print_warning(DETECT_DRIVES_NOTICE)
if std.ask('Are you sure you proceed?'):
std.print_standard('Forcing controllers to rescan for devices...')
cmd = 'echo "- - -" | sudo tee /sys/class/scsi_host/host*/scan'
exe.run_program(cmd, check=False, shell=True)
if source_or_destination_changed(state):
std.abort()
# Start recovery
if 'Start' in selection:
std.clear_screen()
run_recovery(state, main_menu, settings_menu, dry_run=args['--dry-run'])
# Quit
if 'Quit' in selection:
total_percent = state.get_percent_recovered()
if total_percent == 100:
break
# Recovey < 100%
std.print_warning('Recovery is less than 100%')
if std.ask('Are you sure you want to quit?'):
break
# Save results to log
LOG.info('')
for line in state.generate_report():
LOG.info(' %s', std.strip_colors(line))
def mount_raw_image(path):
"""Mount raw image using OS specific methods, returns pathlib.Path."""
loopback_path = None
if PLATFORM == 'Darwin':
loopback_path = mount_raw_image_macos(path)
elif PLATFORM == 'Linux':
loopback_path = mount_raw_image_linux(path)
# Check
if not loopback_path:
std.print_error(f'Failed to mount image: {path}')
# Register unmount atexit
atexit.register(unmount_loopback_device, loopback_path)
# Done
return loopback_path
def mount_raw_image_linux(path):
"""Mount raw image using losetup, returns pathlib.Path."""
loopback_path = None
# Mount using losetup
cmd = [
'sudo',
'losetup',
'--find',
'--partscan',
'--show',
path,
]
proc = exe.run_program(cmd, check=False)
# Check result
if proc.returncode == 0:
loopback_path = proc.stdout.strip()
# Done
return loopback_path
def mount_raw_image_macos(path):
"""Mount raw image using hdiutil, returns pathlib.Path."""
loopback_path = None
plist_data = {}
# Mount using hdiutil
# plistdata['system-entities'][{}...]
cmd = [
'hdiutil', 'attach',
'-imagekey', 'diskimage-class=CRawDiskImage',
'-nomount',
'-plist',
'-readonly',
path,
]
proc = exe.run_program(cmd, check=False, encoding=None, errors=None)
# Check result
try:
plist_data = plistlib.loads(proc.stdout)
except plistlib.InvalidFileException:
return None
for dev in plist_data.get('system-entities', []):
dev_path = dev.get('dev-entry', '')
if re.match(r'^/dev/disk\d+$', dev_path):
loopback_path = dev_path
# Done
return loopback_path
def run_ddrescue(state, block_pair, pass_name, settings, dry_run=True):
# pylint: disable=too-many-statements
"""Run ddrescue using passed settings."""
cmd = build_ddrescue_cmd(block_pair, pass_name, settings)
poweroff_source_after_idle = True
state.update_progress_pane('Active')
std.clear_screen()
warning_message = ''
def _poweroff_source_drive(idle_minutes):
"""Power off source drive after a while."""
source_dev = state.source.path
# Bail early
if PLATFORM == 'Darwin':
return
# Sleep
i = 0
while i < idle_minutes*60:
if not poweroff_source_after_idle:
# Countdown canceled, exit without powering-down drives
return
if i % 600 == 0 and i > 0:
if i == 600:
std.print_standard(' ', flush=True)
std.print_warning(
f'Powering off source in {int((idle_minutes*60-i)/60)} minutes...',
)
std.sleep(5)
i += 5
# Power off drive
cmd = ['sudo', 'hdparm', '-Y', source_dev]
proc = exe.run_program(cmd, check=False)
if proc.returncode:
std.print_error(f'Failed to poweroff source {source_dev}')
else:
std.print_warning(f'Powered off source {source_dev}')
std.print_standard(
'Press Enter to return to main menu...', end='', flush=True,
)
def _update_smart_pane():
"""Update SMART pane every 30 seconds."""
state.source.update_smart_details()
now = datetime.datetime.now(tz=TIMEZONE).strftime('%Y-%m-%d %H:%M %Z')
with open(f'{state.log_dir}/smart.out', 'w', encoding='utf-8') as _f:
_f.write(
std.color_string(
['SMART Attributes', f'Updated: {now}\n'],
['BLUE', 'YELLOW'],
sep='\t\t',
),
)
_f.write('\n'.join(state.source.generate_report(header=False)))
# Dry run
if dry_run:
LOG.info('ddrescue cmd: %s', cmd)
return
# Start ddrescue
proc = exe.popen_program(cmd)
# ddrescue loop
_i = 0
while True:
if _i % 30 == 0:
# Update SMART pane
_update_smart_pane()
# Check destination
warning_message = check_destination_health(state.destination)
if warning_message:
# Error detected on destination, stop recovery
exe.stop_process(proc)
std.print_error(warning_message)
break
if _i % 60 == 0:
# Clear ddrescue pane
tmux.clear_pane()
_i += 1
# Update progress
block_pair.update_progress(pass_name)
state.update_progress_pane('Active')
# Check if complete
try:
proc.wait(timeout=1)
break
except KeyboardInterrupt:
# Wait a bit to let ddrescue exit safely
LOG.warning('ddrescue stopped by user')
warning_message = 'Aborted'
std.sleep(2)
exe.stop_process(proc, graceful=False)
break
except subprocess.TimeoutExpired:
# Continue to next loop to update panes
pass
else:
# Done
std.sleep(1)
break
# Update progress
# NOTE: Using 'Active' here to avoid flickering between block pairs
block_pair.update_progress(pass_name)
state.update_progress_pane('Active')
# Check result
if proc.poll():
# True if return code is non-zero (poll() returns None if still running)
poweroff_thread = exe.start_thread(
_poweroff_source_drive,
[cfg.ddrescue.DRIVE_POWEROFF_TIMEOUT],
)
warning_message = 'Error(s) encountered, see message above'
state.update_top_panes()
if warning_message:
print(' ')
print(' ')
std.print_error('DDRESCUE PROCESS HALTED')
print(' ')
std.print_warning(warning_message)
# Needs attention?
if str(proc.poll()) != '0':
state.update_progress_pane('NEEDS ATTENTION')
std.pause('Press Enter to return to main menu...')
# Stop source poweroff countdown
std.print_standard('Stopping device poweroff countdown...', flush=True)
poweroff_source_after_idle = False
poweroff_thread.join()
# Done
raise std.GenericAbort()
def run_recovery(state, main_menu, settings_menu, dry_run=True):
# pylint: disable=too-many-branches
"""Run recovery passes."""
atexit.register(state.save_debug_reports)
attempted_recovery = False
auto_continue = False
# Bail early
if is_missing_source_or_destination(state):
std.print_standard('')
std.pause('Press Enter to return to main menu...')
return
if source_or_destination_changed(state):
std.print_standard('')
std.abort()
# Get settings
for name, details in main_menu.toggles.items():
if 'Auto continue' in name and details['Selected']:
auto_continue = True
if 'Retry' in name and details['Selected']:
details['Selected'] = False
state.retry_all_passes()
settings = get_ddrescue_settings(settings_menu)
# Start SMART/Journal
state.panes['SMART'] = tmux.split_window(
behind=True, lines=12, vertical=True,
watch_file=f'{state.log_dir}/smart.out',
)
if PLATFORM != 'Darwin':
state.panes['Journal'] = tmux.split_window(
lines=4, vertical=True, cmd='journalctl --dmesg --follow',
)
# Run pass(es)
for pass_name in ('read', 'trim', 'scrape'):
abort = False
# Skip to next pass
if state.pass_complete(pass_name):
# NOTE: This bypasses auto_continue
state.skip_pass(pass_name)
continue
# Run ddrescue
for pair in state.block_pairs:
if not pair.pass_complete(pass_name):
attempted_recovery = True
state.mark_started()
try:
run_ddrescue(state, pair, pass_name, settings, dry_run=dry_run)
except (FileNotFoundError, KeyboardInterrupt, std.GenericAbort):
is_missing_source_or_destination(state)
abort = True
break
# Continue or return to menu
all_complete = state.pass_complete(pass_name)
all_above_threshold = state.pass_above_threshold(pass_name)
if abort or not (all_complete and all_above_threshold and auto_continue):
LOG.warning('Recovery halted')
break
# Stop SMART/Journal
for pane in ('SMART', 'Journal'):
if pane in state.panes:
tmux.kill_pane(state.panes.pop(pane))
# Show warning if nothing was done
if not attempted_recovery:
std.print_warning('No actions performed')
std.print_standard(' ')
std.pause('Press Enter to return to main menu...')
# Done
state.save_debug_reports()
atexit.unregister(state.save_debug_reports)
state.update_progress_pane('Idle')
def select_disk(prompt, skip_disk=None):
"""Select disk from list, returns Disk()."""
std.print_info('Scanning disks...')
disks = hw_obj.get_disks()
menu = std.Menu(
title=std.color_string(f'ddrescue TUI: {prompt} Selection', 'GREEN'),
)
menu.disabled_str = 'Already selected'
menu.separator = ' '
menu.add_action('Quit')
for disk in disks:
disable_option = False
size = disk.details["size"]
# Check if option should be disabled
if skip_disk:
parent = skip_disk.details.get('parent', None)
if (disk.path.samefile(skip_disk.path)
or (parent and disk.path.samefile(parent))):
disable_option = True
# Add to menu
menu.add_option(
name=(
f'{str(disk.path):<12} '
f'{disk.details["bus"]:<5} '
f'{std.bytes_to_string(size, decimals=1, use_binary=False):<8} '
f'{disk.details["model"]} '
f'{disk.details["serial"]}'
),
details={'Disabled': disable_option, 'Object': disk},
)
# Get selection
selection = menu.simple_select()
if 'Quit' in selection:
raise std.GenericAbort()
# Done
return selection[-1]['Object']
def select_disk_parts(prompt, disk):
"""Select disk parts from list, returns list of Disk()."""
title = std.color_string('ddrescue TUI: Partition Selection', 'GREEN')
title += f'\n\nDisk: {disk.path} {disk.description}'
menu = std.Menu(title)
menu.separator = ' '
menu.add_action('All')
menu.add_action('None')
menu.add_action('Proceed', {'Separator': True})
menu.add_action('Quit')
object_list = []
def _select_parts(menu):
"""Loop over selection menu until at least one partition selected."""
while True:
selection = menu.advanced_select(
f'Please select the parts to {prompt.lower()}: ',
)
if 'All' in selection:
for option in menu.options.values():
option['Selected'] = True
elif 'None' in selection:
for option in menu.options.values():
option['Selected'] = False
elif 'Proceed' in selection:
if any(option['Selected'] for option in menu.options.values()):
# At least one partition/device selected/device selected
break
elif 'Quit' in selection:
raise std.GenericAbort()
# Bail early if running under macOS
if PLATFORM == 'Darwin':
return [disk]
# Bail early if child device selected
if disk.details.get('parent', False):
return [disk]
# Add parts
whole_disk_str = f'{str(disk.path):<14} (Whole device)'
for part in disk.details.get('children', []):
size = part["size"]
name = (
f'{str(part["path"]):<14} '
f'({std.bytes_to_string(size, decimals=1, use_binary=False):>6})'
)
menu.add_option(name, details={'Selected': True, 'Path': part['path']})
# Add whole disk if necessary
if not menu.options:
menu.add_option(whole_disk_str, {'Selected': True, 'Path': disk.path})
menu.title += '\n\n'
menu.title += std.color_string(' No partitions detected.', 'YELLOW')
# Get selection
_select_parts(menu)
# Build list of Disk() object_list
for option in menu.options.values():
if option['Selected']:
object_list.append(option['Path'])
# Check if whole disk selected
if len(object_list) == len(disk.details.get('children', [])):
# NOTE: This is not true if the disk has no partitions
msg = f'Preserve partition table and unused space in {prompt.lower()}?'
if std.ask(msg):
# Replace part list with whole disk obj
object_list = [disk.path]
# Convert object_list to hw_obj.Disk() objects
print(' ')
std.print_info('Getting disk/partition details...')
object_list = [hw_obj.Disk(path) for path in object_list]
# Done
return object_list
def select_path(prompt):
"""Select path, returns pathlib.Path."""
invalid = False
menu = std.Menu(
title=std.color_string(f'ddrescue TUI: {prompt} Path Selection', 'GREEN'),
)
menu.separator = ' '
menu.add_action('Quit')
menu.add_option('Current directory')
menu.add_option('Enter manually')
path = None
# Make selection
selection = menu.simple_select()
if 'Current directory' in selection:
path = os.getcwd()
elif 'Enter manually' in selection:
path = std.input_text('Please enter path: ')
elif 'Quit' in selection:
raise std.GenericAbort()
# Check
try:
path = pathlib.Path(path).resolve()
except TypeError:
invalid = True
if invalid or not path.is_dir():
std.print_error(f'Invalid path: {path}')
raise std.GenericAbort()
# Done
return path
def set_mode(docopt_args):
"""Set mode from docopt_args or user selection, returns str."""
mode = None
# Check docopt_args
if docopt_args['clone']:
mode = 'Clone'
elif docopt_args['image']:
mode = 'Image'
# Ask user if necessary
if not mode:
answer = std.choice(['C', 'I'], 'Are we cloning or imaging?')
if answer == 'C':
mode = 'Clone'
else:
mode = 'Image'
# Done
return mode
def unmount_loopback_device(path):
"""Unmount loopback device using OS specific methods."""
cmd = []
# Build OS specific cmd
if PLATFORM == 'Darwin':
cmd = ['hdiutil', 'detach', path]
elif PLATFORM == 'Linux':
cmd = ['sudo', 'losetup', '--detach', path]
# Unmount loopback device
exe.run_program(cmd, check=False)
if __name__ == '__main__':
print("This file is not meant to be called directly.")
```
#### File: wk/kit/tools.py
```python
from datetime import datetime, timedelta
import logging
import pathlib
import platform
import requests
from wk.cfg.main import ARCHIVE_PASSWORD
from wk.cfg.sources import DOWNLOAD_FREQUENCY, SOURCES
from wk.exe import popen_program, run_program
from wk.std import GenericError
# STATIC VARIABLES
ARCH = '64' if platform.architecture()[0] == '64bit' else '32'
LOG = logging.getLogger(__name__)
# "GLOBAL" VARIABLES
CACHED_DIRS = {}
# Functions
def download_file(out_path, source_url, as_new=False, overwrite=False):
"""Download a file using requests, returns pathlib.Path."""
out_path = pathlib.Path(out_path).resolve()
name = out_path.name
download_failed = None
download_msg = f'Downloading {name}...'
if as_new:
out_path = out_path.with_suffix(f'{out_path.suffix}.new')
print(download_msg, end='', flush=True)
# Avoid clobbering
if out_path.exists() and not overwrite:
raise FileExistsError(f'Refusing to clobber {out_path}')
# Create destination directory
out_path.parent.mkdir(parents=True, exist_ok=True)
# Request download
try:
response = requests.get(source_url, stream=True)
except requests.RequestException as _err:
download_failed = _err
else:
if not response.ok:
download_failed = response
# Download failed
if download_failed:
LOG.error('Failed to download file: %s', download_failed)
raise GenericError(f'Failed to download file: {name}')
# Write to file
with open(out_path, 'wb') as _f:
for chunk in response.iter_content(chunk_size=128):
_f.write(chunk)
# Done
print(f'\033[{len(download_msg)}D\033[0K', end='', flush=True)
return out_path
def download_tool(folder, name, suffix=None):
"""Download tool."""
name_arch = f'{name}{ARCH}'
out_path = get_tool_path(folder, name, check=False, suffix=suffix)
up_to_date = False
# Check if tool is up to date
try:
ctime = datetime.fromtimestamp(out_path.stat().st_ctime)
up_to_date = datetime.now() - ctime < timedelta(days=DOWNLOAD_FREQUENCY)
except FileNotFoundError:
# Ignore - we'll download it below
pass
if out_path.exists() and up_to_date:
LOG.info('Skip downloading up-to-date tool: %s', name)
return
# Get ARCH specific URL if available
if name_arch in SOURCES:
source_url = SOURCES[name_arch]
out_path = out_path.with_name(f'{name_arch}{out_path.suffix}')
else:
source_url = SOURCES[name]
# Download
LOG.info('Downloading tool: %s', name)
try:
new_file = download_file(out_path, source_url, as_new=True)
new_file.replace(out_path)
except GenericError:
# Ignore as long as there's still a version present
if not out_path.exists():
raise
def extract_archive(archive, out_path, *args, mode='x', silent=True):
"""Extract an archive to out_path."""
out_path = pathlib.Path(out_path).resolve()
out_path.parent.mkdir(parents=True, exist_ok=True)
cmd = [get_tool_path('7-Zip', '7z'), mode, archive, f'-o{out_path}', *args]
if silent:
cmd.extend(['-bso0', '-bse0', '-bsp0'])
# Extract
run_program(cmd)
def extract_tool(folder):
"""Extract tool."""
extract_archive(
find_kit_dir('.cbin').joinpath(folder).with_suffix('.7z'),
find_kit_dir('.bin').joinpath(folder),
'-aos', f'-p{ARCHIVE_PASSWORD}',
)
def find_kit_dir(name=None):
"""Find folder in kit, returns pathlib.Path.
Search is performed in the script's path and then recursively upwards.
If name is given then search for that instead."""
cur_path = pathlib.Path(__file__).resolve().parent
search = name if name else '.bin'
# Search
if name in CACHED_DIRS:
return CACHED_DIRS[name]
while not cur_path.match(cur_path.anchor):
if cur_path.joinpath(search).exists():
break
cur_path = cur_path.parent
# Check
if cur_path.match(cur_path.anchor):
raise FileNotFoundError(f'Failed to find kit dir, {name=}')
if name:
cur_path = cur_path.joinpath(name)
# Done
CACHED_DIRS[name] = cur_path
return cur_path
def get_tool_path(folder, name, check=True, suffix=None):
"""Get tool path, returns pathlib.Path"""
bin_dir = find_kit_dir('.bin')
if not suffix:
suffix = 'exe'
name_arch = f'{name}{ARCH}'
# "Search"
tool_path = bin_dir.joinpath(f'{folder}/{name_arch}.{suffix}')
if not (tool_path.exists() or name_arch in SOURCES):
# Use "default" path instead
tool_path = tool_path.with_name(f'{name}.{suffix}')
# Missing?
if check and not tool_path.exists():
raise FileNotFoundError(f'Failed to find tool, {folder=}, {name=}')
# Done
return tool_path
def run_tool(
folder, name, *run_args,
cbin=False, cwd=False, download=False, popen=False,
**run_kwargs,
):
"""Run tool from the kit or the Internet, returns proc obj.
proc will be either subprocess.CompletedProcess or subprocess.Popen."""
proc = None
# Extract from .cbin
if cbin:
extract_tool(folder)
# Download tool
if download:
download_tool(folder, name)
# Run
tool_path = get_tool_path(folder, name)
cmd = [tool_path, *run_args]
if cwd:
run_kwargs['cwd'] = tool_path.parent
if popen:
proc = popen_program(cmd, **run_kwargs)
else:
proc = run_program(cmd, check=False, **run_kwargs)
# Done
return proc
if __name__ == '__main__':
print("This file is not meant to be called directly.")
```
#### File: wk/os/win.py
```python
import ctypes
import logging
import os
import pathlib
import platform
from contextlib import suppress
import psutil
try:
import winreg
except ImportError as err:
if platform.system() == 'Windows':
raise err
from wk.borrowed import acpi
from wk.cfg.main import KIT_NAME_FULL
from wk.cfg.windows_builds import (
OLDEST_SUPPORTED_BUILD,
OUTDATED_BUILD_NUMBERS,
WINDOWS_BUILDS,
)
from wk.exe import get_json_from_command, run_program
from wk.kit.tools import find_kit_dir
from wk.std import (
GenericError,
GenericWarning,
bytes_to_string,
color_string,
sleep,
)
# STATIC VARIABLES
LOG = logging.getLogger(__name__)
ARCH = '64' if platform.architecture()[0] == '64bit' else '32'
CONEMU = 'ConEmuPID' in os.environ
KNOWN_DATA_TYPES = {
'BINARY': winreg.REG_BINARY,
'DWORD': winreg.REG_DWORD,
'DWORD_LITTLE_ENDIAN': winreg.REG_DWORD_LITTLE_ENDIAN,
'DWORD_BIG_ENDIAN': winreg.REG_DWORD_BIG_ENDIAN,
'EXPAND_SZ': winreg.REG_EXPAND_SZ,
'LINK': winreg.REG_LINK,
'MULTI_SZ': winreg.REG_MULTI_SZ,
'NONE': winreg.REG_NONE,
'QWORD': winreg.REG_QWORD,
'QWORD_LITTLE_ENDIAN': winreg.REG_QWORD_LITTLE_ENDIAN,
'SZ': winreg.REG_SZ,
}
KNOWN_HIVES = {
'HKCR': winreg.HKEY_CLASSES_ROOT,
'HKCU': winreg.HKEY_CURRENT_USER,
'HKLM': winreg.HKEY_LOCAL_MACHINE,
'HKU': winreg.HKEY_USERS,
'HKEY_CLASSES_ROOT': winreg.HKEY_CLASSES_ROOT,
'HKEY_CURRENT_USER': winreg.HKEY_CURRENT_USER,
'HKEY_LOCAL_MACHINE': winreg.HKEY_LOCAL_MACHINE,
'HKEY_USERS': winreg.HKEY_USERS,
}
KNOWN_HIVE_NAMES = {
winreg.HKEY_CLASSES_ROOT: 'HKCR',
winreg.HKEY_CURRENT_USER: 'HKCU',
winreg.HKEY_LOCAL_MACHINE: 'HKLM',
winreg.HKEY_USERS: 'HKU',
}
OS_VERSION = platform.win32_ver()[0]
OS_VERSION = 8.1 if OS_VERSION == '8.1' else int(OS_VERSION)
RAM_OK = 5.5 * 1024**3 # ~6 GiB assuming a bit of shared memory
RAM_WARNING = 3.5 * 1024**3 # ~4 GiB assuming a bit of shared memory
REG_MSISERVER = r'HKLM\SYSTEM\CurrentControlSet\Control\SafeBoot\Network\MSIServer'
SLMGR = pathlib.Path(f'{os.environ.get("SYSTEMROOT")}/System32/slmgr.vbs')
# Activation Functions
def activate_with_bios():
"""Attempt to activate Windows with a key stored in the BIOS."""
# Code borrowed from https://github.com/aeruder/get_win8key
#####################################################
#script to query windows 8.x OEM key from PC firmware
#ACPI -> table MSDM -> raw content -> byte offset 56 to end
#ck, 03-Jan-2014 (<EMAIL>)
#####################################################
bios_key = None
table = b"MSDM"
# Check if activation is needed
if is_activated():
raise GenericWarning('System already activated')
# Get BIOS key
if acpi.FindAcpiTable(table) is True:
rawtable = acpi.GetAcpiTable(table)
#http://msdn.microsoft.com/library/windows/hardware/hh673514
#byte offset 36 from beginning
# = Microsoft 'software licensing data structure'
# / 36 + 20 bytes offset from beginning = Win Key
bios_key = rawtable[56:len(rawtable)].decode("utf-8")
if not bios_key:
raise GenericError('BIOS key not found.')
# Install Key
cmd = ['cscript', '//nologo', SLMGR, '/ipk', bios_key]
run_program(cmd, check=False)
sleep(5)
# Attempt activation
cmd = ['cscript', '//nologo', SLMGR, '/ato']
run_program(cmd, check=False)
sleep(5)
# Check status
if not is_activated():
raise GenericError('Activation Failed')
def get_activation_string():
"""Get activation status, returns str."""
cmd = ['cscript', '//nologo', SLMGR, '/xpr']
proc = run_program(cmd, check=False)
act_str = proc.stdout
act_str = act_str.splitlines()[1]
act_str = act_str.strip()
return act_str
def is_activated():
"""Check if Windows is activated via slmgr.vbs and return bool."""
act_str = get_activation_string()
# Check result.
return act_str and 'permanent' in act_str
# Date / Time functions
def get_timezone():
"""Get current timezone using tzutil, returns str."""
cmd = ['tzutil', '/g']
proc = run_program(cmd, check=False)
return proc.stdout
def set_timezone(zone):
"""Set current timezone using tzutil."""
cmd = ['tzutil', '/s', zone]
run_program(cmd, check=False)
# Info Functions
def check_4k_alignment(show_alert=False):
"""Check if all partitions are 4K aligned, returns book."""
cmd = ['WMIC', 'partition', 'get', 'StartingOffset']
# Check offsets
proc = run_program(cmd)
for offset in proc.stdout.splitlines():
offset = offset.strip()
if not offset.isnumeric():
continue
if int(offset) % 4096 != 0:
# Not aligned
if show_alert:
show_alert_box('One or more partitions are not 4K aligned')
raise GenericError('One or more partitions are not 4K aligned')
def get_installed_antivirus():
"""Get list of installed antivirus programs, returns list."""
cmd = [
'WMIC', r'/namespace:\\root\SecurityCenter2',
'path', 'AntivirusProduct',
'get', 'displayName', '/value',
]
products = []
report = []
# Get list of products
proc = run_program(cmd)
for line in proc.stdout.splitlines():
line = line.strip()
if '=' in line:
products.append(line.split('=')[1])
# Check product(s) status
for product in sorted(products):
cmd = [
'WMIC', r'/namespace:\\root\SecurityCenter2',
'path', 'AntivirusProduct',
'where', f'displayName="{product}"',
'get', 'productState', '/value',
]
proc = run_program(cmd)
state = proc.stdout.split('=')[1]
state = hex(int(state))
if str(state)[3:5] not in ['10', '11']:
report.append(color_string(f'[Disabled] {product}', 'YELLOW'))
else:
report.append(product)
# Final check
if not report:
report.append(color_string('No products detected', 'RED'))
# Done
return report
def get_installed_ram(as_list=False, raise_exceptions=False):
"""Get installed RAM."""
mem = psutil.virtual_memory()
mem_str = bytes_to_string(mem.total, decimals=1)
# Raise exception if necessary
if raise_exceptions:
if RAM_OK > mem.total >= RAM_WARNING:
raise GenericWarning(mem_str)
if mem.total < RAM_WARNING:
raise GenericError(mem_str)
# Done
return [mem_str] if as_list else mem_str
def get_os_activation(as_list=False, check=True):
"""Get OS activation status, returns str.
NOTE: If check=True then raise an exception if OS isn't activated.
"""
act_str = get_activation_string()
if check and not is_activated():
if 'unavailable' in act_str.lower():
raise GenericWarning(act_str)
# Else
raise GenericError(act_str)
# Done
return [act_str] if as_list else act_str
def get_os_name(as_list=False, check=True):
"""Build OS display name, returns str.
NOTE: If check=True then an exception is raised if the OS version is
outdated or unsupported.
"""
key = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion'
build_version = int(reg_read_value("HKLM", key, "CurrentBuild"))
build_version_full = platform.win32_ver()[1]
details = WINDOWS_BUILDS.get(build_version_full, f'Build {build_version}')
display_name = (
f'{reg_read_value("HKLM", key, "ProductName")} {ARCH}-bit {details}'
)
# Check for support issues
if check:
if build_version in OUTDATED_BUILD_NUMBERS:
raise GenericWarning(f'{display_name} (outdated)')
if build_version < OLDEST_SUPPORTED_BUILD:
raise GenericError(f'{display_name} (unsupported)')
# Done
return [display_name] if as_list else display_name
def get_raw_disks():
"""Get all disks without a partiton table, returns list."""
script_path = find_kit_dir('Scripts').joinpath('get_raw_disks.ps1')
cmd = ['PowerShell', '-ExecutionPolicy', 'Bypass', '-File', script_path]
json_data = get_json_from_command(cmd)
raw_disks = []
# Bail early
if not json_data:
# No RAW disks detected
return raw_disks
# Fix JSON if only one disk was detected
if isinstance(json_data, dict):
json_data = [json_data]
# Parse JSON
for disk in json_data:
size_str = bytes_to_string(int(disk["Size"]), use_binary=False)
raw_disks.append(f'{disk["FriendlyName"]} ({size_str})')
# Done
return raw_disks
def get_volume_usage(use_colors=False):
"""Get space usage info for all fixed volumes, returns list."""
report = []
for disk in psutil.disk_partitions():
if 'fixed' not in disk.opts:
continue
total, _, free, percent = psutil.disk_usage(disk.device)
color = None
if percent > 85:
color = 'RED'
elif percent > 75:
color = 'YELLOW'
display_str = (
f'{free/total:>5.2%} Free'
f' ({bytes_to_string(free, 2):>10} / {bytes_to_string(total, 2):>10})'
)
if use_colors:
display_str = color_string(display_str, color)
report.append(f'{disk.device} {display_str}')
# Done
return report
def show_alert_box(message, title=None):
"""Show Windows alert box with message."""
title = title if title else f'{KIT_NAME_FULL} Warning'
message_box = ctypes.windll.user32.MessageBoxW
message_box(None, message, title, 0x00001030)
# Registry Functions
def reg_delete_key(hive, key, recurse=False):
# pylint: disable=raise-missing-from
"""Delete a key from the registry.
NOTE: If recurse is False then it will only work on empty keys.
"""
hive = reg_get_hive(hive)
hive_name = KNOWN_HIVE_NAMES.get(hive, '???')
# Delete subkeys first
if recurse:
with suppress(OSError), winreg.OpenKey(hive, key) as open_key:
while True:
subkey = fr'{key}\{winreg.EnumKey(open_key, 0)}'
reg_delete_key(hive, subkey, recurse=recurse)
# Delete key
try:
winreg.DeleteKey(hive, key)
LOG.warning(r'Deleting registry key: %s\%s', hive_name, key)
except FileNotFoundError:
# Ignore
pass
except PermissionError:
LOG.error(r'Failed to delete registry key: %s\%s', hive_name, key)
if recurse:
# Re-raise exception
raise
# recurse is not True so assuming we tried to remove a non-empty key
msg = fr'Refusing to remove non-empty key: {hive_name}\{key}'
raise FileExistsError(msg)
def reg_delete_value(hive, key, value):
"""Delete a value from the registry."""
access = winreg.KEY_ALL_ACCESS
hive = reg_get_hive(hive)
hive_name = KNOWN_HIVE_NAMES.get(hive, '???')
# Delete value
with winreg.OpenKey(hive, key, access=access) as open_key:
try:
winreg.DeleteValue(open_key, value)
LOG.warning(
r'Deleting registry value: %s\%s "%s"', hive_name, key, value,
)
except FileNotFoundError:
# Ignore
pass
except PermissionError:
LOG.error(
r'Failed to delete registry value: %s\%s "%s"', hive_name, key, value,
)
# Re-raise exception
raise
def reg_get_hive(hive):
"""Get winreg HKEY constant from string, returns HKEY constant."""
if isinstance(hive, int):
# Assuming we're already a winreg HKEY constant
pass
else:
hive = KNOWN_HIVES[hive.upper()]
# Done
return hive
def reg_get_data_type(data_type):
"""Get registry data type from string, returns winreg constant."""
if isinstance(data_type, int):
# Assuming we're already a winreg value type constant
pass
else:
data_type = KNOWN_DATA_TYPES[data_type.upper()]
# Done
return data_type
def reg_key_exists(hive, key):
"""Test if the specified hive/key exists, returns bool."""
exists = False
hive = reg_get_hive(hive)
# Query key
try:
winreg.QueryValue(hive, key)
except FileNotFoundError:
# Leave set to False
pass
else:
exists = True
# Done
return exists
def reg_read_value(hive, key, value, force_32=False, force_64=False):
"""Query value from hive/hey, returns multiple types.
NOTE: Set value='' to read the default value.
"""
access = winreg.KEY_READ
data = None
hive = reg_get_hive(hive)
# Set access
if force_32:
access = access | winreg.KEY_WOW64_32KEY
elif force_64:
access = access | winreg.KEY_WOW64_64KEY
# Query value
with winreg.OpenKey(hive, key, access=access) as open_key:
# Returning first part of tuple and ignoreing type
data = winreg.QueryValueEx(open_key, value)[0]
# Done
return data
def reg_write_settings(settings):
"""Set registry values in bulk from a custom data structure.
Data structure should be as follows:
EXAMPLE_SETTINGS = {
# See KNOWN_HIVES for valid hives
'HKLM': {
r'Software\\2Shirt\\WizardKit': (
# Value tuples should be in the form:
# (name, data, data-type, option),
# See KNOWN_DATA_TYPES for valid types
# The option item is optional
('Sample Value #1', 'Sample Data', 'SZ'),
('Sample Value #2', 14, 'DWORD'),
),
# An empty key will be created if no values are specified
r'Software\\2Shirt\\WizardKit\\Empty': (),
r'Software\\2Shirt\\WizardKit\\Test': (
('Sample Value #3', 14000000000000, 'QWORD'),
),
},
'HKCU': {
r'Software\\2Shirt\\WizardKit': (
# The 4th item forces using the 32-bit registry
# See reg_set_value() for valid options
('Sample Value #4', 'Sample Data', 'SZ', '32'),
),
},
}
"""
for hive, keys in settings.items():
hive = reg_get_hive(hive)
for key, values in keys.items():
if not values:
# Create an empty key
winreg.CreateKey(hive, key)
for value in values:
reg_set_value(hive, key, *value)
def reg_set_value(hive, key, name, data, data_type, option=None):
# pylint: disable=too-many-arguments
"""Set value for hive/key."""
access = winreg.KEY_WRITE
data_type = reg_get_data_type(data_type)
hive = reg_get_hive(hive)
option = str(option)
# Safety check
if not name and option in ('32', '64'):
raise NotImplementedError(
'Unable to set default values using alternate registry views',
)
# Set access
if option == '32':
access = access | winreg.KEY_WOW64_32KEY
elif option == '64':
access = access | winreg.KEY_WOW64_64KEY
# Create key
winreg.CreateKeyEx(hive, key, access=access)
# Set value
if name:
with winreg.OpenKey(hive, key, access=access) as open_key:
winreg.SetValueEx(open_key, name, 0, data_type, data)
else:
# Set default value instead
winreg.SetValue(hive, key, data_type, data)
# Safe Mode Functions
def disable_safemode():
"""Edit BCD to remove safeboot value."""
cmd = ['bcdedit', '/deletevalue', '{default}', 'safeboot']
run_program(cmd)
def disable_safemode_msi():
"""Disable MSI access under safemode."""
cmd = ['reg', 'delete', REG_MSISERVER, '/f']
run_program(cmd)
def enable_safemode():
"""Edit BCD to set safeboot as default."""
cmd = ['bcdedit', '/set', '{default}', 'safeboot', 'network']
run_program(cmd)
def enable_safemode_msi():
"""Enable MSI access under safemode."""
cmd = ['reg', 'add', REG_MSISERVER, '/f']
run_program(cmd)
cmd = [
'reg', 'add', REG_MSISERVER, '/ve',
'/t', 'REG_SZ',
'/d', 'Service', '/f',
]
run_program(cmd)
# Secure Boot Functions
def is_booted_uefi():
"""Check if booted UEFI or legacy, returns bool."""
kernel = ctypes.windll.kernel32
firmware_type = ctypes.c_uint()
# Get value from kernel32 API (firmware_type is updated by the call)
try:
kernel.GetFirmwareType(ctypes.byref(firmware_type))
except Exception: # pylint: disable=broad-except
# Ignore and set firmware_type back to zero
firmware_type = ctypes.c_uint(0)
# Check result
return firmware_type.value == 2
def is_secure_boot_enabled(raise_exceptions=False, show_alert=False):
"""Check if Secure Boot is enabled, returns bool.
If raise_exceptions is True then an exception is raised with details.
If show_alert is True a popup alert box is shown if it's not enabled.
"""
booted_uefi = is_booted_uefi()
cmd = ['PowerShell', '-Command', 'Confirm-SecureBootUEFI']
enabled = False
msg_error = None
msg_warning = None
# Bail early
if OS_VERSION < 8:
if raise_exceptions:
raise GenericWarning(f'Secure Boot not available for {OS_VERSION}')
return False
# Check results
proc = run_program(cmd, check=False)
if proc.returncode:
# Something went wrong
if booted_uefi:
msg_warning = 'UNKNOWN'
else:
msg_warning = 'DISABLED\n\nOS installed LEGACY'
else:
# Command completed
if 'True' in proc.stdout:
enabled = True
elif 'False' in proc.stdout:
msg_error = 'DISABLED'
else:
msg_warning = 'UNKNOWN'
# Show popup and/or raise exceptions as necessary
for msg, exc in ((msg_error, GenericError), (msg_warning, GenericWarning)):
if not msg:
continue
if show_alert:
show_alert_box(f'Secure Boot {msg}')
if raise_exceptions:
raise exc(msg)
break
# Done
return enabled
# Service Functions
def disable_service(service_name):
"""Set service startup to disabled."""
cmd = ['sc', 'config', service_name, 'start=', 'disabled']
run_program(cmd, check=False)
# Verify service was disabled
if get_service_start_type(service_name) != 'disabled':
raise GenericError(f'Failed to disable service {service_name}')
def enable_service(service_name, start_type='auto'):
"""Enable service by setting start type."""
cmd = ['sc', 'config', service_name, 'start=', start_type]
psutil_type = 'automatic'
if start_type == 'demand':
psutil_type = 'manual'
# Enable service
run_program(cmd, check=False)
# Verify service was enabled
if get_service_start_type(service_name) != psutil_type:
raise GenericError(f'Failed to enable service {service_name}')
def get_service_status(service_name):
"""Get service status using psutil, returns str."""
status = 'unknown'
try:
service = psutil.win_service_get(service_name)
status = service.status()
except psutil.NoSuchProcess:
status = 'missing?'
return status
def get_service_start_type(service_name):
"""Get service startup type using psutil, returns str."""
start_type = 'unknown'
try:
service = psutil.win_service_get(service_name)
start_type = service.start_type()
except psutil.NoSuchProcess:
start_type = 'missing?'
return start_type
def start_service(service_name):
"""Stop service."""
cmd = ['net', 'start', service_name]
run_program(cmd, check=False)
# Verify service was started
if not get_service_status(service_name) in ('running', 'start_pending'):
raise GenericError(f'Failed to start service {service_name}')
def stop_service(service_name):
"""Stop service."""
cmd = ['net', 'stop', service_name]
run_program(cmd, check=False)
# Verify service was stopped
if not get_service_status(service_name) == 'stopped':
raise GenericError(f'Failed to stop service {service_name}')
if __name__ == '__main__':
print("This file is not meant to be called directly.")
``` |
{
"source": "2silver/collective.bbcodesnippets",
"score": 2
} |
#### File: collective/bbcodesnippets/demo.py
```python
from .interfaces import IFormatterFactory
from plone import api
from Products.Five.browser import BrowserView
from zope.component import getUtilitiesFor
class DemoView(BrowserView):
def docsnippets(self):
enabled = api.portal.get_registry_record("bbcodesnippets.formatters")
for name, factory in getUtilitiesFor(IFormatterFactory):
if not factory.__doc__:
continue
yield {
"name": name,
"snippet": factory.__bbcode_copy_snippet__,
"demo": factory.__doc__,
"enabled": name in enabled,
}
```
#### File: collective/bbcodesnippets/formatters.py
```python
from .interfaces import IFormatterFactory
from zope.interface import provider
import bbcode
import re
class copy_snippet(object):
def __init__(self, snippet):
self.__bbcode_copy_snippet__ = snippet
def __call__(self, func):
func.__bbcode_copy_snippet__ = self.__bbcode_copy_snippet__
return func
class template_snippet(object):
def __init__(self, snippet):
self.__bbcode_template_snippet__ = snippet
def __call__(self, func):
func.__bbcode_template_snippet__ = self.__bbcode_template_snippet__
return func
# parts of this code are inspired by and partly taken from
# https://github.com/dcwatson/bbcode/blob/master/bbcode.py
# b [b]test[/b] <strong>test</strong>
# i [i]test[/i] <em>test</em>
# u [u]test[/u] <u>test</u>
# s [s]test[/s] <strike>test</strike>
# hr [hr] <hr />
# br [br] <br />
# sub x[sub]3[/sub] x<sub>3</sub>
# sup x[sup]3[/sup] x<sup>3</sup>
# list/* [list][*] item[/list] <ul><li>item</li></ul>
# quote [quote]hello[/quote] <blockquote>hello</blockquote>
# code [code]x = 3[/code] <code>x = 3</code>
# center [center]hello[/center] <div style="text-align:center;">hello</div>
# color [color=red]red[/color] <span style="color:red;">red</span>
# url [url=www.apple.com]Apple[/url] <a href="http://www.apple.com">Apple</a>
TEMPLATES = {
"url": '<a rel="nofollow" href="{href}">{text}</a>',
}
def make_simple_formatter(tag_name, format_string, **kwargs):
"""
Creates a formatter that takes the tag options dictionary, puts a value key
in it, and uses it as a format dictionary to the given format string.
"""
def _render(name, value, options, parent, context):
fmt = {}
if options:
fmt.update(options)
fmt.update({"value": value})
return format_string % fmt
return _render
@provider(IFormatterFactory)
@copy_snippet("[b][/b]")
@template_snippet("[b]$TEXT[/b]$CURSOR")
def b_factory():
"""Bold or strong text: <pre>A [b]bold[/b] text.</pre><br />
Example:<br />
A [b]bold[/b] text.
"""
return make_simple_formatter("b", "<strong>%(value)s</strong>"), {}
@provider(IFormatterFactory)
@copy_snippet("[i][/i]")
@template_snippet("[i]$TEXT[/i]$CURSOR")
def i_factory():
"""Italic or emphasised text: <pre>An [i]italic/emphasised[/i] text.</pre><br />
Example:<br />
An [i]italic/emphasised[/i] text.
"""
return make_simple_formatter("i", "<em>%(value)s</em>"), {}
@provider(IFormatterFactory)
@copy_snippet("[u][/u]")
@template_snippet("[u]$TEXT[/u]$CURSOR")
def u_factory():
"""Underlined or unarticulated text: <pre>An [u]underlined[u] text.</pre><br />
Example:<br />
An [u]underlined[/u] text.
"""
return make_simple_formatter("u", "<u>%(value)s</u>"), {}
@provider(IFormatterFactory)
@copy_snippet("[s][/s]")
@template_snippet("[s]$TEXT[/s]$CURSOR")
def s_factory():
"""Strike through or deleted text: [s]test[/s]"""
return make_simple_formatter("s", "<del>%(value)s</del>"), {}
@provider(IFormatterFactory)
@copy_snippet("[sub][/sub]")
@template_snippet("[sub]$TEXT[/sub]$CURSOR")
def sub_factory():
"""Subscript text: <pre>H[sub]2[/sub]O</pre><br />
Example:<br />
H[sub]2[/sub]O
"""
return make_simple_formatter("sub", "<sub>%(value)s</sub>"), {}
@provider(IFormatterFactory)
@copy_snippet("[sup][/sup]")
@template_snippet("[sup]$TEXT[/sup]$CURSOR")
def sup_factory():
"""Superscript text: <pre>r[sup]2[/sup]</pre><br />
Example:<br />
r[sup]2[/sup]
"""
return make_simple_formatter("sup", "<sup>%(value)s</sup>"), {}
@provider(IFormatterFactory)
@copy_snippet("[hr]")
@template_snippet("$TEXT[hr]$CURSOR")
def hr_factory():
"""Horizontal ruler: <pre>Above ruler[hr]Below ruler</pre><br />
Example:<br />
Above ruler[hr]Below ruler
"""
return make_simple_formatter("hr", "<hr />"), {"standalone": True}
@provider(IFormatterFactory)
@copy_snippet("[br]")
@template_snippet("$TEXT[br]$CURSOR")
def br_factory():
"""Line break in text: <pre>A line[br]break in the text.</pre><br />
Example:<br />
A line[br]break in the text.
"""
return make_simple_formatter("br", "<br />"), {"standalone": True}
@provider(IFormatterFactory)
@copy_snippet(
"""\
[list]
[*] item
[/list]
"""
)
@template_snippet(
"""\
[list]
[*] $TEXT
[*] $CURSOR
[/list]
"""
)
def list_factory():
"""List with bullets or numbers. Use '*' for bullet points or for numbers one out of '1', '01, 'a', 'A', 'i' or 'I'.
Bullet points: [list][*] item[/list]
Numbered: [list][1] item[/list]
"""
def _render_list(name, value, options, parent, context):
list_type = options["list"] if (options and "list" in options) else "*"
css_opts = {
"1": "decimal",
"01": "decimal-leading-zero",
"a": "lower-alpha",
"A": "upper-alpha",
"i": "lower-roman",
"I": "upper-roman",
}
tag = "ol" if list_type in css_opts else "ul"
css = (
' style="list-style-type:%s;"' % css_opts[list_type]
if list_type in css_opts
else ""
)
return "<%s%s>%s</%s>" % (tag, css, value, tag)
return _render_list, {
"transform_newlines": False,
"strip": True,
"swallow_trailing_newline": True,
}
@provider(IFormatterFactory)
def list_item_factory():
# no doc string, so will not appear in documentation, helper for list
def _render_list_item(name, value, options, parent, context):
if not parent or parent.tag_name != "list":
return "[*]%s<br />" % value
return "<li>%s</li>" % value
return _render_list_item, {
"newline_closes": True,
"transform_newlines": False,
"same_tag_closes": True,
"strip": True,
"level": 1,
}
@provider(IFormatterFactory)
@copy_snippet("[quote][/quote]")
@template_snippet("[quote]$TEXT[/quote]$CURSOR")
def quote_factory():
return make_simple_formatter("quote", "<blockquote>%(value)s</blockquote>"), {
"strip": True,
"swallow_trailing_newline": True,
}
@provider(IFormatterFactory)
@copy_snippet("[code][/code]")
@template_snippet("[code]$TEXT[/code]$CURSOR")
def code_factory():
"""Code text: <pre>Some random Code: [code]print("Hello World")[/code]</pre><br />
Example:<br />
Some random Code: [code]print("Hello World")[/code]
"""
return make_simple_formatter("code", "<code>%(value)s</code>"), {
"render_embedded": False,
"transform_newlines": False,
"swallow_trailing_newline": True,
"replace_cosmetic": False,
}
@provider(IFormatterFactory)
@copy_snippet("[color=red][/color]")
@template_snippet("[color=$CURSOR]$TEXT[/color]")
def color_factory():
def _render_color(name, value, options, parent, context):
if "color" in options:
color = options["color"].strip()
elif options:
color = list(options.keys())[0].strip()
else:
return value
match = re.match(r"^([a-z]+)|^(#[a-f0-9]{3,6})", color, re.I)
color = match.group() if match else "inherit"
return '<span style="color:%(color)s;">%(value)s</span>' % {
"color": color,
"value": value,
}
return _render_color(), {}
@provider(IFormatterFactory)
@copy_snippet("[center][/center]")
@template_snippet("[center]$TEXT[/center]$CURSOR")
def center_factory():
"""Centered text: <pre>[center]centered text[/center]</pre><br />
Example:<br />
[center]centered text[/center]
"""
return (
make_simple_formatter(
"center", '<div style="text-align:center;">%(value)s</div>'
),
{},
)
@provider(IFormatterFactory)
@copy_snippet("[url=https://plone.org]Plone[/url]")
@template_snippet("[url=$CURSOR]$TEXT[/url]")
def url_factory():
"""A hyper link in the text: <pre>Welcome to [url=www.plone.org]Plone[/url]!</pre><br />
Example:<br />
Welcome to [url=www.plone.org]Plone[/url]!
"""
def _render_url(name, value, options, parent, context):
if options and "url" in options:
href = options["url"]
# Option values are not escaped for HTML output.
for find, repl in bbcode.Parser.REPLACE_ESCAPE:
value = value.replace(find, repl)
else:
href = value
# Completely ignore javascript: and data: "links".
if re.sub(r"[^a-z0-9+]", "", href.lower().split(":", 1)[0]) in (
"javascript",
"data",
"vbscript",
):
return ""
# Only add the missing https:// if it looks like it starts with a domain name.
if "://" not in href and bbcode._domain_re.match(href):
href = "https://" + href
return TEMPLATES["url"].format(href=href.replace('"', "%22"), text=value)
return _render_url, {"replace_links": False, "replace_cosmetic": False}
```
#### File: collective/bbcodesnippets/interfaces.py
```python
from zope.browsermenu.interfaces import IBrowserMenu
from zope.interface import Interface
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
class IBBCodeSnippetsLayer(IDefaultBrowserLayer):
"""Marker interface that defines a browser layer."""
class IFormatterFactory(Interface):
def __call__():
"""create a new bbcode formatter."""
class IBBCodeSnippetesMainMenuItem(IBrowserMenu):
"""The main BBCode menu item."""
class IBBCodeSnippetesMenu(IBrowserMenu):
"""The BBCode menu."""
``` |
{
"source": "2silver/yafowil",
"score": 2
} |
#### File: src/yafowil/compound.py
```python
from node.utils import UNSET
from odict import odict
from yafowil.base import factory
from yafowil.utils import as_data_attrs
from yafowil.utils import attr_value
from yafowil.utils import css_managed_props
from yafowil.utils import cssclasses
from yafowil.utils import cssid
from yafowil.utils import managedprops
###############################################################################
# compound
###############################################################################
@managedprops('structural')
def compound_extractor(widget, data):
"""Delegates extraction to children.
"""
for child in widget.values():
# regular child widget, extract
if not attr_value('structural', child, data):
child.extract(data.request, parent=data)
continue
# structural child widget, go one level deeper
for subchild in child.values():
# sub child widget may be structural as well
structural = attr_value('structural', subchild, data)
# use compound extractor if sub child widget has children and is
# structural
if len(subchild) and structural:
compound_extractor(subchild, data)
# call extract on sub child widget directly if not structural
elif not structural:
subchild.extract(data.request, parent=data)
return odict([(k, v.extracted) for k, v in data.items()])
def compound_renderer(widget, data):
"""Delegates rendering to children.
"""
value = widget.getter
result = u''
for childname in widget:
child = widget[childname]
if attr_value('structural', child, data):
subdata = data
if value is not UNSET and child.getter is UNSET:
child.getter = value
else:
subdata = data.get(childname, None)
if callable(value):
value = value(widget, data)
if value is not UNSET and childname in value:
# XXX: if compound renderer is called multiple times on the
# same widget within one form processing cycle
# ``child.getter`` has been set, so the condition is True
# and ``ValueError`` is raised. Think about widget
# instance annotations to mark value delegation already
# processed.
if child.getter is UNSET:
child.getter = value[childname]
else:
raise ValueError(
u"Both compound and compound member "
u"provide a value for '{0}'".format(childname)
)
if subdata is None:
result += child(request=data.request)
else:
result += child(data=subdata)
return result
factory.register(
'compound',
extractors=[compound_extractor],
edit_renderers=[compound_renderer],
display_renderers=[compound_renderer])
factory.doc['blueprint']['compound'] = """\
A blueprint to create a compound of widgets. This blueprint creates a node. A
node can contain sub-widgets.
"""
factory.defaults['structural'] = False
factory.doc['props']['structural'] = """\
If a compound is structural, it will be omitted in the dotted-path levels and
will not have an own runtime-data.
"""
###############################################################################
# hybrid
###############################################################################
@managedprops('leaf')
def hybrid_extractor(widget, data):
"""This extractor can be used if a blueprint can act as compound or leaf.
"""
if len(widget) and not attr_value('leaf', widget, data):
return compound_extractor(widget, data)
return data.extracted
@managedprops('leaf')
def hybrid_renderer(widget, data):
"""This renderer can be used if a blueprint can act as compound or leaf.
"""
if len(widget) and not attr_value('leaf', widget, data):
rendered = compound_renderer(widget, data)
else:
rendered = data.rendered
if data.rendered is None:
rendered = u''
return rendered
factory.defaults['leaf'] = None
factory.doc['props']['leaf'] = """\
Leaf property can be used in conjunction with ``hybrid_extractor`` and
``hybrid_renderer`` using blueprints in order to mark compound widgets as leaf.
If set True, it causes bypassing auto delegation of extraction and rendering
to ``compound_renderer`` respective ``compound_extractor`` if widget contains
children.
This is useful if mixing blueprints which renders and handles compounds on it's
own with blueprints using hybrid rendering and extraction in order to prevent
side effects due to multiple child rendering and extraction.
"""
###############################################################################
# div
###############################################################################
@managedprops('id', *css_managed_props)
def div_renderer(widget, data):
attrs = {
'id': attr_value('id', widget, data),
'class_': cssclasses(widget, data)
}
attrs.update(as_data_attrs(attr_value('data', widget, data)))
return data.tag('div', data.rendered, **attrs)
factory.register(
'div',
extractors=[hybrid_extractor],
edit_renderers=[
hybrid_renderer,
div_renderer
],
display_renderers=[
hybrid_renderer,
div_renderer
])
factory.doc['blueprint']['div'] = """\
Blueprint rendering a '<div>' element.
This is a hybrid blueprint. Check ``leaf`` property for details.
"""
factory.defaults['div.id'] = None
factory.doc['props']['div.id'] = """\
Id attribute for div tag.
"""
factory.defaults['div.class'] = None
factory.doc['props']['div.class'] = """\
CSS classes for div tag.
"""
factory.defaults['div.data'] = None
factory.doc['props']['div.data'] = """\
Dict containing data attributes for div tag.
"""
###############################################################################
# fieldset
###############################################################################
@managedprops('legend', *css_managed_props)
def fieldset_renderer(widget, data):
fs_attrs = {
'id': cssid(widget, 'fieldset'),
'class_': cssclasses(widget, data)
}
rendered = data.rendered
legend = attr_value('legend', widget, data)
if legend:
rendered = data.tag('legend', legend) + rendered
return data.tag('fieldset', rendered, **fs_attrs)
factory.register(
'fieldset',
extractors=[compound_extractor],
edit_renderers=[
compound_renderer,
fieldset_renderer
],
display_renderers=[
compound_renderer,
fieldset_renderer
])
factory.doc['blueprint']['fieldset'] = """\
Renders a fieldset around the prior rendered output.
"""
factory.defaults['fieldset.legend'] = False
factory.doc['props']['fieldset.legend'] = """\
Content of legend tag if legend should be rendered.
"""
factory.defaults['fieldset.class'] = None
###############################################################################
# form
###############################################################################
@managedprops('action', 'method', 'enctype', 'novalidate', *css_managed_props)
def form_edit_renderer(widget, data):
method = attr_value('method', widget, data)
enctype = method == 'post' and attr_value('enctype', widget, data) or None
noval = attr_value('novalidate', widget, data) and 'novalidate' or None
form_attrs = {
'action': attr_value('action', widget, data),
'method': method,
'enctype': enctype,
'novalidate': noval,
'class_': cssclasses(widget, data),
'id': 'form-{0}'.format('-'.join(widget.path)),
}
form_attrs.update(as_data_attrs(attr_value('data', widget, data)))
return data.tag('form', data.rendered, **form_attrs)
def form_display_renderer(widget, data):
return data.tag('div', data.rendered)
factory.register(
'form',
extractors=[compound_extractor],
edit_renderers=[
compound_renderer,
form_edit_renderer
],
display_renderers=[
compound_renderer,
form_display_renderer
])
factory.doc['blueprint']['form'] = """\
A html-form element as a compound of widgets.
"""
factory.defaults['form.method'] = 'post'
factory.doc['props']['form.method'] = """\
One out of ``get`` or ``post``.
"""
factory.doc['props']['form.action'] = """\
Target web address (URL) to send the form to.
"""
factory.defaults['form.enctype'] = 'multipart/form-data'
factory.doc['props']['form.enctype'] = """\
Encryption type of the form. Only relevant for method ``post``. Expect one out
of ``application/x-www-form-urlencoded`` or ``multipart/form-data``.
"""
factory.defaults['form.novalidate'] = True
factory.doc['props']['form.novalidate'] = """\
Flag whether HTML5 form validation should be suppressed.
"""
```
#### File: yafowil/tests/__init__.py
```python
from __future__ import print_function
from node.tests import NodeTestCase
from yafowil.base import factory
from yafowil.compat import IS_PY2
import lxml.etree as etree
import sys
import unittest
import yafowil.common
import yafowil.compound
import yafowil.persistence
import yafowil.table
if not IS_PY2:
from importlib import reload
class YafowilTestCase(NodeTestCase):
def setUp(self):
super(YafowilTestCase, self).setUp()
factory.clear()
reload(yafowil.persistence)
reload(yafowil.common)
reload(yafowil.compound)
reload(yafowil.table)
def fxml(xml):
et = etree.fromstring(xml)
return etree.tostring(et, pretty_print=True).decode('utf-8')
def pxml(xml):
print(fxml(xml))
def test_suite():
from yafowil.tests import test_base
from yafowil.tests import test_common
from yafowil.tests import test_compound
from yafowil.tests import test_controller
from yafowil.tests import test_persistence
from yafowil.tests import test_resources
from yafowil.tests import test_table
from yafowil.tests import test_tsf
from yafowil.tests import test_utils
suite = unittest.TestSuite()
suite.addTest(unittest.findTestCases(test_base))
suite.addTest(unittest.findTestCases(test_common))
suite.addTest(unittest.findTestCases(test_compound))
suite.addTest(unittest.findTestCases(test_controller))
suite.addTest(unittest.findTestCases(test_persistence))
suite.addTest(unittest.findTestCases(test_resources))
suite.addTest(unittest.findTestCases(test_table))
suite.addTest(unittest.findTestCases(test_tsf))
suite.addTest(unittest.findTestCases(test_utils))
return suite
def run_tests():
from zope.testrunner.runner import Runner
runner = Runner(found_suites=[test_suite()])
runner.run()
sys.exit(int(runner.failed))
if __name__ == '__main__':
run_tests()
```
#### File: yafowil/tests/test_compound.py
```python
from odict import odict
from node.utils import UNSET
from yafowil.base import ExtractionError
from yafowil.base import factory
from yafowil.controller import Controller
from yafowil.tests import YafowilTestCase
from yafowil.tests import fxml
from yafowil.utils import Tag
import yafowil.common
import yafowil.compound # noqa
###############################################################################
# Helpers
###############################################################################
tag = Tag(lambda msg: msg)
###############################################################################
# Tests
###############################################################################
class TestCompound(YafowilTestCase):
def test_compound_blueprint_value_via_compound(self):
# Render Compound with values set via compound widget
value = {
'inner': 'Value 1 from parent',
'inner2': 'Value 2 from parent',
}
compound = factory(
'compound',
name='COMPOUND',
value=value)
compound['inner'] = factory('text')
compound['inner2'] = factory(
'text',
props={
'required': True
})
self.check_output("""
<div>
<input class="text" id="input-COMPOUND-inner" name="COMPOUND.inner"
type="text" value="Value 1 from parent"/>
<input class="required text" id="input-COMPOUND-inner2"
name="COMPOUND.inner2" required="required" type="text"
value="Value 2 from parent"/>
</div>
""", fxml(tag('div', compound())))
def test_compound_blueprint_value_via_members(self):
# Render Compound with values set via compound members
compound = factory(
'compound',
name='COMPOUND')
compound['inner'] = factory(
'text',
value='value1')
compound['inner2'] = factory(
'error:text',
value='value2',
props={
'required': True
})
self.check_output("""
<div>
<input class="text" id="input-COMPOUND-inner" name="COMPOUND.inner"
type="text" value="value1"/>
<input class="required text" id="input-COMPOUND-inner2"
name="COMPOUND.inner2" required="required" type="text"
value="value2"/>
</div>
""", fxml(tag('div', compound())))
def test_compound_blueprint_value_conflict(self):
# ValueError if value for a compound member is defined both
value = {'inner': 'Value 1 from parent'}
compound = factory(
'compound',
name='COMPOUND',
value=value)
compound['inner'] = factory(
'text',
value='value1')
err = self.expect_error(
ValueError,
compound
)
msg = "Both compound and compound member provide a value for 'inner'"
self.assertEqual(str(err), msg)
def test_compound_blueprint_extraction(self):
compound = factory('compound', name='COMPOUND')
compound['inner'] = factory('text', value='value1')
compound['inner2'] = factory(
'error:text',
value='value2',
props={
'required': True
})
# Extract Compound with empty request
data = compound.extract({})
self.assertEqual(data.name, 'COMPOUND')
self.assertEqual(data.value, UNSET)
expected = odict()
expected['inner'] = UNSET
expected['inner2'] = UNSET
self.assertEqual(data.extracted, expected)
self.assertEqual(data.errors, [])
inner_data = data['inner']
self.assertEqual(inner_data.name, 'inner')
self.assertEqual(inner_data.value, 'value1')
self.assertEqual(inner_data.extracted, UNSET)
self.assertEqual(inner_data.errors, [])
# Extract with a value in request
request = {
'COMPOUND.inner': 'newvalue',
'COMPOUND.inner2': '',
}
data = compound.extract(request)
data_inner = data['inner']
self.assertEqual(data_inner.name, 'inner')
self.assertEqual(data_inner.value, 'value1')
self.assertEqual(data_inner.extracted, 'newvalue')
self.assertEqual(data_inner.errors, [])
data_inner2 = data['inner2']
self.assertEqual(data_inner2.name, 'inner2')
self.assertEqual(data_inner2.value, 'value2')
self.assertEqual(data_inner2.extracted, '')
self.assertEqual(
data_inner2.errors,
[ExtractionError('Mandatory field was empty')]
)
expected = odict()
expected['inner'] = 'newvalue'
expected['inner2'] = ''
self.assertEqual(data.extracted, expected)
self.check_output("""
<div>
<input class="text" id="input-COMPOUND-inner" name="COMPOUND.inner"
type="text" value="newvalue"/>
<div class="error">
<div class="errormessage">Mandatory field was empty</div>
<input class="required text" id="input-COMPOUND-inner2"
name="COMPOUND.inner2" required="required"
type="text" value=""/>
</div>
</div>
""", fxml('<div>' + compound(data=data) + '</div>'))
def test_compound_blueprint_display_rendering(self):
# Compound display renderers, same as edit renderers
compound = factory(
'compound',
name='COMPOUND',
mode='display')
self.assertEqual(tag('div', compound()), '<div></div>')
def test_compound_blueprint_structural_children(self):
# Compound with structural compound as child
value = {
'inner': 'Value 1 from parent',
'inner2': 'Value 2 from parent',
}
compound = factory(
'compound',
name='COMPOUND',
value=value)
structural = compound['STRUCTURAL'] = factory(
'compound',
props={
'structural': True
})
structural['inner'] = factory('text')
structural['inner2'] = factory(
'text',
props={
'required': True
})
self.check_output("""
<div>
<input class="text" id="input-COMPOUND-inner" name="COMPOUND.inner"
type="text" value="Value 1 from parent"/>
<input class="required text" id="input-COMPOUND-inner2"
name="COMPOUND.inner2" required="required" type="text"
value="Value 2 from parent"/>
</div>
""", fxml(tag('div', compound())))
self.assertEqual(compound.treerepr().split('\n'), [
"<class 'yafowil.base.Widget'>: COMPOUND",
" <class 'yafowil.base.Widget'>: STRUCTURAL",
" <class 'yafowil.base.Widget'>: inner",
" <class 'yafowil.base.Widget'>: inner2",
""
])
data = compound.extract({
'COMPOUND.inner': 'newvalue',
'COMPOUND.inner2': '',
})
self.assertEqual(data.name, 'COMPOUND')
self.assertEqual(data.value, {
'inner2': 'Value 2 from parent',
'inner': 'Value 1 from parent'
})
expected = odict()
expected['inner'] = 'newvalue'
expected['inner2'] = ''
self.assertEqual(data.extracted, expected)
data_inner = data['inner']
self.assertEqual(data_inner.name, 'inner')
self.assertEqual(data_inner.value, 'Value 1 from parent')
self.assertEqual(data_inner.extracted, 'newvalue')
self.assertEqual(data_inner.errors, [])
data_inner2 = data['inner2']
self.assertEqual(data_inner2.name, 'inner2')
self.assertEqual(data_inner2.value, 'Value 2 from parent')
self.assertEqual(data_inner2.extracted, '')
self.assertEqual(
data_inner2.errors,
[ExtractionError('Mandatory field was empty')]
)
def test_compound_blueprint_compound_children(self):
# Compound with compound as child
value = {
'CHILD_COMPOUND': {
'inner': 'Value 1 from parent',
'inner2': 'Value 2 from parent',
}
}
compound = factory(
'compound',
name='COMPOUND',
value=value)
child_compound = compound['CHILD_COMPOUND'] = factory('compound')
child_compound['inner'] = factory('text')
child_compound['inner2'] = factory(
'text',
props={
'required': True
})
self.check_output("""
<div>
<input class="text" id="input-COMPOUND-CHILD_COMPOUND-inner"
name="COMPOUND.CHILD_COMPOUND.inner" type="text"
value="Value 1 from parent"/>
<input class="required text" id="input-COMPOUND-CHILD_COMPOUND-inner2"
name="COMPOUND.CHILD_COMPOUND.inner2" required="required"
type="text" value="Value 2 from parent"/>
</div>
""", fxml(tag('div', compound()))) # noqa
self.assertEqual(compound.treerepr().split('\n'), [
"<class 'yafowil.base.Widget'>: COMPOUND",
" <class 'yafowil.base.Widget'>: CHILD_COMPOUND",
" <class 'yafowil.base.Widget'>: inner",
" <class 'yafowil.base.Widget'>: inner2",
""
])
data = compound.extract({
'COMPOUND.CHILD_COMPOUND.inner': 'newvalue',
'COMPOUND.CHILD_COMPOUND.inner2': 'newvalue2',
})
self.assertEqual(data.name, 'COMPOUND')
self.assertEqual(data.value, {
'CHILD_COMPOUND': {
'inner2': 'Value 2 from parent',
'inner': 'Value 1 from parent'
}
})
expected = odict()
expected['CHILD_COMPOUND'] = odict()
expected['CHILD_COMPOUND']['inner'] = 'newvalue'
expected['CHILD_COMPOUND']['inner2'] = 'newvalue2'
self.assertEqual(data.extracted, expected)
self.assertEqual(data.errors, [])
data_compound = data['CHILD_COMPOUND']
self.assertEqual(data_compound.name, 'CHILD_COMPOUND')
self.assertEqual(data_compound.value, {
'inner2': 'Value 2 from parent',
'inner': 'Value 1 from parent'
})
expected = odict()
expected['inner'] = 'newvalue'
expected['inner2'] = 'newvalue2'
self.assertEqual(data_compound.extracted, expected)
self.assertEqual(data_compound.errors, [])
data_inner = data['CHILD_COMPOUND']['inner']
self.assertEqual(data_inner.name, 'inner')
self.assertEqual(data_inner.value, 'Value 1 from parent')
self.assertEqual(data_inner.extracted, 'newvalue')
self.assertEqual(data_inner.errors, [])
data_inner2 = data['CHILD_COMPOUND']['inner2']
self.assertEqual(data_inner2.name, 'inner2')
self.assertEqual(data_inner2.value, 'Value 2 from parent')
self.assertEqual(data_inner2.extracted, 'newvalue2')
self.assertEqual(data_inner2.errors, [])
def test_compound_blueprint_structural_and_compound_children(self):
# Compound with structural compound with compound as children
value = {
'CHILD_COMPOUND': {
'inner': 'Value 1 from parent',
'inner2': 'Value 2 from parent',
}
}
compound = factory(
'compound',
name='COMPOUND',
value=value)
structural = compound['STRUCTURAL'] = factory(
'compound',
props={
'structural': True
})
child_compound = structural['CHILD_COMPOUND'] = factory('compound')
child_compound['inner'] = factory('text')
child_compound['inner2'] = factory(
'text',
props={
'required': True
})
self.check_output("""
<div>
<input class="text" id="input-COMPOUND-CHILD_COMPOUND-inner"
name="COMPOUND.CHILD_COMPOUND.inner" type="text"
value="Value 1 from parent"/>
<input class="required text" id="input-COMPOUND-CHILD_COMPOUND-inner2"
name="COMPOUND.CHILD_COMPOUND.inner2" required="required"
type="text" value="Value 2 from parent"/>
</div>
""", fxml(tag('div', compound()))) # noqa
self.assertEqual(compound.treerepr().split('\n'), [
"<class 'yafowil.base.Widget'>: COMPOUND",
" <class 'yafowil.base.Widget'>: STRUCTURAL",
" <class 'yafowil.base.Widget'>: CHILD_COMPOUND",
" <class 'yafowil.base.Widget'>: inner",
" <class 'yafowil.base.Widget'>: inner2",
""
])
self.assertEqual(
compound['STRUCTURAL'].attrs.storage,
{'structural': True}
)
self.assertEqual(
compound['STRUCTURAL']['CHILD_COMPOUND'].attrs.storage,
{}
)
data = compound.extract({
'COMPOUND.CHILD_COMPOUND.inner': 'newvalue',
'COMPOUND.CHILD_COMPOUND.inner2': 'newvalue2',
})
self.assertEqual(data.name, 'COMPOUND')
self.assertEqual(data.value, {
'CHILD_COMPOUND': {
'inner2': 'Value 2 from parent',
'inner': 'Value 1 from parent'
}
})
expected = odict()
expected['CHILD_COMPOUND'] = odict()
expected['CHILD_COMPOUND']['inner'] = 'newvalue'
expected['CHILD_COMPOUND']['inner2'] = 'newvalue2'
self.assertEqual(data.extracted, expected)
self.assertEqual(data.errors, [])
data_compound = data['CHILD_COMPOUND']
self.assertEqual(data_compound.name, 'CHILD_COMPOUND')
self.assertEqual(data_compound.value, {
'inner2': 'Value 2 from parent',
'inner': 'Value 1 from parent'
})
expected = odict()
expected['inner'] = 'newvalue'
expected['inner2'] = 'newvalue2'
self.assertEqual(data_compound.extracted, expected)
self.assertEqual(data_compound.errors, [])
data_inner = data['CHILD_COMPOUND']['inner']
self.assertEqual(data_inner.name, 'inner')
self.assertEqual(data_inner.value, 'Value 1 from parent')
self.assertEqual(data_inner.extracted, 'newvalue')
self.assertEqual(data_inner.errors, [])
data_inner2 = data['CHILD_COMPOUND']['inner2']
self.assertEqual(data_inner2.name, 'inner2')
self.assertEqual(data_inner2.value, 'Value 2 from parent')
self.assertEqual(data_inner2.extracted, 'newvalue2')
self.assertEqual(data_inner2.errors, [])
def test_compound_blueprint_address_compound_value_parent(self):
# Address different compounds with value on parent
value = {
'c1': {
'f1': 'Foo',
},
'c2': {
'f2': 'Bar',
'f3': 'Baz',
},
}
compound = factory(
'compound',
'comp',
value=value)
compound['c1'] = factory('compound')
compound['c1']['f1'] = factory('text')
compound['c2'] = factory('compound')
compound['c2']['f2'] = factory('text')
compound['c2']['f3'] = factory('text')
compound['c3'] = factory('compound')
compound['c3']['f4'] = factory('text')
self.check_output("""
<div>
<input class="text" id="input-comp-c1-f1" name="comp.c1.f1"
type="text" value="Foo"/>
<input class="text" id="input-comp-c2-f2" name="comp.c2.f2"
type="text" value="Bar"/>
<input class="text" id="input-comp-c2-f3" name="comp.c2.f3"
type="text" value="Baz"/>
<input class="text" id="input-comp-c3-f4" name="comp.c3.f4"
type="text" value=""/>
</div>
""", fxml(tag('div', compound())))
self.assertEqual(compound.treerepr().split('\n'), [
"<class 'yafowil.base.Widget'>: comp",
" <class 'yafowil.base.Widget'>: c1",
" <class 'yafowil.base.Widget'>: f1",
" <class 'yafowil.base.Widget'>: c2",
" <class 'yafowil.base.Widget'>: f2",
" <class 'yafowil.base.Widget'>: f3",
" <class 'yafowil.base.Widget'>: c3",
" <class 'yafowil.base.Widget'>: f4",
""
])
data = compound.extract({
'comp.c1.f1': 'Foo 1',
'comp.c2.f2': 'Bar 2',
'comp.c2.f3': 'Baz 1',
})
self.assertEqual(data.name, 'comp')
self.assertEqual(data.value, {
'c2': {
'f2': 'Bar',
'f3': 'Baz'
},
'c1': {
'f1': 'Foo'
}
})
expected = odict()
expected['c1'] = odict()
expected['c1']['f1'] = 'Foo 1'
expected['c2'] = odict()
expected['c2']['f2'] = 'Bar 2'
expected['c2']['f3'] = 'Baz 1'
expected['c3'] = odict()
expected['c3']['f4'] = UNSET
self.assertEqual(data.extracted, expected)
self.assertEqual(data.errors, [])
# c1
data_c1 = data['c1']
self.assertEqual(data_c1.name, 'c1')
self.assertEqual(data_c1.value, {'f1': 'Foo'})
expected = odict()
expected['f1'] = 'Foo 1'
self.assertEqual(data_c1.extracted, expected)
self.assertEqual(data_c1.errors, [])
data_f1 = data['c1']['f1']
self.assertEqual(data_f1.name, 'f1')
self.assertEqual(data_f1.value, 'Foo')
self.assertEqual(data_f1.extracted, 'Foo 1')
self.assertEqual(data_f1.errors, [])
# c2
data_c2 = data['c2']
self.assertEqual(data_c2.name, 'c2')
self.assertEqual(data_c2.value, {
'f2': 'Bar',
'f3': 'Baz'
})
expected = odict()
expected['f2'] = 'Bar 2'
expected['f3'] = 'Baz 1'
self.assertEqual(data_c2.extracted, expected)
self.assertEqual(data_c2.errors, [])
data_f2 = data['c2']['f2']
self.assertEqual(data_f2.name, 'f2')
self.assertEqual(data_f2.value, 'Bar')
self.assertEqual(data_f2.extracted, 'Bar 2')
self.assertEqual(data_f2.errors, [])
data_f3 = data['c2']['f3']
self.assertEqual(data_f3.name, 'f3')
self.assertEqual(data_f3.value, 'Baz')
self.assertEqual(data_f3.extracted, 'Baz 1')
self.assertEqual(data_f3.errors, [])
# c3
data_c3 = data['c3']
self.assertEqual(data_c3.name, 'c3')
self.assertEqual(data_c3.value, UNSET)
expected = odict()
expected['f4'] = UNSET
self.assertEqual(data_c3.extracted, expected)
self.assertEqual(data_c3.errors, [])
data_f4 = data['c3']['f4']
self.assertEqual(data_f4.name, 'f4')
self.assertEqual(data_f4.value, UNSET)
self.assertEqual(data_f4.extracted, UNSET)
self.assertEqual(data_f4.errors, [])
def test_compound_blueprint_value_callbacks(self):
# Check compound with value callbacks
def val(widget, data):
return 'val F1'
value = {
'f1': val,
}
compound = factory(
'compound',
'comp',
value=value)
compound['f1'] = factory('text')
self.assertEqual(compound(), (
'<input class="text" id="input-comp-f1" name="comp.f1" '
'type="text" value="val F1" />'
))
data = compound.extract({'comp.f1': 'New val 1'})
self.assertEqual(data.name, 'comp')
self.assertEqual(data.value, {'f1': val})
expected = odict()
expected['f1'] = 'New val 1'
self.assertEqual(data.extracted, expected)
self.assertEqual(data.errors, [])
def value(widget, data):
return {
'f1': 'F1 Val'
}
compound = factory(
'compound',
'comp',
value=value)
compound['f1'] = factory('text')
self.assertEqual(compound(), (
'<input class="text" id="input-comp-f1" name="comp.f1" '
'type="text" value="F1 Val" />'
))
data = compound.extract({'comp.f1': 'New val 1'})
self.assertEqual(data.name, 'comp')
self.assertEqual(data.value, {'f1': 'F1 Val'})
expected = odict()
expected['f1'] = 'New val 1'
self.assertEqual(data.extracted, expected)
self.assertEqual(data.errors, [])
def test_div_blueprint_compound(self):
# Div blueprint as compound
div = factory(
'div',
name='DIV_COMPOUND')
div['inner'] = factory(
'text',
value='value1')
div['inner2'] = factory(
'text',
value='value2',
props={
'required': True
})
self.check_output("""
<div>
<input class="text" id="input-DIV_COMPOUND-inner"
name="DIV_COMPOUND.inner" type="text" value="value1"/>
<input class="required text" id="input-DIV_COMPOUND-inner2"
name="DIV_COMPOUND.inner2" required="required" type="text"
value="value2"/>
</div>
""", fxml(div()))
data = div.extract({
'DIV_COMPOUND.inner': '1',
'DIV_COMPOUND.inner2': '2',
})
self.assertEqual(data.name, 'DIV_COMPOUND')
self.assertEqual(data.value, UNSET)
expected = odict()
expected['inner'] = '1'
expected['inner2'] = '2'
self.assertEqual(data.extracted, expected)
self.assertEqual(data.errors, [])
data_inner = data['inner']
self.assertEqual(data_inner.name, 'inner')
self.assertEqual(data_inner.value, 'value1')
self.assertEqual(data_inner.extracted, '1')
self.assertEqual(data_inner.errors, [])
data_inner2 = data['inner2']
self.assertEqual(data_inner2.name, 'inner2')
self.assertEqual(data_inner2.value, 'value2')
self.assertEqual(data_inner2.extracted, '2')
self.assertEqual(data_inner2.errors, [])
def test_div_blueprint_compound_leaf(self):
# Div blueprint as compound, but with ``leaf`` property set. Causes
# ``hybrid_renderer`` and ``hybrid_extractor`` to skip auto delegating
# to ``compound_renderer`` and ``compound_extractor``
div = factory(
'div',
name='DIV_COMPOUND_LEAF',
props={
'leaf': True
})
div['inner'] = factory(
'text',
value='value1')
div['inner2'] = factory(
'text',
value='value2',
props={
'required': True
})
self.assertEqual(div(), '<div></div>')
data = div.extract({
'DIV_COMPOUND_LEAF.inner': '1',
'DIV_COMPOUND_LEAF.inner2': '2',
})
self.assertEqual(data.name, 'DIV_COMPOUND_LEAF')
self.assertEqual(data.value, UNSET)
self.assertEqual(data.extracted, UNSET)
self.assertEqual(data.errors, [])
self.assertEqual(data.keys(), [])
def test_div_blueprint_as_leaf(self):
# Div blueprint as leaf
input = factory(
'div:text',
name='DIV',
value='1')
self.check_output("""
<div>
<input class="text" id="input-DIV" name="DIV"
type="text" value="1"/>
</div>
""", fxml(input()))
data = input.extract({
'DIV': '2',
})
self.assertEqual(data.name, 'DIV')
self.assertEqual(data.value, '1')
self.assertEqual(data.extracted, '2')
self.assertEqual(data.errors, [])
# Empty div
input = factory(
'div',
name='DIV')
self.assertEqual(input(), '<div></div>')
# Div with data attributes
input = factory(
'div',
name='DIV',
props={
'data': {
'foo': 'bar'
}
})
self.assertEqual(input(), "<div data-foo='bar'></div>")
# Display mode
div = factory(
'div',
name='DIV',
props={
'class': 'foo'
},
mode='display')
self.assertEqual(div(), '<div class="foo"></div>')
input = factory(
'div:text',
name='DIV',
value='1',
mode='display')
self.check_output("""
<div>
<div class="display-text" id="display-DIV">1</div>
</div>
""", fxml(input()))
def test_fieldset_blueprint(self):
compound = factory(
'fieldset',
'COMPOUND',
props={
'legend': 'Some Test'
})
compound['inner'] = factory('text', 'inner', 'value')
compound['inner2'] = factory('text', 'inner2', 'value2')
self.check_output("""
<fieldset id="fieldset-COMPOUND">
<legend>Some Test</legend>
<input class="text" id="input-COMPOUND-inner" name="COMPOUND.inner"
type="text" value="value"/>
<input class="text" id="input-COMPOUND-inner2" name="COMPOUND.inner2"
type="text" value="value2"/>
</fieldset>
""", fxml(compound()))
# Structural fieldset renders without id attribute
compound = factory(
'fieldset',
'COMPOUND',
props={
'structural': True
})
self.assertEqual(compound(), '<fieldset></fieldset>')
# Fieldset display renderers are the same as fieldset edit renderers
compound = factory(
'fieldset',
'COMPOUND',
props={
'legend': 'Some Test'
},
mode='display')
self.check_output("""
<fieldset id="fieldset-COMPOUND">
<legend>Some Test</legend>
</fieldset>
""", fxml(compound()))
def test_form_blueprint(self):
# Test Form
form = factory(
'form',
name='FORM',
props={
'action': 'http://fubar.com'
})
self.assertEqual(form(), (
'<form action="http://fubar.com" enctype="multipart/form-data" '
'id="form-FORM" method="post" novalidate="novalidate"></form>'
))
# Form action as callable
def action(widget, data):
return 'http://fubar.com'
form = factory(
'form',
name='FORM',
props={
'action': action
})
self.assertEqual(form(), (
'<form action="http://fubar.com" enctype="multipart/form-data" '
'id="form-FORM" method="post" novalidate="novalidate"></form>'
))
# Form display renderer
form = factory(
'form',
name='FORM',
props={
'action': 'http://fubar.com'
},
mode='display')
self.assertEqual(form(), '<div></div>')
# Create a form with some children
form = factory(
'form',
name='myform',
props={
'action': 'http://www.domain.tld/someform'
})
form['someinput'] = factory(
'label:text',
props={
'label': 'Your Text'
})
self.form_data = None
def formaction(widget, data):
self.form_data = data
def formnext(request):
return 'http://www.domain.tld/result'
form['submit'] = factory(
'submit',
props={
'handler': formaction,
'next': formnext,
'action': True
})
# Render an empty form
self.check_output("""
<form action="http://www.domain.tld/someform"
enctype="multipart/form-data" id="form-myform" method="post"
novalidate="novalidate">
<label for="input-myform-someinput">Your Text</label>
<input class="text" id="input-myform-someinput"
name="myform.someinput" type="text" value=""/>
<input id="input-myform-submit" name="action.myform.submit"
type="submit" value="submit"/>
</form>
""", fxml(form()))
# Get form data out of request (request is expected dict-like)
request = {
'myform.someinput': 'Hello World',
'action.myform.submit': 'submit'
}
Controller(form, request)
form_data = self.form_data
self.assertEqual(form_data.name, 'myform')
self.assertEqual(form_data.value, UNSET)
expected = odict()
expected['someinput'] = 'Hello World'
expected['submit'] = UNSET
self.assertEqual(form_data.extracted, expected)
self.assertEqual(form_data.errors, [])
input_data = form_data['someinput']
self.assertEqual(input_data.name, 'someinput')
self.assertEqual(input_data.value, UNSET)
self.assertEqual(input_data.extracted, 'Hello World')
self.assertEqual(input_data.errors, [])
# submit blueprint gets a runtime data as well, but it's never needed
# or used so far
submit_data = form_data['submit']
self.assertEqual(submit_data.name, 'submit')
self.assertEqual(submit_data.value, UNSET)
self.assertEqual(submit_data.extracted, UNSET)
self.assertEqual(submit_data.errors, [])
del self.form_data
# Form action property can be callable
def action(widget, data):
return 'actionfromcall'
form = factory(
'form',
name='form',
props={
'action': action,
})
self.assertEqual(form(), (
'<form action="actionfromcall" enctype="multipart/form-data" '
'id="form-form" method="post" novalidate="novalidate"></form>'
))
# Create label for field in other compound
form = factory(
'form',
name='form',
props={
'action': 'action'
})
form['label'] = factory(
'label',
props={
'label': 'Foo',
'for': 'field'
})
form['field'] = factory('text')
self.check_output("""
<form action="action" enctype="multipart/form-data" id="form-form"
method="post" novalidate="novalidate">
<label for="input-form-field">Foo</label>
<input class="text" id="input-form-field" name="form.field"
type="text" value=""/>
</form>
""", fxml(form()))
```
#### File: yafowil/tests/test_tsf.py
```python
from node.tests import NodeTestCase
from yafowil.tsf import DummyTranslationStringFactory
class TestTsf(NodeTestCase):
def test_tsf(self):
# Test dummy translation string factory
_ = DummyTranslationStringFactory('yafowil')
self.assertEqual(_.domain, 'yafowil')
self.assertEqual(_('foo'), 'foo')
self.assertEqual(_('bar', default=u'Bar'), 'Bar')
self.assertEqual(
_('baz', default=u'Baz ${bam}', mapping={'bam': 42}),
'Baz 42'
)
```
#### File: src/yafowil/tsf.py
```python
import os
class DummyTranslationStringFactory(object):
"""Dummy Translations string factory.
"""
def __init__(self, domain):
self.domain = domain
def __call__(self, message, default='', mapping={}):
"""Directly create message and return it as is.
"""
message = default or message
if mapping:
for k, v in mapping.items():
message = message.replace('${' + k + '}', str(v))
return message
# try to import framework corresponding translation string factories if no
# test run
if not os.environ.get('TESTRUN_MARKER'):
# pyramid related
try:
from pyramid.i18n import TranslationStringFactory as TSF
except ImportError:
# zope related
try:
from zope.i18nmessageid import MessageFactory as TSF
# fallback ti dummy
except ImportError:
TSF = DummyTranslationStringFactory
# test run, use dummy translation string factory
else:
TSF = DummyTranslationStringFactory #pragma NO COVER
_ = TSF('yafowil')
``` |
{
"source": "2skilled4you/pianokingnftdashboards",
"score": 3
} |
#### File: 2skilled4you/pianokingnftdashboards/app.py
```python
import pandas as pd
from etherscan import Etherscan
import streamlit as st
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
def fmt(x):
print(x)
return '{:.4f}%\n({:.0f})'.format(x, total*x/100)
st.title('Piano King NFT Dashboard')
df = pd.read_csv('resources/output/pianoking_data.csv', sep=',')
values = pd.Series(df['FirstTimeOwner'])
v_counts = values.value_counts()
total = len(values)
fig = plt.figure()
plt.pie(v_counts, labels=v_counts.index, autopct=fmt, shadow=False)
plt.show()
st.subheader('Piano King Primo Wallet Holders')
st.pyplot(fig)
st.subheader('Pa')
``` |
{
"source": "2snchan/roomalloc",
"score": 3
} |
#### File: 2snchan/roomalloc/Room Assignment.py
```python
import random, csv
def myCmp(x, y):
a = x[0]
b = y[0]
return (a>b)-(a<b)
def readCsv(filename):
matrix = []
f = open(filename, 'r')
csvReader = csv.reader(f)
for row in csvReader:
matrix.append(row)
f.close()
n = len(matrix)
if filename == 'team.csv':
for i in range(n):
matrix[i][0] = int(matrix[i][0])
matrix[i][3] = int(matrix[i][3])
matrix.sort()
elif filename == 'data.csv':
for i in range(n):
matrix[i][0] = int(matrix[i][0])
matrix[i][1] = int(matrix[i][1])
matrix.sort()
return matrix
def convert(roomsM, roomsF):
rooms = []
for room in roomsM:
rooms.append('A' + str(room))
for room in roomsF:
rooms.append('B' + str(room))
rooms.sort()
return rooms
def arrangeTickets(rooms, tno, tickets):
r = len(rooms)
t = len(tno)
ticketsList = [[0]*t for i in range(r)]
for ticket in tickets:
try:
j = rooms.index(ticket[2])
k = tno.index(ticket[1])
ticketsList[j][k] += 1
except:
pass
return ticketsList
def countTickets(ticketsList):
ticketsNum = []
for ticketList in ticketsList:
ticketsNum.append(sum(ticketList))
return ticketsNum
def indexMax(nums):
index = [0]
maximum = nums[0]
for i in range(len(nums)):
if nums[i] > maximum:
maximum = nums[i]
index = [i]
elif nums[i] == maximum:
index.append(i)
n = random.randint(0, len(index)-1)
return index[n]
def eraseRow(ticketsList, row):
for j in range(len(ticketsList[row])):
ticketsList[row][j] = 0
def eraseCol(ticketsList, col):
for i in range(len(ticketsList)):
ticketsList[i][col] = 0
def findLucky(ticketList):
ball = []
for i in range(len(ticketList)):
ball.extend([i]*ticketList[i])
lucky = random.randint(0, len(ball)-1)
return ball[lucky]
def drawCompleted(ticketsList):
for i in range(len(ticketsList)):
for j in range(len(ticketsList[0])):
if ticketsList[i][j] > 0:
return False
return True
def drawTickets(ticketsList, rooms, randomAssign):
r = len(ticketsList)
t = len(ticketsList[0])
results = [None] * t
remainedRooms = list(range(r))
while not drawCompleted(ticketsList):
ticketsNum = countTickets(ticketsList)
room = indexMax(ticketsNum)
pair = findLucky(ticketsList[room])
results[pair] = rooms[room]
eraseRow(ticketsList, room)
eraseCol(ticketsList, pair)
temp = remainedRooms.index(room)
del remainedRooms[temp]
if randomAssign:
for i in range(len(results)):
if results[i] == None:
temp = random.randint(0, len(remainedRooms)-1)
room = remainedRooms[temp]
results[i] = rooms[room]
del remainedRooms[temp]
return results
def exportResults(results, teams):
t = len(teams)
matrix = []
for i in range(t):
matrix.append(teams[i] + [results[i]])
f = open('result.csv', 'w')
cw = csv.writer(f, delimiter=',')
for i in range(len(matrix)):
cw.writerow(matrix[i])
f.close()
def exportSQL(results, teams):
f = open('SQL.txt', 'w')
for i in range(len(teams)):
r = results[i]
if r != None:
t1 = teams[i][1]
sn1 = t1[3]+t1[4]+"-"+t1[5]+t1[6]+t1[7]
if t1:
f.write("UPDATE `gaonnuri`.`xe_dorm` SET `rno` = '%s' WHERE `xe_dorm`.`sid` = '%s';" % (r, sn1))
f.write("\n")
t2 = teams[i][2]
sn2 = t2[3]+t2[4]+"-"+t2[5]+t2[6]+t2[7]
if t2:
f.write("UPDATE `gaonnuri`.`xe_dorm` SET `rno` = '%s' WHERE `xe_dorm`.`sid` = '%s';" % (r, sn2))
f.write("\n")
f.close()
def main():
randomAssign = False
roomsM = [103,108,109,110,113,114,118,241,242,309,310,319,323,329,330,342]
roomsF = [206,208,211,212,216,308,312]
rooms = convert(roomsM, roomsF)
teams = readCsv('team.csv')
tno = [teams[i][0] for i in range(len(teams))]
tickets = readCsv('data.csv')
ticketsList = arrangeTickets(rooms, tno, tickets)
results = drawTickets(ticketsList, rooms, randomAssign)
exportResults(results, teams)
exportSQL(results, teams)
print("Room Assignment Completed!!!")
if __name__ == '__main__':
main()
``` |
{
"source": "2spmohanty/NSX-V",
"score": 2
} |
#### File: NSX-V/NSX/NsxOperation.py
```python
__author__ = 'smrutim'
from subprocess import Popen,PIPE,STDOUT,call
import requests
import time
from NsxConfiguration.Vcenter import Datacenter, VDS, Cluster, VCOps
import xmltodict
import json
import paramiko
from pyVmomi import vim
"""
For Any Code changes.
Please update the READ.md file and here also for quick reference.
"""
def get_certificate_value(logger,vcUrl,root_user,root_pass):
command = "openssl x509 -in /etc/vmware-vpx/ssl/rui.crt -fingerprint -sha256 -noout"
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(vcUrl, username=root_user, password=<PASSWORD>)
cert_cmd = "openssl x509 -in /etc/vmware-vpx/ssl/rui.crt -fingerprint -sha256 -noout"
stdin, stdout, stderr = ssh.exec_command(cert_cmd)
while not stdout.channel.exit_status_ready():
time.sleep(2)
certValue = stdout.readlines()[0].strip().split('=')[-1]
logger.info("THREAD - get_certificate_value - The Certificate for VC %s "%certValue)
return certValue
except Exception, e:
logger.error("THREAD - get_certificate_value - Error while Certificate for VC %s "%str(e))
finally:
ssh.close()
def Register_Nsx_To_VC(logger,nsxmanager,vcUrl,username,password,root_user,root_pass):
uri = "https://" + nsxmanager + "/api/2.0/services/vcconfig"
ceryificateValue = get_certificate_value(logger,vcUrl,root_user,root_pass)
ceryificateValue = ceryificateValue.rstrip()
request_body = '''
<vcInfo>
<ipAddress>%(vcUrl)s</ipAddress>
<userName><EMAIL></userName>
<password><PASSWORD></password>
<certificateThumbprint>%(ceryificateValue)s</certificateThumbprint>
<assignRoleToUser>true</assignRoleToUser>
<pluginDownloadServer></pluginDownloadServer>
<pluginDownloadPort></pluginDownloadPort>
</vcInfo>
'''
request_body = request_body % {'vcUrl': vcUrl, 'ceryificateValue': ceryificateValue}
logger.info("THREAD - Register_Nsx_To_VC - The xml body is %s"%request_body)
# Request Body Format
body_format = {'Content-Type': 'application/xml'}
response = requests.put(uri, data=request_body, auth=(username, password), verify=False, headers=body_format)
status = response.status_code
logger.info("THREAD - Register_Nsx_To_VC - Status Code for VC Registration %s"%str(status))
logger.info("THREAD - Register_Nsx_To_VC - Status Response for VC Registration %s "%response.text)
logger.info("THREAD - Register_Nsx_To_VC - Status Content for VC Registration %s"%response.content)
logger.info("THREAD - Register_Nsx_To_VC - Waiting for 60 seconds post VC registration with NSX.")
time.sleep(60)
return status
def Install_VIBs(logger,nsxmanager,clusterObj,username,password):
logger.info("Installing VIBS")
installNwVlzCompURI = "https://" + nsxmanager + "/api/2.0/nwfabric/configure"
request_body = '''
<nwFabricFeatureConfig>
<resourceConfig>
<resourceId>%(CLUSTERMOID)s</resourceId>
</resourceConfig>
</nwFabricFeatureConfig>
'''
logger.info("Getting all clusters managed object reference")
for entity in clusterObj:
clustrName = entity.name
moId = str(entity).strip('\'').split(':')[1]
logger.info("Installing Network Virtualization Components in cluster " + clustrName)
nwComponentRequest = request_body % {'CLUSTERMOID': moId}
# Request Body Format
body_format = {'Content-Type': 'application/xml'}
try:
# API Call
logger.info("THREAD - vibInstall - Initiating vib install on " + clustrName)
response = requests.post(installNwVlzCompURI, data=nwComponentRequest, auth=(username, password),
verify=False, headers=body_format)
logger.info("THREAD - vibInstall - The Status of vib install on %s is %s"% (clustrName,str(response.status_code)))
logger.info("THREAD - vibInstall - The Detail of vib install on %s is %s" % (clustrName, str(response.text)))
except Exception, e:
logger.error("THREAD - vibInstall - The Error during vib install on %s is %s"% (clustrName,str(e)))
return True
######################## Haritej Code ############################################################
def Put_All_Hosts_In_Maintenance(logger,dcMor,clusterNames):
logger.info("THREAD - Put_All_Hosts_In_Maintenance - Getting all Hosts in clusters.")
host_list = Cluster.GetHostsInCluster(dcMor, clusterName=clusterNames)
logger.info("THREAD - Put_All_Hosts_In_Maintenance - Putting all Host in Maintenance Mode.")
try:
for j in host_list:
j.EnterMaintenanceMode_Task(timeout=60, evacuatePoweredOffVms=False)
return True
except Exception, e:
logger.error("THREAD - Put_All_Hosts_In_Maintenance - Putting all Host in Maintenance Mode failed due to %s."%str(e))
return False
def Exit_All_Hosts_In_Maintenance(logger,dcMor,clusterNames):
logger.info("THREAD - Put_All_Hosts_In_Maintenance - Getting all Hosts in clusters.")
host_list = Cluster.GetHostsInCluster(dcMor, clusterName=clusterNames)
logger.info("THREAD - Put_All_Hosts_In_Maintenance - Putting all Host in Maintenance Mode.")
try:
for j in host_list:
j.ExitMaintenanceMode_Task(timeout=60)
return True
except Exception, e:
logger.error("THREAD - Put_All_Hosts_In_Maintenance - Putting all Host in Maintenance Mode failed due to %s."%str(e))
return False
def wait_for_nwfabric_green_new(logger,clusterObject,nsxmanager,username,password):
flag_vxlanstatus = False
vib_status = None
host_list = []
max_retry_count = 0
clustrName = clusterObject.name
cluster_moid = str(clusterObject).strip('\'').split(':')[1]
installNwVlzCompURI = "https://" + nsxmanager + "/api/2.0/nwfabric/configure"
body_format = {'Content-Type': 'application/xml'}
manintenance = None
#15 Attempts (2 Resolves) to check if the Cluster VIB install is Green.(Total would be 15*20 = 300+90 seconds 6 mins 30 secs in worst case)
while (max_retry_count < 16) and (flag_vxlanstatus == False):
CheckNwVlzCompURI = "https://" + nsxmanager + "/api/2.0/nwfabric/" + "status" + "/child/" + cluster_moid
logger.info("THREAD - wait_for_nwfabric_green_new - The URL is " + CheckNwVlzCompURI)
response = requests.get(CheckNwVlzCompURI, auth=(username, password), verify=False)
jsonoutput_status = json.dumps(xmltodict.parse(response.text))
jsonoutput_status = json.loads(jsonoutput_status)
if jsonoutput_status["resourceStatuses"] is None:
logger.info("THREAD - wait_for_nwfabric_green_new - No Host in cluster %s. " % (clustrName))
#In case there is no host in the
return cluster_moid
# The cluster Status
vib_install_array = jsonoutput_status["resourceStatuses"]["resourceStatus"]["nwFabricFeatureStatus"]
for feature in vib_install_array:
if feature["featureId"] == "com.vmware.vshield.vsm.nwfabric.hostPrep":
vib_status = feature["status"]
logger.info("THREAD - wait_for_nwfabric_green_new - VIB Install status is %s " % vib_status)
if feature["status"] == 'GREEN':
flag_vxlanstatus = True
break
elif feature["status"] == 'RED' and (max_retry_count == 7 or max_retry_count == 14):
#Trying to Resolve the Cluster by initiating reinstall after putting Host in maintenance mode
#This attempt would be made twice. If the Hosts doesnot get resolved, Then it would be dropped.
logger.info("THREAD - Putting Hosts in cluster %s Maintenance Mode to initiate Resolve." % clustrName)
try:
manintenance = True
host_list = [h for cl in [clusterObject] for h in cl.host]
for j in host_list:
print("THREAD - Putting Host %s in cluster %s Maintenance Mode to initiate Resolve." % (j.name,clustrName))
j.EnterMaintenanceMode_Task(timeout=60, evacuatePoweredOffVms=False)
except Exception,e:
pass
request_body = '''
<nwFabricFeatureConfig>
<resourceConfig>
<resourceId>%(CLUSTERMOID)s</resourceId>
</resourceConfig>
</nwFabricFeatureConfig>
'''
logger.info("THREAD - wait_for_nwfabric_green_new - Resolving Cluster %s ." % clustrName)
nwComponentRequest = request_body % {'CLUSTERMOID': cluster_moid}
response = requests.post(installNwVlzCompURI, data=nwComponentRequest, auth=(username, password),
verify=False, headers=body_format)
status = str(response.status_code)
if status == "200":
logger.info("THREAD - wait_for_nwfabric_green_new - Reinstall of Vibs in "
"progress for %s."%clustrName)
time.sleep(45)
else:
logger.info("THREAD - wait_for_nwfabric_green_new - "
"Resolving Failed for cluster %s"%clustrName)
return
else:
time.sleep(20)
max_retry_count = max_retry_count + 1
if vib_status == "GREEN":
if manintenance: #This would trigger if the hosts have been put on maintenenance for resolving
try:
for j in host_list:
j.ExitMaintenanceMode_Task(timeout=60)
except Exception, e:
pass
return cluster_moid
else:
return
def Check_Install_Vib_Status(logger,clusterObj,nsxmanager,username,password):
success_cluster_moid = []
for entity in clusterObj:
clustrName = entity.name
logger.info("Checking VIB install status on cluster %s."%clustrName)
#moId = str(entity).strip('\'').split(':')[1]
successMoid = wait_for_nwfabric_green_new(logger,entity,nsxmanager,username,password)
if successMoid:
success_cluster_moid.append(successMoid)
return success_cluster_moid
### VXLAN Configuration
# Create IP Pool
def Create_IP_Pool(logger,nsxmanager,poolname,prefix,gateway,dnsSuffix,dns1,dns2,
startAddress,endAddress,username,password):
ipPoolURI = "https://" + nsxmanager + "/api/2.0/services/ipam/pools/scope/globalroot-0"
ipPoolrequest = '''
<ipamAddressPool>
<name>%(poolname)s</name>
<prefixLength>%(prefix)s</prefixLength>
<gateway>%(gateway)s</gateway>
<dnsSuffix>%(dnsSuffix)s</dnsSuffix>
<dnsServer1>%(dns1)s</dnsServer1>
<dnsServer2>%(dns2)s</dnsServer2>
<ipRanges>
<ipRangeDto>
<startAddress>%(startAddress)s</startAddress>
<endAddress>%(endAddress)s</endAddress>
</ipRangeDto>
</ipRanges>
</ipamAddressPool>
'''
ipPoolRequestBody = ipPoolrequest % {'poolname':poolname,'prefix':prefix,'gateway':gateway,
'dnsSuffix':dnsSuffix,'dns1':dns1,'dns2':dns2,
'startAddress':startAddress,'endAddress':endAddress}
# Request Body Format
body_format = {'Content-Type': 'application/xml'}
try:
# API Call
logger.info("THREAD - Create IP Pool - Initiating Request ")
response = requests.post(ipPoolURI, data=ipPoolRequestBody, auth=(username, password), verify=False,
headers=body_format)
logger.info("THREAD - Create IP Pool - Status code " + str(response.status_code))
ipPoolId = str(response.text)
logger.info("THREAD - Create IP Pool - Response Text or ip pool id is " + ipPoolId)
time.sleep(60)
return ipPoolId
except Exception, e:
logger.error((str(e)))
return None
# Create VXLAN
def Create_VXLAN(logger,nsxmanager,clusterMoidArray,dvsID,ipoolId,username,password):
vxlan_created_array = []
requestVXLANUri = "https://" + nsxmanager + "/api/2.0/nwfabric/configure"
vxlan_request_body = '''
<nwFabricFeatureConfig>
<featureId>com.vmware.vshield.vsm.vxlan</featureId>
<resourceConfig>
<resourceId>%(CLUSTERMOID)s</resourceId>
<configSpec class="clusterMappingSpec">
<switch><objectId>%(DVSMOID)s</objectId></switch>
<vlanId>0</vlanId>
<vmknicCount>1</vmknicCount>
<!-- ipPoolId is optional and if none is specified will assume DHCP for VTEP address assignment.-->
<ipPoolId>%(IPADDRESSPOOLID)s</ipPoolId>
</configSpec>
</resourceConfig>
<resourceConfig>
<resourceId>%(DVSMOID)s</resourceId>
<configSpec class="vdsContext">
<switch><objectId>%(DVSMOID)s</objectId></switch>
<mtu>1600</mtu>
<!-- teaming value can be one of FAILOVER_ORDER|ETHER_CHANNEL|LACP_ACTIVE|LACP_PASSIVE|LOADBALANCE_LOADBASE |LOADBALANCE_SRCID|LOADBALANCE_SRCMAC|LACP_V2 -->
<teaming>FAILOVER_ORDER</teaming>
</configSpec>
</resourceConfig>
</nwFabricFeatureConfig>
'''
body_format = {'Content-Type': 'application/xml'}
for moid in clusterMoidArray:
#clustrName = entity.name
logger.info("THREAD - Config VXLAN - Creating vxlan in cluster %s"%moid)
#clusterMoId = str(entity).strip('\'').split(':')[1]
vxlan_request_data = vxlan_request_body % {'CLUSTERMOID': moid, 'DVSMOID': dvsID,
'IPADDRESSPOOLID': ipoolId}
try:
# API Call
response = requests.post(requestVXLANUri, data=vxlan_request_data, auth=(username, password), verify=False,
headers=body_format)
logger.info("THREAD - Config VXLAN -The Status of vxlan config on %s is %s"%(moid,
str(response.status_code)))
if str(response.status_code) == "200":
vxlan_created_array.append(moid)
except Exception, e:
logger.info("THREAD - Config VXLAN -The vxlan config on %s failed due to %s" % (moid,
str(e)))
return vxlan_created_array
### VXLAN Status
def wait_for_vxlan_green_new(logger,clusterObject,nsxmanager,username,password):
flag_vxlanstatus = False
vib_status = None
host_list = []
max_retry_count = 0
clustrName = clusterObject.name
cluster_moid = str(clusterObject).strip('\'').split(':')[1]
installNwVlzCompURI = "https://" + nsxmanager + "/api/2.0/nwfabric/configure"
body_format = {'Content-Type': 'application/xml'}
manintenance = None
#15 Attempts (2 Resolves) to check if the Cluster VIB install is Green.(Total would be 15*20 = 300+90 seconds 6 mins 30 secs in worst case)
while (max_retry_count < 16) and (flag_vxlanstatus == False):
CheckNwVlzCompURI = "https://" + nsxmanager + "/api/2.0/nwfabric/" + "status" + "/child/" + cluster_moid
logger.info("THREAD - wait_for_vxlan_green_new - The URL is " + CheckNwVlzCompURI)
response = requests.get(CheckNwVlzCompURI, auth=(username, password), verify=False)
jsonoutput_status = json.dumps(xmltodict.parse(response.text))
jsonoutput_status = json.loads(jsonoutput_status)
if jsonoutput_status["resourceStatuses"] is None:
logger.info("THREAD - wait_for_vxlan_green_new - No Host in cluster %s. " % (clustrName))
#In case there is no host in the
return cluster_moid
# The cluster Status
vib_install_array = jsonoutput_status["resourceStatuses"]["resourceStatus"]["nwFabricFeatureStatus"]
for feature in vib_install_array:
if feature["featureId"] == "com.vmware.vshield.vsm.vxlan":
vib_status = feature["status"]
logger.info("THREAD - wait_for_nwfabric_green_new - VIB Install status is %s " % vib_status)
if feature["status"] == 'GREEN':
flag_vxlanstatus = True
break
else:
time.sleep(30)
max_retry_count = max_retry_count + 1
if vib_status == "GREEN":
if manintenance: #This would trigger if the hosts have been put on maintenenance for resolving
try:
for j in host_list:
j.ExitMaintenanceMode_Task(timeout=60)
except Exception, e:
pass
logger.info("THREAD - wait_for_vxlan_green_new - The VXLAN Status is green for " + cluster_moid)
return cluster_moid
else:
return
def Check_VXLAN_Vib_Status(logger,clusterObj,nsxmanager,username,password):
success_vxlan_cluster_moid = []
for moId in clusterObj:
logger.info("THREAD - Check_VXLAN_Vib_Status - Checking VXLAN config status on cluster %s." % moId)
successMoid = wait_for_vxlan_green_new(logger, moId, nsxmanager, username, password)
if successMoid:
success_vxlan_cluster_moid.append(successMoid)
return success_vxlan_cluster_moid
def Create_Transport_Zone(logger,nsxmanager,transportZone,clusterMoids,username,password):
transportZoneURI = "https://" + nsxmanager + "/api/2.0/vdn/scopes"
logger.info("THREAD - Create_Transport_Zone - Create Transport Zone initiated.")
transportZoneRequestBody = '''
<vdnScope>
<name>%(TransportZoneName)s</name>
<clusters>
%(clusterSequenceMoid)s
</clusters>
<virtualWireCount>1</virtualWireCount>
<controlPlaneMode>MULTICAST_MODE</controlPlaneMode>
</vdnScope>
'''
clusterMoid = '''<cluster><cluster><objectId>%(clusmorid)s</objectId></cluster></cluster>
'''
clusterData = ""
body_format = {'Content-Type': 'application/xml'}
for moId in clusterMoids:
clusterData = clusterData + clusterMoid % {'clusmorid': moId}
transportZoneRequestBodyData = transportZoneRequestBody % {'TransportZoneName':transportZone, 'clusterSequenceMoid': clusterData.rstrip(' \n')}
response = requests.post(transportZoneURI, data=transportZoneRequestBodyData, auth=(username, password),
verify=False, headers=body_format)
logger.info("THREAD - Create_Transport_Zone - The Status of transport zone creation config is " + str(response.status_code))
scopeID = response.text # to be used by Logical Switch Creation
logger.debug("THREAD - Create_Transport_Zone - The details of transport zone creation (scope ID) request is " + response.text)
time.sleep(30)
if scopeID and str(response.status_code)=="201":
return scopeID.strip()
else:
return
def Create_Segment(logger,nsxmanager,username,password):
segment_requestURI = "https://" + nsxmanager + "/api/2.0/vdn/config/segments"
logger.info("THREAD - Create_Segment - Creating Segment")
segment_request_Body = '''
<segmentRange>
<id>1</id>
<name>nsx-segment</name>
<desc>Segment for NSX ST Test</desc>
<begin>5000</begin>
<end>10000</end>
</segmentRange>
'''
body_format = {'Content-Type': 'application/xml'}
response = requests.post(segment_requestURI, data=segment_request_Body, auth=(username, password), verify=False,
headers=body_format)
response_status = str(response.status_code)
logger.info("THREAD - Create_Segment - The Status of Segment creation config is " + response_status)
return str(response_status)
def Configure_Multicast(logger,nsxmanager,username,password):
multicast_uri = "https://" + nsxmanager + "/api/2.0/vdn/config/multicasts"
multicast_request_Body = '''
<multicastRange>
<id>2</id>
<name>nsxv-mac</name>
<desc>Multicast Address Range for VCST NSX Tests</desc>
<begin>172.16.17.32</begin>
<end>172.16.58.3</end>
</multicastRange>
'''
body_format = {'Content-Type': 'application/xml'}
response = requests.post(multicast_uri, data=multicast_request_Body, auth=(username, password), verify=False,
headers=body_format)
multicast_status = response.status_code
logger.info("The Status of Multicast creation config is " + str(multicast_status))
logger.debug("The details of Multicast creation request is " + response.text)
time.sleep(30)
return str(multicast_status)
def Create_Logical_Switch(logger,nsxmanager,vdnscopeValue,logicalSwitch,username,password):
virtual_wire_request_URI = "https://" + nsxmanager + "/api/2.0/vdn/scopes/" + vdnscopeValue + "/virtualwires"
logger.info("THREAD - Create_Logical_Switch - Starting creation of Logical Wire")
virtual_wire_request = '''
<virtualWireCreateSpec>
<name>%(logicalSwitch)s</name>
<description>Logical switch creation</description>
<tenantId>virtual wire tenant</tenantId>
<controlPlaneMode>MULTICAST_MODE</controlPlaneMode>
</virtualWireCreateSpec>
'''
virtual_wire_request_body = virtual_wire_request % {'logicalSwitch': logicalSwitch}
body_format = {'Content-Type': 'application/xml'}
response = requests.post(virtual_wire_request_URI, data=virtual_wire_request_body,
auth=(username, password), verify=False, headers=body_format)
virtual_wire_response = str(response.status_code)
virtual_wire = response.text.strip()
logger.info("THREAD - Create_Logical_Switch - The Status of virtual wire " + logicalSwitch + " creation is " + virtual_wire_response)
logger.info("THREAD - Create_Logical_Switch - The details of virtual wire " + logicalSwitch + " creation is " + virtual_wire)
time.sleep(30)
if virtual_wire_response == "200" or virtual_wire_response == "201":
return virtual_wire
else:
return
#Deploy Edge
def Deploy_Edge(logger,nsxmanager,username,password,clusterObjs,clusterName,dcMor,dataStoreName,
portGroupMor,primaryAddressIp,subNet,edgeName):
edge_request_URI = "https://" + nsxmanager + "/api/4.0/edges/"
logger.info("THREAD - Deploy_Edge - Deployment of Edge Started %s"%edge_request_URI)
resourcePoolMor = None
for clusterObj in clusterObjs:
if clusterObj.name == clusterName:
resourcePoolMor = clusterObj.resourcePool
resourcePoolMor = str(resourcePoolMor).strip('\'').split(':')[1]
logger.info("THREAD - Deploy_Edge - The resource pool is " + str(resourcePoolMor))
datastoreMor = None
datastoresMors = dcMor.datastore
for datastore in datastoresMors:
if datastore.info.name in dataStoreName:
datastoreMor = datastore
datastoreMor = str(datastoreMor).strip('\'').split(':')[1]
logger.info("THREAD - Deploy_Edge - The datacenter is " + str(datastoreMor))
edge_request_body = '''
<edge>
<name>%(edgeName)s</name>
<datacenterMoid>%(dataCenter)s</datacenterMoid>
<description>Smruti Router</description>
<appliances>
<applianceSize>compact</applianceSize>
<appliance>
<resourcePoolId>%(resourcePoolMor)s</resourcePoolId>
<datastoreId>%(datastoreMor)s</datastoreId>
</appliance>
</appliances>
<vnics>
<vnic>
<index>0</index>
<type>internal</type>
<portgroupId>%(portGroupMor)s</portgroupId>
<addressGroups>
<addressGroup>
<primaryAddress>%(primaryAddressIp)s</primaryAddress>
<subnetMask>%(subNet)s</subnetMask>
</addressGroup>
</addressGroups>
<mtu>1500</mtu>
<isConnected>true</isConnected>
</vnic>
</vnics>
<features>
<firewall>
<defaultPolicy>
<action>accept</action>
<loggingEnabled>false</loggingEnabled>
</defaultPolicy>
</firewall>
</features>
</edge>
'''
dataCenter = str(dcMor).strip('\'').split(':')[1]
edge_request_body = edge_request_body % {'edgeName': edgeName, 'dataCenter': dataCenter,
'resourcePoolMor': resourcePoolMor,
'datastoreMor': datastoreMor,
'portGroupMor': portGroupMor, 'primaryAddressIp': primaryAddressIp,
'subNet': subNet}
#logger.info("THREAD - Deploy_Edge - The XML requestbody of Edge Installation is \n" + edge_request_body_x)
body_format = {'Content-Type': 'application/xml'}
response = requests.post(edge_request_URI, data=edge_request_body, auth=(username, password), verify=False, headers=body_format)
edge_response_status = str(response.status_code)
logger.info("THREAD - Deploy_Edge - The Status of Edge Installation is " + str(edge_response_status))
if edge_response_status == "200" or edge_response_status == "201":
header = response.headers
location = header.get('Location', None)
return location
else:
return None
def Configure_Ospf_Routing(logger,routerId,nsxmanager,username,password,location):
logger.info("THREAD - Configure_Ospf_Routing - Configuring OSPF routing.")
ospf_xml = '''
<routing>
<routingGlobalConfig>
<routerId>%(routerId)s</routerId>
</routingGlobalConfig>
<ospf>
<enabled>true</enabled>
<ospfAreas>
<ospfArea>
<areaId>100</areaId>
</ospfArea>
</ospfAreas>
<ospfInterfaces>
<ospfInterface>
<vnic>0</vnic>
<areaId>100</areaId>
<mtuIgnore>false</mtuIgnore>
</ospfInterface>
</ospfInterfaces>
<redistribution>
<enabled>true</enabled>
<rules>
<rule>
<from>
<isis>false</isis>
<ospf>true</ospf>
<bgp>true</bgp>
<static>true</static>
<connected>true</connected>
</from>
<action>permit</action>
</rule>
</rules>
</redistribution>
</ospf>
</routing>
'''
body_format = {'Content-Type': 'application/xml'}
ospf_xml = ospf_xml % {'routerId': routerId}
ospf_config_uri = "https://" + nsxmanager + location + "/routing/config"
response = requests.put(ospf_config_uri, data=ospf_xml, auth=(username, password),
verify=False, headers=body_format)
status_code = str(response.status_code)
logger.info("THREAD - Configure_Ospf_Routing - The Status of ospf config is " + status_code)
if status_code == "204":
return True
else:
return False
def Enable_DHCP(logger,nsxmanager,username, password,location,ipRange,defaultGateway,subnetMask):
dhcp_uri = "https://" + nsxmanager + location + "/dhcp/config"
dhcp_edge_req_body = '''
<dhcp>
<enabled>true</enabled>
<ipPools>
<ipPool>
<ipRange>%(ipRange)s</ipRange>
<defaultGateway>%(defaultGateway)s</defaultGateway>
<subnetMask>%(subnetMask)s</subnetMask>
</ipPool>
</ipPools>
</dhcp>
'''
dhcp_edge_req_body = dhcp_edge_req_body % {'ipRange': ipRange, 'defaultGateway': defaultGateway,
'subnetMask': subnetMask}
logger.info("THREAD - Enable_DHCP - Deploying DHCP Service on edge")
body_format = {'Content-Type': 'application/xml'}
response = requests.put(dhcp_uri, data=dhcp_edge_req_body,
auth=(username, password), verify=False, headers=body_format)
status_code = str(response.status_code)
logger.info("THREAD - Enable_DHCP - The Status of DHCP Installation is " + status_code)
if status_code == "204":
return True
else:
return False
def Add_nic_dhcp_enable_vmotion(logger,dcMor,dvSwitch,dvsMor,virtualwire,clusterNameArray):
try:
pg = VDS.getPortName(dcMor, virtualwire, vdsName=dvSwitch)
pgKey = None
for item in pg:
pgKey = item.key
vms = vim.host.VMotionSystem
dvsUuid = dvsMor.uuid
host_list = Cluster.GetHostsInClusters(dcMor, clusterNameArray, connectionState='connected')
for h in host_list:
logger.info("THREAD - Add_nic_enable_vmotion - Trying to add nic to " + h.name)
hostNetworkSys = h.configManager.networkSystem
vMotionSystem = h.configManager.vmotionSystem
vmnicSpec = vim.host.VirtualNic.Specification()
# Nic Specification
ipSpec = vim.host.IpConfig()
ipSpec.dhcp = True
vmnicSpec.ip = ipSpec
# DVPort Specification
dvpgSpec = vim.dvs.PortConnection()
dvpgSpec.switchUuid = dvsUuid
dvpgSpec.portgroupKey = pgKey
vmnicSpec.distributedVirtualPort = dvpgSpec
try:
vmkid = hostNetworkSys.AddVirtualNic("", vmnicSpec)
logger.info("THREAD - Add_nic_enable_vmotion - Enabling vmotion on vmknic " + vmkid + " for " + h.name)
vMotionSystem.SelectVnic(vmkid)
except Exception,e:
logger.error("Failure while Adding Nic and Enabling Vmotion for Host "+ h.name)
except Exception,e:
logger.error("Failure while Geeting DVS or port details for adding to Host " + str(e))
return False
return True
def Add_nic_static_enable_vmotion(logger,dcMor,dvSwitch,dvsMor,virtualwire,clusterNameArray,staticIpArray,subnetMask):
try:
pg = VDS.getPortName(dcMor, virtualwire, vdsName=dvSwitch)
pgKey = None
for item in pg:
pgKey = item.key
vms = vim.host.VMotionSystem
dvsUuid = dvsMor.uuid
host_list = Cluster.GetHostsInClusters(dcMor, clusterNameArray, connectionState='connected')
i = 0
for h in host_list:
logger.info("THREAD - Add_nic_enable_vmotion - Trying to add nic to " + h.name)
hostNetworkSys = h.configManager.networkSystem
vMotionSystem = h.configManager.vmotionSystem
vmnicSpec = vim.host.VirtualNic.Specification()
# Nic Specification
ipSpec = vim.host.IpConfig()
ipSpec.ipAddress = staticIpArray[i]
ipSpec.subnetMask = subnetMask
ipSpec.dhcp = False
vmnicSpec.ip = ipSpec
# DVPort Specification
dvpgSpec = vim.dvs.PortConnection()
dvpgSpec.switchUuid = dvsUuid
dvpgSpec.portgroupKey = pgKey
vmnicSpec.distributedVirtualPort = dvpgSpec
try:
vmkid = hostNetworkSys.AddVirtualNic("", vmnicSpec)
logger.info("THREAD - Add_nic_enable_vmotion - Enabling vmotion on vmknic " + vmkid + " for " + h.name)
vMotionSystem.SelectVnic(vmkid)
except Exception,e:
logger.error("Failure while Adding Nic and Enabling Vmotion for Host "+ h.name)
i = i+1 # Incrementing the IpArray for next IP
except Exception,e:
logger.error("Failure while Geeting DVS or port details for adding to Host " + str(e))
return False
return True
``` |
{
"source": "2spmohanty/Performance",
"score": 2
} |
#### File: framework/common/TaskAnalyzer.py
```python
__author__ = '<NAME>'
"""
Company : VMWare Inc.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
"""
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship,sessionmaker
from sqlalchemy import create_engine,Table, Column, Integer, String, MetaData
import time
from dateutil.parser import parse
from pyVmomi import vim, vmodl
import datetime
import pytz
Base = declarative_base()
class VMStats(Base):
__tablename__ = 'vmstatus'
id = Column(Integer, primary_key=True)
time = Column(Integer)
vmname = Column(String(40))
progress = Column(Integer)
legend = Column(String(100))
def Analyze(logger,si,vm_name,task):
vmsession = None
complete_time = None
epoch = datetime.datetime.utcfromtimestamp(0)
utc = pytz.UTC
begin = epoch.replace(tzinfo=utc)
try:
engine = create_engine('sqlite:///framework/db/%s.db' % vm_name)
# remove_db(engine, db_name)
Base.metadata.create_all(engine)
time.sleep(1)
Base.metadata.bind = engine
vmDbSession = sessionmaker(bind=engine)
vmsession = vmDbSession()
tzinfos = {"UTC": 0}
queue_time = None
# Record The Queue Time
#queue_time = parse(str(task.info.queueTime),tzinfos=tzinfos).strftime('%s')
q_time = task.info.queueTime
logger.info("VM %s Operation Queue Time %s" % (vm_name, q_time))
q_time = q_time.replace(tzinfo=utc)
queue_time = (q_time - begin).total_seconds() * 1000.0
vmStatQ = VMStats(time=queue_time, vmname=vm_name, legend="Queued",progress = 0)
vmsession.add(vmStatQ)
vmsession.commit()
#logger.debug("VM %s Operation Queue Time %s" % (vm_name, task.info.queueTime))
start_time = None
run_loop = True
# Record Progress
startRecorded = False
while run_loop:
#logger.debug("VM %s Operation progress Time %s" % (vm_name, si.CurrentTime()))
#progress_time = parse(str(si.CurrentTime()), tzinfos=tzinfos).strftime('%s')
#migrationProgress = task.info.progress if task.info.progress else 0
p_time = si.CurrentTime()
p_time = p_time.replace(tzinfo=utc)
progress_time = (p_time - begin).total_seconds() * 1000.0
if task.info.state == vim.TaskInfo.State.running:
if not startRecorded:
# Record The Start Time
s_time = task.info.startTime
s_time = s_time.replace(tzinfo=utc)
start_time = (s_time - begin).total_seconds() * 1000.0
#start_time = parse(str(task.info.startTime), tzinfos=tzinfos).strftime('%s')
vmStatS = VMStats(time=start_time, vmname=vm_name, legend="Started", progress=task.info.progress)
vmsession.add(vmStatS)
vmsession.commit()
# clone_stamp["StartTime"] = str(start_time)
logger.info("VM %s Operation Start Time %s" % (vm_name, start_time))
startRecorded = True
#logger.info("VM %s Migration Start Time %s" % (vm_name, start_time))
else:
vmStatS = VMStats(time=progress_time, vmname=vm_name, legend="Running", progress=task.info.progress)
vmsession.add(vmStatS)
vmsession.commit()
time.sleep(5)
if task.info.state == vim.TaskInfo.State.success:
if task.info.result is not None:
out = '%s Operation completed successfully, result: %s' % (vm_name, task.info.result)
logger.info(out)
"""
#complete_time = parse(str(task.info.completeTime),tzinfos=tzinfos).strftime('%s')
#clone_stamp["CompleteTime"] = str(complete_time)
vmStatC = VMStats(time=str(complete_time), vmname=vm_name, legend="Completed", progress=100 if not task.info.progress else task.info.progress)
vmsession.add(vmStatC)
vmsession.commit()
time.sleep(5)
run_loop = False
"""
else:
out = '%s Operation completed successfully.' % vm_name
logger.info(out)
"""
complete_time = parse(str(task.info.completeTime),tzinfos=tzinfos).strftime('%s')
#clone_stamp["CompleteTime"] = str(complete_time)
vmStatE = VMStats(time=complete_time, vmname=vm_name, legend="Completed", progress=100 if not task.info.progress else task.info.progress)
vmsession.add(vmStatE)
vmsession.commit()
time.sleep(5)
"""
c_time = task.info.completeTime
c_time = c_time.replace(tzinfo=utc)
complete_time = (c_time - begin).total_seconds() * 1000.0
vmStatE = VMStats(time=complete_time, vmname=vm_name, legend="Completed",
progress=100 if not task.info.progress else task.info.progress)
vmsession.add(vmStatE)
vmsession.commit()
time.sleep(5)
run_loop = False
elif task.info.error is not None:
out = '%s Operation did not complete successfully: %s' % (vm_name, task.info.error)
logger.error(out)
"""
complete_time = parse(str(task.info.completeTime),tzinfos=tzinfos).strftime('%s')
vmStatE = VMStats(time=complete_time, vmname=vm_name, legend=str(task.info.error), progress=0)
vmsession.add(vmStatE)
vmsession.commit()
"""
c_time = task.info.completeTime
c_time = c_time.replace(tzinfo=utc)
complete_time = (c_time - begin).total_seconds() * 1000.0
vmStatE = VMStats(time=complete_time, vmname=vm_name, legend=str(task.info.error),
progress=100 if not task.info.progress else task.info.progress)
vmsession.add(vmStatE)
vmsession.commit()
run_loop = False
else:
logger.info('%s Operation status: %s' % (vm_name, task.info.state))
except Exception, e:
logger.error("%s - Error while entring data to Task DB %s"%(vm_name,e))
finally:
vmsession.close()
logger.info("VM %s Operation Complete Time %s" % (vm_name, complete_time))
#cloning_time_stamp[vm_name] = clone_stamp
```
#### File: framework/vcenter/simpleTimer.py
```python
__author__ = 'smrutim'
## @file Timer.py
## @brief Simple timer utility to calculate the time interval for certain function
"""
For Any Code changes.
Please update the READ.md file and here also for quick reference.
"""
import time
class Timer:
def __enter__(self):
self.start = time.time()
def __exit__(self, *args):
self.end = time.time()
self.interval = self.end - self.start
def reset(self):
self.interval = 0
def start(self):
self.start = time.time()
def stop(self):
self.end = time.time()
self.interval = self.end - self.start
def getInterval(self):
return self.interval
``` |
{
"source": "2spmohanty/vcenter-automation",
"score": 2
} |
#### File: rip/modules/ah64Master.py
```python
import time
import re
import traceback
from customSSH import RunCmdOverSSH
from misc import DownloadFileFromVC,DownloadVCCore
from pyVim.connect import SmartConnect
from pyVim.connect import Disconnect
import atexit
import ssl
from vcenter import GetObjectsCountInVCInventory, CompareCounts
import datetime
def _AlternateAH64Install(host,username,password):
ah64Path = '/root/ah64'
altah64Url = "wget -O %s https://10.172.46.209/rip/static/Corefiles/ah64 --no-check-certificate"%ah64Path
(ret, stdout, stderr) = RunCmdOverSSH(altah64Url, host, username, password)
if ret == 0:
#print("THREAD - MAIN - AH64 import successful from local server.")
try:
changePermissionDir = "chmod 777 %s"%ah64Path
(ret, stdout, stderr) = RunCmdOverSSH(changePermissionDir, host, username, password)
if ret == 0:
#print("THREAD - main - Granting permission to ah64.")
(ret, stdout, stderr) = RunCmdOverSSH(changePermissionDir, host, username, password)
if ret == 0:
#print("THREAD - main - Granting permission to ah64 success.")
return ah64Path
except Exception, e:
#print("THREAD - MAIN - Permission to ah64 failed %s."%str(e))
return None
else:
raise Exception(
"THREAD -ERROR- MAIN - Failure while getting ah64 from local server. %s" % (stderr))
def _DownloadAH64ToVC(host, username, password):
'''Download ah64 to the local machine'''
ah64_url = 'http://engweb.eng.vmware.com/~tim/ah64'
ah64Path = '/root/ah64'
ah64_download_cmd = "wget -O %s %s"%(ah64Path,ah64_url)
ah64_grant_perm = 'chmod 777 %s'%ah64Path
try:
startTime = time.time()
#print("THREAD - main - Downloading ah64 from Tim server to VC %s"%host)
(ret, stdout, stderr) = RunCmdOverSSH(ah64_download_cmd, host, username, password)
#print("THREAD - main - %s"%str(stdout))
if ret == 0:
DownloadTime = time.time() - startTime
#print("THREAD - main - Time taken to download ah64 : %d sec" % DownloadTime)
#print("THREAD - main - Granting permission to ah64")
(ret, stdout, stderr) = RunCmdOverSSH(ah64_grant_perm, host, username, password)
if ret == 0:
#print("THREAD - main - Granting permission to ah64 success")
return ah64Path
else:
#print("THREAD - main - Chap downloading failed from Tim Server. Following alternate path")
_AlternateAH64Install(host, username, password)
except Exception as e:
#print(" Error while retrieving ah64 from Tim's Server %s : %s" % (ah64_url,str(e)))
return None
#os.chmod(chapPath, 0777)
return ah64Path
def RunAh64(ah64Cmd, vc, vcUser, vcPwd, vcVersion,vcBuild,corefile,getSymReqs=False):
'''Run ah64 on VC Host and print the output'''
runTime = datetime.datetime.now().strftime("%d-%m-%y:%H:%M:%S")
#print("%s VC %s vcBuild is %s" % (runTime,vc,vcBuild))
numRetry = 1
TotalTry = 3
ret = None
stdout = None
stderr = None
while (numRetry <= TotalTry):
(ret, stdout, stderr) = RunCmdOverSSH(ah64Cmd, vc, vcUser,vcPwd, timeout=3600)
# Remove below comment
#print("ah64 command ran: %s" % ah64Cmd)
s = "Symbolic information is not yet available"
#print("VC %s Returned: %s" %(vc,str(ret)))
#print("VC %s Output: %s" % (vc,str(stdout)))
#print("VC %s Error: %s" % (vc,str(stderr)))
if stdout and s in stdout:
#print("VC %s Found string in the ah64 output: '%s'. Will attempt to generate symbols if needed." % (vc,s))
if getSymReqs is True:
try:
GetVpxdSymbols(vc, vcUser, vcPwd, corefile,vcVersion,vcBuild)
except Exception as e:
#print("VC %s Exception raised while getting symbols for vpxd: %s " % (vc,str(e)))
#print("VC %s Traceback: %s" % (vc,traceback.format_exc()))
raise
if (s not in str(stdout)) or getSymReqs is False:
##print ('\n' + ('*'*25) + ' VC %s AH64 OUTPUT'%vc + ('*'*25) + '\n')
#print (" RETURN VALUE = %d" %ret)
##print ("STDOUT : \n %s" %stdout)
print (("STDERR: \n %s \n" % stderr)+ ('*' *60))
break
numRetry += 1
if numRetry > TotalTry:
#print ("Could not run Ah64 successfully on VC %s with necessary symbols after %s attempts, GIVING UP." % (vc,TotalTry))
raise Exception("Could not run Ah64 successfully with necessary symbols ")
return (ret, stdout, stderr)
def instalVpxdSymbol(vc, vcUser, vcPwd,version,build):
installState = False
debugFileName = "VMware-vpxd-debuginfo-"+str(version)+"-"+str(build)+".x86_64.rpm"
debugFilePath = "http://build-squid.eng.vmware.com/build/mts/release/bora-"+str(build)+\
"/publish/"+debugFileName
getDebugFileCmd = 'wget -O /var/core/%s %s'%(debugFileName,debugFilePath)
#print("VC %s Trying to get debug files from buildweb :%s " % (vc,getDebugFileCmd))
(ret, stdout, stderr) = RunCmdOverSSH(getDebugFileCmd, vc, vcUser, vcPwd, timeout=3600)
if ret != 0:
raise Exception("Failed to get debug file %s to VC %s due to %s" % (debugFileName,vc, str(stderr)))
else:
pass
#print("VC %s Debug file downloaded"%vc)
installRpmCmd = "rpm -i /var/core/" + debugFileName
#print("Installing Debug Symbols : " + debugFileName)
(ret, stdout, stderr) = RunCmdOverSSH(installRpmCmd, vc, vcUser, vcPwd, timeout=1800)
if ret != 0:
raise Exception("Failed to install %s in VC %s due to %s" % (debugFileName, vc, str(stderr)))
else:
installState = True
return installState
def GetDebugFileType(f, vc, vcUser, vcPwd):
'''Get file type for a given file in VC'''
checkFileCmd = 'file %s' % f
#print("Check for file type in VC (developers build). cmd: %s" % checkFileCmd)
(ret, stdout, stderr) = RunCmdOverSSH(checkFileCmd, vc, vcUser,vcPwd, timeout=1800)
#print("ret=%d, stdout=%s, stderr=%s" % (ret, stdout, stderr))
fileInfo = {'name':'%s'%f, 'exists':True, 'ftype':''}
if 'No such file or directory' in stdout:
fileInfo['exists'] = False
fileInfo['ftype'] = None
elif 'broken symbolic link' in stdout:
fileInfo['ftype'] = 'brokenSymbolicLink'
elif stdout.startswith('symbolic link to'):
fileInfo['ftype'] = 'symbolicLink'
else:
fileInfo['ftype'] = 'regular'
return fileInfo
def CheckDebugFilesInDevBuild(vc, vcUser, vcPwd,version,build,corefile):
'''Check if dev build style VC has required files to generate debug
symbols'''
vpxdInfo = GetDebugFileType('/usr/lib/vmware-vpx/vpxd',vc,vcUser,vcPwd)
vpxdDebugInfo = GetDebugFileType('/usr/lib/debug/usr/lib/vmware-vpx/vpxd.debug',
vc,vcUser,vcPwd)
"""
buildVpxdMsg = "Please make sure to build vpxd target. For vcenter: "\
"'scons PRODUCT=vcenter vpxd'. Run load-vc after building"\
" vpxd"
"""
buildVpxdMsg = "Attempting to Install vpxd Symbols."
#If vpxd exists as a file and vpxd.debug does not, suggest that symbols
#should be installed
if vpxdInfo['exists'] and not vpxdDebugInfo['exists']:
"""
#print('**File %s exists but file %s does not exist. Please make sure '\
'symbols are installed. %s **' %(vpxdInfo['name'],
vpxdDebugInfo['name'],buildVpxdMsg))
"""
installState = instalVpxdSymbol(vc, vcUser, vcPwd,version,build)
return installState
#If vpxd is a link and vpxd.debug does not exist, that probably means that
#load-vc was run but didn't complete properly.
if vpxdInfo['ftype'] == 'symbolicLink' and not vpxdDebugInfo['exists']:
"""
#print('**%s file is a link, %s does not exist.load-vc probably failed'\
'to set up links properly. %s**' % (vpxdInfo['name'],
vpxdDebugInfo['name'],buildVpxdMsg))
"""
installState = instalVpxdSymbol(vc, vcUser, vcPwd, version, build)
return installState
#If either symbolic link is broken, flag that broken link
if vpxdInfo['ftype'] == 'brokenSymbolicLink':
"""
#print('**Symbolic link broken for %s. Please check your tree**' %
vpxdInfo['name'])
"""
installState = instalVpxdSymbol(vc, vcUser, vcPwd,version,build)
return installState
if vpxdDebugInfo['ftype'] == 'brokenSymbolicLink':
"""
#print('**Symbolic link broken for %s. Please check your tree**' %
vpxdDebugInfo['name'])
"""
installState = instalVpxdSymbol(vc, vcUser, vcPwd, version, build)
return installState
#If one is a file and one is a link,the symbols are probably not consistent
#with the binaries
if vpxdInfo['ftype'] != vpxdDebugInfo['ftype']:
"""
#print('**The file type for files are not same.File type for file %s'\
' is %s. File type for file %s is %s.This suggests that the '\
'symbols are probably not consistent with the binaries**' %
(vpxdInfo['name'], vpxdInfo['ftype'], vpxdDebugInfo['name'],
vpxdDebugInfo['ftype']))
"""
installState = instalVpxdSymbol(vc, vcUser, vcPwd, version, build)
return installState
#If both the files are either symbolic link or both are regular files,can
#proceed with the checks
if ((vpxdInfo['ftype'] != '') and (vpxdInfo['ftype'] == vpxdDebugInfo['ftype'])):
#print("Both files have same file type: %s.Will try to generate debugsymbols on this VC" % vpxdInfo['ftype'])
symDefGenCmd = "echo source %s.symreqs | gdb -c %s /usr/lib/vmware-vpx/vpxd" % (corefile, corefile)
(ret, stdout, stderr) = RunCmdOverSSH(symDefGenCmd, vc, vcUser, vcPwd, timeout=600)
print "Coming Here"
if ret==0:
return True
else:
return False
#////////////////////////////////////////////////////////////////////////////////#
def GetVpxdSymbols(vc, vcUser, vcPwd, corefile,version,vcBuild):
# check if symdefs file for the pid already exists, use that file.
#print("Checking if there is an existing usable symdef file....")
pidCmd = 'pidof vpxd'
#print("Get pid of vpxd. cmd: %s" % pidCmd)
(ret, stdout, stderr) = RunCmdOverSSH(pidCmd, vc, vcUser,vcPwd, timeout=3600)
#print("ret=%d, stdout=%s, stderr=%s" % (ret, stdout, stderr))
vpxdPid = stdout
#vpxdPid = "9020" #Remove this .. Debug Only
dirListCmd = 'ls /var/core'
#print("Listing files in remote dir. cmd: %s" % dirListCmd)
(ret, stdout, stderr) = RunCmdOverSSH(dirListCmd, vc, vcUser,vcPwd, timeout=3600)
#(ret, stdout, stderr) = RunCmdOverSSH(vc,vcLocalUser,vcLocalPwd,dirListCmd)
#print("ret=%d, stdout=%s, stderr=%s" % (ret, stdout, stderr))
files = stdout.split('\n')
symDefFound = False
symDefFile = None
for f in files:
if re.match('livecore(.)*\.%s\.symdefs'%vpxdPid, f):
symDefFound = True
#print("Found an existing symdefs file:%s for pid=%s. Will try touse it." % (f,vpxdPid))
symDefFile = f
break
if symDefFile:
createSymlinkCmd = 'ln -s /var/core/%s %s.symdefs' % (symDefFile,corefile)
#print("Creating symlink to existing symdef file. cmd: %s"% createSymlinkCmd)
(ret, stdout, stderr) = RunCmdOverSSH(createSymlinkCmd, vc, vcUser,vcPwd, timeout=3600)
#print("ret=%d, stdout=%s, stderr=%s" % (ret, stdout, stderr))
return True
if vcBuild and version:
#Check if the correct debug files exists
#print("This is developer's build...")
#print("Initiating Symdef file generation..")
reqdFileExists = CheckDebugFilesInDevBuild(vc, vcUser,vcPwd,version,vcBuild,corefile)
if not reqdFileExists:
raise Exception("Files necessary on the dev build VC does not exist,"\
"Please check logs for details")
else:
raise Exception("VC Build is not specified for VC %s. Symdefs file could not be generated."
"Memory growth Analysis is quitting now.")
return True
#change the user in below function to root user
def GetVCMoCounts(vc, vcUser, vcPwd, remoteAh64Path, corefile, vcVersion, vcBuild, invtObjMap):
'''Get MoCounts running ah64 cmd '''
ah64Cmd = "echo summarize allocated | %s %s"%(remoteAh64Path,corefile)
try:
#print("Getting Allocated chunks in VC by running Debugger tool.")
(ret, stdout, stderr) = RunAh64(ah64Cmd, vc, vcUser, vcPwd,
vcVersion, vcBuild,corefile,getSymReqs=True)
if ret != 0:
raise Exception("ah64 cmd failed to get allocated summary. ret=%d,\
ah64Cmd=%s" % (ret, ah64Cmd))
except Exception as e:
#print("Exception raise when running Ah64: %s" % str(e))
#print("Traceback: %s" % traceback.format_exc())
raise Exception(str(e))
# Getting managed objects to compare with VC Inventory
#print("Getting Managed objects in %s by running parser query."%vc)
managedObjects = {}
# Build the managed objects dict
for obj in invtObjMap.values():
managedObjects[obj + 'Mo'] = 0
moStr = "|".join(str(Mo) for Mo in managedObjects)
##print "Debug: The moStr is "+ str(moStr)
# Parse allocated managed objects
output = stdout.split('\n')
p = re.compile("[A-Za-z0-9\s]* \((%s)\) has (\d+) instances" % moStr)
for line in output:
m = p.match(line)
if m:
managedObjects[m.group(1)] = int(m.group(2))
if not managedObjects:
pass
#print("AH64 RUN DID NOT RETURN VALID OUTPUT ")
#print "Debug: The managed objects are "+ str(managedObjects)
return managedObjects
def GetSI(vc, vcLocalUser, vcLocalPwd):
si = None
try:
context = ssl._create_unverified_context()
si = SmartConnect(host=vc, user=vcLocalUser, pwd=<PASSWORD>, port=443, sslContext=context)
except IOError, e:
pass
except Exception,e1:
raise
return si
def CheckMemGrowth(vc, vcUser, vcPwd, vcLocalUser, vcLocalPwd, vcVersion,vcBuild):
'''Check for memory growth in VC'''
MemGrowthMasterDict={}
try:
try:
remoteAh64Path = _DownloadAH64ToVC(vc, vcUser, vcPwd)
except Exception as e:
#print("Exception raised while getting ah64 : %s" % str(e))
#print("Traceback: %s" % traceback.format_exc())
raise
# Inventory Object map with (obj type , obj name) records
invtObjMap = {'vim.Datastore': 'Datastore', 'vim.Folder': 'Folder', \
'vim.VirtualMachine': 'Vm', 'vim.HostSystem': 'Host', 'vim.Network': 'Network'}
try:
#print("Getting connection to Vcenter")
si = GetSI(vc, vcLocalUser, vcLocalPwd)
atexit.register(Disconnect, si)
#print("Successfully got connection to VC %s"% vc)
except Exception, e:
return "Error while connecting: " + str(e)
#print("Getting Inventory Objects count in VC using VMODL Query %s" % vc)
invtCounts, moIdList = GetObjectsCountInVCInventory(si, invtObjMap)
#print("Inventory Object count in %s VC is %s" % (vc, str(invtCounts)))
totalRetry = 2
numRetry = 1
moCounts = None
while(numRetry <= totalRetry):
try:
generate_core_cmd = "/usr/lib/vmware-vmon/vmon-cli -d vpxd"
(ret, stdout, stderr) = RunCmdOverSSH(generate_core_cmd, vc, vcUser, vcPwd,timeout=1800)
s = "Completed dump service livecore request"
corefile = None
if stdout and s in stdout and ret == 0:
corefile = stdout.split()[-1]
#print("THREAD- %s - The core file for service is at %s" % (vc, corefile))
except Exception as e:
return "Exception raised while generating VC %s core: %s" % (vc,str(e))
#print("Getting Managed Object count in VC from core file %s" % vc)
moCounts = GetVCMoCounts(vc, vcUser, vcPwd, remoteAh64Path,corefile, vcVersion,vcBuild,invtObjMap)
#print("Managed Object count in VC %s is %s" % (vc, moCounts))
if not moCounts:
errMsg = ('\nFailed to run ah64 on %s, Managed Objects were returned '\
'as None' % vc)
#print("%s" % errMsg)
return "%s" % errMsg
countsMismatch, diffCounts = CompareCounts(moCounts, invtCounts)
MemGrowthDict = {}
if countsMismatch:
#print("Managed Object counts and Inventory counts did not match, ATTEMPT# %s" %numRetry)
#print("Extra objects found at the end of ATTEMPT# %s: %s" % (numRetry, diffCounts))
MemGrowthDict["MOR in VC"] = sorted(invtCounts.items())
MemGrowthDict["MOR in Core"] = vc,sorted(moCounts.items())
MemGrowthMasterDict[numRetry] = MemGrowthDict
time.sleep(5)
numRetry += 1
else:
MemGrowthDict["MOR in VC"] = sorted(invtCounts.items())
MemGrowthDict["MOR in Core"] = vc, sorted(moCounts.items())
MemGrowthMasterDict[numRetry] = MemGrowthDict
break
if numRetry > totalRetry:
memoryGrowthMsg = "MEMORY GROWTH FOUND"
MemGrowthMasterDict["Analysis"] = memoryGrowthMsg
#print("%s" % memoryGrowthMsg)
else:
noMemoryGrowthMsg = ('VC: %s - No Memory Growth found after ATTEMPT# %s' % (vc,numRetry))
MemGrowthMasterDict["Analysis"] = noMemoryGrowthMsg
#print("%s" % noMemoryGrowthMsg)
finally:
#print("The Memory growth Test is over for VC %s."%vc)
return MemGrowthMasterDict
```
#### File: rip/modules/Analytics.py
```python
__author__ = 'smrutim'
import requests
import json
def PostVpxData():
URI = "https://vcsa.vmware.com/ph-stg/api/hyper/send?_c=cpbu_vcst_vac_staging.v0&_i=RIP_STAGING_DATA"
def PostHeapAnalysisDate():
pass
def PostMemoryLeakData():
pass
```
#### File: rip/modules/chapMaster.py
```python
import time
import re
import traceback
from misc import DownloadFileFromVC,DownloadVCCore
from pyVim.connect import SmartConnect
from pyVim.connect import Disconnect
import atexit
import ssl
from vcenter import GetObjectsCountInVCInventory, CompareCounts
import urllib
import os
import subprocess
from customSSH import SFTPManager,RunCmdOverSSH
import multiprocessing
from multiprocessing.dummy import Pool as ThreadPool
def _AlternateChapInstall(host,username,password):
chapPath = '/root/chap'
altChapUrl = "wget -O %s https://10.172.46.209/rip/static/Corefiles/chap --no-check-certificate"%chapPath
(ret, stdout, stderr) = RunCmdOverSSH(altChapUrl, host, username, password)
if ret == 0:
#print("THREAD - MAIN - Chap import successful from local server.")
try:
changePermissionDir = "chmod 777 %s"%chapPath
(ret, stdout, stderr) = RunCmdOverSSH(changePermissionDir, host, username, password)
if ret == 0:
#print("THREAD - main - Granting permission to chap.")
(ret, stdout, stderr) = RunCmdOverSSH(changePermissionDir, host, username, password)
if ret == 0:
#print("THREAD - main - Granting permission to chap success.")
return chapPath
except Exception, e:
#print("THREAD - MAIN - Permission to chap failed %s."%str(e))
return None
else:
return
def _DownloadChapToVC(host, username, password):
'''Download chap to the local machine'''
chap_url = 'http://engweb.eng.vmware.com/~tim/chap'
chapPath = '/root/chap'
chap_download_cmd = "wget -O %s %s"%(chapPath,chap_url)
chap_grant_perm = 'chmod 777 %s'%chapPath
try:
startTime = time.time()
#print("THREAD - main - Downloading chap from Tim server to VC %s"%host)
(ret, stdout, stderr) = RunCmdOverSSH(chap_download_cmd, host, username, password)
#print("THREAD - main - %s"%str(stdout))
if ret == 0:
DownloadTime = time.time() - startTime
#print("THREAD - main - Time taken to download chap : %d sec" % DownloadTime)
#print("THREAD - main - Granting permission to chap")
(ret, stdout, stderr) = RunCmdOverSSH(chap_grant_perm, host, username, password)
if ret == 0:
#print("THREAD - main - Granting permission to chap success")
return chapPath
else:
#print("THREAD - main - Chap downloading failed from Tim Server. Following alternate path")
_AlternateChapInstall(host, username, password)
except Exception as e:
#print(" Error while retrieving chap from Tim's Server %s : %s" % (chap_url,str(e)))
return None
#os.chmod(chapPath, 0777)
return chapPath
def RunChap(service,ChapCmd, vc, vcUser, vcPwd):
'''Run chap on VC Host and log the output'''
ret = None
stdout = None
stderr = None
print("Running chap on service %s"%service)
(ret, stdout, stderr) = RunCmdOverSSH(ChapCmd, vc, vcUser,vcPwd, timeout=3600)
# Remove below comment
#log.info("ah64 command ran: %s" % ah64Cmd)
s = "allocations"
#log.debug("THREAD - %s - Returned: %s" % (service,str(ret)))
#log.debug("THREAD - %s - Output: %s" % (service,str(stdout)))
#log.debug("THREAD - %s - Error: %s" % (service,str(stderr)))
if stdout and s in stdout:
#print('\n' + ('*' * 25) + 'CHAP OUTPUT %s'%service + ('*' * 25) + '\n')
#print("THREAD - %s - RETURN VALUE = %d" %(service, ret))
print("THREAD - %s - STDOUT : \n %s" % (service,stdout))
print(("THREAD - %s - STDERR: \n %s \n" % (service,stderr) + ('*' * 60)))
else:
print("THREAD - %s - STDERR: \n %s \n" % (service,"CHAP didn't yield a success result.") + ('*' * 60))
return (ret, stdout, stderr)
######################### VC Operation Memory Leak Multi Threaded Code Begins ###########################
#Synchronized Object to Hold Results
synchObj=multiprocessing.Manager()
mem_result_dict=synchObj.dict()
no_service_running_dict=synchObj.dict()
long_running_dict=synchObj.dict()
exception_service_dict = synchObj.dict()
def core_analysis_handler(service,chapPath,core_file_path,host,username,password):
local_result = {}
#print("THREAD - %s - Triggering CHAP on core %s"%(service,core_file_path))
chapCmd = "echo count leaked | %s %s"%(chapPath,core_file_path)
(ret, stdout, stderr) = RunChap(service,chapCmd,host,username,password)
p = re.compile("\s*(\d+)\s+allocations\s+use(.*)?bytes.*")
m=p.match(stdout)
if m:
#print("THREAD - %s - Analysis Result %s" % (service, m.group(0)))
local_result['Chunks'] = m.group(1)
local_result['Memory Leak (Bytes)'] = m.group(2)
mem_result_dict[service] = local_result
def mem_analysis_handler_wrapper(args):
"""
Wrapping around mem_analysis_handler
"""
return mem_analysis_handler(*args)
def mem_analysis_handler(host,username, password, service, chapPath, core_analysis_pool,
core_analysis_result_pool):
try:
generate_core_cmd = "/usr/lib/vmware-vmon/vmon-cli -d %s" % service
#print("THREAD- %s - Will run command %s" % (service, generate_core_cmd))
(ret, stdout, stderr) = RunCmdOverSSH(generate_core_cmd, host, username, password)
#print("THREAD- %s - Generate core for service returned: %s" % (service,str(ret)))
s = "Completed dump service livecore request"
core_file_path = None
if stdout and s in stdout and ret == 0:
core_file_path = stdout.split()[-1]
#print("THREAD- %s - The core file for service is at %s" % (service, core_file_path))
elif ret is None:
#print("THREAD- %s - The core file for service is taking time." % service)
long_running_dict[service] = "Timeout while generating core. Proceed manually."
else:
#print("THREAD- %s - Error: %s" % (service, str(stderr)))
if ret == 4:
#print("THREAD- %s - It seems the service is not running on the appliance." % (service))
no_service_running_dict[service] = "Service not running on VC"
if core_file_path:
#print('THREAD %s - Starting Analysis of core file ' % service)
core_analysis_result_pool.append(
core_analysis_pool.apply_async(core_analysis_handler, (service,chapPath,core_file_path,host,
username, password)))
else:
exception_service_dict[service] = "Core file could not be generated."
except Exception, e:
#print("THREAD- %s - Exception while Generating cores in VC for %s service %s"%(host,service,str(e)))
exception_service_dict[service] = str(e)
def CheckMemLeak(host, username, password,service_name_array):
finalresults = {}
service_name=[]
implemented_services = ['analytics', 'applmgmt',
'hvc', 'imagebuilder', 'lookupsvc', 'mbcs', 'netdumper', 'perfcharts',
'pschealth', 'rbd', 'rhttpproxy', 'sca', 'statsmonitor', 'trustmanagement',
'updatemgr', 'vcha', 'vmcam', 'vmonapi', 'vmware-postgres-archiver',
'vmware-vpostgres', 'vsan-dps', 'vsan-health', 'vsm','sps']
for s in service_name_array:
if s not in implemented_services:
finalresults["Analysis not implemented"] = finalresults.get("Analysis not implemented", None) + "," + s
else:
service_name.append(s)
exception_services = ["vmdird"]
chapPath = _DownloadChapToVC(host, username, password)
if chapPath is None:
finalresults["Failure"] = "CHAP could not be downloaded to VC."
return finalresults
threads = 10
pool = ThreadPool(threads)
core_analysis_pool = ThreadPool(threads)
core_analysis_result_pool = []
service_specs = []
try:
for service in service_name:
if service not in exception_services:
service_specs.append((host, username, password, service,
chapPath, core_analysis_pool, core_analysis_result_pool))
#print('THREAD - main - Running Memory Analysis Thread pool')
pool.map(mem_analysis_handler_wrapper, service_specs)
#print('THREAD - main - Closing Memory Analysis Thread pool')
pool.close()
pool.join()
# main_logger.debug("THREAD - main - Closing the core analysis thread pool.")
core_analysis_pool.close()
core_analysis_pool.join()
except (KeyboardInterrupt, SystemExit):
print('THREAD - main - Recieved Manual Interrupt Signal. Exiting')
except Exception, e:
finalresults['Internal Error'] = str(e)
return finalresults
mem_result = dict(mem_result_dict)
no_service_running = dict(no_service_running_dict)
long_running = dict(long_running_dict)
exception_service = dict(exception_service_dict)
finalresults['Memory Leaks'] = mem_result
finalresults['Service Not Running'] = no_service_running
finalresults['Cores Generation Failure'] = long_running
finalresults['Failure'] = exception_service
try:
uptimecmd = "uptime -p"
(ret, stdout, stderr) = RunCmdOverSSH(uptimecmd, host, username, password)
if ret != 0:
finalresults["Uptime"] = str(stderr)
else:
finalresults["Uptime"] = str(stdout)
except Exception, e:
finalresults["Uptime"] = "Could not obtain duration of uptime %s." % str(e)
# Get Build
try:
uptimecmd = "grep 'BUILDNUMBER' /etc/vmware/.buildInfo | cut -d\":\" -f2"
(ret, stdout, stderr) = RunCmdOverSSH(uptimecmd, host, username, password)
if ret != 0:
finalresults["Build"] = str(stderr)
else:
finalresults["Build"] = str(stdout)
except Exception, e:
finalresults["Build"] = "Could not obtain Build %s." % str(e)
return finalresults
######################### VC Operation Memory Leak Multi Threaded Code Ends ###########################
```
#### File: rip/modules/exceptions.py
```python
class LogError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return 'Log Error: %s' % str(self.error)
class ConfigError(Exception):
def __init__(self, errors):
self.errors = errors
def __str__(self):
if isinstance(self.errors, basestring):
return 'Config Error: %s' % str(self.errors)
elif len(self.errors) == 1:
return 'Config Error: %s' % str(self.errors[0])
else:
return '\n Config Errors:%s' % \
''.join(['\n * %s' % err for err in self.errors])
class TestManagerError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return 'TestManager Error: %s' % str(self.error)
class NotEnoughESXError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return 'Not Enough ESX Error: %s' % str(self.error)
class VCError(Exception):
def __init__(self, error, object=None):
self.error = error
self.object = object
def __str__(self):
return 'VC Error: %s' % str(self.error)
class APIError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return 'Buildweb API Error: %s' % str(self.error)
class TestFailedError(Exception):
def __init__(self, errors):
self.errors = errors
def __str__(self):
import collections
if isinstance(self.errors, basestring) or \
not isinstance(self.errors, collections.Iterable):
return 'Test failed: %s' % str(self.errors)
elif len(self.errors) == 1:
return 'Test failed: %s' % str(self.errors[0])
else:
return '\n Test failed:%s' % \
''.join(['\n * %s' % err for err in self.errors])
class NimbusError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return 'Nimbus Error: %s' % str(self.error)
class PrereqsError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return 'Prereqs Error: %s' % str(self.error)
class VISLError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return 'vISL Error: %s' % str(self.error)
class CISTestManagerException(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return 'CIS Test Manager Error: %s' % str(self.error)
class InfraError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return 'Infrastructure Error: %s' % str(self.error)
class ProductError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return 'Product Error: %s' % str(self.error)
class TimeoutError(Exception):
def __init__(self, error):
self.error = error
def __str__(self):
return 'Timeout Error: %s' % str(self.error)
```
#### File: site-packages/pyVim/path.py
```python
import re
import traceback
from pyVmomi import Vim
from pyVmomi import VmomiSupport
import pyVim.invt
class FilePathError(Exception):
def __init__(self, text):
self.errorText = text
def __str__(self):
return self.errorText
# Bogus datastore used for unit tests of path conversions
unitTestDsName = 'storage1'
unitTestDsPath = '/vmfs/volumes/%s' % unitTestDsName
unitTestDsUuid = '/vmfs/volumes/cafebabe-cafed00d'
unitTestDsSummary = Vim.Datastore.Summary(name=unitTestDsName,
url=unitTestDsUuid,
accessible=True)
unitTestDsInfo = Vim.Vm.DatastoreInfo(datastore=unitTestDsSummary)
def GetDatastoreList(unitTest=False):
if unitTest:
return [unitTestDsInfo]
else:
envBrowser = pyVim.invt.GetEnv()
cfgTarget = envBrowser.QueryConfigTarget(None)
return cfgTarget.GetDatastore()
def FsPathToTuple(path, unitTest=False):
"""
Transforms fs-like filename to inventory filename tuple
e.g. '/vmfs/volumes/Storage1/foo/bar.vmx' -> ('Storage1','foo/bar.vmx')
e.g. '/vmfs/volumes/some-volume-uuid/a.vmx' -> ('Storage1','a.vmx')
e.g. '/vmfs/volumes/Storage1' -> ('Storage1','')
"""
try:
m = re.match('(/vmfs/volumes/([^/]+))/?(.*)',path)
dsList = GetDatastoreList(unitTest)
for ds in dsList:
datastore = ds.GetDatastore()
if datastore.GetAccessible():
dsname = datastore.GetName()
url = datastore.GetUrl()
(myUrl,myDsname,myFile) = m.groups()
if dsname == myDsname:
print('INFO: Found datastore by name [%s] -> %s' % (dsname,url))
return (dsname, myFile)
if url == myUrl:
print('INFO: Found datastore by url [%s] -> %s' % (dsname,url))
return (dsname, myFile)
raise FilePathError('no datastore found for path "%s"' % path)
except:
traceback.print_exc()
raise FilePathError('path "%s" not valid' % path)
def FsPathToDsPath(path):
"""
Transforms fs-like filename to inventory filename
e.g. '/vmfs/volumes/Storage1/foo/bar.vmx' -> '[Storage1] foo/bar.vmx'
e.g. '/vmfs/volumes/some-fancy-volume-uuid/a.vmx' -> '[Storage1] a.vmx'
"""
try:
(dsname, relative) = FsPathToTuple(path)
return '[%s] %s' % (dsname, relative)
except:
raise FilePathError('path "%s" not valid' % path)
def DsPathToFsPath(path):
"""
Transforms inventory filename to fs-like filename
e.g. '[Storage1] a.vmx' -> '/vmfs/volumes/Storage1/a.vmx'
"""
try:
m = re.match('\[([^\]]+)\] (.*)',path)
return '/vmfs/volumes/%s/%s' % tuple(m.groups())
except:
raise FilePathError('path "%s" not valid' % path)
def DsPathToDsName(path):
"""
Transforms inventory filename to datastore name
e.g. '[Storage1] a.vmx' -> 'Storage1'
"""
try:
m = re.match('\[([^\]]+)\] .*',path)
return m.groups()[0]
except:
raise FilePathError('path "%s" not valid' % path)
def TestFsPathToTuple(fsPath, ds, relPath):
print('TestFsPathToTuple: "%s" -> ("%s", "%s")' % (fsPath, ds, relPath))
(myDs, myRelPath) = FsPathToTuple(fsPath, unitTest=True)
if ds != myDs or relPath != myRelPath:
raise Exception('TestFsPathToTuple: expected ("%s", "%s"), actual ("%s", %s")'
% (ds, relPath, myDs, myRelPath))
def UnitTest():
fileName = 'foo/bar.vmx'
TestFsPathToTuple('%s' % unitTestDsPath, unitTestDsName, '')
TestFsPathToTuple('%s/' % unitTestDsPath, unitTestDsName, '')
TestFsPathToTuple('%s/%s' % (unitTestDsPath, fileName), unitTestDsName, fileName)
TestFsPathToTuple('%s/' % unitTestDsPath, unitTestDsName, '')
TestFsPathToTuple('%s' % unitTestDsUuid, unitTestDsName, '')
TestFsPathToTuple('%s/' % unitTestDsUuid, unitTestDsName, '')
TestFsPathToTuple('%s/%s' % (unitTestDsUuid, fileName), unitTestDsName, fileName)
# Start program
if __name__ == "__main__":
UnitTest()
```
#### File: site-packages/pyVim/vimApiGraph.py
```python
from pyVmomi import types, Vmodl, Vim
import pyVim.vimApiTypeMatrix
import pyVim.moMapDefs
from functools import reduce
_LogLevel = 0
def Log(level, message):
if level <= _LogLevel:
print(message)
##
## @brief Description of constraints that might be placed on a managed
## object instance.
##
## Description of constraints that might be placed on a managed object instance.
## This constraint applies to a managed object instance of a specific type. It
## describes a constraint for a property paths from a managed object indicating
## what other managed objects may be referred to by the property path. The
## property path may either point at another constraint node or it may refer to
## a type of managed object.
##
## The lack of a constraint on a property path indicates that there are no
## constraints on what the path may be associated. Setting the constraint on
## property path to indicate the empty set will effectively disable traversal
## of the path. The list of constraints on a property path matches using logical
## 'or' semantics. Property paths are ignored unless they satisfy one of the
## constraints. Only the first one will be used to satsify the constraint, so
## this is not quite a fully featured constraint propagation system.
##
class NodeConstraint:
##
## @brief Constructs a constraint for a managed object instance node.
##
## The name of the node can be used by other NodeConstraint
## instances to refer to this node.
##
def __init__(self, name, type, traverseConstraints):
self._name = name
self._type = type
self._traverseConstraints = traverseConstraints
##
## @brief Get name of the node.
##
## This is either the the name of the class instance
## or it is the type name.
##
def GetName(self):
return self._name
## Get the type of the managed object.
def GetType(self):
return self._type
## Is there a constraint defined for a property path on the managed object?
def IsConstrained(self, propPath):
return propPath in self._traverseConstraints
##
## @brief List of constraints that apply to property path of managed object.
##
## Gets the list of constraints that apply to the property path of the
## managed object.
##
def GetConstraints(self, propPath):
traverseConstraints = self._traverseConstraints
if propPath in traverseConstraints:
return self._traverseConstraints[propPath]
else:
return []
## Dump the constraint data out to a string.
def ToString(self):
traverseConstraints = self._traverseConstraints
s = "{ name='" + self._name + "' type='" + self._type + "' "
constrings = ["'" + x + "': '" + str(', ').
join(traverseConstraints[x]) + "'" for x in list(traverseConstraints.keys())]
s = s + "traverseConstraints=[" + str(', ').join(constrings) + "]"
s = s + " }"
return s
##
## @brief Traversal constraint with target that is another constraint node.
##
## Describes a traversal constraint that indicates that the target is another
## constraint node. In order for the constraint to be satisifed, the target
## node of traversal specification must exist and the type of managed object
## must match that which is specified on the constraint node.
##
class TraversalConstraintNode:
def __init__(self, nodeName):
self._nodeName = nodeName
def IsNodeConstraint(self):
return True
def IsTypeConstraint(self):
return False
def GetName(self):
return self._nodeName
##
## @brief Traversal constraint with target that must be of a certain type.
##
## Describes a traversal constraint that indicates that the target must be of
## a certain type. In order for the constraint to be satisifed, the target
## node of traversal specification must exist and the type of managed object
## must match that which is specified on the constraint node.
##
class TraversalConstraintType:
def __init__(self, typeName):
self._typeName = typeName
def IsNodeConstraint(self):
return False
def IsTypeConstraint(self):
return True
def GetName(self):
return self._typeName
#
# Description of the top level inventory structure of the VIM API. This
# description of the structure is very concrete compared to the one that can
# derived from the VMODL types.
#
# Constraint on ServiceInstance that indicates that the root folder off the
# service instance contains only datacenters or other folders that themselves
# may on contain datacenters or other folders with datacenters.
_serviceInstanceNode = NodeConstraint(
'serviceInstance', 'vim.ServiceInstance',
{ 'content.rootFolder': [ TraversalConstraintNode('datacenterFolder') ] }
)
# Description of a folder that contains only datacenters or other folders that
# contani datacenters.
_datacenterFolderNode = NodeConstraint(
'datacenterFolder', 'vim.Folder',
{ 'childEntity': [ TraversalConstraintNode('datacenterFolder'),
TraversalConstraintNode('datacenter') ] }
)
# Description of a datacenter that contains four folders -- each one
# containing X or folders that contains X with X being one of
# {virtual machine, compute resource, datastore, network}
_datacenterNode = NodeConstraint(
'datacenter', 'vim.Datacenter',
{ 'vmFolder' : [ TraversalConstraintNode('virtualMachineFolder') ],
'datastoreFolder' : [ TraversalConstraintNode('datastoreFolder') ],
'networkFolder' : [ TraversalConstraintNode('networkFolder') ],
'hostFolder' : [ TraversalConstraintNode('computeResourceFolder') ] }
)
# Description of folder that contains only virtual machines or other folders
# containing virtual machines. Once the virtual machines are reached, no
# further contraints are specified.
_virtualMachineFolderNode = NodeConstraint(
'virtualMachineFolder', 'vim.Folder',
{ 'childEntity': [ TraversalConstraintNode('virtualMachineFolder'),
TraversalConstraintType('vim.VirtualMachine') ] }
)
# Description of folder that contains only compute resources or other folders
# containing compute resourcesd machines. Once a compute resource is reached,
# no further contraints are specified.
_computeResourceFolderNode = NodeConstraint(
'computeResourceFolder', 'vim.Folder',
{ 'childEntity': [ TraversalConstraintNode('computeResourceFolder'),
TraversalConstraintType('vim.ComputeResource') ] }
)
# Description of folder that contains only datastores or other folders
# containing datastores. Once the datastores are reached, no
# further contraints are specified.
_datastoreFolderNode = NodeConstraint(
'datastoreFolder', 'vim.Folder',
{ 'childEntity': [ TraversalConstraintNode('datastoreFolder'),
TraversalConstraintType('vim.Datastore') ] }
)
# Description of folder that contains only networks or other folders
# containing networks. Once the networks are reached, no
# further contraints are specified.
_networkFolderNode = NodeConstraint(
'networkFolder', 'vim.Folder',
{ 'childEntity': [ TraversalConstraintNode('networkFolder'),
TraversalConstraintType('vim.Network') ] }
)
# Set of constraints that describe how the VIM API types are composed.
_defaultGraphConstraints = [_serviceInstanceNode,
_datacenterFolderNode,
_datacenterNode,
_virtualMachineFolderNode,
_datastoreFolderNode,
_networkFolderNode,
_computeResourceFolderNode]
# XXX If it's useful, pull out the Graph, Edge, and GraphTraverser classes into
# a separate file where it can be used for general purposes. Some of the
# traversal generation spec also needs to be teased out of the traverser.
##
## @brief Abstract interface that defines a graph of traversable nodes.
##
## This interface is relied upon by the traversal code to perform an
## exhaustive graph walk.
##
## Graph:
## NodeName GetRootNode()
## Edge[] GetNodeEdges(node)
##
class Graph:
def __init__(self):
pass
# Get the root node of the graph from which to begin traversal.
def GetRootNode(self):
raise Exception('Graph.GetRootNode must be implemented')
# Get the edges of a node used to continue traversal.
def GetNodeEdges(self, nodeName):
raise Exception('Graph.GetNodeEdges must be implemented')
##
## @brief Defines a directional edge in the graph of traversable
## nodes.
##
## Edge:
## string GetName()
## string GetSourceNode()
## string GetTargetNode()
## string GetSourceType() - XXX User specific
## string GetPropPath() - XXX User specific
##
class Edge:
def __init__(self, sourceNode, targetNode, sourceType, propPath):
self._sourceNode = sourceNode
self._targetNode = targetNode
self._sourceType = sourceType
self._propertyPath = propPath
# Get the name of the edge. This property identifies the path and can be
# used to determine duplicate edges.
def GetName(self):
return self._sourceNode + "::" + self._propertyPath + "->" + self._targetNode
# Get the source node of the edge.
def GetSourceNode(self):
return self._sourceNode
# Get the target node of the edge.
def GetTargetNode(self):
return self._targetNode
# VIM API specific property describing the source type of the class
# represented in the source node.
def GetSourceType(self):
return self._sourceType
# VIM API specific property describing the name of the property from the
# source type to the target type.
def GetPropertyPath(self):
return self._propertyPath
##
## @brief Class that traverses the graph.
##
class GraphTraverser:
def __init__(self):
pass
# Helper function for breadth first traversal
def DoesSelectionSpecExistForName(self, selectionSet, name):
match = [x for x in selectionSet if x.GetName() == name]
return len(match) > 0
# Helper function to create a traversal spec
def MakeTraversalSpec(self, name, type, propPath, moList):
spec = Vmodl.Query.PropertyCollector.TraversalSpec()
spec.SetName(name)
spec.SetType(reduce(getattr, type.split('.'), types))
spec.SetPath(propPath)
spec.SetSkip(False)
# Check if we want to capture the property or not.
if moList is not None:
flag = not (type in moList)
spec.SetSkip(flag)
newSelectSet = []
spec.SetSelectSet(newSelectSet)
return spec
# Helper function to create a selection spec
def MakeSelectionSpec(self, name, type, propPath):
spec = Vmodl.Query.PropertyCollector.SelectionSpec()
spec.SetName(name)
return spec
#
# Build an exhaustive traversal spec from a constraint graph. This traversal
# algorithm uses a breadth first search as this heuristic will lead to traversal
# specs of minimal depth, which should be most intuitive since the managed
# object hierarchy is hiearchical in nature although it is technically more
# like a graph.
#
# Exhaustive traversal algorithm:
#
# A node is a managed object class or instance. An edge is directional and
# consists of a property path used to access one managed object class or
# instance node from another.
#
# From a node, enumerate over each edge adding traversal specs for each edge.
# When a node is visited add either a TraversalSpec or a SelectionSpec for
# each edge originating from that node if the edge is possible from the
# constraints. Add a TraversalSpec if the edge was never previously added.
# Add a SelectionSpec if the edge was added. Continue traversal for edges
# that were not yet visited.
#
# @param graph - Mo Graph to traverse to generate the traversal specs
# @param moList - Managed objects we are interested in for traversal.
# if None we do not skip any object in the traversal.
# @return rootselectionset - Selection set pivoted on the root managed
# object.
#
def Traverse(self, graph, moList = None):
rootNode = graph.GetRootNode()
# Selection specs that already have a traversal spec
existingSelectionSpecs = {}
# Traversal specs that were traversed. No need to traverse again.
visitedEdges = {}
# Root selection set that is to be returned
rootSelectionSet = []
# Queue of traversal context
workingQueue = [ {'node': rootNode,
'currentSelectionSet': rootSelectionSet,
'level': 0} ]
while len(workingQueue) > 0:
# Remove work item from front of list
work = workingQueue[0]
node = work['node']
currentSelectionSet = work['currentSelectionSet']
level = work['level']
workingQueue[0:1] = []
Log(3, "------------------------ Start " + node + "-----------------------")
Log(1, "==> Working on " + node + " at level " + str(level))
Log(4, "====> Queue length is " + str(len(workingQueue)))
edges = graph.GetNodeEdges(node)
for edge in edges:
propPath = edge.GetPropertyPath()
nodeType = edge.GetSourceType()
edgeName = edge.GetName()
traverseSpecName = node + '::' + propPath
Log(3, "==> Examining " + edgeName + " (" + traverseSpecName + ")")
# Add spec only if one does not already exist for the property.
# Traversal specs are not target type specific.
if not self.DoesSelectionSpecExistForName(currentSelectionSet, traverseSpecName):
spec = {}
if traverseSpecName not in existingSelectionSpecs:
Log(2, "==> Adding traversal spec for path " + traverseSpecName)
spec = self.MakeTraversalSpec(traverseSpecName, nodeType, propPath,moList)
newSelectSet = spec.GetSelectSet()
existingSelectionSpecs[traverseSpecName] = spec
else:
Log(2, "==> Adding selection spec for name " + traverseSpecName)
spec = self.MakeSelectionSpec(traverseSpecName, nodeType, propPath)
currentSelectionSet.append(spec)
if (_LogLevel >= 5):
Log(5, rootSelectionSet)
else:
Log(4, "==> Skipping path " + propPath + " because spec exists.")
if edgeName not in visitedEdges:
visitedEdges[edgeName] = 1
Log(3, "==> Have not traversed edge " + edgeName)
newNode = edge.GetTargetNode()
# If we've had to add a traversal spec, then we haven't visited this
# node yet.
workingQueue.append({ 'node': newNode,
'currentSelectionSet': newSelectSet,
'level': level + 1 })
Log(4, "====> Working queue length is " + str(len(workingQueue)) + ": " + \
str(', ').join([x['node'] for x in workingQueue]))
else:
Log(3, "==> Already traversed edge " + edgeName)
Log(3, "------------------------ End " + node + "-----------------------")
return rootSelectionSet
##
## @brief Graph of managed objects.
##
## Uses the matrix VIM API types as well as some
## additional semantic constraints to construct a graph where the managed objects
## are nodes and the property paths between them are edges. The graph represents
## not a specific instantiation of the managed objects but more like a schema
## that describes how the classes interact.
##
## The primary use case for this graph is to be able to generate property
## collector traversal specifications in a more general fashion using just
## constraints and the definition of the types.
##
## @param vimGraph a Matrix of managed objects see vimApiTypeMatrix
## @param nodeList set of constraintsfor the graph
## @param moList list of managed objects for which skip flag is unset.
##
class MoGraph(Graph):
def __init__(self, vimGraph, nodeList):
nodes = {}
for node in nodeList:
name = node.GetName()
nodes[name] = node
self._nodes = nodes
self._vimGraph = vimGraph
self._root = None
if len(nodes) > 0:
self._root = nodeList[0].GetName()
# Gets node constraint object by the name of the node
def GetNodeConstraint(self, nodeName):
nodes = self._nodes
if nodeName not in nodes:
return None
return nodes[nodeName]
# Sets node constraint object by name of the node. If node is None, node
# constraint is effectively unset.
def SetNodeConstraint(self, nodeName, node):
if self._root == None:
self._root = nodeName
elif self._root == nodeName:
self._root = None
self._nodes[nodeName] = node
# Sets the root node of the graph. The root node is by default the first
# node in the list. This operation sets it explicitly.
def SetRootNode(self, nodeName):
nodes = self._nodes
if nodeName not in nodes:
raise Exception('Could not find node ' + nodeName)
self._root = nodeName
# Graph.GetRootNode
#
# Get the root node of the graph from which to begin traversal.
def GetRootNode(self):
if self._root == None:
raise Exception('Root node in graph not defined')
return self._root
# Graph.GetNodeEdges
#
# Get the edges of a node used to continue traversal.
def GetNodeEdges(self, nodeName):
vimGraph = self._vimGraph
nodes = self._nodes
# Node name is either a managed object type or a node constraint node.
# First see if it is the latter. Otherwise, treat it as the former.
node = None
nodeType = nodeName
if nodeName in nodes:
node = nodes[nodeName]
nodeType = node.GetType()
# For a type, get the list of edges that are possible candidates.
candidateEdges = vimGraph.GetEdgesForClass(nodeType)
# Filter out edges that do not fit constraints specified by the node and
# traversal constraints. If the edge passes the filter, then box up the
# edge in a format that fits the graph abstraction provided by this class.
edges = []
for ce in candidateEdges:
source = ce['source']
propPath = ce['propPath']
target = ce['target']
if node == None or not node.IsConstrained(propPath):
# No node defined or node is defined but no constraints specified on
# the property path. There are no traversal constraints.
Log(5, "edge(" + source + ", " + propPath + ", " + target + ") is not constrained")
edges.append(Edge(nodeName, target, source, propPath))
continue
constraints = node.GetConstraints(propPath)
# Constraints exist. Check that the node matches one of the traversal
# constraints. Otherwise, it does not meet constraints.
edge = None
for constraint in constraints:
if constraint.IsTypeConstraint():
if constraint.GetName() == target:
edge = Edge(nodeName, target, source, propPath)
elif constraint.IsNodeConstraint():
newNodeName = constraint.GetName()
if newNodeName in nodes and nodes[newNodeName].GetType() == target:
Log(5, "edge(" + source + ", " + propPath + ", " + target + ") meets constraints")
edge= Edge(nodeName, newNodeName, source, propPath)
if edge != None:
Log(5, "edge(" + source + ", " + propPath + ", " + target + ") meets constraints")
edges.append(edge)
else:
Log(5, "edge(" + source + ", " + propPath + ", " + target + ") does not meets constraints")
return edges
# Create a graph that represents the object model of the VIM API.
# @param moList If mo not in moList then the skip flag will be set.
def CreateMoGraph():
vimGraph = pyVim.vimApiTypeMatrix.CreateMoTypeMatrix()
graph = MoGraph(vimGraph, _defaultGraphConstraints)
return graph
# Create a Molist that includes all inherited classes if the
# parent class is present.
# @param a list of managed objects
# @return a list of managed objects that include inherited
# classes.
def GetCompleteMoList(moList, classHierarchy):
newMoList = []
for mo in moList:
if mo in classHierarchy:
newMoList.extend(classHierarchy[mo])
newMoList.extend(moList)
return newMoList
# Compute the selection spec that applies to the managed object graph.
def BuildMoGraphSelectionSpec(moList = None):
if moList:
moList = GetCompleteMoList(moList, pyVim.moMapDefs.ClassHierarchy)
graph = CreateMoGraph()
selectSet = GraphTraverser().Traverse(graph, moList)
return selectSet
# Test function.
def main():
vimGraph = pyVim.vimApiTypeMatrix.CreateMoTypeMatrix()
print(vimGraph.ToString())
moList = ["vim.ManagedEntity"]
moList = GetCompleteMoList(moList, pyVim.moMapDefs.ClassHierarchy)
graph = MoGraph(vimGraph, _defaultGraphConstraints)
selectSet = GraphTraverser().Traverse(graph,moList)
print("\nSelection Set:")
#print selectSet
if __name__ == "__main__":
main()
```
#### File: rip/modules/VmOperations.py
```python
import pyVmomi
from pyVmomi import vim, vmodl
from DatacenterPrac import Login,GetCluster,GetDatacenter,get_obj,GetClusters
from clusterPrac import GetHostsInClusters
import status
from VMPrac import find_obj,get_container_view,collect_properties
import multiprocessing
from multiprocessing.dummy import Pool as ThreadPool
import time
def vm_ops_handler(vm_name, vm_object, operation,final_result_dict,maxwait = 5):
vm = vm_object
if vm and operation.lower() == "off":
power_off_task = vm.PowerOff()
run_loop = True
while run_loop:
info = power_off_task.info
if info.state == vim.TaskInfo.State.success:
run_loop = False
final_result_dict[vm_name] = "Power off success."
break
elif info.state == vim.TaskInfo.State.error:
if info.error:
final_result_dict[vm_name] = "Power off has quit with error: %s"%info.error
else:
final_result_dict[vm_name] = "Power off has quit with cancelation"
run_loop = False
break
time.sleep(maxwait)
elif vm and operation.lower() == "on":
power_off_task = vm.PowerOn()
run_loop = True
while run_loop:
info = power_off_task.info
if info.state == vim.TaskInfo.State.success:
run_loop = False
final_result_dict[vm_name] = "Power on success."
time.sleep(maxwait)
break
elif info.state == vim.TaskInfo.State.error:
if info.error:
final_result_dict[vm_name] = "Power on has quit with error: %s" % (info.error)
else:
final_result_dict[vm_name] = "Power on has quit with cancelation"
run_loop = False
break
time.sleep(maxwait)
elif operation != "on" or operation != "off":
final_result_dict[vm_name] = "Operation %s not implemented."%operation
def vm_ops_handler_wrapper(args):
"""
Wrapping arround vm_ops_handler
"""
return vm_ops_handler(*args)
def executePowerOps(vcIp, vcUser, vcPassword,dcName,clusterName,operation,pattern_array,vm_array,maxwait):
# Synchronized Object to Hold Results
final_result_dict = {}
try:
si = Login(vcIp, vcUser, vcPassword)
except Exception, e:
resp = str(e)
return dict(stat=resp, status=status.HTTP_403_FORBIDDEN)
try:
dcMor = find_obj(si, dcName, [vim.Datacenter], False)
clusterMor = GetCluster(dcMor, clusterName, si)
for pattern in pattern_array:
vm_properties = ["name"]
view = get_container_view(si, obj_type=[vim.VirtualMachine], container=clusterMor)
vm_data = collect_properties(si, view_ref=view, obj_type=vim.VirtualMachine, path_set=vm_properties,include_mors=True, desired_vm=pattern)
if any(vm_data):
pass
else:
resp = 'Finding VM matching pattern %s failed .' % pattern
return dict(stat=resp,status = status.HTTP_412_PRECONDITION_FAILED)
vm_specs = []
pool = ThreadPool(10)
for vm_name, vm_object in vm_data.iteritems():
vm_specs.append((vm_name, vm_object, operation, final_result_dict, maxwait))
pool.map(vm_ops_handler_wrapper, vm_specs)
pool.close()
pool.join()
except Exception,e:
return "Power operation failed due to %s."%(e)
return dict(final_result_dict)
############################### Cloning Operation #####################
synchObj=multiprocessing.Manager()
vm_result_list=synchObj.list()
def vm_clone_operation(si,template_vm,datacenter,clones,specdict):
global vm_result_list
cls = specdict["cluster"]
content = si.RetrieveContent()
cluster = get_obj(content, [vim.ClusterComputeResource], cls)
resource_pool = cluster.resourcePool
folder = datacenter.vmFolder
datastoresMors = datacenter.datastore
dsname = specdict["datastore"]
dsmor = None
for datastore in datastoresMors:
if datastore.info.name == dsname:
dsmor = datastore
break
hostMors = GetHostsInClusters(datacenter, [cls], 'connected')
hostname = specdict.get("host", None)
hostmor = None
if hostname:
for hostitem in hostMors:
if hostitem.name == hostname:
hostmor = hostitem
break
relocate_spec = vim.vm.RelocateSpec()
relocate_spec.pool = resource_pool
relocate_spec.datastore = dsmor
if hostmor:
relocate_spec.host = hostmor
power = False
if specdict["power"] == "on":
power = True
vmresult = {}
basename = specdict["basename"]
for i in range(clones):
vm_name = basename + "-" + str(i)
try:
clone_spec = vim.vm.CloneSpec(powerOn=power, template=False, location=relocate_spec)
task = template_vm.Clone(name=vm_name, folder=folder, spec=clone_spec)
run_loop = True
while run_loop:
info = task.info
if info.state == vim.TaskInfo.State.success:
vm = info.result
run_loop = False
vmresult[vm_name] = "Created"
elif info.state == vim.TaskInfo.State.running:
pass
elif info.state == vim.TaskInfo.State.queued:
pass
elif info.state == vim.TaskInfo.State.error:
errormsg=None
try:
errormsg = info.error
except Exception, e:
vmresult[vm_name] = str(e)
if errormsg:
vmresult[vm_name] = errormsg
else:
vmresult[vm_name] = "Cancelled"
run_loop = False
break
time.sleep(10)
except Exception, e:
vmresult = ["Failure while initiating cloning %s"%str(e)]
vm_result_list.append(vmresult)
def collect_vm_properties(service_instance, view_ref, obj_type, path_set=None,
include_mors=False,desired_vm=None):
"""
Collect properties for managed objects from a view ref
Returns:
A list of properties for the managed objects
"""
collector = service_instance.content.propertyCollector
# Create object specification to define the starting point of
# inventory navigation
obj_spec = pyVmomi.vmodl.query.PropertyCollector.ObjectSpec()
obj_spec.obj = view_ref
obj_spec.skip = True
# Create a traversal specification to identify the path for collection
traversal_spec = pyVmomi.vmodl.query.PropertyCollector.TraversalSpec()
traversal_spec.name = 'traverseEntities'
traversal_spec.path = 'view'
traversal_spec.skip = False
traversal_spec.type = view_ref.__class__
obj_spec.selectSet = [traversal_spec]
# Identify the properties to the retrieved
property_spec = pyVmomi.vmodl.query.PropertyCollector.PropertySpec()
property_spec.type = obj_type
if not path_set:
property_spec.all = True
property_spec.pathSet = path_set
# Add the object and property specification to the
# property filter specification
filter_spec = pyVmomi.vmodl.query.PropertyCollector.FilterSpec()
filter_spec.objectSet = [obj_spec]
filter_spec.propSet = [property_spec]
# Retrieve properties
props = collector.RetrieveContents([filter_spec])
properties = {}
try:
for obj in props:
for prop in obj.propSet:
if prop.val == desired_vm:
properties['name'] = prop.val
properties['obj'] = obj.obj
return properties
else:
pass
except Exception, e:
print "The exception inside collector_properties " + str(e)
return properties
def vm_clone_handler_wrapper(args):
return vm_clone_operation(*args)
def VMFullClones(vcitem):
cloneresult = {}
vcname = vcitem["vcname"]
user = vcitem["username"]
passw = vcitem["password"]
dcarray = vcitem["dc"]
for dcitem in dcarray:
dcname = dcitem["dcname"]
templatearray = dcitem["templates"]
pool = ThreadPool(4)
vm_specs = []
for templateitem in templatearray:
templatename = templateitem["template"]
container = templateitem["container"]
clones = templateitem["clones"]
specdict = templateitem["clonespecs"]
#print templatename + " will be cloned to " + str(clones) + " with Base name " + basename+ "-" + " with specs " + "VC " + vcname + " " + str(specdict)
si = Login(vcname,user, passw)
content = si.RetrieveContent()
dcMor = GetDatacenter(name=dcname, si=si)
clusterMorList = GetClusters(dcMor, [container])
desiredClusterMor = None
for item in clusterMorList:
desiredClusterMor = item
template_vm = None
if templatename and desiredClusterMor:
vm_properties = ["name"]
view = get_container_view(si, obj_type=[vim.VirtualMachine], container=desiredClusterMor)
try:
vm_data = collect_vm_properties(si, view_ref=view,
obj_type=vim.VirtualMachine,
path_set=vm_properties,
include_mors=True, desired_vm=templatename)
if vm_data['name'] == templatename:
template_vm = vm_data['obj']
except Exception,e:
cloneresult[templatename] = "Template Not Found due to error %s"%str(e)
if template_vm is None:
template_vm = get_obj(content, [vim.VirtualMachine], templatename)
if template_vm is None:
cloneresult[templatename] = "Template Not Found"
continue
vm_specs.append([si,template_vm,dcMor,clones,specdict])
pool.map(vm_clone_handler_wrapper, vm_specs)
pool.close()
pool.join()
cloneresult["result"] = list(vm_result_list)
return cloneresult
``` |
{
"source": "2syume/telegram-majyobot",
"score": 2
} |
#### File: telegram-majyobot/bot/__init__.py
```python
import time
import logging
from telegram.ext import Updater, MessageHandler, Filters
from .config import config
from .models import save_photo_record, save_text_message
from .handlers import save_photo
logger = logging.getLogger(__name__)
def echo(bot, update):
print("[{}] {}: {}".format(
update.message.chat.title,
update.message.from_user.name,
update.message.text)
)
save_text_message(update.message)
def start():
bot_token = config.get("Bot", "Token")
updater = Updater(token=bot_token)
echo_handler = MessageHandler(Filters.text, echo)
updater.dispatcher.add_handler(echo_handler)
photo_handler = MessageHandler(Filters.photo, save_photo.handler)
updater.dispatcher.add_handler(photo_handler)
updater.start_polling()
``` |
{
"source": "2t0m/ad-spotify-mood-lights-sync",
"score": 2
} |
#### File: ad-spotify-mood-lights-sync/tests/test_utils.py
```python
import pytest
import re
import requests
TRACKS = {
"min_min": {"valence": 0, "energy": 0},
"min_max": {"valence": 0, "energy": 1},
"max_min": {"valence": 1, "energy": 0},
"max_max": {"valence": 1, "energy": 1},
"center": {"valence": 0.5, "energy": 0.5},
}
SONGS = {
("song 1", "artist 1"): "min_min",
("song 2", "artist 2"): "min_max",
("song 1", "artist 2"): "max_max",
}
CUSTOM_PROFILE = [
{'point': [0, 0], 'color': [0, 0, 255]},
{'point': [1, 0], 'color': [0, 255, 0]},
{'point': [0, 1], 'color': [255, 0, 0]},
{'point': [1, 1], 'color': [255, 255, 0]},
]
class NetworkState:
def __init__(self):
self.tries = 0
self.is_on = False
self.n_errors = -1
def reset(self):
self.tries = 0
def inc(self):
self.tries += 1
if self.is_on and (self.n_errors == -1 or self.tries <= self.n_errors):
raise requests.exceptions.ConnectionError
def turn_on_errors(self, n_errors=-1):
self.reset()
self.n_errors = n_errors
self.is_on = True
def turn_off_errors(self):
self.is_on = False
NETWORK_STATE = NetworkState()
def track_to_point(track_uri):
return TRACKS[track_uri]['valence'], TRACKS[track_uri]['energy']
def mock_audio_features(_, track_uri):
NETWORK_STATE.inc()
if track_uri not in TRACKS:
return [None]
return [TRACKS[track_uri]]
def mock_search(_, q, type):
NETWORK_STATE.inc()
groups = re.match(r"artist:(.*)track:(.*)", q).groups()
if len(groups) == 1:
pass
else:
return {'tracks': {
'items': [{
'uri': SONGS[(groups[1].strip(), groups[0].strip())]
}]
}}
@pytest.fixture
def hass_errors(hass_mocks):
return lambda: [call[0][0] for call in hass_mocks.hass_functions["error"].call_args_list]
``` |
{
"source": "2t0m/ha-pioneer_async",
"score": 2
} |
#### File: custom_components/pioneer_async/config_flow.py
```python
import logging
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import (
CONF_HOST,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_TIMEOUT,
)
from homeassistant.core import callback
from .pioneer_avr import PioneerAVR # pylint: disable=import-error
from .const import (
DATA_SCHEMA,
OPTIONS_DEFAULTS,
CONF_UNIQUE_ID,
CONF_COMMAND_DELAY,
CONF_VOLUME_WORKAROUND,
)
from .const import DOMAIN # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
async def validate_input(hass: core.HomeAssistant, data):
"""
Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
_LOGGER.debug(">> validate_input(%s)", data)
try:
pioneer = PioneerAVR(data[CONF_HOST], data[CONF_PORT])
await pioneer.connect()
except:
raise CannotConnect # pylint: disable=raise-missing-from
await pioneer.shutdown()
del pioneer
# Return info that you want to store in the config entry.
device_unique_id = data[CONF_HOST] + ":" + str(data[CONF_PORT])
return {
**data,
CONF_UNIQUE_ID: device_unique_id,
}
class PioneerAVRFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle Pioneer AVR config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
_LOGGER.debug(">> config.async_step_user(%s)", user_input)
errors = {}
if user_input is not None:
try:
info = await validate_input(self.hass, user_input)
await self.async_set_unique_id(info[CONF_UNIQUE_ID])
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=info[CONF_UNIQUE_ID], data=user_input
)
except CannotConnect:
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return PioneerAVROptionsFlowHandler(config_entry)
class PioneerAVROptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for Harmony."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
_LOGGER.debug(">> options.__init__(%s)", config_entry)
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
_LOGGER.debug(">> options.async_step_init(%s)", user_input)
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
## Get current set of options and build options schema
options = {
**OPTIONS_DEFAULTS,
**(self.config_entry.options if self.config_entry.options else {}),
}
data_schema = vol.Schema(
{
## TODO: add sources option: how to ask the user for a dictionary in config flow?
vol.Optional(
CONF_SCAN_INTERVAL, default=options[CONF_SCAN_INTERVAL]
): int,
vol.Optional(CONF_TIMEOUT, default=options[CONF_TIMEOUT]): vol.Coerce(
float
),
vol.Optional(
CONF_COMMAND_DELAY, default=options[CONF_COMMAND_DELAY]
): vol.Coerce(float),
vol.Optional(
CONF_VOLUME_WORKAROUND, default=options[CONF_VOLUME_WORKAROUND]
): bool,
}
)
return self.async_show_form(step_id="init", data_schema=data_schema)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
``` |
{
"source": "2takker/pdf_header_footer_tool",
"score": 3
} |
#### File: 2takker/pdf_header_footer_tool/pdf_tools.py
```python
import tkinter
from tkinter.filedialog import askopenfilename
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4, letter
from PyPDF2 import PdfFileReader
from PyPDF2 import PdfFileWriter
def choose_pdf_path() -> 'pdf_path':
"""Prompts user to choose a pdf to read. Returns path"""
tkinter.Tk().withdraw()
filename = askopenfilename()
return filename
def generate_header(left_text='',
middle_text='',
right_text=''):
"""Generates a pdf contraining specified header"""
width, height = letter
can = canvas.Canvas('header.pdf', pagesize=letter)
can.drawString(x=0.075*width, y=0.96*height, text=left_text)
can.drawCentredString(x=0.5*width, y=0.96*height, text=middle_text)
can.drawRightString(x=0.925*width, y=0.96*height, text=right_text)
# This might be used later, rather than saving a pdf file each time
#can.getpdfdata()
can.save()
def generate_footer(left_text='',
middle_text='',
right_text=''):
"""Generates a pdf contraining specified footer"""
width, height = letter
can = canvas.Canvas('footer.pdf', pagesize=letter)
can.drawString(x=0.075*width, y=0.03*height, text=left_text)
can.drawCentredString(x=0.5*width, y=0.03*height, text=middle_text)
can.drawRightString(x=0.925*width, y=0.03*height, text=right_text)
# This might be used later, rather than saving a pdf file each time
#can.getpdfdata()
can.save()
def merge_pdf(target_pdf, header='header.pdf', footer=None):
pdf_writer = PdfFileWriter()
header_page = PdfFileReader(header).getPage(0)
if footer is not None:
footer_page = PdfFileReader(footer).getPage(0)
for page in range(target_pdf.getNumPages()):
current_page = target_pdf.getPage(page)
current_page.mergePage(header_page)
if footer is not None:
current_page.mergePage(footer_page)
pdf_writer.addPage(current_page)
with open('merged.pdf', 'wb') as fh:
pdf_writer.write(fh)
if __name__ == "__main__":
#choose_pdf_path()
generate_header('<NAME>', 'ID', '4 pages')
generate_footer('left', 'middle', 'right')
target_pdf = PdfFileReader('Hand_in_2019.pdf')
merge_pdf(target_pdf)
``` |
{
"source": "2To3rdPwr/reaver-pysc2",
"score": 3
} |
#### File: models/base/cnn.py
```python
import gin
from tensorflow.keras import Model
from tensorflow.keras.layers import Input, Concatenate, Dense, Conv2D, Flatten
from reaver.models.base.layers import Squeeze, Rescale, Transpose
@gin.configurable
def build_cnn_nature(obs_spec, act_spec, data_format='channels_first', value_separate=False):
conv_cfg = dict(padding='same', data_format=data_format, activation='relu')
conv_spec = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]
inputs = [Input(s.shape, name="input_" + s.name) for s in obs_spec]
inputs_concat = Concatenate()(inputs) if len(inputs) > 1 else inputs[0]
# expected NxCxHxW, but got NxHxWxC
if data_format == 'channels_first' and inputs_concat.shape[1] > 3:
inputs_concat = Transpose([0, 3, 1, 2])(inputs_concat)
inputs_scaled = Rescale(1./255)(inputs_concat)
x = build_cnn(inputs_scaled, conv_spec, conv_cfg, dense=512, prefix='policy_')
outputs = [Dense(s.size(), name="logits_" + s.name)(x) for s in act_spec]
if value_separate:
x = build_cnn(inputs_scaled, conv_spec, conv_cfg, dense=512, prefix='value_')
value = Dense(1, name="value_out")(x)
value = Squeeze(axis=-1)(value)
outputs.append(value)
return Model(inputs=inputs, outputs=outputs)
def build_cnn(input_layer, layers, conv_cfg, dense=None, prefix=''):
x = input_layer
for i, (n_filters, kernel_size, stride) in enumerate(layers):
x = Conv2D(n_filters, kernel_size, stride, name='%sconv%02d' % (prefix, i+1), **conv_cfg)(x)
if dense:
x = Flatten()(x)
x = Dense(dense)(x)
return x
```
#### File: models/base/layers.py
```python
import tensorflow as tf
from tensorflow.keras.layers import Lambda
class Squeeze(Lambda):
def __init__(self, axis=-1):
Lambda.__init__(self, lambda x: tf.squeeze(x, axis=axis))
class Split(Lambda):
def __init__(self, num_splits=2, axis=-1):
Lambda.__init__(self, lambda x: tf.split(x, num_splits, axis=axis))
class Transpose(Lambda):
def __init__(self, dims):
Lambda.__init__(self, lambda x: tf.transpose(x, dims))
class Log(Lambda):
def __init__(self):
Lambda.__init__(self, lambda x: tf.log(x + 1e-10))
class Rescale(Lambda):
def __init__(self, scale):
Lambda.__init__(self, lambda x: tf.cast(x, tf.float32) * scale)
class Broadcast2D(Lambda):
def __init__(self, size):
Lambda.__init__(self, lambda x: tf.tile(tf.expand_dims(tf.expand_dims(x, 2), 3), [1, 1, size, size]))
``` |
{
"source": "2torus/tf-quant-finance",
"score": 2
} |
#### File: math/optimizer/conjugate_gradient_test.py
```python
import numpy as np
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
def _norm(x):
return np.linalg.norm(x, np.inf)
# Test functions.
def _rosenbrock(x):
"""See https://en.wikipedia.org/wiki/Rosenbrock_function."""
term1 = 100 * tf.reduce_sum(tf.square(x[1:] - tf.square(x[:-1])))
term2 = tf.reduce_sum(tf.square(1 - x[:-1]))
return term1 + term2
def _himmelblau(coord):
"""See https://en.wikipedia.org/wiki/Himmelblau%27s_function."""
x, y = coord[..., 0], coord[..., 1]
return (x**2 + y - 11)**2 + (x + y**2 - 7)**2
def _mc_cormick(coord):
"""See https://www.sfu.ca/~ssurjano/mccorm.html."""
x = coord[0]
y = coord[1]
return tf.sin(x + y) + tf.square(x - y) - 1.5 * x + 2.5 * y + 1
def _beale(coord):
"""See https://www.sfu.ca/~ssurjano/beale.html."""
x = coord[0]
y = coord[1]
term1 = (1.5 - x + x * y)**2
term2 = (2.25 - x + x * y**2)**2
term3 = (2.625 - x + x * y**3)**2
return term1 + term2 + term3
@test_util.run_all_in_graph_and_eager_modes
class ConjugateGradientTest(tf.test.TestCase):
def _check_algorithm(self,
func=None,
start_point=None,
gtol=1e-4,
expected_argmin=None):
"""Runs algorithm on given test case and verifies result."""
val_grad_func = lambda x: tff.math.value_and_gradient(func, x)
start_point = tf.constant(start_point, dtype=tf.float64)
expected_argmin = np.array(expected_argmin, dtype=np.float64)
f_call_ctr = tf.Variable(0, dtype=tf.int32)
def val_grad_func_with_counter(x):
with tf.compat.v1.control_dependencies(
[tf.compat.v1.assign_add(f_call_ctr, 1)]):
return val_grad_func(x)
result = tff.math.optimizer.conjugate_gradient_minimize(
val_grad_func_with_counter,
start_point,
tolerance=gtol,
max_iterations=200)
self.evaluate(tf.compat.v1.global_variables_initializer())
result = self.evaluate(result)
f_call_ctr = self.evaluate(f_call_ctr)
# Check that minimum is found.
with self.subTest(name="Position"):
self.assertAllClose(result.position, expected_argmin, rtol=1e-3,
atol=1e-3)
# Check that gradient norm is below tolerance.
grad_norm = np.max(result.objective_gradient)
with self.subTest(name="GradientNorm"):
self.assertLessEqual(grad_norm, gtol)
# Check that number of function calls, declared by algorithm, is correct.
with self.subTest(name="NumberOfEvals"):
self.assertEqual(result.num_objective_evaluations, f_call_ctr)
# Check returned function and gradient values.
pos = tf.constant(result.position, dtype=tf.float64)
f_at_pos, grad_at_pos = self.evaluate(val_grad_func(pos))
with self.subTest(name="ObjectiveValue"):
self.assertAllClose(result.objective_value, f_at_pos)
with self.subTest(name="ObjectiveGradient"):
self.assertAllClose(result.objective_gradient, grad_at_pos)
# Check that all converged and none failed.
with self.subTest(name="AllConverged"):
self.assertTrue(np.all(result.converged))
with self.subTest("NoneFailed"):
self.assertFalse(np.any(result.failed))
def test_univariate(self):
self._check_algorithm(
func=lambda x: (x[0] - 20)**2,
start_point=[100.0],
expected_argmin=[20.0])
def test_quadratics(self):
def test_random_quadratic(dim, seed):
"""Generates random test case for function x^T A x + b x."""
np.random.seed(seed)
a = np.random.uniform(size=(dim, dim))
a = np.array(
np.dot(a, a.T), dtype=np.float64) # Must be positive semidefinite.
b = np.array(np.random.uniform(size=(dim,)), dtype=np.float64)
argmin = -np.dot(np.linalg.inv(a), b)
a = tf.constant(a)
b = tf.constant(b)
def paraboloid(x):
return 0.5 * tf.einsum("i,ij,j->", x, a, x) + tf.einsum("i,i->", b, x)
self._check_algorithm(
start_point=np.random.uniform(size=(dim,)),
func=paraboloid,
expected_argmin=argmin)
test_random_quadratic(2, 43)
test_random_quadratic(3, 43)
test_random_quadratic(4, 43)
test_random_quadratic(5, 43)
test_random_quadratic(10, 43)
test_random_quadratic(15, 43)
def test_paraboloid_4th_order(self):
self._check_algorithm(
func=lambda x: tf.reduce_sum(x**4),
start_point=[1, 2, 3, 4, 5],
expected_argmin=[0, 0, 0, 0, 0],
gtol=1e-10)
def test_logistic_regression(self):
dim = 5
n_objs = 10000
np.random.seed(1)
betas = np.random.randn(dim) # The true beta
intercept = np.random.randn() # The true intercept
features = np.random.randn(n_objs, dim) # The feature matrix
probs = 1 / (1 + np.exp(
-np.matmul(features, np.expand_dims(betas, -1)) - intercept))
labels = np.random.binomial(1, probs) # The true labels
regularization = 0.8
feat = tf.constant(features)
lab = tf.constant(labels, dtype=feat.dtype)
def f_negative_log_likelihood(params):
intercept, beta = params[0], params[1:]
logit = tf.matmul(feat, tf.expand_dims(beta, -1)) + intercept
log_likelihood = tf.reduce_sum(
tf.nn.sigmoid_cross_entropy_with_logits(labels=lab, logits=logit))
l2_penalty = regularization * tf.reduce_sum(beta**2)
total_loss = log_likelihood + l2_penalty
return total_loss
start_point = np.ones(dim + 1)
argmin = [
-2.38636155, 1.61778325, -0.60694238, -0.51523609, -1.09832275,
0.88892742
]
self._check_algorithm(
func=f_negative_log_likelihood,
start_point=start_point,
expected_argmin=argmin,
gtol=1e-5)
def test_data_fitting(self):
"""Tests MLE estimation for a simple geometric GLM."""
n, dim = 100, 3
dtype = tf.float64
np.random.seed(234095)
x = np.random.choice([0, 1], size=[dim, n])
s = 0.01 * np.sum(x, 0)
p = 1. / (1 + np.exp(-s))
y = np.random.geometric(p)
x_data = tf.convert_to_tensor(value=x, dtype=dtype)
y_data = tf.expand_dims(tf.convert_to_tensor(value=y, dtype=dtype), -1)
def neg_log_likelihood(state):
state_ext = tf.expand_dims(state, 0)
linear_part = tf.matmul(state_ext, x_data)
linear_part_ex = tf.stack([tf.zeros_like(linear_part), linear_part],
axis=0)
term1 = tf.squeeze(
tf.matmul(tf.reduce_logsumexp(linear_part_ex, axis=0), y_data), -1)
term2 = (0.5 * tf.reduce_sum(state_ext * state_ext, axis=-1) -
tf.reduce_sum(linear_part, axis=-1))
return tf.squeeze(term1 + term2)
self._check_algorithm(
func=neg_log_likelihood,
start_point=np.ones(shape=[dim]),
expected_argmin=[-0.020460034354, 0.171708568111, 0.021200423717])
def test_rosenbrock_2d_v1(self):
self._check_algorithm(
func=_rosenbrock,
start_point=[-1.2, 2],
expected_argmin=[1.0, 1.0])
def test_rosenbrock_2d_v2(self):
self._check_algorithm(
func=_rosenbrock,
start_point=[7, -12],
expected_argmin=[1.0, 1.0])
def test_rosenbock_7d(self):
self._check_algorithm(
func=_rosenbrock,
start_point=np.zeros(7),
expected_argmin=np.ones(7))
def test_himmelblau_v1(self):
self._check_algorithm(
func=_himmelblau,
start_point=[4, 3],
expected_argmin=[3.0, 2.0],
gtol=1e-8)
def test_himmelblau_v2(self):
self._check_algorithm(
func=_himmelblau,
start_point=[-2, 3],
expected_argmin=[-2.805118, 3.131312],
gtol=1e-8)
def test_himmelblau_v3(self):
self._check_algorithm(
func=_himmelblau,
start_point=[-3, -3],
expected_argmin=[-3.779310, -3.283186],
gtol=1e-8)
def test_himmelblau_v4(self):
self._check_algorithm(
func=_himmelblau,
start_point=[3, -1],
expected_argmin=[3.584428, -1.848126],
gtol=1e-8)
def test_mc_cormick(self):
self._check_algorithm(
func=_mc_cormick,
start_point=[0, 0],
expected_argmin=[-0.54719, -1.54719])
def test_beale(self):
self._check_algorithm(
func=_beale,
start_point=[-1.0, -1.0],
expected_argmin=[3.0, 0.5],
gtol=1e-8)
def test_himmelblau_batch_all(self):
self._check_algorithm(
func=_himmelblau,
start_point=[[1, 1], [-2, 2], [-1, -1], [1, -2]],
expected_argmin=[[3, 2], [-2.805118, 3.131312], [-3.779310, -3.283186],
[3.584428, -1.848126]],
gtol=1e-8)
def test_himmelblau_batch_any(self):
val_grad_func = tff.math.make_val_and_grad_fn(_himmelblau)
starts = tf.constant([[1, 1], [-2, 2], [-1, -1], [1, -2]], dtype=tf.float64)
expected_minima = np.array([[3, 2], [-2.805118, 3.131312],
[-3.779310, -3.283186], [3.584428, -1.848126]],
dtype=np.float64)
# Run with `converged_any` stopping condition, to stop as soon as any of
# the batch members have converged.
batch_results = tff.math.optimizer.conjugate_gradient_minimize(
val_grad_func,
initial_position=starts,
stopping_condition=tff.math.optimizer.converged_any,
tolerance=1e-8)
batch_results = self.evaluate(batch_results)
self.assertFalse(np.any(batch_results.failed)) # None have failed.
self.assertTrue(np.any(batch_results.converged)) # At least one converged.
self.assertFalse(np.all(batch_results.converged)) # But not all did.
# Converged points are near expected minima.
for actual, expected in zip(batch_results.position[batch_results.converged],
expected_minima[batch_results.converged]):
self.assertArrayNear(actual, expected, 1e-5)
self.assertEqual(batch_results.num_iterations, 7)
self.assertEqual(batch_results.num_objective_evaluations, 27)
def test_dynamic_shapes(self):
"""Can build op with dynamic shapes in graph mode."""
if tf.executing_eagerly():
return
minimum = np.array([1.0, 1.0])
scales = np.array([2.0, 3.0])
@tff.math.make_val_and_grad_fn
def quadratic(x):
return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)
# Test with a vector of unknown dimension.
start = tf.compat.v1.placeholder(tf.float32, shape=[None])
op = tff.math.optimizer.conjugate_gradient_minimize(
quadratic, initial_position=start, tolerance=1e-8)
self.assertFalse(op.position.shape.is_fully_defined())
with self.cached_session() as session:
results = session.run(op, feed_dict={start: [0.6, 0.8]})
self.assertTrue(results.converged)
self.assertLessEqual(_norm(results.objective_gradient), 1e-8)
self.assertArrayNear(results.position, minimum, 1e-5)
def test_multiple_functions(self):
# Define 3 independednt quadratic functions, each with its own minimum.
minima = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
func = lambda x: tf.reduce_sum(tf.square(x - minima), axis=1)
self._check_algorithm(
func=func, start_point=np.zeros_like(minima), expected_argmin=minima)
def test_float32(self):
minimum = np.array([1.0, 1.0], dtype=np.float32)
scales = np.array([2.0, 3.0], dtype=np.float32)
start = np.zeros_like(minimum)
@tff.math.make_val_and_grad_fn
def quadratic(x):
return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)
result = tff.math.optimizer.conjugate_gradient_minimize(
quadratic, initial_position=start)
self.assertEqual(result.position.dtype, tf.float32)
self.assertArrayNear(self.evaluate(result.position), minimum, 1e-5)
if __name__ == "__main__":
tf.test.main()
``` |
{
"source": "2trc/boilerplates",
"score": 2
} |
#### File: boilerplates/jinja2_angular/app.py
```python
from aiohttp import web
import aiohttp_jinja2
import jinja2
@aiohttp_jinja2.template('tmpl.index')
async def index(request):
return {}
app = web.Application()
app = web.Application()
app.router.add_route('GET', '/', index)
app.router.add_static('/css', './static/css')
aiohttp_jinja2.setup(app,
loader = jinja2.FileSystemLoader('./templates/'))
web.run_app(app, port=5003)
``` |
{
"source": "2tunnels/http-quest",
"score": 2
} |
#### File: http-quest/tests/conftest.py
```python
from dataclasses import dataclass
import pytest
from _pytest.fixtures import SubRequest
from starlette.applications import Starlette
from starlette.config import environ
from starlette.testclient import TestClient
environ["DEBUG"] = "False"
environ["BUGSNAG_API_KEY"] = "secret"
@dataclass
class Level:
name: str
method: str
def get_route_name(self) -> str:
return f"level:{self.name}"
def get_level_name(level: Level) -> str:
return level.name
@pytest.fixture(
params=[
Level("plain", "GET"),
Level("reverse", "GET"),
Level("base64", "GET"),
Level("header", "GET"),
Level("delete", "DELETE"),
Level("user_agent", "GET"),
Level("accept_language", "GET"),
Level("redirect", "GET"),
Level("robots", "POST"),
Level("guess_number", "POST"),
Level("mask", "POST"),
Level("finish", "GET"),
],
ids=get_level_name,
)
def level(request: SubRequest) -> Level:
param: Level = request.param
return param
@pytest.fixture
def app() -> Starlette:
from http_quest.asgi import application
return application
@pytest.fixture
def client(app: Starlette) -> TestClient:
return TestClient(app)
```
#### File: http-quest/tests/test_levels.py
```python
import pytest
from starlette import status
from starlette.applications import Starlette
from starlette.testclient import TestClient
from http_quest import passwords, secrets
from http_quest.utils import base64_decode
from .conftest import Level
def test_require_password(level: Level, app: Starlette, client: TestClient) -> None:
url = app.url_path_for(level.get_route_name())
response = client.request(level.method, url)
assert response.status_code == status.HTTP_403_FORBIDDEN
assert response.text == "X-Password header is required"
def test_wrong_password(level: Level, app: Starlette, client: TestClient) -> None:
url = app.url_path_for(level.get_route_name())
response = client.request(level.method, url, headers={"X-Password": "<PASSWORD>"})
assert response.status_code == status.HTTP_403_FORBIDDEN
assert response.text == "X-Password header is wrong"
def test_plain(client: TestClient, app: Starlette) -> None:
response = client.get(
app.url_path_for("level:plain"), headers={"X-Password": passwords.PLAIN}
)
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"password": passwords.REVERSE}
def test_reverse(client: TestClient, app: Starlette) -> None:
response = client.get(
app.url_path_for("level:reverse"), headers={"X-Password": passwords.REVERSE}
)
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"drowssap": passwords.BASE64[::-1]}
def test_base64(client: TestClient, app: Starlette) -> None:
response = client.get(
app.url_path_for("level:base64"), headers={"X-Password": passwords.BASE64}
)
encoded_password = response.json()["password"]
decoded_password = base64_decode(encoded_password)
assert response.status_code == status.HTTP_200_OK
assert decoded_password == passwords.HEADERS
def test_header(client: TestClient, app: Starlette) -> None:
response = client.get(
app.url_path_for("level:header"), headers={"X-Password": passwords.HEADERS}
)
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"password": "<PASSWORD>"}
assert response.headers["X-Real-Password"] == passwords.DELETE
@pytest.mark.parametrize("method", ["GET", "POST", "PUT", "PATCH"])
def test_delete_method_not_allowed(
method: str, client: TestClient, app: Starlette
) -> None:
response = client.request(
method,
app.url_path_for("level:delete"),
headers={"X-Password": passwords.DELETE},
)
assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
assert response.text == "Method Not Allowed"
def test_delete(client: TestClient, app: Starlette) -> None:
response = client.delete(
app.url_path_for("level:delete"), headers={"X-Password": passwords.DELETE}
)
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"password": passwords.USER_AGENT}
def test_user_agent_is_not_ie_6(client: TestClient, app: Starlette) -> None:
response = client.get(
app.url_path_for("level:user_agent"),
headers={"X-Password": passwords.USER_AGENT},
)
assert response.status_code == status.HTTP_403_FORBIDDEN
assert response.text == (
"Password for the next level is only available for the bravest! "
"Internet Explorer 6 users!"
)
def test_user_agent(client: TestClient, app: Starlette) -> None:
response = client.get(
app.url_path_for("level:user_agent"),
headers={
"X-Password": passwords.USER_AGENT,
"User-Agent": "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)",
},
)
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"password": passwords.ACCEPT_LANGUAGE}
def test_accept_language_is_not_provided(client: TestClient, app: Starlette) -> None:
response = client.get(
app.url_path_for("level:accept_language"),
headers={"X-Password": passwords.ACCEPT_LANGUAGE},
)
assert response.status_code == status.HTTP_406_NOT_ACCEPTABLE
assert response.text == "Я говорю только по русски, товарищ."
def test_accept_language_is_not_russian(client: TestClient, app: Starlette) -> None:
response = client.get(
app.url_path_for("level:accept_language"),
headers={
"X-Password": passwords.ACCEPT_LANGUAGE,
"Accept-Language": "en-US,en;q=0.5",
},
)
assert response.status_code == status.HTTP_406_NOT_ACCEPTABLE
assert response.text == "Я говорю только по русски, товарищ."
def test_accept_language(client: TestClient, app: Starlette) -> None:
response = client.get(
app.url_path_for("level:accept_language"),
headers={"X-Password": passwords.ACCEPT_LANGUAGE, "Accept-Language": "ru-RU"},
)
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"пароль": passwords.REDIRECT}
def test_redirect_secret_is_wrong(client: TestClient, app: Starlette) -> None:
response = client.get(
app.url_path_for("level:redirect") + "?secret=qwerty",
headers={"X-Password": passwords.REDIRECT},
)
assert response.status_code == status.HTTP_403_FORBIDDEN
assert response.text == "Secret is wrong."
def test_redirect(client: TestClient, app: Starlette) -> None:
response = client.get(
app.url_path_for("level:redirect"), headers={"X-Password": passwords.REDIRECT}
)
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"password": <PASSWORD>}
assert len(response.history) == 20
def test_robots_secret_is_missing(client: TestClient, app: Starlette) -> None:
response = client.post(
app.url_path_for("level:robots"), headers={"X-Password": passwords.ROBOTS}
)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.json() == {
"errors": {"secret": ["Missing data for required field."]}
}
def test_robots_secret_is_wrong(client: TestClient, app: Starlette) -> None:
response = client.post(
app.url_path_for("level:robots"),
headers={"X-Password": <PASSWORD>.ROBOTS},
json={"secret": "foobar"},
)
assert response.status_code == status.HTTP_403_FORBIDDEN
assert response.text == "Secret is wrong, human."
def test_robots(client: TestClient, app: Starlette) -> None:
response = client.post(
app.url_path_for("level:robots"),
headers={"X-Password": <PASSWORD>},
json={"secret": secrets.ROBOTS},
)
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"password": passwords.GUESS_NUMBER}
def test_guess_number_is_missing(client: TestClient, app: Starlette) -> None:
response = client.post(
app.url_path_for("level:guess_number"),
headers={"X-Password": passwords.GUESS_NUMBER},
)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.json() == {
"errors": {"number": ["Missing data for required field."]}
}
def test_guess_number_is_not_a_valid_integer(
client: TestClient, app: Starlette
) -> None:
response = client.post(
app.url_path_for("level:guess_number"),
headers={"X-Password": <PASSWORD>.GUESS_NUMBER},
json={"number": "foobar"},
)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.json() == {"errors": {"number": ["Not a valid integer."]}}
@pytest.mark.parametrize("number", [0, 1001])
def test_guess_number_invalid_range(
number: int, client: TestClient, app: Starlette
) -> None:
response = client.post(
app.url_path_for("level:guess_number"),
headers={"X-Password": <PASSWORD>.GUESS_NUMBER},
json={"number": number},
)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.json() == {
"errors": {
"number": [
"Must be greater than or equal to 1 and less than or equal to 1000."
]
}
}
def test_guess_number_is_wrong(client: TestClient, app: Starlette) -> None:
response = client.post(
app.url_path_for("level:guess_number"),
headers={"X-Password": <PASSWORD>.GUESS_NUMBER},
json={"number": 100},
)
assert response.status_code == status.HTTP_403_FORBIDDEN
assert response.text == "Number is wrong."
def test_guess_number(client: TestClient, app: Starlette) -> None:
response = client.post(
app.url_path_for("level:guess_number"),
headers={"X-Password": <PASSWORD>.GUESS_NUMBER},
json={"number": 372},
)
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"password": <PASSWORD>.MASK}
def test_mask_secret_is_missing(client: TestClient, app: Starlette) -> None:
response = client.post(
app.url_path_for("level:mask"), headers={"X-Password": passwords.MASK}
)
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert response.json() == {
"errors": {"secret": ["Missing data for required field."]}
}
def test_mask_secret_is_wrong(client: TestClient, app: Starlette) -> None:
response = client.post(
app.url_path_for("level:mask"),
headers={"X-Password": passwords.MASK},
json={"secret": "eeeeeeeeeeeeeeeeeeee"},
)
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"password": "***<PASSWORD>***********"}
def test_mask(client: TestClient, app: Starlette) -> None:
response = client.post(
app.url_path_for("level:mask"),
headers={"X-Password": passwords.MASK},
json={"secret": secrets.MASK},
)
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"password": <PASSWORD>}
def test_finish(client: TestClient, app: Starlette) -> None:
response = client.get(
app.url_path_for("level:finish"), headers={"X-Password": <PASSWORD>.FINISH},
)
assert response.status_code == status.HTTP_200_OK
assert "You have completed the very last level of HTTP quest." in response.text
```
#### File: http-quest/tests/test_utils.py
```python
import pytest
from http_quest.utils import base64_decode, base64_encode, get_masked_password
def test_base64_encode() -> None:
assert base64_encode("foo") == "Zm9v"
assert base64_encode("bar") == "YmFy"
assert (
base64_encode("Beautiful is better than ugly.")
== "QmVhdXRpZnVsIGlzIGJldHRlciB0aGFuIHVnbHku"
)
def test_base64_decode() -> None:
assert base64_decode("Zm9v") == "foo"
assert base64_decode("YmFy") == "bar"
assert (
base64_decode("QmVhdXRpZnVsIGlzIGJldHRlciB0aGFuIHVnbHku")
== "Beautiful is better than ugly."
)
def test_get_masked_password_password_is_bigger_than_secret() -> None:
with pytest.raises(ValueError) as excinfo:
get_masked_password("mark", "<PASSWORD>", "<PASSWORD>")
assert str(excinfo.value) == "Password and secret should be the same length"
def test_get_masked_password_secret_is_bigger_than_password() -> None:
with pytest.raises(ValueError) as excinfo:
get_masked_password("jon", "mark", "mark")
assert str(excinfo.value) == "Password and secret should be the same length"
def test_get_masked_password() -> None:
assert get_masked_password("mark", "<PASSWORD>", "<PASSWORD>") == "mark"
assert get_masked_password("mark", "alex", "<PASSWORD>") == "mar*"
assert get_masked_password("mark", "alex", "<PASSWORD>") == "*a*k"
assert get_masked_password("mark", "alex", "<PASSWORD>") == "****"
assert get_masked_password("mark", "alex", "<PASSWORD>") == "****"
assert get_masked_password("mark", "alex", "<PASSWORD>") == "****"
``` |
{
"source": "2tunnels/typogen",
"score": 4
} |
#### File: typogen/typogen/__init__.py
```python
import re
def skip_letter(text):
"""Skip letter in text.
Examples:
>>> skip_letter('cat')
{'ca', 'ct', 'at'}
>>> skip_letter('frog')
{'rog', 'fro', 'fog', 'frg'}
:type text: str
:rtype: set
"""
text = _clean_text(text)
typos = []
for i in range(len(text)):
if text[i].isspace():
continue
typo = '{head}{tail}'.format(
head=text[0:i],
tail=text[i + 1:]
)
typos.append(typo)
return set(typos)
def double_letter(text):
"""Double letter in text.
Examples:
>>> double_letter('cat')
{'ccat', 'caat', 'catt'}
>>> double_letter('frog')
{'frogg', 'ffrog', 'frrog', 'froog'}
:type text: str
:rtype: set
"""
text = _clean_text(text)
typos = []
for i in range(len(text)):
if text[i].isspace():
continue
typo = '{head}{letter}{tail}'.format(
head=text[0:i + 1],
letter=text[i],
tail=text[i + 1:]
)
typos.append(typo)
return set(typos)
def reverse_letters(text):
"""Reverse letters in text.
Examples:
>>> reverse_letters('cat')
{'cta', 'act'}
>>> reverse_letters('frog')
{'rfog', 'forg', 'frgo'}
:type text: str
:rtype: set
"""
text = _clean_text(text)
typos = []
for i in range(len(text) - 1):
first_letter = text[i]
second_letter = text[i + 1]
# do not reverse if the same letter
if first_letter == second_letter:
continue
typo = '{head}{second}{first}{tail}'.format(
head=text[0:i],
second=second_letter,
first=first_letter,
tail=text[i + 2:]
)
typos.append(typo)
return set(typos)
def skip_spaces(text):
"""Skip spaces in text.
Examples:
>>> skip_spaces('blue invisible unicorn')
{'blueinvisible unicorn', 'blue invisibleunicorn'}
:type text: str
:rtype: set
"""
text = _clean_text(text)
typos = []
start = 0
while True:
try:
index = text.index(' ', start)
except ValueError:
break
typo = '{head}{tail}'.format(
head=text[0:index],
tail=text[index + 1:]
)
typos.append(typo)
start = index + 1
return set(typos)
def nearest_keys(key):
"""Returns nearest keys to provided one.
:type key: str
:rtype: list
"""
key = str(key)
normalized_key = key.lower()
try:
keys = _nearest_keys[normalized_key]
except KeyError:
return key
if key.isupper():
return [k.upper() for k in keys]
return keys
def missed_key(text):
"""Misses needed key as if was pushed nearest ones.
:type text: str
:rtype: set
"""
text = _clean_text(text)
typos = []
for i in range(len(text)):
if text[i].isspace():
continue
for key in nearest_keys(text[i]):
typos.append(text[0:i] + key + text[i + 1:])
return set(typos)
def inserted_key(text):
"""Inserts nearest keys before and after needed one.
:type text: str
:rtype: set
"""
text = _clean_text(text)
typos = []
for i in range(len(text)):
if text[i].isspace():
continue
for key in nearest_keys(text[i]):
typos.append(text[0:i] + key + text[i] + text[i + 1:])
typos.append(text[0:i] + text[i] + key + text[i + 1:])
return set(typos)
# http://tools.seobook.com/spelling/keywords-typos.cgi
_nearest_keys = {
'1': ['2', 'q'],
'2': ['3', 'w', 'q', '1'],
'3': ['4', 'e', 'w', '2'],
'4': ['5', 'r', 'e', '3'],
'5': ['6', 't', 'r', '4'],
'6': ['7', 'y', 't', '5'],
'7': ['8', 'u', 'y', '6'],
'8': ['9', 'i', 'u', '7'],
'9': ['0', 'o', 'i', '8'],
'0': ['p', 'o', '9'],
'q': ['1', '2', 'w', 's', 'a'],
'w': ['q', '2', '3', 'e', 'd', 's', 'a'],
'e': ['w', '3', '4', 'r', 'f', 'd', 's'],
'r': ['e', '4', '5', 't', 'g', 'f', 'd'],
't': ['r', '5', '6', 'y', 'h', 'g', 'f'],
'y': ['t', '6', '7', 'u', 'j', 'h', 'g'],
'u': ['y', '7', '8', 'i', 'k', 'j', 'h'],
'i': ['u', '8', '9', 'o', 'l', 'k', 'j'],
'o': ['i', '9', '0', 'p', 'l', 'k'],
'p': ['o', '0', 'l'],
'a': ['q', 'w', 's', 'x', 'z'],
's': ['a', 'w', 'e', 'd', 'x', 'z'],
'd': ['s', 'e', 'r', 'f', 'c', 'x'],
'f': ['d', 'r', 't', 'g', 'v', 'c'],
'g': ['f', 't', 'y', 'h', 'b', 'v'],
'h': ['g', 'y', 'u', 'j', 'n', 'b'],
'j': ['h', 'u', 'i', 'k', 'm', 'n'],
'k': ['j', 'i', 'o', 'l', 'm'],
'l': ['k', 'o', 'p'],
'z': ['a', 's', 'x'],
'x': ['z', 's', 'd', 'c'],
'c': ['x', 'd', 'f', 'v'],
'v': ['c', 'f', 'g', 'b'],
'b': ['v', 'g', 'h', 'n'],
'n': ['b', 'h', 'j', 'm'],
'm': ['n', 'j', 'k'],
}
def _clean_text(text):
"""Strips text and removes duplicate spaces from it.
:type text: str
:rtype: str
"""
text = text.strip()
text = re.sub(r'\s+', ' ', text)
return text
``` |
{
"source": "2uanj1e/GadgetFinder",
"score": 2
} |
#### File: src/gadgetfinder/utils.py
```python
import capstone as ct
import keystone as kt
ARCH_DIC = {'x86':[ct.CS_ARCH_X86, kt.KS_ARCH_X86],
'arm':[ct.CS_ARCH_ARM, kt.KS_ARCH_ARM],
'arm64':[ct.CS_ARCH_ARM64, kt.KS_ARCH_ARM64],
'mips':[ct.CS_ARCH_MIPS, kt.KS_ARCH_MIPS]
}
MOD_DIC = {'16':[ct.CS_MODE_16, kt.KS_MODE_16],
'32':[ct.CS_MODE_32, kt.KS_MODE_32],
'64':[ct.CS_MODE_64, kt.KS_MODE_64],
'arm':[ct.CS_MODE_ARM, kt.KS_MODE_ARM],
'bigendian':[ct.CS_MODE_BIG_ENDIAN, kt.KS_MODE_BIG_ENDIAN],
'littleendian':[ct.CS_MODE_LITTLE_ENDIAN, kt.KS_MODE_LITTLE_ENDIAN]
}
def get_ct_arch(arch_str):
arch = ARCH_DIC.get(arch_str, None)
if arch:
return arch[0]
else:
return None
def get_ct_mod(mod_str):
mod = MOD_DIC.get(mod_str, None)
if mod:
return mod[0]
else:
return None
def get_kt_arch(arch_str):
arch = ARCH_DIC.get(arch_str, None)
if arch:
return arch[1]
else:
return None
def get_kt_mod(mod_str):
mod = MOD_DIC.get(mod_str, None)
if mod:
return mod[1]
else:
return None
def page(str, keywords=[], lines=25):
for k in keywords:
str = str.replace(k, highlight(k))
text = str.split('\n')
length = len(text)
for linenum in range(length):
print(text[linenum])
if linenum % lines == 0 and linenum >= lines:
key = input('--More-- (%d/%d)' % (linenum-1, length))
if key == 'q':
break
# linux ansicolor highlighting
def highlight(word, color='green'):
output = ""
suffix = "\033[0m"
if color == "green":
prefix = "\033[1;32m"
output = prefix + word + suffix
return output
``` |
{
"source": "2uinc/tenable2jira",
"score": 2
} |
#### File: tenable2jira/tenable-export-report-27/main.py
```python
from __future__ import print_function
from tenable_io.client import TenableIOClient
from tenable_io.api.scans import ScanExportRequest
import boto3
import os
import lxml.html
folder_id = os.environ['TENABLE_FOLDER_ID']
s3_bucket = os.environ['S3_BUCKET']
s3_path = os.environ['S3_PATH']
client = TenableIOClient()
s3 = boto3.client('s3')
def getAllScans(folder_id):
""" Gets all scans in a given scan folder and returns a list of scan id's. """
scan_list = []
scans = client.scan_helper.scans(folder_id=folder_id)
for scan in scans:
if scan.status() != 'completed':
continue
scan_list.append(scan.id)
return scan_list
def exportAllScansS3(folder_id):
""" Exports all Tenable scans found in a folder to S3. """
scan_list = []
scans = client.scan_helper.scans(folder_id=folder_id)
for scan in scans:
if scan.status() != 'completed':
continue
scan.download("./%s.html" % scan.details().info.name, format='html')
scan_list.append(scan.id)
return scan_list
def updateLinkHashes(filename):
""" Replaces all links in the given file with tag's text. """
document = lxml.html.parse(filename)
elements = document.xpath('//a')
for element in elements:
if len(element.attrib.values()) < 2:
element.attrib.update({'href': "#%s" % element.text})
h2elements = document.xpath('//h2')
for element in h2elements:
if 'id' in element.attrib.keys():
element.attrib.update({'id': "%s" % element.text})
document.write(filename, method='html')
def exportScanS3(group):
""" Download html report from Tenable for given scan group and push to S3 bucket. """
scan = client.scan_helper.scans(name=group)
if len(scan) < 1:
return "Scan group '%s' not found in Tenable" % group
if len(scan) > 1:
return "More than 1 scan group found for %s, check Tenable agent scans configs." % group
if scan[0].status() == 'completed':
scan[0].download("/tmp/%s.html" % scan[0].details().info.name, format=ScanExportRequest.FORMAT_HTML, chapter=ScanExportRequest.CHAPTER_EXECUTIVE_SUMMARY)
updateLinkHashes("/tmp/%s.html" % group)
s3.upload_file("/tmp/%s.html" % group, s3_bucket, "%s/%s.html" % (s3_path, group), ExtraArgs={'ContentType': 'text/html'})
return "success"
return "Something went wrong while exporting scan group %s" % group
def lambda_handler(event, context):
group = event['Records'][0]['Sns']['Message']
return exportScanS3(group)
```
#### File: tenable2jira/tenable-to-jira/main.py
```python
from __future__ import print_function
import requests
import json
import os
from tenable_io.client import TenableIOClient
import boto3
import argparse
import sys
import urllib.parse
jira_url = os.environ['JIRA_URL']
jira_auth = (os.environ['JIRA_USER'], os.environ['JIRA_PASSWORD'])
jira_project = os.environ['JIRA_PROJECT']
json_header = {'Content-Type': 'application/json'}
s3_url = os.environ['S3_URL']
aws_account_id = os.environ['AWS_ACCOUNT_ID']
client = TenableIOClient()
# custom jira fields
hostname_field = os.environ['HOSTNAME_FIELD']
source_field = os.environ['SOURCE_FIELD']
severity_field = os.environ['SEVERITY_FIELD']
os_field = os.environ['OS_FIELD']
vulnerability_field = os.environ['VULNERABILITY_FIELD']
epic_field = os.environ['EPIC_FIELD']
epic_link_field = os.environ['EPIC_LINK_FIELD']
def sendSNSMessage(msg):
""" Sends a message to the tenable SNS topic. """
client = boto3.client('sns')
response = client.publish(
TargetArn="arn:aws:sns:us-west-2:%s:tenable-export-report" % aws_account_id,
Message=msg
)
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
return False
return True
def checkJiraAuth():
""" Check authentication to Jira is successful. """
response = requests.get("%s/mypermissions?projectKey=%s" % (jira_url, jira_project), auth=jira_auth)
if response.status_code != 200:
print("Unable to authenticate to Jira. Have you checked: username/password combination, is the user locked out of Jira, and user permissions?")
return False
for permission in ['CREATE_ISSUES', 'CLOSE_ISSUES', 'ADD_COMMENTS', 'RESOLVE_ISSUES', 'TRANSITION_ISSUES', 'EDIT_ISSUES']:
if response.json()['permissions'][permission]['havePermission'] is False:
print("Permissions %s missing for Jira user. Check user groups in Jira" % permission)
return False
return True
def addJiraLink(issue_id, url, title):
""" Adds a link to the given jira issue with url and title. """
payload = {
"globalId": url,
"application": {},
"object": {
"url": url,
"title": title,
}
}
response = requests.post("%s/issue/%s/remotelink" % (jira_url, issue_id),
data=json.dumps(payload),
headers=json_header,
auth=jira_auth)
if not response.ok:
print(response.content)
return False
else:
if response.status_code is 201:
# delete old link
links = requests.get(jira_url + "/issue/%s/remotelink" % issue_id, auth=jira_auth).json()
for link in links:
if link.get('globalId') != url:
response = requests.delete("%s/issue/%s/remotelink/%s" % (jira_url, issue_id, link.get('id')),
headers=json_header,
auth=jira_auth)
print("Updated link: %s" % (issue_id))
return True
def createJiraEpic(group):
""" Checks if an epic exists for a given group and creates it if it doesn't """
tickets = getTickets("""
issuetype = Epic and
Source = tenable and
'Epic Name' = %s and
status != closed
order by created desc
""" % group)
if len(tickets['issues']) > 0:
issue_id = tickets['issues'][0]['key']
else:
payload = {
"fields": {
"project": {"key": jira_project},
"summary": "%s Vulnerability Epic" % group.capitalize(),
"description": """
This is a vulnerability epic for the %s group. This epic will contain
all of the tickets for vulnerable hosts that belong to this group.
""" % group,
"issuetype": {
"name": "Epic"
},
source_field: ["tenable"],
"components": [{"name": group}],
epic_field: group
}
}
response = requests.post("%s/issue/" % jira_url, data=json.dumps(payload), headers=json_header, auth=jira_auth)
if not response.ok:
print(response.content)
return False
else:
issue_id = response.json()['key']
print("Created epic: %s - %s" % (group, issue_id))
return issue_id
def updateJiraHostTask(hostname, group, priority, operating_system):
""" Updates a jira task for a host based on scan results. Opens new ticket if one doesn't exist. """
tickets = getTickets("""
issuetype = Vulnerability and
Source = tenable and
Hostname = %s and
component = %s
order by created desc
""" % (hostname, group))
if len(tickets['issues']) > 0:
issue_id = tickets['issues'][0]['key']
if tickets['issues'][0]['fields']['priority']['id'] != priority:
if updateJiraPriority(issue_id, priority):
print("Updated priority %s : %s" % (issue_id, priority))
else:
if priority:
issue_id = createJiraHostTask(hostname, group, priority, operating_system)
else:
return False
return issue_id
def updateJiraPriority(issue_id, priority):
""" Updates the priority of a given issue in Jira. """
payload = {
"update": {
"priority":
[{"set": {"id": priority}}]
}
}
response = requests.put("%s/issue/%s" % (jira_url, issue_id),
data=json.dumps(payload),
headers=json_header,
auth=jira_auth)
if response.status_code != 204:
return False
return True
def createJiraHostTask(hostname, group, priority, operating_system):
""" Opens a jira task for given host and return the issue key. """
epic_link = createJiraEpic(group)
payload = {
"fields": {
"project": {"key": jira_project},
"summary": "Vulnerable Host: %s" % hostname,
"description": """
Security vulnerabilities were found on host %s. View the attached link for a detailed report of the vulnerabilities and their remediation steps.
h3.Expectations
Complete the remediation for each vulnerability
h3.Process for each sub-task
* Move the ticket to Start Progress when work is started
* Move the ticket to Notify Support if you require help from the Security team
* Move the ticket to Notify Review Process when work is completed
""" % hostname,
"issuetype": {
"name": "Vulnerability"
},
hostname_field: [hostname],
source_field: ["tenable"],
"components": [{"name": group}],
"priority": {"id": priority},
os_field: operating_system,
epic_link_field: epic_link
}
}
response = requests.post("%s/issue/" % jira_url,
data=json.dumps(payload),
headers=json_header,
auth=jira_auth)
if not response.ok:
print(response.content)
return False
else:
print("Created: %s - %s - %s" % (group, hostname, response.json()['key']))
return response.json()['key']
def getTickets(search_string):
""" returns all tickets for a jql search string using pagination. """
done = False
startAt = 0
tickets = {}
search_string_encoded = urllib.parse.quote_plus(search_string)
while not done:
more_tickets = requests.get(
jira_url + "/search?jql=" + search_string_encoded + "&startAt=" + str(startAt),
auth=jira_auth).json()
try:
tickets['issues'].extend(more_tickets['issues'])
except Exception as e:
tickets.update(more_tickets)
if (more_tickets['total'] - more_tickets['startAt']) <= more_tickets['maxResults']:
done = True
else:
startAt += more_tickets['maxResults']
return tickets
def getSubtask(hostname, vulnerability):
""" Checks if a ticket exists for a given vulnerability and host and returns the ticket object. """
tickets = getTickets("""
issuetype = Sub-task and
Hostname = %s and
source = tenable and
Vulnerability ~ '%s'
order by created desc
""" % (hostname, vulnerability))
if len(tickets['issues']) > 0:
return tickets['issues'][0]
return False
def updateSubtasks(parent_ticket, group, hostname, vulnerabilities):
""" Create and close subtasks for vulnerabilities found and no longer found on a host. """
tickets = getTickets("""
issuetype = Sub-task and
parent = %s and
source = tenable and
status != closed
""" % parent_ticket)
updatedTickets = []
for ticket in tickets['issues']:
updatedTickets.append(ticket['key'])
for vulnerability in vulnerabilities:
if vulnerability.severity >= 2:
issue = getSubtask(hostname, vulnerability.plugin_name)
if not issue:
issue_id = createJiraSubtask(parent_ticket, vulnerability, group)
addJiraLink(issue_id,
"https://www.tenable.com/plugins/nessus/%s" % vulnerability.plugin_id,
"Vulnerability Report - %s" % vulnerability.plugin_name)
else:
if issue['fields']['status']['name'].lower() == 'closed':
reopenJiraTicket(issue['key'])
else:
updatedTickets.remove(issue['key'])
for ticket in updatedTickets:
closeJiraTicket(ticket)
return True
def createJiraSubtask(parent_ticket, vulnerability, group):
""" Opens a jira ticket in the given project and returns the issue key. """
if 'ubuntu' in vulnerability.plugin_family.lower():
vuln_name = vulnerability.plugin_name.split(':')[1].strip()
else:
vuln_name = vulnerability.plugin_name
# map tenable vulnerability score to jira fields
severity = {
2: "<img src=\"%s/images/medium.png\" alt=\"Medium Severity\" height=\"25\" width=\"50\">" % s3_url,
3: "<img src=\"%s/images/high.png\" alt=\"High Severity\" height=\"25\" width=\"50\">" % s3_url,
4: "<img src=\"%s/images/critical.png\" alt=\"Critical Severity\" height=\"25\" width=\"50\">" % s3_url,
}
priority = {
2: '3',
3: '2',
4: '1',
}
payload = {
"fields": {
"project": {"key": jira_project},
"parent": {"key": parent_ticket},
"summary": vuln_name,
"description": """
Vulnerability: %s was found on host %s. View the attached link for a detailed report of the vulnerability and remediation steps.
h3.Process
* See parents task for detailed host report
* See attacked link for detailed vulnerability report
* Move to in progress when work is started
* Move to Notify Support if you require help from Security team
* Move to Notify Review Process when remediation is completed
""" % (vuln_name, vulnerability.hostname),
"issuetype": {
"name": "Sub-task"
},
"components": [{"name": group}],
source_field: ["tenable"],
hostname_field: [vulnerability.hostname],
severity_field: {"value": severity[vulnerability.severity]},
vulnerability_field: vulnerability.plugin_name,
"priority": {"id": priority[vulnerability.severity]},
}
}
response = requests.post("%s/issue/" % jira_url,
data=json.dumps(payload),
headers=json_header,
auth=jira_auth)
if not response.ok:
print(response.content)
return False
else:
print("Created sub-task %s" % response.json()['key'])
return response.json()['key']
def closeJiraTicket(issue_id):
""" Closes a given jira ticket if one exists. """
payload = {
"update": {
"comment": [
{
"add": {
"body": "This vulnerability wasn't found in the latest scan, closing ticket."
}
}
]
},
"transition": {
"id": "51"
}
}
response = requests.post("%s/issue/%s/transitions?expand=transitions.fields" % (jira_url, issue_id),
data=json.dumps(payload),
headers=json_header,
auth=jira_auth)
if not response.ok:
print(response.content)
return False
print("Closed sub-task %s" % issue_id)
return True
def reopenJiraTicket(issue_id):
""" Reopen a given jira ticket if one exists. """
payload = {
"update": {
"comment": [
{
"add": {
"body": "A vulnerability was found in the latest scan, reopening ticket."
}
}
]
},
"transition": {
"id": "61"
}
}
response = requests.post("%s/issue/%s/transitions?expand=transitions.fields" % (jira_url, issue_id),
data=json.dumps(payload),
headers=json_header,
auth=jira_auth)
if not response.ok:
print(response.content)
return False
print("Reopened issue %s" % issue_id)
return True
def updateScan(scan_name):
""" Updates tickets and reports for a given tenable scan name. """
scan = client.scan_helper.scans(name=scan_name)[0]
if scan.status() != 'completed':
return False
details = scan.details()
group = details.info.name
print("Updating Group: %s" % group)
for host in details.hosts:
priority = None
if host.critical > 0:
priority = '1'
elif host.high > 0:
priority = '2'
elif host.medium > 0:
priority = '3'
host_details = client.scans_api.host_details(scan.id, host.host_id)
try:
parent_ticket = updateJiraHostTask(host.hostname,
group,
priority,
host_details.info.as_payload()['operating-system'][0])
updateSubtasks(parent_ticket, group, host.hostname, host_details.vulnerabilities)
except Exception as e:
pass
return True
def main():
parser = argparse.ArgumentParser(description='Run tenable to jira.')
parser.add_argument('-s', '--scan', help='Tenable scan name')
parser.add_argument('-sq', '--sqs_body', help='Message received from SQS queue')
args = parser.parse_args()
if not checkJiraAuth():
sys.exit("Exiting... Jira auth check failed")
if args.sqs_body:
body = json.loads(args.sqs_body)
name = json.loads(body['Message'])['mail']['commonHeaders']['subject'].split(':')[-1].strip()
else:
name = args.scan
updateScan(name)
with open('scan.txt', 'w') as fh:
fh.write(name)
return "success"
def lambda_handler(event, context):
if not checkJiraAuth():
sys.exit("Exiting... Jira auth check failed")
name = event['Records'][0]['ses']['mail']['commonHeaders']['subject'].split(':')[-1].strip()
updateScan(name)
return "success"
if __name__ == "__main__":
main()
``` |
{
"source": "2U-maker/Paddle-AAE",
"score": 2
} |
#### File: Paddle-AAE/aae/aae.py
```python
import numpy as np
import math
import itertools
import paddle
import paddle.vision.transforms as T
import paddle.nn.functional as F
import paddle.nn as nn
def reparameterization(mu, logvar, latent_dim):
std = paddle.exp(logvar / 2)
sampled_z = paddle.to_tensor(np.random.normal(0, 1, (mu.shape[0], latent_dim)))
z = sampled_z * std + mu
return z
class Encoder(nn.Layer):
def __init__(self, img_shape, latent_dim):
super(Encoder, self).__init__()
self.latent_dim = latent_dim
self.model = nn.Sequential(
nn.Linear(int(np.prod(img_shape)), 512),
nn.LeakyReLU(0.2),
nn.Linear(512, 512),
nn.BatchNorm1D(512),
nn.LeakyReLU(0.2),
)
self.mu = nn.Linear(512, latent_dim)
self.logvar = nn.Linear(512, latent_dim)
def forward(self, img):
img_flat = paddle.reshape(img, (img.shape[0], -1))
x = self.model(img_flat)
m = self.mu(x)
logvar = self.logvar(x)
z = reparameterization(m, logvar, self.latent_dim)
return z
class Decoder(nn.Layer):
def __init__(self, img_shape, latent_dim):
super(Decoder, self).__init__()
self.img_shape = img_shape
self.model = nn.Sequential(
nn.Linear(latent_dim, 512),
nn.LeakyReLU(0.2),
nn.Linear(512, 512),
nn.BatchNorm1D(512),
nn.LeakyReLU(0.2),
nn.Linear(512, int(np.prod(img_shape))),
nn.Tanh(),
)
def forward(self, z):
img_flat = self.model(z)
img = img_flat.reshape((img_flat.shape[0], *self.img_shape)) # NCHW
return img
class Discriminator(nn.Layer):
def __init__(self, latent_dim):
super(Discriminator, self).__init__()
self.model = nn.Sequential(
nn.Linear(latent_dim, 512),
nn.LeakyReLU(0.2),
nn.Linear(512, 256),
nn.LeakyReLU(0.2),
nn.Linear(256, 1),
nn.Sigmoid(),
)
def forward(self, z):
validity = self.model(z)
return validity
``` |
{
"source": "2U-maker/Paddle-CycleMLP",
"score": 2
} |
#### File: 2U-maker/Paddle-CycleMLP/create.py
```python
import paddle
from cycle_mlp import *
from ppcls.arch.backbone.model_zoo.regnet import *
def create_model(model_name,
pretrained=False,
is_teacher=False,
**kwargs):
if is_teacher:
model = eval(model_name)(pretrained=pretrained, use_ssld=False, **kwargs)
else:
model = eval(model_name)(**kwargs)
if pretrained and os.path.exists(pretrained):
model.set_state_dict(paddle.load(pretrained))
return model
def optimizer_kwargs(cfg):
""" cfg/argparse to kwargs helper
Convert optimizer args in argparse args or cfg like object to keyword args for updated create fn.
"""
kwargs = dict()
kwargs['beta1'] = cfg.opt_beta1 if cfg.opt_beta1 != None else 0.9
kwargs['beta2'] = cfg.opt_beta2 if cfg.opt_beta2 != None else 0.999
kwargs['epsilon'] = cfg.opt_eps
kwargs['weight_decay'] = cfg.weight_decay
return kwargs
def scheduler_kwargs(cfg):
kwargs = dict()
kwargs['learning_rate'] = cfg.lr
kwargs['T_max'] = cfg.t_max
kwargs['eta_min'] = cfg.eta_min
kwargs['last_epoch'] = cfg.last_epoch
return kwargs
def create_optimizer_scheduler(cfg, model):
opt = cfg.opt
sched = cfg.sched
assert opt == 'AdamW', 'Currently, only AdamW is supported !'
assert sched == 'CosineAnnealingDecay', 'Currently, only CosineAnnealingDecay is supported !'
clip_grad = cfg.clip_grad
if clip_grad != None:
clip_grad = paddle.nn.ClipGradByNorm(clip_grad)
opt_kwargs = optimizer_kwargs(cfg)
sched_kwargs = scheduler_kwargs(cfg)
scheduler = paddle.optimizer.lr.CosineAnnealingDecay(**sched_kwargs)
optimizer = paddle.optimizer.AdamW(learning_rate=scheduler, parameters=model.parameters(), grad_clip=clip_grad, **opt_kwargs)
return optimizer, scheduler
```
#### File: 2U-maker/Paddle-CycleMLP/dataset.py
```python
import paddle
import paddle.vision.transforms as T
import numpy as np
from PIL import Image
from ppcls.data.preprocess.ops.autoaugment import ImageNetPolicy
from ppcls.data.preprocess.ops.random_erasing import RandomErasing
from utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
class CycleMLPdataset(paddle.io.Dataset):
def __init__(self, img_dir, txtpath, mode='train', transform=None):
"""
Image classification reading class
args:
img_dir: Image folder.
txtpath: TXT file path.
transform: Data enhancement
"""
super(CycleMLPdataset, self).__init__()
assert mode in ['train', 'val', 'test'], "mode is one of ['train', 'val', 'test]"
self.mode = mode
self.transform = transform
self.data = []
with open(txtpath, 'r') as f:
for line in f.readlines():
if mode != 'test':
img_path, label = line.strip().split(' ')
self.data.append([img_dir + '/' + img_path, label])
else:
self.data.append(img_dir + '/' + line.strip())
def __getitem__(self, idx):
if self.mode != 'test':
img = Image.open(self.data[idx][0]).convert('RGB')
label = self.data[idx][1]
if self.transform:
img = self.transform(img)
return img.astype('float32'), np.array(label, dtype='int64')
else:
img = Image.open(self.data[idx]).convert('RGB')
if self.transform:
img = self.transform(img)
return img.astype('float32')
def __len__(self):
return len(self.data)
def build_transfrom(is_train, args):
transform = []
resize_im = args.input_size > 32
if is_train:
transform.extend([
T.RandomResizedCrop(size=args.input_size, interpolation=args.train_interpolation),
T.RandomHorizontalFlip(),
ImageNetPolicy(),
T.ColorJitter(*([args.color_jitter]*3)),
T.ToTensor(),
T.Normalize(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
RandomErasing(EPSILON=args.reprob, mean=IMAGENET_DEFAULT_MEAN)
])
if not resize_im:
transform.append(T.RandomCrop(args.input_size, padding=4))
else:
if resize_im:
size = int((256 / 224) * args.input_size)
transform.append(T.Resize(size, interpolation=args.train_interpolation))
transform.append(T.CenterCrop(size=args.input_size))
transform.extend([
T.ToTensor(),
T.Normalize(mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD)
])
return T.Compose(transform)
```
#### File: 2U-maker/Paddle-CycleMLP/eval.py
```python
import argparse
import paddle
import json
import time
import datetime
from pathlib import Path
import os
from engine import evaluate
from dataset import CycleMLPdataset, build_transfrom
from create import create_model
def get_args_parser():
parser = argparse.ArgumentParser('CycleMLP evaluation script', add_help=False)
parser.add_argument('--batch-size', default=64, type=int)
# Model parameters
parser.add_argument('--model', default='CycleMLP_B1', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--input-size', default=224, type=int, help='images input size')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--num-classes', type=int, default=1000,
help='number of categories')
parser.add_argument('--model-pretrained', type=str, default='',
help='local model parameter path')
# Augmentation parameters
parser.add_argument('--train-interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
# Dataset parameters
parser.add_argument('--val-data-dir', default='./', type=str, help='image folder path')
parser.add_argument('--val-txt-path', default='./val.txt', type=str,
help='image file name and label information file')
parser.add_argument('--val-data-mode', default='val', type=str,
help="one of ['train', 'val', 'test'], the TXT file whether contains labels")
parser.add_argument('--num_workers', default=0, type=int)
parser.add_argument('--output_dir', default='./output',
help='path where to save, empty for no saving')
return parser
def main(args):
# 构建数据集
val_transform = build_transfrom(is_train=False, args=args)
val_dataset = CycleMLPdataset(args.val_data_dir, args.val_txt_path, mode=args.val_data_mode, transform=val_transform)
data_loader_val = paddle.io.DataLoader(
dataset=val_dataset,
batch_size=args.batch_size,
num_workers=args.num_workers,
drop_last=False
)
# 构建模型
print(f"Creating model: {args.model}")
max_accuracy = 0
model = create_model(
args.model,
pretrained=args.model_pretrained,
is_teacher=False,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_path_rate=args.drop_path)
log_path = args.output_dir + f'/{args.model}_eval_log.txt'
if os.path.exists(log_path):
os.remove(log_path)
# 验证
start_time = time.time()
test_stats = evaluate(data_loader_val, model, log_path)
print(f"Accuracy of the network on the {len(val_dataset)} test images: {test_stats['acc1']:.1f}%")
max_accuracy = max(max_accuracy, test_stats["acc1"])
print(f'Max accuracy: {max_accuracy:.2f}%')
log_stats = {**{f'test_{k}': v for k, v in test_stats.items()}}
if args.output_dir:
with open(log_path, 'a') as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Val time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser('CycleMLP evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
```
#### File: 2U-maker/Paddle-CycleMLP/train.py
```python
import argparse
import datetime
import os
import time
import paddle
import paddle.distributed as dist
import json
from pathlib import Path
from engine import train_one_epoch, evaluate
import utils
from dataset import CycleMLPdataset, build_transfrom
from losses import DistillationLoss, SoftTargetCrossEntropy, LabelSmoothingCrossEntropy
from data import Mixup
from create import create_model, create_optimizer_scheduler
def get_args_parser():
parser = argparse.ArgumentParser('CycleMLP training and evaluation script', add_help=False)
parser.add_argument('--batch-size', default=64, type=int)
parser.add_argument('--epochs', default=300, type=int)
# Model parameters
parser.add_argument('--model', default='CycleMLP_B1', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--input-size', default=224, type=int, help='images input size')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--num-classes', type=int, default=1000,
help='number of categories')
parser.add_argument('--model-pretrained', type=str, default='',
help='local model parameter path')
# Optimizer parameters
parser.add_argument('--opt', default='AdamW', type=str, metavar='OPTIMIZER',
help='Optimizer (default: "AdamW"')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt-beta1', default=None, type=float, nargs='+', metavar='BETA1',
help='Optimizer Beta1 (default: None, use opt default)')
parser.add_argument('--opt-beta2', default=None, type=float, nargs='+', metavar='BETA2',
help='Optimizer Beta1 (default: None, use opt default)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--weight-decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='CosineAnnealingDecay', type=str, metavar='SCHEDULER',
help='LR scheduler (default: "CosineAnnealingDecay"')
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--t-max', default=300, type=int,
help='the upper limit for training is half the cosine decay period, the default equal epochs')
parser.add_argument('--eta-min', default=0, type=float,
help='the minimum value of the learning rate is ηmin in the formula, the default value is 0')
parser.add_argument('--last-epoch', default=-1, type=int,
help='the epoch of the previous round is set to the epoch of the previous round when training is restarted.\
the default value is -1, indicating the initial learning rate ')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
help='Use AutoAugment policy. "v0" or "original". " + \
"(default: rand-m9-mstd0.5-inc1)'),
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Distillation parameters
parser.add_argument('--teacher-model', default='RegNetX_4GF', type=str, metavar='MODEL',
help='Name of teacher model to train (default: "RegNetX_4GF"')
parser.add_argument('--teacher-pretrained', default=None, type=str,
help='teacher model parameters must be downloaded locally')
parser.add_argument('--distillation-type', default='none', choices=['none', 'soft', 'hard'], type=str, help="")
parser.add_argument('--distillation-alpha', default=0.5, type=float, help="")
parser.add_argument('--distillation-tau', default=1.0, type=float, help="")
# * Finetuning params
parser.add_argument('--finetune', default='', help='finetune from checkpoint')
# Dataset parameters
parser.add_argument('--train-data-dir', default='./', type=str, help='image folder path')
parser.add_argument('--train-txt-path', default='./train.txt', type=str,
help='image file name and label information file')
parser.add_argument('--train-data-mode', default='train', type=str,
help="one of ['train', 'val', 'test'], the TXT file whether contains labels")
parser.add_argument('--val-data-dir', default='./', type=str, help='image folder path')
parser.add_argument('--val-txt-path', default='./val.txt', type=str,
help='image file name and label information file')
parser.add_argument('--val-data-mode', default='val', type=str,
help="one of ['train', 'val', 'test'], the TXT file whether contains labels")
parser.add_argument('--num_workers', default=0, type=int)
parser.add_argument('--output_dir', default='./output',
help='path where to save, empty for no saving')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
# distributed training
parser.add_argument('--is_distributed', default=False, type=bool,
help='whether to enable single-machine multi-card training')
# custom parameters
parser.add_argument('--is_amp', default=False, type=bool,
help='whether to enable automatic mixing precision training')
parser.add_argument('--init_loss_scaling', default=1024, type=float,
help='initial Loss Scaling factor. The default value is 1024')
return parser
def main(args):
print(args)
if args.distillation_type != 'none' and args.finetune:
raise NotImplementedError("Finetuning with distillation not yet supported")
# 构建数据
train_transform = build_transfrom(is_train=True,args=args)
train_dataset = CycleMLPdataset(args.train_data_dir, args.train_txt_path, mode=args.train_data_mode, transform=train_transform)
data_loader_train = paddle.io.DataLoader(
dataset=train_dataset,
batch_size=args.batch_size,
num_workers=args.num_workers,
drop_last=True,
)
val_transform = build_transfrom(is_train=False, args=args)
val_dataset = CycleMLPdataset(args.val_data_dir, args.val_txt_path, mode=args.val_data_mode, transform=val_transform)
data_loader_val = paddle.io.DataLoader(
dataset=val_dataset,
batch_size=args.batch_size,
num_workers=args.num_workers,
drop_last=False
)
# mixup混类数据增强
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.num_classes)
print(f"Creating model: {args.model}")
model = create_model(
args.model,
pretrained=args.model_pretrained,
is_teacher=False,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_path_rate=args.drop_path)
# 配置蒸馏模型
teacher_model = None
if args.distillation_type != 'none':
print(f"Creating teacher model: {args.teacher_model}")
teacher_model = create_model(
args.teacher_model,
pretrained=False,
is_teacher=True,
class_num=args.num_classes
)
if os.path.exists(args.teacher_pretrained):
teacher_model.set_state_dict(paddle.load(args.teacher_pretrained))
teacher_model.eval()
get_world_size = 1
# 是否分布式
if args.is_distributed:
dist.init_parallel_env()
model = paddle.DataParallel(model)
teacher_model = paddle.DataParallel(teacher_model)
get_world_size = dist.get_world_size()
# finetune 微调
if args.finetune:
if os.path.exists(args.finetune):
print('You must download the finetune model and place it locally.')
else:
checkpoint = paddle.load(args.finetune)
checkpoint_model = checkpoint
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias', 'head_dist.weight', 'head_dist.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
# interpolate position embedding
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape((-1, orig_size, orig_size, embedding_size)).transpose((0, 3, 1, 2))
pos_tokens = paddle.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.transpose((0, 2, 3, 1)).flatten(1, 2)
new_pos_embed = paddle.concat((extra_tokens, pos_tokens), axis=1)
checkpoint_model['pos_embed'] = new_pos_embed
model.set_state_dict(checkpoint_model)
# 优化器配置
linear_scaled_lr = args.lr * args.batch_size * get_world_size / 512.0
args.lr = linear_scaled_lr
optimizer, scheduler = create_optimizer_scheduler(args, model)
# setup automatic mixed-precision (AMP) loss scaling and op casting
loss_scaler = None
if args.is_amp:
loss_scaler = paddle.amp.GradScaler(init_loss_scaling=args.init_loss_scaling)
n_parameters = sum(p.numel() for p in model.parameters() if not p.stop_gradient).numpy()[0]
print('number of params:', n_parameters)
print('=' * 30)
if args.mixup > 0.:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = paddle.nn.CrossEntropyLoss()
# wrap the criterion in our custom DistillationLoss, which
# just dispatches to the original criterion if args.distillation_type is 'none'
criterion = DistillationLoss(
criterion, teacher_model, args.distillation_type, args.distillation_alpha, args.distillation_tau
)
# 训练
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
max_accuracy = 0.0
log_path = args.output_dir + "/train_log.txt"
for epoch in range(args.start_epoch, args.epochs):
train_stats = train_one_epoch(
model, criterion, data_loader_train,
optimizer, epoch, log_path, scheduler,
loss_scaler, mixup_fn, args.is_distributed)
# 参数保存
if args.output_dir:
utils.save_on_master({
'pdparams': model.state_dict(),
'pdopt': optimizer.state_dict(),
'pdsched': scheduler.state_dict(),
'pdepoch': epoch,
'pdscaler': loss_scaler.state_dict() if loss_scaler is not None else None,
'pdargs': args,
}, args.output_dir + f'/checkpoint_{epoch}')
# 验证
test_stats = evaluate(data_loader_val, model, log_path)
print(f"Accuracy of the network on the {len(val_dataset)} test images: {test_stats['acc1']:.1f}%")
max_accuracy = max(max_accuracy, test_stats["acc1"])
print(f'Max accuracy: {max_accuracy:.2f}%')
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': str(n_parameters)}
for key in log_stats:
print(type(log_stats[key]))
if args.output_dir and utils.is_main_process():
with open(log_path, 'a') as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser('CycleMLP training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
``` |
{
"source": "2vcps/silver-guacamole",
"score": 3
} |
#### File: silver-guacamole/capture/video-py.py
```python
from picamera import PiCamera
from time import sleep
import datetime as dt
# Import MinIO library.
from minio import Minio
from minio.error import (ResponseError, BucketAlreadyOwnedByYou,
BucketAlreadyExists)
import os
import namegenerator
camera = PiCamera()
now = dt.datetime.now()
date_time = now.strftime("%m_%d_%Y")
def makevid():
camera.rotation = 270
camera.resolution = (1920, 1080)
camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
camera.start_recording('/home/pi/video/{}-{}.h264'.format(namegenerator.gen(), date_time))
start = dt.datetime.now()
while (dt.datetime.now() - start).seconds < 10:
camera.annotate_text = dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
camera.wait_recording(0.2)
camera.stop_recording()
def uploadvid():
path = '/home/pi/video'
files = []
names = []
# Initialize minioClient with an endpoint and access/secret keys.
minioClient = Minio('10.100.9.86:9000',
access_key='minio',
secret_key='minio123',
secure=False)
# Make a bucket with the make_bucket API call.
try:
minioClient.make_bucket("videos", location="us-east-1")
except BucketAlreadyOwnedByYou as err:
pass
except BucketAlreadyExists as err:
pass
except ResponseError as err:
print('you suck')
raise
finally:
for r, d, f in os.walk(path):
for file in f:
if '.h264' or '.mp4' in file:
#names.append(file)
#files.append(os.path.join(r, file))
obj_path = r.replace("./", "")
obj_name = file
#obj_name = obj_path.replace("/", "") + file
print(obj_name, 'uploaded:', os.path.join(r, file), 'to videos bucket on minio.')
minioClient.fput_object('videos', obj_name, os.path.join(r, file))
cleantmp()
def cleantmp():
path = '/home/pi/video'
files = []
names = []
for r, d, f in os.walk(path):
for file in f:
if '.mp4' in file:
#obj_path = r.replace("./", "")
#obj_name = file
os.remove( '{}/{}'.format(path, file))
print('File {} deleted'.format(file))
if '.h264' in file:
os.remove( '{}/{}'.format(path, file))
print('File {} deleted'.format(file))
makevid()
uploadvid()
``` |
{
"source": "2vergent/pyjournal",
"score": 3
} |
#### File: 2vergent/pyjournal/pyjournal.py
```python
import sys, os, platform, getpass
from datetime import date
def actualsignup(u,p):
f = open('udata.txt','a')
encred = encrypt(u,p)
f.write("%s " % encred[0])
f.write("%s\n" % encred[1])
os.mkdir('%s' % encred[0])
print("-"*67)
print("> USER CREDENTIALS SUCCESSFULLY WRITTEN")
print("-"*67)
print("\n")
f.close()
root()
def actuallogin(u,p):
if platform.system() == "Linux":
if 'ANDROID_STORAGE' in os.environ:
try:
os.chdir('/storage/emulated/0/pyjournal')
f = open("udata.txt","r")
except:
print("> No user credentials found. Use 'signup' command to get started")
print("-"*67)
print("\n")
root()
else:
try:
os.chdir('%s/pyjournal' % os.path.expanduser('~'))
f = open("udata.txt","r")
except:
print("> No user credentials found. Use 'signup' command to get started")
print("-"*67)
print("\n")
root()
elif platform.system() == "Windows":
try:
os.chdir('C:/Users/%s/Documents/pyjournal' % os.getenv('username'))
f = open("udata.txt","r")
except:
print("> No user credentials found. Use 'signup' command to get started")
print("-"*67)
print("\n")
root()
else:
try:
os.chdir('%s/pyjournal' % os.path.expanduser('~'))
f = open("udata.txt","r")
except:
print("> No user credentials found. Use 'signup' command to get started")
print("-"*67)
print("\n")
root()
b = False
for x in f:
line = x
l = ""
for x in line:
l += x
l = l.split(' ')
l1 = list(l[1])
l2 = l1[:len(l1)-1]
l3 = ''.join(l2)
decred = decrypt(l[0],l3)
us = decred[0]
pas = decred[1]
if us == u and pas == p:
print("> LOGIN SUCCESSFUL")
print("-"*67)
print("\n")
journal(u,p)
b = True
break
if b == False:
print("> INVALID USER CREDENTIALS")
print("-"*67)
print("\n")
f.close()
root()
def encrypt(u,p):
u = list(u)
p = list(p)
uencrypt,pencrypt = [],[]
for x in p:
if x.isdigit() and x != 0:
shift = int(x)
break
alphabet = list(map(chr, range(97,123)))
for x in u:
for y in alphabet:
if x == y:
uencrypt += alphabet[(alphabet.index(y))+shift]
break
else:
uencrypt += chr(ord(x)+shift)
break
for x in p:
for y in alphabet:
if x == y:
pencrypt += alphabet[(alphabet.index(y))+shift]
break
elif x!=str(shift):
if x == str(0):
pencrypt += x
else:
pencrypt += chr(ord(x)+shift)
break
else:
pencrypt += x
break
uname = ''.join(uencrypt)
passw = ''.join(pencrypt)
return uname,passw
def decrypt(u,p):
shift = 0
u,p = list(u),list(p)
udecrypt,pdecrypt = [],[]
for x in p:
if x.isdigit() and x != 0:
shift = int(x)
break
alphabet = list(map(chr, range(97,123)))
for x in u:
for y in alphabet:
if x == y:
udecrypt += alphabet[(alphabet.index(y))-shift]
break
else:
udecrypt += chr(ord(x)-shift)
break
for x in p:
for y in alphabet:
if x == y:
pdecrypt += alphabet[(alphabet.index(y))-shift]
break
elif x!=str(shift):
if x==str(0):
pdecrypt += x
else:
pdecrypt += chr(ord(x)-shift)
break
else:
pdecrypt += x
break
una = ''.join(udecrypt)
pas = ''.join(pdecrypt)
return una,pas
def signup():
print("-"*67)
print("> SIGNUP : ")
q = False
r = 0
def us():
print("-"*67)
q = False
while q == False:
try:
use = input(" USERNAME:> ")
q = True
except:
continue
return use
def check(u):
c = True
if platform.system() == "Linux":
if 'ANDROID_STORAGE' in os.environ:
try:
os.chdir('/storage/emulated/0/pyjournal')
except:
os.chdir('/storage/emulated/0')
os.mkdir('pyjournal')
os.chdir('/storage/emulated/0/pyjournal')
try:
f = open("udata.txt","r")
except:
f = open("udata.txt","w")
f.close()
f = open("udata.txt","r")
else:
try:
os.chdir('%s/pyjournal' % os.path.expanduser('~'))
except:
os.chdir('%s' % os.path.expanduser('~'))
os.mkdir('pyjournal')
os.chdir('%s/pyjournal' % os.path.expanduser('~'))
try:
f = open("udata.txt","r")
except:
f = open("udata.txt","w")
f.close()
f = open("udata.txt","r")
elif platform.system() == "Windows":
try:
os.chdir('C:/Users/%s/Documents/pyjournal' % os.getenv('username'))
except:
os.chdir('C:/Users/%s/Documents' % os.getenv('username'))
os.mkdir('pyjournal')
os.chdir('C:/Users/%s/Documents/pyjournal' % os.getenv('username'))
try:
f = open("udata.txt","r")
except:
f = open("udata.txt","w")
f.close()
f = open("udata.txt","r")
else:
try:
os.chdir('%s/pyjournal' % os.path.expanduser('~'))
except:
os.chdir('%s' % os.path.expanduser('~'))
os.mkdir('pyjournal')
os.chdir('%s/pyjournal' % os.path.expanduser('~'))
try:
f = open("udata.txt","r")
except:
f = open("udata.txt","w")
f.close()
f = open("udata.txt","r")
for x in f:
line = x
l = ""
for x in line:
l += x
l = l.split(' ')
l1 = list(l[1])
l2 = l1[:len(l1)-1]
l3 = ''.join(l2)
d = decrypt(l[0],l3)
if u == d[0]:
print("-"*67)
print("> Username already exists")
c = False
return c
while r == False:
user = us()
r = check(user)
while q == False:
spaces = 0
for x in user:
if x.isspace():
spaces+= 1
if len(user) >= 5 and spaces == 0:
q = True
else:
print("-"*67)
print("> Username should not contain spaces and should be atleast 5 characters long.")
print("-"*67)
v = False
while v == False:
try:
user = input(" USERNAME:> ")
v = True
except:
continue
print("-"*67)
v = False
while v == False:
try:
passwrd = getpass.getpass(" PASSWORD:> ")
v = True
except:
continue
v = False
special = 0
numbers = 0
for x in passwrd:
if x.isdigit():
numbers += 1
elif x.isalnum() != True:
special += 1
if special >= 1 and numbers >= 1 and len(passwrd) >= 5:
v = True
else:
print("-"*67)
print("> Password must have special characters,numbers and should be at least 5 characters long")
print("-"*67)
continue
actualsignup(user,passwrd)
def login():
print("-"*67)
print("> LOGIN : ")
print("-"*67)
v = False
while v == False:
try:
user = input(" USERNAME:> ")
v = True
except:
continue
print("-"*67)
v = False
while v == False:
try:
passwrd = getpass.getpass(" PASSWORD:> ")
v = True
except:
continue
print("-"*67)
actuallogin(user,passwrd)
def checkdate(edate):
edate = edate.strip()
datelist = edate.split("/")
dd = int(datelist[0])
mm = int(datelist[1])
yyyy = int(datelist[2])
if len(str(yyyy)) < 4:
return False
m30 = [4,6,9,11]
m31 = [1,3,5,7,8,10,12]
m = 0
cd = False
if mm in m30:
if dd > 0 and dd <= 30:
m = 1
elif mm in m31:
if dd > 0 and dd <= 31:
m = 1
elif mm == 2:
if yyyy % 4 == 0:
if dd > 0 and dd <= 29:
m = 1
else:
if dd > 0 and dd <= 28:
m = 1
else:
return cd
if m == 1:
cd = True
return cd
else:
return cd
def journal(u,p):
username = encrypt(u,p)
if platform.system() == "Linux":
if 'ANDROID_STORAGE' in os.environ:
os.chdir('/storage/emulated/0/pyjournal/%s' % username[0])
else:
os.chdir('%s/pyjournal/%s' % (os.path.expanduser('~'),username[0]))
elif platform.system() == "Windows":
os.chdir('C:/Users/%s/Documents/pyjournal/%s' % (os.getenv('username'),username[0]))
else:
os.chdir('%s/pyjournal/%s' % (os.path.expanduser('~'),username[0]))
today = date.today()
d = today.strftime("%d/%m/%Y")
d = d.split("/")
d = '.'.join(d)
v = False
while v == False:
try:
choice = input("@%s:> " % u)
choice = choice.lower()
choice = choice.strip()
if choice == "entry":
print("-"*67)
entryin = input(" ENTRY:> ")
print("-"*67)
entry = open('%s.txt' % d,'a')
if os.path.getsize('%s.txt' % d) == 0:
entryin = "1 | " + entryin
encred = encrypt(entryin,p)
entry.write("%s\n" % (encred[0]))
entry.close()
else:
entry = open('%s.txt' % d,'r')
lines = entry.readlines()
lastline = lines[-1]
lastdecrypt = decrypt(lastline,p)
lm = lastdecrypt[0].split(' | ')
lastnum = int(lm[0]) + 1
entry.close()
entry = open('%s.txt' % d,'a')
lastnum = "%s | " % str(lastnum)
entryin = lastnum + entryin
encred = encrypt(entryin,p)
entry.write("%s\n" % (encred[0]))
entry.close()
print("\n")
continue
elif choice == "check":
q = False
while q == False:
try:
print("-"*67)
entrydate = input(" DATE (dd/mm/yyyy):> ")
testdate = entrydate.split("/")
_ = int(testdate[1])
_ = int(testdate[2])
q = True
except:
print("-"*67)
print("> You entered an invalid date. Recheck the date format")
continue
q = False
while q == False:
z = checkdate(entrydate)
if z == True:
q = True
else:
print("-"*67)
print("> You entered an invalid date. Recheck the date format")
print("-"*67)
entrydate = input(" DATE (dd/mm/yyyy):> ")
print("-"*67)
entrydate = entrydate.replace('/','.')
try:
entry = open("%s.txt" % entrydate,"r")
except:
print("> ENTRIES DATED %s" % entrydate)
print("-"*67)
print("> No entries on this day")
print("-"*67)
print("\n")
continue
if os.path.getsize("%s.txt" % entrydate) == 0:
print("> ENTRIES DATED %s" % entrydate)
print("-"*67)
print("> No entries on this day")
print("-"*67)
print("\n")
continue
else:
entry = open('%s.txt' % entrydate,'r')
entryout = entry.readlines()
out = ''
for x in entryout:
decred = decrypt(x,p)
out += decred[0] + '\n '
out = out.strip()
print("> ENTRIES DATED %s :" % entrydate)
print("-"*67)
print(" %s" % out)
print("-"*67)
entry.close()
print("\n")
elif choice == "login":
print("-"*67)
print("> You are already logged in")
print("-"*67)
elif choice == "signup":
print("-"*67)
print("> You already have a journal")
print("-"*67)
elif choice == "root":
print("-"*67)
print("> You have logged out")
print("-"*67)
root()
elif choice == "list":
listentries(u,p)
elif choice == "edit":
editentries(u,p)
elif choice == "kill":
print("-"*67)
os._exit(0)
elif choice == "clear":
if platform.system() == "Windows":
os.system('cls')
elif platform.system() == "Linux":
os.system('clear')
else:
os.system('clear')
elif choice == "help":
print("-"*67)
print("To proceed,enter any of the following commands:\n\n entry: Adds a new entry to your journal for this day\n check: Accepts date to check previous entries in your journal\n list: Lists previous entries\n edit: Delete Entries in your Journal\n root: Go back to root\n clear: Clears the screen\n kill: Exits pyJournal")
print("-"*67)
elif choice == "quit" or choice == "exit":
print("-"*67)
print("Use 'kill' to stop pyJournal")
print("-"*67)
elif choice == "":
continue
else:
print("-"*67)
print("'%s': Command not found. Type 'help' for more info" % choice)
print("-"*67)
except:
print("'%s': Command not found. Type 'help' for more info" % choice)
continue
def listentries(u,p):
encred = encrypt(u,p)
user = encred[0]
months = {'1':'January ','2':'February ','3':'March ','4':'April ','5':'May ','6':'June ','7':'July ','8':'August ','9':'September','10':'October ','11':'November ','12':'December '}
days = ""
if platform.system() == "Linux":
if 'ANDROID_STORAGE' in os.environ:
os.chdir('/storage/emulated/0/pyjournal/%s' % user)
files = os.listdir('/storage/emulated/0/pyjournal/%s' % user)
for x in files:
if os.path.getsize('%s' % x) == 0:
os.remove('%s' % x)
files = os.listdir('/storage/emulated/0/pyjournal/%s' % user)
if files == []:
print("-"*67)
print(" No Entries to be listed")
print("-"*67)
return 0
print("-"*67)
for month in months:
for x in files:
entry = x[:len(x)-4]
try:
entry = entry.split(".")
except:
continue
if int(entry[1]) == int(month):
days += "|" + str(entry[0]) + "-"+ entry[2] + "|"
if days != "":
print(" %s: %s" % (months[month],days))
print(" ","-"*61)
days = ""
print("-"*67)
else:
os.chdir('%s/pyjournal/%s' % (os.path.expanduser('~'),user))
files = os.listdir('%s/pyjournal/%s' % (os.path.expanduser('~'),user))
for x in files:
if os.path.getsize('%s' % x) == 0:
os.remove('%s' % x)
files = os.listdir('%s/pyjournal/%s' % (os.path.expanduser('~'),user))
if files == []:
print("-"*67)
print(" No Entries to be listed")
print("-"*67)
return 0
print("-"*67)
for month in months:
for x in files:
entry = x[:len(x)-4]
try:
entry = entry.split(".")
except:
continue
if int(entry[1]) == int(month):
days += "|" + str(entry[0]) + "-"+ entry[2] + "|"
if days != "":
print(" %s: %s" % (months[month],days))
print(" ","-"*61)
days = ""
print("-"*67)
elif platform.system() == "Windows":
os.chdir('C:/Users/%s/Documents/pyjournal/%s' % (os.getenv('username'),user))
files = os.listdir('C:/Users/%s/Documents/pyjournal/%s' % (os.getenv('username'),user))
for x in files:
if os.path.getsize('%s' % x) == 0:
os.remove('%s' % x)
files = os.listdir('C:/Users/%s/Documents/pyjournal/%s' % (os.getenv('username'),user))
if files == []:
print("-"*67)
print(" No Entries to be listed")
print("-"*67)
return 0
print("-"*67)
for month in months:
for x in files:
entry = x[:len(x)-4]
try:
entry = entry.split(".")
except:
continue
if int(entry[1]) == int(month):
days += "|" + str(entry[0]) + "-"+ entry[2] + "|"
if days != "":
print(" %s: %s" % (months[month],days))
print(" ","-"*61)
days = ""
print("-"*67)
else:
os.chdir('%s/pyjournal/%s' % (os.path.expanduser('~'),user))
files = os.listdir('%s/pyjournal/%s' % (os.path.expanduser('~'),user))
for x in files:
if os.path.getsize('%s' % x) == 0:
os.remove('%s' % x)
files = os.listdir('%s/pyjournal/%s' % (os.path.expanduser('~'),user))
if files == []:
print("-"*67)
print(" No Entries to be listed")
print("-"*67)
return 0
print("-"*67)
for month in months:
for x in files:
entry = x[:len(x)-4]
try:
entry = entry.split(".")
except:
continue
if int(entry[1]) == int(month):
days += "|" + str(entry[0]) + "-"+ entry[2] + "|"
if days != "":
print(" %s: %s" % (months[month],days))
print(" ","-"*61)
days = ""
print("-"*67)
def editentries(u,p):
print("-"*67)
t = False
while t == False:
try:
choice = input(" $edit@%s:> " % u)
except:
continue
choice = choice.lower()
choice = choice.strip()
if choice == "del":
print("-"*67)
q = False
while q == False:
try:
date = input(" DATE (dd/mm/yyyy):> ")
testdate = date.split("/")
_ = int(testdate[1])
_ = int(testdate[2])
print(" ","-"*61)
z = checkdate(date)
if z == True:
q = True
else:
raise Exception
except:
print(" ","-"*61)
print(" > You entered an invalid date. Recheck the date format")
print(" ","-"*61)
continue
edate = date.strip()
datelist = edate.split("/")
d = '.'.join(datelist)
try:
entry = open("%s.txt" % d,"r")
if os.path.getsize("%s.txt" % d) == 0:
raise Exception
except:
print(" > No Entries on this day to edit")
print("-"*67)
print("\n")
return 0
lines = entry.readlines()
entry.close()
linenum = []
for x in lines:
n = int(x[0]) - 1
linenum.append(str(n))
out = ''
for x in lines:
decred = decrypt(x,p)
out += decred[0] + '\n '
out = out.strip()
print(" > ENTRIES DATED %s :" % d)
print(" ","-"*61)
print(" %s" % out)
print(" ","-"*61)
entry.close()
count = 0
q = False
while q == False:
try:
num = input(" ENTRY NUMBER:> ")
num = num.split(",")
if "" in num:
raise Exception
print(" ","-"*61)
for x in num:
for y in linenum:
if x in linenum:
count += 1
if count/len(num) == len(linenum):
q = True
else:
raise Exception
except:
print(" > Recheck Entered Entry Number(s)")
print(" ","-"*61)
continue
entries = out.split('\n ')
for x in num:
for y in entries:
if y.startswith(str(x)):
entries.remove(y)
repnum = 1
edit = []
for x in entries:
edited = x.replace(x[0],str(repnum))
edit.append(edited)
repnum += 1
editedentry = ""
for x in edit:
x = x.strip()
encred = encrypt(x,p)
editedentry += encred[0] + '\n'
entry = open("%s.txt" % d,"w")
entry.write(editedentry)
entry.close()
print(" > Entry Deletion Successful")
print(" ","-"*61)
print("\n")
continue
elif choice == "add":
print("-"*67)
q = False
while q == False:
try:
date = input(" DATE (dd/mm/yyyy):> ")
testdate = date.split("/")
_ = int(testdate[1])
_ = int(testdate[2])
print(" ","-"*61)
z = checkdate(date)
if z == True:
q = True
else:
raise Exception
except:
print(" ","-"*61)
print(" > You entered an invalid date. Recheck the date format")
print(" ","-"*61)
continue
edate = date.strip()
datelist = edate.split("/")
d = '.'.join(datelist)
entry = open("%s.txt" % d, "a")
if os.path.getsize("%s.txt" % d) == 0:
print(" > No Previous Entries ")
print(" ","-"*61)
entry.close()
else:
entry.close()
entry = open("%s.txt" % d,"r")
lines = entry.readlines()
out = ""
for x in lines:
decred = decrypt(x,p)
out += decred[0] + '\n '
out = out.strip()
print(" > Entries in this Journal: ")
print(" ","-"*61)
print(" %s" % out)
print(" ","-"*61)
entry.close()
q = False
while q == False:
try:
addentry = input(" ADD:> ")
if addentry != "":
q = True
except:
continue
if os.path.getsize('%s.txt' % d) == 0:
entry = open("%s.txt" % d,"a")
addentry = "1 | " + addentry
encred = encrypt(addentry,p)
entry.write("%s\n" % (encred[0]))
print(" ","-"*61)
print(" > ADDITION OF ENTRY SUCCESSFUL")
print(" ","-"*61)
print("\n")
entry.close()
else:
entry = open('%s.txt' % d,'r')
lines = entry.readlines()
lastline = lines[-1]
lastdecrypt = decrypt(lastline,p)
lm = lastdecrypt[0].split(' | ')
lastnum = int(lm[0]) + 1
entry.close()
entry = open('%s.txt' % d,'a')
lastnum = "%s | " % str(lastnum)
addentry = lastnum + addentry
encred = encrypt(addentry,p)
entry.write("%s\n" % (encred[0]))
entry.close()
print(" ","-"*61)
print(" > ADDITION OF ENTRY SUCCESSFUL")
print(" ","-"*61)
print("\n")
continue
elif choice == "@%s" % u:
print("-"*67)
print("\n")
return 0
elif choice == "help":
print(" ","-"*64)
print(" Type any of these following commands:\n\n add: Adds an entry to a existing/new Journal\n del: Deletes an entry from an existing/new Journal\n @<your-username>: Goes back to your Journal\n root: Goes back to root\n kill: Quits pyJournal\n clear: Clears the screen")
print(" ","-"*64)
elif choice == "root":
print("-"*67)
print("\n")
root()
elif choice == "kill":
print("\n")
os._exit(0)
elif choice == "":
continue
elif choice == "clear":
if platform.system() == "Windows":
os.system('cls')
elif platform.system() == "Linux":
os.system('clear')
else:
os.system('clear')
else:
print("-"*67)
print(" '%s': Command not found. Type 'help' for more info" % choice)
print("-"*67)
continue
def root():
q = False
while q == False:
try:
choice = input("@root:> ")
choice = choice.lower()
choice = choice.strip()
if choice == "login":
login()
elif choice == "signup":
signup()
elif choice == "kill":
print("\n")
os._exit(0)
elif choice == "clear":
if platform.system() == "Windows":
os.system('cls')
elif platform.system() == "Linux":
os.system('clear')
else:
os.system('clear')
elif choice == "help":
print("-"*67)
print("To get started with pyjournal, type any of these commands:\n\n login: Enter your username and password to access your journal\n signup: Enter a new username and password to create your journal\n clear: Clears the screen\n kill: Exits pyJournal")
print("-"*67)
elif choice == "":
continue
elif choice == "vineeth":
print("-"*67)
print(" Created from scratch with passion and elegance in mind")
print("-"*67)
elif choice == "quit" or choice == "exit":
print("-"*67)
print("Use 'kill' to stop pyJournal")
print("-"*67)
else:
raise Exception
except:
print("-"*67)
print("'%s': Command not found. Type 'help' for more info" % choice)
print("-"*67)
continue
print("-"*67)
print("[ | pyJournal v4.3.2 | ]")
print("-"*67)
print("\n")
root()
``` |
{
"source": "2vin2vin/nslquad",
"score": 3
} |
#### File: mjlib/multiplex/mp_console.py
```python
import argparse
import asyncio
import sys
import mjlib.micro.multiplex_protocol as mp
import mjlib.micro.aioserial as aioserial
async def make_stdin(loop, stream):
reader = asyncio.StreamReader(loop=loop)
reader_protocol = asyncio.StreamReaderProtocol(reader)
await loop.connect_read_pipe(lambda: reader_protocol, stream)
return reader
async def read(client):
while True:
data = await client.read(1)
sys.stdout.write(data.decode('latin1'))
sys.stdout.flush()
async def write(stdin, client):
while True:
data = await stdin.readline()
client.write(data)
await client.drain()
async def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-d', '--device', type=str, default='/dev/ttyUSB0',
help='serial device')
parser.add_argument('-b', '--baud', type=int, default=3000000,
help='baud rate')
parser.add_argument('-t', '--target', type=int, default=1,
help='destination multiplex address')
parser.add_argument('-c', '--channel', type=int, default=1,
help='destination multiples channel')
args = parser.parse_args()
serial = aioserial.AioSerial(port=args.device, baudrate=args.baud)
manager = mp.MultiplexManager(serial)
mc = mp.MultiplexClient(
manager,
destination_id=args.target,
channel=args.channel)
stdin = await make_stdin(asyncio.get_event_loop(), sys.stdin)
await asyncio.gather(
read(mc),
write(stdin, mc))
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(main())
```
#### File: multiplex/test/py_stream_helpers_test.py
```python
import asyncio
import io
import unittest
import mjlib.multiplex.stream_helpers as sh
def _run(coro):
return asyncio.get_event_loop().run_until_complete(coro)
class RecordingStreamTest(unittest.TestCase):
def test_basic(self):
stream = sh.AsyncStream(io.BytesIO(bytes([0x05, 0x06, 0x07, 0x08])))
dut = sh.RecordingStream(stream)
async def run():
return await dut.read(1)
result = _run(run())
self.assertEqual(result, bytes([0x05]))
self.assertEqual(dut.buffer(), bytes([0x05]))
result2 = _run(run())
self.assertEqual(result2, bytes([0x06]))
self.assertEqual(dut.buffer(), bytes([0x05, 0x06]))
result3 = _run(run())
self.assertEqual(result3, bytes([0x07]))
self.assertEqual(dut.buffer(), bytes([0x05, 0x06, 0x07]))
class PipeStreamTest(unittest.TestCase):
async def async_test_basic(self):
dut = sh.PipeStream()
async def write(pipe):
pipe.write(bytes([4, 5, 6]))
await pipe.drain()
async def read(pipe):
return await pipe.read(3)
results = await asyncio.gather(write(dut.side_a), read(dut.side_b))
self.assertEqual(results[1], bytes([4, 5, 6]))
def test_basic(self):
# import pdb; pdb.set_trace()
_run(asyncio.Task(self.async_test_basic()))
if __name__ == '__main__':
unittest.main()
```
#### File: mjlib/telemetry/reader.py
```python
import collections
import enum
import struct
_RESERVED_KEYWORDS = set([
'False',
'None',
'True',
'and',
'as',
'assert',
'break',
'class',
'continue',
'def',
'del',
'elif',
'else',
'except',
'finally',
'for',
'from',
'global',
'if',
'import',
'in',
'is',
'lambda',
'nonlocal',
'not',
'or',
'pass',
'raise',
'return',
'try',
'while',
'with',
'yield',
])
def _escape_python3_identifier(name):
if name in _RESERVED_KEYWORDS:
return 'py_' + name
return name
class ParseError(RuntimeError):
pass
class Stream:
def __init__(self, base):
self._base = base
def ignore(self, size):
self._base.read(size)
def read_varuint(self):
result = 0
fk = 1
value = 0
while True:
data = self._base.read(1)
if len(data) != 1:
raise EOFError()
value = data[0]
result = result + fk * (value & 0x7f)
fk = fk * 128
if value < 0x80:
break
if result >= 2**64:
raise ParseError("invalid varuint")
return result
def read_string(self):
size = self.read_varuint()
return self._base.read(size).decode('utf8')
def read_bytes(self):
size = self.read_varuint()
return self._base.read(size)
def _read_format(self, fmt, size):
return struct.unpack(fmt, self._base.read(size))[0]
def read_f32(self):
return self._read_format('<f', 4)
def read_f64(self):
return self._read_format('<d', 8)
def read_u8(self):
return self._read_format('<B', 1)
def read_u16(self):
return self._read_format('<H', 2)
def read_u32(self):
return self._read_format('<I', 4)
def read_u64(self):
return self._read_format('<Q', 8)
def read_i8(self):
return self._read_format('<b', 1)
def read_i16(self):
return self._read_format('<h', 2)
def read_i32(self):
return self._read_format('<i', 4)
def read_i64(self):
return self._read_format('<q', 8)
class FinalType:
@staticmethod
def from_binary(schema_stream, **kwargs):
return FinalType()
def read(self, data_stream):
raise ParseError("invalid")
class NullType:
@staticmethod
def from_binary(schema_stream, **kwargs):
return NullType()
def read(self, data_stream):
return None
class BooleanType:
@staticmethod
def from_binary(schema_stream, **kwargs):
return BooleanType()
def read(self, data_stream):
return data_stream.read_u8() != 0
class FixedIntType:
@staticmethod
def from_binary(schema_stream, **kwargs):
return FixedIntType(schema_stream.read_u8())
def __init__(self, field_size):
if field_size not in [1, 2, 4, 8]:
raise ParseError("invalid fixedint size")
self.field_size = field_size
def read(self, data_stream):
if self.field_size == 1:
return data_stream.read_i8()
elif self.field_size == 2:
return data_stream.read_i16()
elif self.field_size == 4:
return data_stream.read_i32()
elif self.field_size == 8:
return data_stream.read_i64()
assert False
class FixedUIntType:
@staticmethod
def from_binary(schema_stream, **kwargs):
return FixedUIntType(schema_stream.read_u8())
def __init__(self, field_size):
if field_size not in [1, 2, 4, 8]:
raise ParseError("invalid fixeduint size")
self.field_size = field_size
def read(self, data_stream):
if self.field_size == 1:
return data_stream.read_u8()
elif self.field_size == 2:
return data_stream.read_u16()
elif self.field_size == 4:
return data_stream.read_u32()
elif self.field_size == 8:
return data_stream.read_u64()
assert False
class VarintType:
@staticmethod
def from_binary(schema_stream, **kwargs):
return VarintType()
def read(self, data_stream):
raise RuntimeError("not implemented")
class VaruintType:
@staticmethod
def from_binary(schema_stream, **kwargs):
return VaruintType()
def read(self, data_stream):
return data_stream.read_varuint()
class Float32Type:
@staticmethod
def from_binary(schema_stream, **kwargs):
return Float32Type()
def read(self, data_stream):
return data_stream.read_f32()
class Float64Type:
@staticmethod
def from_binary(schema_stream, **kwargs):
return Float64Type()
def read(self, data_stream):
return data_stream.read_f64()
class BytesType:
@staticmethod
def from_binary(schema_stream, **kwargs):
return BytesType()
def read(self, data_stream):
return data_stream.read_bytes()
class StringType:
@staticmethod
def from_binary(schema_stream, **kwargs):
return StringType()
def read(self, data_stream):
return data_stream.read_string()
class Field:
def __init__(self, flags, name, aliases, type_class, default_value):
self.flags = flags
self.name = name
self.aliases = aliases
self.type_class = type_class
self.default_value = default_value
def read(self, data_stream):
return self.type_class.read(data_stream)
class ObjectType:
@staticmethod
def from_binary(schema_stream, name='_', **kwargs):
object_flags = schema_stream.read_varuint()
fields = []
while True:
flags = schema_stream.read_varuint()
name = schema_stream.read_string()
naliases = schema_stream.read_varuint()
aliases = [schema_stream.read_string() for _ in range(naliases)]
type_class = Type.from_binary(schema_stream, name=name)
is_default = schema_stream.read_u8() != 0
default_value = type_class.read(schema_stream) if is_default else None
if isinstance(type_class, FinalType):
break
fields.append(
Field(flags, name, aliases, type_class, default_value))
return ObjectType(flags, fields, name)
def __init__(self, flags, fields, name):
self.flags = flags
self.fields = fields
self.namedtuple = collections.namedtuple(
'_', [_escape_python3_identifier(x.name) for x in self.fields])
def read(self, data_stream):
return self.namedtuple._make([x.read(data_stream) for x in self.fields])
class EnumType:
@staticmethod
def from_binary(schema_stream, name='_', **kwargs):
type_class = Type.from_binary(schema_stream)
nvalues = schema_stream.read_varuint()
items = [
(type_class.read(schema_stream), schema_stream.read_string())
for _ in range(nvalues)]
items = { key : value for (value, key) in items }
return EnumType(name, type_class, items)
def __init__(self, name, type_class, items):
self.type_class = type_class
class Enum(enum.IntEnum):
@classmethod
def _missing_(cls, value):
return cls._create_pseudo_member_(value)
@classmethod
def _create_pseudo_member_(cls, value):
pseudo_member = cls._value2member_map_.get(value, None)
if pseudo_member is None:
new_member = int.__new__(cls, value)
new_member._name_ = str(value)
new_member._value_ = value
pseudo_member = cls._value2member_map_.setdefault(value, new_member)
return pseudo_member
self.enum_class = Enum(name, items)
def read(self, data_stream):
return self.enum_class(self.type_class.read(data_stream))
class ArrayType:
@staticmethod
def from_binary(schema_stream, **kwargs):
return ArrayType(Type.from_binary(schema_stream))
def __init__(self, type_class):
self.type_class = type_class
def read(self, data_stream):
nvalues = data_stream.read_varuint()
return [self.type_class.read(data_stream) for _ in range(nvalues)]
class FixedArrayType:
@staticmethod
def from_binary(schema_stream, **kwargs):
size = schema_stream.read_varuint()
return FixedArrayType(size, Type.from_binary(schema_stream))
def __init__(self, size, type_class):
self.size = size
self.type_class = type_class
def read(self, data_stream):
return [self.type_class.read(data_stream) for _ in range(self.size)]
class MapType:
@staticmethod
def from_binary(schema_stream, **kwargs):
type_class = Type.from_binary(schema_stream)
return MapType(type_class)
def __init__(self, type_class):
self.type_class = type_class
def read(self, data_stream):
nitems = data_stream.read_varuint()
return dict((data_stream.read_string(), self.type_class.read(data_stream))
for _ in range(nitems))
class UnionType:
@staticmethod
def from_binary(schema_stream, **kwargs):
items = []
while True:
type_class = Type.from_binary(schema_stream)
if isinstance(type_class, FinalType):
break
items.append(type_class)
return UnionType(items)
def __init__(self, items):
self.items = items
def read(self, data_stream):
index = data_stream.read_varuint()
return self.items[index].read(data_stream)
class TimestampType:
@staticmethod
def from_binary(schema_stream, **kwargs):
return TimestampType()
def read(self, data_stream):
us_since_epoch = data_stream.read_i64()
return us_since_epoch / 1000000.0
class DurationType:
@staticmethod
def from_binary(schema_stream, **kwargs):
return DurationType()
def read(self, data_stream):
us = data_stream.read_i64()
return us / 1000000.0
TYPES = [
FinalType, # 0
NullType, # 1
BooleanType, # 2
FixedIntType, # 3
FixedUIntType, # 4
VarintType, # 5
VaruintType, # 6
Float32Type, # 7
Float64Type, # 8
BytesType, # 9
StringType, # 10
None,
None,
None,
None,
None,
ObjectType, # 16
EnumType, # 17
ArrayType, # 18
FixedArrayType, # 19
MapType, # 20
UnionType, # 21
TimestampType, # 22
DurationType, # 23
]
_TYPES_FROM_BINARY = [x.from_binary if x else None for x in TYPES]
class Type:
'''Read a telemetry serialized schema'''
@staticmethod
def from_binary(schema_stream, **kwargs):
if not isinstance(schema_stream, Stream):
schema_stream = Stream(schema_stream)
type_index = schema_stream.read_varuint()
try:
this_type = _TYPES_FROM_BINARY[type_index]
except IndexError:
raise RuntimeError("Unknown type: {}".format(type_index))
return this_type(schema_stream, **kwargs)
```
#### File: mjmech/video-ui/calibration_cv.py
```python
import cv
import cv2
import numpy
import os
import yaml
import re
import logging
DEFAULT_CAL = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'cal-20141222.yaml')
# If calibration does not yield proper values, we cvan tweak them
# a little useinf fixups vector.
# Elements 0 and 1: increase f_x/f_y by that many percent
DEFAULT_FIXUPS = (0, -7.25)
class CameraCalibration(object):
"""This class handles camera calibration -- it translates between
projection point (pixel coordinates on the image) and
camera-referenced coordinates (X/Z and Y/Z fractions)
"""
# Default diagonal field-of-view, in degrees
_DEFAULT_FOV_DIAG = 78.0 # From C920 camera datasheet
# Camera aspect ratio
_DEFAULT_ASPECT = (16, 9)
# camera metadata for describe()
# For Logitech C920 --> 1/3" 3 MP HD sensor (according to some forum)
_DEFAULT_IMAGE_SIZE = (1928, 1080) # pixels
_DEFAULT_SENSOR_SIZE = (6.0, 4.8) # mm
def __init__(self):
"""Create an object. Use default camera matrix"""
# (x, y) tuple -- size of image used for calibration, pixels
self.image_size = None
self.camera_matrix = None
self.dist_coeffs = None
self.axis_scale = (1.0, 1.0)
# A fixup vector. see DEFAULT_FIXUPS definition above
self.fixups = None
self.logger = logging.getLogger('calibration_cv')
self.setup_yaml(DEFAULT_CAL, DEFAULT_FIXUPS)
def setup_yaml(self, fname, fixups=None):
"""Load a yaml file produced by opencv calibration procedure"""
# The file is in FileStorage class, which does not have full python API.
# Load member-by-member as recommended here:
# http://stackoverflow.com/questions/11025477/error-loading-opencv-xml-file-with-python
self.camera_matrix = numpy.asarray(
cv.Load(fname, cv.CreateMemStorage(), 'camera_matrix'))
self.dist_coeffs = numpy.asarray(
cv.Load(fname, cv.CreateMemStorage(), 'distortion_coefficients'))
with open(fname, 'r') as f:
data_str = f.read()
# Clean up invalid directive yaml
data_str = re.sub('^%.*$', '', data_str, count=1, flags=re.MULTILINE)
# Remove class tags
data_str = re.sub(': !!.*$', ':', data_str, flags=re.MULTILINE)
data = yaml.safe_load(data_str)
self.image_size = (data['image_width'], data['image_height'])
self.fixups = fixups
self._apply_fixups()
def _apply_fixups(self):
if self.fixups is None:
self.axis_scale = (1.0, 1.0)
else:
self.axis_scale = (1.0 + self.fixups[0] / 100.0,
1.0 + self.fixups[1] / 100.0)
def tweak_fixups(self, dfx, dfy):
"""Temporary tweak fixups vector. Arguments are _changes_ to fixup
values.
It is expected that this function is only used to determine optimal
fixups value which should then be hardcoded into the top of this
script.
"""
self.fixups = (self.fixups[0] + dfx,
self.fixups[1] + dfy)
self._apply_fixups()
self.logger.warn('Temporary fixup vector: (%.2f, %.2f)' % self.fixups)
def describe(self):
"""Describe calibration info as a string. Use human-understandable
metrics."""
image_size = (self.image_size or self._DEFAULT_IMAGE_SIZE)
aperture = self._DEFAULT_SENSOR_SIZE
assert self.camera_matrix is not None, 'Calibration not loaded'
# Center coordinates -- in percent, with (0, 0) being image center
c_x = (self.camera_matrix[0, 2] *1.0 / image_size[0] - 0.5) * 100.0
c_y = (self.camera_matrix[1, 2] *1.0 / image_size[1] - 0.5) * 100.0
# f_x/f_y - if object size is same a distance to object, how much of a
# frame will it take? in percent
f_x = self.camera_matrix[0, 0] * 100.0 / image_size[0]
f_y = self.camera_matrix[1, 1] * 100.0 / image_size[1]
fov_x, fov_y, focal_len, principal, aspect = \
cv2.calibrationMatrixValues(self.camera_matrix, image_size,
aperture[0], aperture[0])
fixups = self.fixups
return ("FOV(deg)=({fov_x:.1f}/{fov_y:.1f}) "
"principal=({principal[0]:.1f}/{principal[1]:.1f}) "
"center=({c_x:.1f},{c_y:.1f})% "
"focal_len=({f_x:.1f},{f_y:.1f})% "
"focal_len_mm={focal_len:.2f} aspect={aspect:.3f} "
"fixups=({fixups[0]:.2f},{fixups[1]:.2f})"
).format(**locals())
def to_world2d(self, uv_pos, image_size=None):
"""Given a list of pixel positions (u, v) (correspond to camera_x and
camera_y), rectify and return world 2d coordinates (x_p=X/Z and
y_p=Y/Z, assuming origin is at camera center, and Z axis is along
camera optical axis)
"""
if image_size is None:
image_size = self.image_size
# Scale to image size during calibration
cam_pts = [
[[pos_x * self.image_size[0] * 1.0 / image_size[0],
pos_y * self.image_size[1] * 1.0 / image_size[1]]]
for (pos_x, pos_y) in uv_pos]
cam_pts_np = numpy.array(cam_pts, dtype=numpy.float32)
world_pts = cv2.undistortPoints(cam_pts_np,
self.camera_matrix, self.dist_coeffs)
#print 'undistort debug:', cam_pts, world_pts
return [(itm[0][0] * self.axis_scale[0],
itm[0][1] * self.axis_scale[1]) for itm in world_pts]
def from_world2d(self, ptlist, image_size=None):
world_pts = numpy.array(
[ (x / self.axis_scale[0],
y / self.axis_scale[1], 1) for (x, y) in ptlist ])
# No rotation or translation
rvec = tvec = (0, 0, 0)
img_pts, jacobian = cv2.projectPoints(
world_pts, rvec, tvec, self.camera_matrix, self.dist_coeffs)
if image_size is None:
scale_x, scale_y = 1.0, 1.0
else:
scale_x = 1.0 * image_size[0] / self.image_size[0]
scale_y = 1.0 * image_size[1] / self.image_size[1]
return [(itm[0][0] * scale_x, itm[0][1] * scale_y) for itm in img_pts]
if __name__ == '__main__':
cc = CameraCalibration()
import sys
if len(sys.argv) > 1:
cc.setup_yaml(sys.argv[1])
print cc.describe()
orig = ((0, 0),
(cc.image_size[0] * 0.5, cc.image_size[1] * 0.5),
cc.image_size)
distorted = cc.to_world2d(orig)
back = cc.from_world2d(distorted)
print 'transform test:'
for (orig_i, distorted_i, back_i) in zip(orig, distorted, back):
print ' (%8.3f, %8.3f) -> (%8.5f, %8.5f) -> (%8.3f, %8.3f)' % (
orig_i + distorted_i + back_i)
```
#### File: mjmech/video-ui/vui_helpers.py
```python
import functools
import logging
import os
import re
import signal
import sys
import time
import traceback
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../legtool/'))
import trollius as asyncio
import gbulb
# common helpers for vclient.py and vserver.py
g_quit_handlers = list()
class FCMD(object):
"""Fire_cmd constants"""
off = 0 # Motor disabled
inpos1 = 1 # Fire 1 shot when inposition
inpos2 = 2 # Fire 2 shots when inposition
inpos3 = 3 # Fire 3 shots when inposition
inpos5 = 5 # Fire 5 shots when inposition
now1 = 11 # Fire 1 shot immediately
cont = 20 # Keep firing for some time
@classmethod
def _numshots(cls, x):
if x in [cls.off, cls.cont]:
return 0
elif x in [cls.inpos1, cls.now1]:
return 1
elif x == cls.inpos2:
return 2
elif x == cls.inpos3:
return 3
elif x == cls.inpos5:
return 5
assert False, 'Unknown FCMD input: %r' % (x, )
@classmethod
def _is_inpos(cls, x):
return x in [cls.inpos1, cls.inpos2, cls.inpos3, cls.inpos5]
_error_logger = logging.getLogger('fatal-error')
def wrap_event(callback):
"""Wrap event callback so the app exit if it crashes"""
def wrapped(*args, **kwargs):
try:
return callback(*args, **kwargs)
except BaseException as e:
_error_logger.error("Callback %r crashed:", callback)
_error_logger.error(" %s %s" % (e.__class__.__name__, e))
for line in traceback.format_exc().split('\n'):
_error_logger.error('| %s', line)
for cb in g_quit_handlers:
cb()
raise
return wrapped
@wrap_event
def _sigint_handler():
# wrap_event decorator will make sure the exception stops event loop.
raise Exception('Got SIGINT')
@wrap_event
def _sigterm_handler():
# wrap_event decorator will make sure the exception stops event loop.
raise Exception('Got SIGTERM')
def CriticalTask(coro, exit_ok=False):
"""Just like asyncio.Task, but if it ever completes, the program
exits. (unless @p exit_ok is True, in which case non-exception
exit is ok)
"""
task = asyncio.Task(coro)
task.add_done_callback(
functools.partial(_critical_task_done, exit_ok=exit_ok))
return task
@wrap_event
def _critical_task_done(task, exit_ok=False):
if exit_ok and task.done() and (task.exception() is None):
# No errors
return
# Reach inside task's privates to get exception traceback
# (if this fails for some reason, wrap_event will terminate us anyway)
logger = logging.getLogger('fatal-error')
if task._tb_logger:
tb_text = task._tb_logger.tb
if task._tb_logger.source_traceback:
# Do not care
tb_text.append('Task creation information not shown')
else:
tb_text = ['No traceback info']
_error_logger.error("Critical task (%r) exited:", task._coro)
e = task.exception()
if e is None:
_error_logger.error('Successful (but unexpected) exit')
else:
_error_logger.error(" %s %s" % (e.__class__.__name__, e))
for line in ''.join(tb_text).split('\n'):
_error_logger.error('| %s', line.rstrip())
# Terminate program
for cb in g_quit_handlers:
cb()
_INVALID_CHARACTER_RE = re.compile('[^\x20-\x7E]')
def sanitize_stdout(line):
"""Strip newline from end of line, call repr if any nonprintables
left ater that.
"""
if line.endswith('\n'):
line = line[:-1]
if _INVALID_CHARACTER_RE.search(line):
return repr(line)
return line
@asyncio.coroutine
def dump_lines_from_fd(fd, print_func):
"""Given a file descriptor (integer), asyncronously read lines from it.
Sanitize each line and pass as a sole argument to @p print_func.
"""
fdobj = os.fdopen(fd, 'r')
loop = asyncio.get_event_loop()
reader = asyncio.streams.StreamReader(loop=loop)
transport, _ = yield asyncio.From(loop.connect_read_pipe(
lambda: asyncio.streams.StreamReaderProtocol(reader),
fdobj))
while True:
line = yield asyncio.From(reader.readline())
if line == '': # EOF
break
print_func(sanitize_stdout(line))
transport.close()
def asyncio_misc_init():
asyncio.set_event_loop_policy(gbulb.GLibEventLoopPolicy())
main_loop = asyncio.get_event_loop()
main_loop.add_signal_handler(signal.SIGINT, _sigint_handler)
main_loop.add_signal_handler(signal.SIGTERM, _sigterm_handler)
g_quit_handlers.append(
lambda: main_loop.call_soon_threadsafe(main_loop.stop))
def add_pair(a, b, scale=1.0):
return (a[0] + b[0] * scale,
a[1] + b[1] * scale)
def logging_init(verbose=True):
root = logging.getLogger()
root.setLevel(logging.DEBUG)
# Code below is like basicConfig, but we do not apply limits on loggers;
# instead we apply them on handlers.
outhandler = logging.StreamHandler()
outhandler.setFormatter(
logging.Formatter(
fmt=("%(asctime)s.%(msecs).3d [%(levelname).1s]"
" %(name)s: %(message)s"),
datefmt="%T"))
root.addHandler(outhandler)
if not verbose:
outhandler.setLevel(logging.INFO)
class MemoryLoggingHandler(logging.Handler):
"""Handler that just appends data to python array.
The elements are tuples:
(time, level, logger_name, message)
"""
SHORT_LEVEL_NAMES = {
logging.CRITICAL: 'C',
logging.ERROR: 'E',
logging.WARNING: 'W',
logging.INFO: 'I',
logging.DEBUG: 'D',
}
def __init__(self, install=False, max_records=10000):
logging.Handler.__init__(self)
self.data = list()
self.max_records = max_records
self.on_record = list()
self.last_time = 0
if install:
logging.getLogger().addHandler(self)
def emit(self, record):
"""Part of logging.Handler interface"""
ts = record.created
if ts <= self.last_time:
# timestamp must always increase
ts = self.last_time + 1.0e-6
self.last_time = ts
self.data.append(
(ts,
record.levelno,
record.name,
record.getMessage()))
while len(self.data) > self.max_records:
self.data.pop(0)
for cb in self.on_record:
cb()
@staticmethod
def to_dict(mtuple, time_field='time'):
"""Given a 4-tuple, convert it to dict"""
return {
time_field: mtuple[0],
'levelno': mtuple[1],
'name': mtuple[2],
'message': mtuple[3]}
@classmethod
def to_string(cls, mtuple):
"""Given a 4-tuple, convert it to string (default formatted)
"""
return "%s [%s] %s: %s" % (
time.strftime("%T", time.localtime(mtuple[0])),
cls.SHORT_LEVEL_NAMES.get(mtuple[1], mtuple[1]),
mtuple[2], mtuple[3])
@staticmethod
def relog(mtuple, delta_t=0, prefix=''):
"""Given a 4-tuple, re-log it to local logger"""
# NOTE: this igores whole logger hierarchy. If we ever use it, pass a
# name here.
root = logging.getLogger()
assert len(mtuple) == 4
rec = root.makeRecord(
prefix + mtuple[2], mtuple[1], 'remote-file', -1, mtuple[3],
[], None, 'remote-func', None)
# Override time. There is no better way.
ct = delta_t + mtuple[0]
rec.created = ct
rec.msecs = (ct - long(ct)) * 1000
rec.relativeCreated = (rec.created - logging._startTime) * 1000
# Dispatch.
root.handle(rec)
```
#### File: workspace/clang/repository.bzl
```python
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def clang_repository():
http_archive(
name = "org_llvm_clang",
urls = [
"https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz",
],
sha256 = "b25f592a0c00686f03e3b7db68ca6dc87418f681f4ead4df4745a01d9be63843",
strip_prefix = "clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04",
build_file = Label("//tools/workspace/clang:package.BUILD"),
)
http_archive(
name = "org_llvm_libcxx",
urls = [
"https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/libcxx-10.0.0.src.tar.xz",
],
sha256 = "270f8a3f176f1981b0f6ab8aa556720988872ec2b48ed3b605d0ced8d09156c7",
strip_prefix = "libcxx-10.0.0.src",
build_file = Label("//tools/workspace/clang:libcxx.BUILD"),
)
http_archive(
name = "org_llvm_libcxxabi",
urls = [
"https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/libcxxabi-10.0.0.src.tar.xz",
],
sha256 = "e71bac75a88c9dde455ad3f2a2b449bf745eafd41d2d8432253b2964e0ca14e1",
strip_prefix = "libcxxabi-10.0.0.src",
build_file = Label("//tools/workspace/clang:libcxxabi.BUILD"),
)
``` |
{
"source": "2vin/pg_voice",
"score": 2
} |
#### File: pg_voice/scripts/wake.py
```python
import os
import sys
scriptpath = "../binding/python"
# Add the directory containing your module to the Python path (wants absolute paths)
sys.path.append(os.path.abspath(scriptpath))
# Do the import
import porcupine
## Path to Porcupine's C library available under lib/${SYSTEM}/${MACHINE}/
library_path = '../lib/linux/x86_64/libpv_porcupine.so'
## It is available at lib/common/porcupine_params.pv
model_file_path = '../lib/common/porcupine_params.pv'
## Path to single or multiple .ppn files
keyword_file_paths = ['../ppn/p and g_linux.ppn', '../ppn/tangent_linux.ppn', '../ppn/tint click_linux.ppn']
## Within [0, 1]. A higher sensitivity reduces miss rate at cost of increased false alarm rate
sensitivities = [0.5, 0.4, 0.5]
handle = porcupine.Porcupine(library_path, model_file_path, keyword_file_paths=keyword_file_paths, sensitivities=sensitivities)
def get_next_audio_frame():
handle.sample_rate()
handle.frame_length()
pass
while True:
pcm = get_next_audio_frame()
keyword_index = handle.process(pcm)
if keyword_index >= 0:
# detection event logic/callback
print("detected")
pass
handle.delete()
``` |
{
"source": "2waybene/GwasJP",
"score": 2
} |
#### File: GwasJP/accord/accordAnalysis.py
```python
import sys
import shlex
import subprocess as sp
import os
from ..utils import statFittings, createSlurmJob, commonVariantAnalysis
from ..wrappers import gctaCalls,plinkCalls,smartpcaCalls
def modelSetupDirectories (fullPath, prerequisitesdir , projectname):
print ("Copying presequiste files")
cmdTmp = "cp " + prerequisitesdir+"/forced_covars.txt " + str(fullPath)
sp.call(cmdTmp, shell=True)
cmdTmp = "cp " + prerequisitesdir+"/starting_covars.txt " + str(fullPath)
sp.call(cmdTmp, shell=True)
cmdTmp = "cp " + prerequisitesdir+"/phenotypes.txt " + str(fullPath)
sp.call(cmdTmp, shell=True)
cmdTmp = "cp " + prerequisitesdir+"/pheno_data_rhtn.txt " + str(fullPath)
sp.call(cmdTmp, shell=True)
print ("Creating directories for " + str(fullPath) + " for project " + str(projectname))
phenotype = str(fullPath) + "/phenotypes.txt"
f = open(phenotype, 'r')
phenoname = f.readline().strip()
print("phenoname is " + str(phenoname) + "\n")
#f.close()
creatingDirs (fullPath, phenoname)
print ("Finished directory setup, ready for analysis\n")
def modelStep1 (filepath, phenotype , Rdir , bFileInit ):
print ("****** Begin JOB:' " + str(filepath) + "'")
print ("****** This is the phenotype data info:' " + str(phenotype) + "'")
#for path in filepath :
print ('*************************************')
print ('This is the working path entered from the user:', str(filepath))
# creatingDirs (filepath, phenoname)
##============================================================
## command 0: replacing model_setup_step1.sh
##============================================================
## prepare a file pheontypes.txt
# phenotypes = filepath + "/" + "phenotypes.txt"
# f = open(phenotype, 'w')
#f.write(phenoname + "\n")
#f.close()
## ON Bioinformatic cluster at NIEHS
##============================================================
## command 1: used to be pheno_data_step1.r
## Now located: /ddn/gs1/home/li11/local/accord/bin/pheno_data_step1.r
##============================================================
outputFile = filepath + "/pheno_data/pheno_data_step1.txt"
'''
cmd1 = "R --slave --vanilla --file=/ddn/gs1/home/li11/local/accord/bin/pheno_data_step1.r --args " + filepath + " " + phenotype + " " + outputFile
sp.call(cmd1, shell=True)
'''
cmd0 = "R --slave --vanilla --file=" + str(Rdir) + "/pheno_data_step1.r --args " + filepath + " " + phenotype + " " + outputFile
##============================================================
## command 2-- : used to be time ./bin/relatedness.sh $p
# echo;echo "Compute relatedness (bin/relatedness.sh)"
# Now, re-write the file as command 2--
##=============================================================
keptOut = filepath + "/relatedness/keep.txt"
cmd1 = "cut -f 1-2 <(tail -n +2 " + outputFile + ") > " + keptOut
##=============================================================
#for plink
##=============================================================
# bFile = "/ddn/gs1/home/li11/local/accord/data/geno_data/unc.jj/post_qc.v3"
outDir = filepath + "/relatedness/data"
cmd2 = "plink --bfile " + bFileInit + " --keep " + keptOut+ " --silent --noweb --recode --make-bed --out " + outDir
##=============================================================
#for kingship: king
##=============================================================
bedFile = filepath + "/relatedness/data.bed"
kPrefix = filepath + "/relatedness/king"
kLog = filepath + "/relatedness/king.log"
cmd3 = "king -b " + bedFile + " --kinship --related --degree 5 --prefix " + kPrefix+ " > " + kLog
##=============================================================
#for Compute and plot relatedness
##=============================================================
cmd4 = "R --slave --vanilla --file=" + str(Rdir) + "/relatedness_plot.r --args "+ filepath
cmd5 = "R --slave --vanilla --file=" + str(Rdir) + "/relatedness_discard.r --args " + filepath
# Filter SNPs in LD
##=============================================================
#for two plink analysis
##=============================================================
bFile = filepath + "/relatedness/data"
rmFile = filepath + "/relatedness/discard.txt"
outDir = filepath + "/pca/data_maf_r2"
cmd6 = "plink --bfile " + bFile + " --remove " + rmFile+ " --maf 0.01 --indep 50 5 1.5 --silent --noweb --out " + outDir
extractFile = filepath + "/pca/data_maf_r2.prune.in"
outPruned = filepath + "/pca/data_pruned"
cmd7 = "plink --bfile " + bFile + " --remove " + rmFile+ " --extract " + extractFile+ " --recode12 --transpose --silent --noweb --out " + outPruned
cmd8 = "R --slave --vanilla --file=" + str(Rdir) + "/pca_ind.r --args " + filepath + " " + phenotype
##======================================================================
## two parts of awk script to parse files after pca
##======================================================================
outPrunedTped = filepath + "/pca/data_pruned.tped"
snpFile = filepath + "/pca/snp.txt"
cmd9 = "awk '{print $2\"\\t\"$1\"\\t0.0\\t\"$4}' " + outPrunedTped + " > " + snpFile
# sp.call(cmdTemp, shell=True, executable="/bin/bash")
genoFile = filepath + "/pca/geno.txt"
cmd10 = "awk '{for (i=5;i<=NF;i=i+2) {j=i+1;v=$i+$j-2;if (v==-2) printf \"%d\",9;else printf \"%d\",v;};printf \"\\n\";}' " + outPrunedTped + " > " + genoFile
# sp.call(cmdTemp, shell=True, executable="/bin/bash")
# Compute PCs
oPCA = filepath + "/pca/result.pca"
pPCA = filepath + "/pca/result.plot"
ePCA = filepath + "/pca/result.eval"
lPCA = filepath + "/pca/result.log"
#cmd11 = "perl /ddn/gs1/home/li11/local/accord/bin/smartpca.perl -i " + genoFile + " -a " + snpFile + " -b " + filepath + "/pca/ind.txt" + " -k 10 -o " + oPCA \
# + " -p " + pPCA + " -e " + ePCA + " -l " + lPCA + " -m 0 -t 5 -s 6.0"
cmd11 = "smartpca.perl -i " + genoFile + " -a " + snpFile + " -b " + filepath + "/pca/ind.txt" + " -k 10 -o " + oPCA \
+ " -p " + pPCA + " -e " + ePCA + " -l " + lPCA + " -m 0 -t 5 -s 6.0"
# Plot PCs
cmd12 = "R --slave --vanilla --file=" + str(Rdir) + "/pca_plot.r --args " + filepath
##======================================================================
## String the commands get a slurm file to submit to bioinfo cluster
##======================================================================
commands = [cmd0,cmd1,cmd2,cmd3,cmd4,cmd5,cmd6,cmd7,cmd8,cmd9,cmd10,cmd11,cmd12]
jobName = "modelsetupstep1"
slurmSbatchFile="modelsetupstep1.sh"
## create a temporary sbatch file to submit
(f,d) = createSlurmJob.getASLURMJob (slurmSbatchFile , jobName, commands, filepath)
print (f)
print(d)
cmd = "sbatch --partition=bioinfo --cpus-per-task=8 " + f
sp.call(cmd, shell=True)
def creatingDirs (filepath, phenoname):
dirBatch1 = ["association_cv",
"association_cv/imputed_chunks",
"association_cv/imputed_chunks/imputed_chunks_forMeta",
"association_rv",
"cluster_plots",
"gcta",
"outputs",
"outputs/gc",
"pca",
"peak_data",
"pheno_data",
"relatedness",
"sbatch_logs",
"reg_plots"]
dirBatch2 = [
"reg_plots/" +phenoname + "_call",
"reg_plots/" +phenoname + "_call_bar",
"reg_plots/" +phenoname + "_dosage",
"reg_plots/" +phenoname + "_dosage_bar"
]
dirs2make = []
for dir in dirBatch1:
dirs2make.append(filepath+"/"+dir)
for dir in dirBatch2:
dirs2make.append(filepath+"/"+dir)
for dir in dirs2make:
if (os.path.isdir(dir) == False):
try:
os.mkdir(dir)
print("Directory '% s' created" % dir)
except OSError as error:
print(error)
def checkDirectories (filepath, phenoname):
dirBatch1 = ["association_cv",
"association_cv/imputed_chunks",
"association_cv/imputed_chunks/imputed_chunks_forMeta",
"association_rv",
"cluster_plots",
"gcta",
"outputs",
"outputs/gc",
"pca",
"peak_data",
"pheno_data",
"relatedness",
"sbatch_logs",
"reg_plots"]
dirBatch2 = [
"reg_plots/" +phenoname + "_call",
"reg_plots/" +phenoname + "_call_bar",
"reg_plots/" +phenoname + "_dosage",
"reg_plots/" +phenoname + "_dosage_bar"
]
dirs2check = []
for dir in dirBatch1:
dirs2check.append(filepath+"/"+dir)
for dir in dirBatch2:
dirs2check.append(filepath+"/"+dir)
for dir in dirs2check:
if (os.path.isdir(dir) == False):
print ("this is dir: " + str(dir) + "\n")
return (1)
return(0)
def modelStep2 (filepath, Rdir, bFileInit):
print ("****** Begin JOB:' " + str(filepath) + "'")
#for path in filepath :
print ('*************************************')
print ('This is the working path entered from the user:', str(filepath))
## Create system command
#--file=/ddn/gs1/home/li11/local/accord/bin/pca_plot.r -
#echo "Remove designated covars and related individuals. Add first 10 PCs..."
# Remove selected covars and related individuals. Add first 10 PCs.
cmd1 = "R --slave --vanilla --file=" + str(Rdir) + "/pheno_data_step2.r --args " + filepath
# Perform log transformation on pheno_data_step2.txt. Creates histograms and replaces vals in d4m cols
#R --slave --vanilla --file=bin/rotroff_scripts/log_transform_and_hist_v1.R --args $p
#echo "Create modeltypes.txt. If only unique(phenotype values)=2, then logistic model is chosen..."
## Create a file called modeltypes.txt which explains the model type for each line of phenotypes.txt (lm or glm models)
cmd2 = "R --slave --vanilla --file=" + str(Rdir) + "/create.model.types.r --args " + filepath
#echo "Perform backwards selection on covars..."
# Backwards select non-forced covars. Create pheno files for R script, PLINK, and GCTA
rSourceFile = str(Rdir) + "/load_pheno_data.r"
cmd3 = "R --slave --vanilla --file=" + str(Rdir) + "/covar_backwards_selection_BIC.r --args " + \
filepath + " " + rSourceFile
#echo "Create samplelist.txt and frequency file..."
# Create sample list and frequency file
outputFile = filepath + "/pheno_data/pheno_data_step2.txt"
sampleList = filepath + "/pheno_data/sample_list.txt"
cmd4 = "cut -f1,2 <(tail -n +2 " + outputFile + ") > " + sampleList
#$p/pheno_data/pheno_data_step2.txt) > $p/pheno_data/sample_list.txt
# bFile = "/home/accord/data/geno_data/post_qc.unc.uva.merged"
plinkCV = filepath + "/association_cv/plink"
cmd5 = "plink --bfile " + bFileInit + " --keep " + sampleList + " --silent --freq --out " + plinkCV
#$p/association_cv/plink
##======================================================================
## String the commands get a slurm file to submit to bioinfo cluster
##======================================================================
commands = [cmd1,cmd2,cmd3,cmd4,cmd5]
jobName = "modelsetupstep2"
slurmSbatchFile="modelsetupstep2.sh"
## create a temporary sbatch file to submit
(f,d) = createSlurmJob.getASLURMJob (slurmSbatchFile , jobName, commands, filepath)
print (f)
print(d)
cmd = "sbatch --partition=bioinfo --cpus-per-task=8 " + f
sp.call(cmd, shell=True)
def heritabilityTest (filepath, sampleList, phenotype, p , genoTypeData):
print ("****** Begin JOB:' " + str(filepath) + "'")
#for path in filepath :
print ('*************************************')
print ('This is the working path entered from the user:', str(filepath))
## Create system command
## ON NCSU cluter server
# cmd = 'sbatch -p standard -o '+ genoTypeData +' --keep " /sbatch_logs/gcta.out ./bin/run_gcta.sh ' + filepath
outputPath = filepath + "/gcta/out"
cmd1 = "gcta64 --bfile " + genoTypeData + " --keep " + sampleList + " --autosome --make-grm --out " + outputPath
pheno = filepath + "/gcta/pheno_" + phenotype + ".txt"
dcov = filepath + "/gcta/dcovar_" + phenotype + ".txt"
qcov = filepath + "/gcta/qcovar_" + phenotype + ".txt"
outdir = filepath + "/gcta/out_" + phenotype
cmd2 = "gcta64 --reml --grm " + outputPath + " --thread-num " + str(p) + " --pheno " + pheno + " --covar " + dcov \
+ " --qcovar " + qcov + " --out " + outdir
commands = [cmd1,cmd2]
jobName = "heritability"
slurmSbatchFile="accordHeritability.sh"
## create a temporary sbatch file to submit
(f,d) = createSlurmJob.getASLURMJob (slurmSbatchFile , jobName, commands,filepath)
print (f)
print(d)
cmd = "sbatch --partition=bioinfo --cpus-per-task=8 " + f
sp.call(cmd, shell=True)
## on Bioinfomatic slurm
## cmd = "srun --partition=bioinfo --cpus-per-task=8 -o " + filepath + "/sbatch_logs/gcta.out ./bin/run_gcta.sh " + filepath
# print (cmd)
# sp.call(cmd, shell=True)
print ("Launching launchHeritability step 1 of 3:" + cmd)
print ("Check the job status with command: squeue ")
def common_variant_analysis_genotyped (filepath, phenosFile, modelsFile, snplistFile , bFile):
# accord.common_variant_analysis_genotyped (fullPath, phenoname, modelfile, snplistFile)
'''
Current working path: RHTN_testRun/rhtn_combined/
Launching logistic model for phenotype RHTN:
sbatch -p bigmem -o RHTN_testRun/rhtn_combined//sbatch_logs/chr0.RHTN.out ./bin/model.eval.cv.genotyped.sh RHTN_testRun/rhtn_combined/ RHTN logistic False
Submitted batch job 1498222
'''
print ("****** Begin JOB: Genotyped Common Variant Analysis ******")
print ("Here is the file path: " + str(filepath) )
# phenosFile = filepath + "/" + str(phenosFile)
# snplistFile = filepath + "/" + str(snplistFile)
phenos = [line.strip() for line in open(phenosFile, 'r')]
models = [line.strip() for line in open(modelsFile, 'r')]
# snplist = os.path.isfile(snplistFile)
commands =[]
clusterJobs = []
## For each phenotype/modeltype, launch common variant analysis
for i,pheno in enumerate(phenos):
## modeltype is passed as a parameter to the bash script
print ("This is this the i: " + str(i))
print ("This is the phenotype: " + pheno )
cmdTemp = [commonVariantAnalysis.modelEvalCVGenotyped (filepath, pheno, models, snplistFile, bFile)]
## using Default genotypeFle ="/home/accord/data/geno_data/post_qc.unc.uva.merged")
jobName = "GenotypedCommonVariant" + str(i)
slurmSbatchFile="GenotypedCommonVariant" + str(i) + ".sh"
## create a temporary sbatch file to submit
(f,d) = createSlurmJob.getASLURMJob (slurmSbatchFile , jobName, cmdTemp, filepath )
print (f)
print(d)
clusterJobs.append(f)
print ("Launching impute common variant analysis step 3 of 3:")
print ("Check the job status with command: squeue ")
for job in clusterJobs:
cmd = "sbatch --partition=highmem --cpus-per-task=8 " + job
sp.call(cmd, shell=True)
def common_variant_analysis_imputed (filepath, phenosFile, modelsFile, snplistFile, bFile):
# phenosFile = filepath + "/" + str(phenosFile)
#modelsFile = filepath + "/" + str(modelsFile)
# snplistFile = filepath + "/" + str(snplistFile)
#phenos = [line.strip() for line in open(phenosFile, 'r')]
#models = [line.strip() for line in open(modelsFile, 'r')]
phenos = [line.strip() for line in open(phenosFile, 'r')]
models = [line.strip() for line in open(modelsFile, 'r')]
chroms = list(range(1, 23, 1)) + list(range(101, 113, 1))
print (chroms)
commands =[]
clusterJobs = []
'''
for i,pheno in enumerate(phenos):
## modeltype is passed as a parameter to the bash script
for chrm in chroms:
print ("This is this the i: " + str(i))
print ("This is the phenotype: " + pheno )
cmdTemp = [commonVariantAnalysis.modelEvalCVGenotyped (filepath, pheno, models, snplistFile)]
## using Default genotypeFle ="/home/accord/data/geno_data/post_qc.unc.uva.merged")
cmdTemp = ' '.join(('sbatch -x node[1-9] -o '+path+'/sbatch_logs/chr'+str(chrm)+'.cv.for.meta.'+pheno+
'.out ./bin/model_eval_cv_imputed.for.meta.sh',
path,str(chrm),models[i]))
jobName = "GenotypedCommonVariant" + str(i)
slurmSbatchFile="GenotypedCommonVariant" + str(i) + ".sh"
## create a temporary sbatch file to submit
(f,d) = createSlurmJob.getASLURMJob (slurmSbatchFile , jobName, cmdTemp, filepath )
print (f)
print(d)
clusterJobs.append(f)
cmd = ' '.join(('sbatch -x node[1-11] -o '+path+'/sbatch_logs/chr'+str(chrm)+'.cv.'+pheno+
'.out ./bin/model_eval_cv_imputed.sh',
path,str(chrm),models[i]))
print 'Launching full imputed analysis',models[i],'model for phenotype',pheno+':\n',cmd
## Split cmd for shell
split_cmd = shlex.split(cmd)
print ("This is this the i: " + str(i))
print ("This is the phenotype: " + pheno )
cmdTemp = [commonVariantAnalysis.modelEvalCVGenotyped (filepath, pheno, models, snplistFile)]
## using Default genotypeFle ="/home/accord/data/geno_data/post_qc.unc.uva.merged")
jobName = "GenotypedCommonVariant" + str(i)
slurmSbatchFile="GenotypedCommonVariant" + str(i) + ".sh"
## create a temporary sbatch file to submit
(f,d) = createSlurmJob.getASLURMJob (slurmSbatchFile , jobName, cmdTemp, filepath )
print (f)
print(d)
clusterJobs.append(f)
## Impute Analysis for Meta Analysis
cmd = ' '.join(('sbatch -x node[1-9] -o '+path+'/sbatch_logs/chr'+str(chrm)+'.cv.for.meta.'+pheno+
'.out ./bin/model_eval_cv_imputed.for.meta.sh',
path,str(chrm),models[i]))
print 'Launching imputed analysis for meta',models[i],'model for phenotype',pheno+':\n',cmd
## Split cmd for shell
split_cmd = shlex.split(cmd)
## Launch command
sp.call(split_cmd)#,stdout=log_file,stderr=logerr_file)
'''
print ("Launching impute common variant analysis step 3 of 3:")
print ("Check the job status with command: squeue ")
for job in clusterJobs:
cmd = "sbatch --partition=highmem --cpus-per-task=8 " + job
sp.call(cmd, shell=True)
print ("Check the job status with command: squeue ")
def cleanupImpuCommVarData (fullPath, phenotype, modelfile, selectedsnp):
print ("Error checking and cleaning up data")
def metaAnalysis (filepath, phenosFile, modelsFile, snplistFile = None):
#FIXME need to test
phenosFile = filepath + "/" + str(phenosFile)
modelsFile = filepath + "/" + str(modelsFile)
# snplistFile = filepath + "/" + str(snplistFile)
phenos = [line.strip() for line in open(phenosFile, 'r')]
models = [line.strip() for line in open(modelsFile, 'r')]
snplist = os.path.isfile(filepath+'/snp_list.txt')
## For each phenotype/modeltype, launch common variant analysis
for i,pheno in enumerate(phenos):
## modeltype is passed as a parameter to the bash script
cmd = ' '.join(('sbatch -p standard -o '+filepath+'/sbatch_logs/runMetaAnalysis.'+pheno+
'.out ./bin/runPlinkMeta.sh',filepath,pheno,models[i],str(snplist)))
'''
#this is runPlinkMeta.sh
if [ "$model" == "linear" ]
then
plink --meta-analysis $p/association_cv/allChrImputed_forMetaAnalysis.$pheno.assoc $p/association_cv/chr0.$pheno.assoc.$model + qt --silent --noweb --out $p/association_cv/plink_meta_$pheno
echo 'plink --meta-analysis '$p'/association_cv/allChrImputed_forMetaAnalysis.'$pheno'.assoc '$p'/association_cv/chr0.'$pheno'.assoc.'$model' + qt --silent --noweb --out '$p'/association_cv/plink_meta_'$pheno
else ## logistic
plink --meta-analysis $p/association_cv/allChrImputed_forMetaAnalysis.$pheno.assoc $p/association_cv/chr0.$pheno.assoc.$model --silent --noweb --out $p/association_cv/plink_meta_$pheno
echo 'plink --meta-analysis '$p'/association_cv/allChrImputed_forMetaAnalysis.'$pheno'.assoc '$p'/association_cv/chr0.'$pheno'.assoc.'$model' + logscale --silent --noweb --out '$p'/association_cv/plink_meta_'$pheno
'''
# print 'Launching meta analysis for phenotype',pheno+':\n',cmd
## Split cmd for shell
split_cmd = shlex.split(cmd)
## Launch command
sp.call(split_cmd)#,stdout=log_file,stderr=logerr_file)
print ("needs to implement the meta analysis")
def getPlotting (fullPath, phenotype, modelfile, selectedsnp):
print ("implement all the plottings ")
def rareVariantAnalysis(fullPath, phenotype, modelfile, selectedsnp):
print ("Rare Variant Analysis")
```
#### File: GwasJP/utils/statFittings.py
```python
import math
import numpy
def gaussian(x, mu, sig):
'''
Gaussian function.
'''
return (1 / math.sqrt(2 * math.pi * sig ** 2)) * \
numpy.exp(-(x - mu) ** 2 / (2 * sig ** 2))
def logistic(x, x0, L, M, k):
'''
Logistic function.
'''
return M + (L / (1 + numpy.exp(-k * (x - x0))))
def entropy (num1, num2):
percnt1 = num1/(num1+num2)
percnt2 = num2/(num1+num2)
return (-(percnt1*math.log2(percnt1) + percnt2*math.log2(percnt2)))
if __name__ == "__main__":
print(entropy(5,9))
print(entropy(3,2))
print(entropy(2,3))
``` |
{
"source": "2wendex2/IDM",
"score": 2
} |
#### File: IDM/idm/routes.py
```python
from flask import Flask, redirect, request, render_template
from .objects import Event, dp, DB
from vkapi import VkApi
from hashlib import md5
import traceback
import json
app = Flask(__name__)
@app.route('/')
def index():
db = DB()
return render_template('pages/index.html', installed=db.installed)
@app.route('/install')
def install():
db = DB()
return render_template('pages/install.html', installed=db.installed)
@app.route('/api/<string:method>', methods=["POST"])
def api(method: str):
db = DB()
if method == "setup_idm":
if db.installed:
return redirect('/')
local_db = DB()
local_db.owner_id = int(request.form.get('owner_id', None))
local_db.secret = request.form.get('secret').lower()
local_db.access_token = request.form.get('access_token', None)
local_db.online_token = request.form.get('online_token', None) if request.form.get('online_token',
None) != '' else None
local_db.me_token = request.form.get('me_token', None) if request.form.get('me_token', None) != '' else None
local_db.bp_token = request.form.get('bp_token', None) if request.form.get('bp_token', None) != '' else None
local_db.vk_app_id = int(request.form.get('vk_app_id', None))
local_db.vk_app_secret = request.form.get('vk_app_secret', None)
local_db.host = request.form.get('host', None)
local_db.installed = True
local_db.trusted_users.append(local_db.owner_id)
local_db.duty_id = VkApi(local_db.access_token)('users.get')[0]['id']
local_db.trusted_users.append(local_db.duty_id)
db = local_db
db.save()
return redirect('/login?next=/')
if method == "edit_bot":
if request.form.get('uid', None) is None:
return redirect('/login?next=/admin')
uid = int(request.form.get('uid', None))
token = request.form.get('token', None)
if uid != db.owner_id and uid != db.duty_id:
return redirect('/')
if md5(f"{db.vk_app_id}{uid}{db.vk_app_secret}".encode()).hexdigest() != token:
return redirect('/login?next=/admin')
db.secret = request.form.get('secret', '').lower()
access_token = request.form.get('access_token', None)
online_token = request.form.get('online_token', None)
bp_token = request.form.get('bp_token', None)
me_token = request.form.get('me_token', None)
if access_token is not None and access_token != '' and '*' not in access_token:
db.access_token = access_token
if online_token is not None and online_token != '' and '*' not in online_token:
db.online_token = online_token
if bp_token is not None and bp_token != '' and '*' not in bp_token:
db.bp_token = bp_token
if me_token is not None and me_token != '' and '*' not in me_token:
db.me_token = me_token
db.save()
return redirect('/admin')
if method == "reset":
secret = request.form.get('secret', None)
if secret == db.secret:
db.installed = False
db.chats = {}
db.trusted_users = []
db.owner_id = 0
db.duty_id = 0
db.vk_app_id = 0
db.vk_app_secret = ""
db.host = ""
db.secret = ""
db.access_token = None
db.online_token = None
db.me_token = None
db.bp_token = None
db.save()
return redirect('/')
return "ok"
@app.route('/admin')
def admin():
def get_musk(token: str) -> str:
if token is None or len(token) != 85: return ""
return token[:4] + "*" * 77 + token[81:]
db = DB()
uid = request.cookies.get('uid', 0)
token = request.cookies.get('token', None)
if not db.installed:
return redirect('/install')
if request.cookies.get('uid', None) is None:
return redirect('/login?next=/admin')
if int(request.cookies.get('uid', 0)) != db.owner_id and int(request.cookies.get('uid', 0)) != db.duty_id:
return redirect('/')
if md5(f"{db.vk_app_id}{uid}{db.vk_app_secret}".encode()).hexdigest() != token:
return redirect('/login?next=/admin')
local_db = db
local_db.access_token = get_musk(db.access_token)
local_db.me_token = get_musk(db.me_token)
local_db.online_token = get_musk(db.online_token)
local_db.bp_token = get_musk(db.bp_token)
return render_template('pages/admin.html', db=local_db.raw)
@app.route('/login')
def login():
db = DB()
return render_template('pages/login.html', vk_app_id=db.vk_app_id)
@app.route('/callback', methods=["POST"])
def callback():
event = Event(request)
if event.db.secret != event.secret:
return "Неверный секретный код"
if event.user_id != event.db.duty_id:
return "Неверный ID дежурного"
data = [d for d in dp.event_run(event)]
for d in data:
if d != "ok":
return "<ошибочка>" + json.dumps({"ошибка": d}, ensure_ascii=False, indent=2)
return "ok"
@app.errorhandler(Exception)
def on_error(e):
return "<ошибочка>" + json.dumps({"тип": "неизвестный (on_error)", "ошибка": f"{e}", "traceback": traceback.format_exc()}, ensure_ascii=False, indent=2)
``` |
{
"source": "2wendex2/wendex-boshy-server",
"score": 2
} |
#### File: wendex-boshy-server/wbs/server.py
```python
from twisted.internet import reactor
from lacewing.server import ServerProtocol, ServerDatagram, ServerFactory
from dummy import DUMMY_PLAYER_NAME, DummyProtocol
from __init__ import __version__
from lacewing.multidict import MultikeyDict
from boshyframes import BOSHY_FRAMES
PROHIBITED_NICKNAMES = [
'OnlineCoop', DUMMY_PLAYER_NAME
]
def getProtocolType(settings):
if settings.get('datagram', False):
return 'UDP'
else:
return 'TCP'
class WendexBoshyServer(ServerProtocol):
def connectionAccepted(self, welcome):
self.log('Client connection accepted.')
def messageReceived(self, message):
protocolType = getProtocolType(message.settings)
if protocolType=='TCP':
self.log('(%s) %s %r' % (protocolType, message.subchannel, message.value))
def channelMessageReceived(self, channel, message):
protocolType = getProtocolType(message.settings)
if protocolType=='TCP':
self.log('(%s)(%s) %s %r' % (protocolType, channel.name, message.subchannel, message.value))
if message.value == "/ping":
channel.sendMessage('pong', message.subchannel, self.factory.dummy,
typeName = message.getDataType(), asObject = message.isObject,
asDatagram = message.settings.get('datagram', False))
elif message.value == "/version":
channel.sendMessage('Wendex boshy server v%s' % __version__, message.subchannel, self.factory.dummy,
typeName = message.getDataType(), asObject = message.isObject,
asDatagram = message.settings.get('datagram', False))
else:
splitted = message.value.split()
if (len(splitted) == 4 and splitted[0] == '/save' and splitted[1] in BOSHY_FRAMES and
splitted[2].isdigit() and splitted[3].isdigit()):
channel.sendMessage('%s|%s|%s|%s' % (splitted[2], splitted[3], BOSHY_FRAMES[splitted[1]], 0), 63, self.factory.dummy,
typeName = message.getDataType(), asObject = message.isObject,
asDatagram = message.settings.get('datagram', False))
def privateMessageReceived(self, channel, recipient, message):
protocolType = getProtocolType(message.settings)
if protocolType=='TCP':
self.log('(%s)(to %s) %s %r' % (protocolType, recipient.name, message.subchannel, message.value))
def loginAccepted(self, name):
self.log('Name set to "%s"' % name)
if name in PROHIBITED_NICKNAMES:
self.log('PIZDEC')
self.disconnect()
def channelListSent(self):
self.log('(sent channel list)')
def channelJoined(self, channel):
self.log('Signed on to channel "%s"' % channel.name)
self.factory.dummy.joinChannelWeak(channel)
def channelLeft(self, channel):
self.log('Left channel "%s"' % channel.name)
def nameChanged(self, name):
self.log('Name changed to %s' % name)
def connectionLost(self, reason):
# here, we need to call OServer's connectionLost
# because connectionLost is a twisted method, and the server
# needs to know that the client has disconnected.
ServerProtocol.connectionLost(self, reason)
if self.loggedIn:
self.log('Connection disconnected.')
def disconnect(self, reason = None, *arg, **kw):
print self.log('Kicked: %s' % reason)
ServerProtocol.disconnect(self, reason, *arg, **kw)
def log(self, message):
"""
Log a message.
"""
print '%s: %s' % (self.id, message)
class WendexBoshyFactory(ServerFactory):
protocol = WendexBoshyServer
ping = True
channelListing = True
masterRights = True
welcomeMessage = 'Wendex Boshy Server v' + __version__
dummy = None
def run(self, prt):
try:
import psyco
psyco.full()
except ImportError:
pass
port = reactor.listenTCP(prt, self)
reactor.listenUDP(prt, ServerDatagram(self))
print 'Opening new server on port %s...' % prt
reactor.run()
def startFactory(self):
ServerFactory.startFactory(self)
channels = MultikeyDict()
self.dummy = DummyProtocol(self)
``` |
{
"source": "2wind/eval_gui_example",
"score": 2
} |
#### File: 2wind/eval_gui_example/main.py
```python
from typing import List
from numpy import ndarray
import matplotlib.pyplot as plt
import threading
# import tensorflow as tf
from PIL import Image, ImageTk
import PySimpleGUI as sg
from config import *
import evaluate
import utility
# Global variables
input_values = [] # Input values for evaluator (ex. image paths)
ml_datas = [] # Output values from evaluator (ex. evaluated raw data)
display_image = None # Image displayed.
window = None # main window.
eval_event = threading.Event()
def initialize_window() -> sg.Window:
sg.theme(WINDOW_THEME)
sg_image = sg.Image(size=(900, 9000), key=KEY_CANVAS, expand_x=True, expand_y=True)
image_column = [[sg.pin(
sg.Column([[sg_image]], key=KEY_IMAGE_COLUMN, visible=False, scrollable=True, expand_x=True, expand_y=True)
)]]
output_text = sg.Text(TEXT_OUTPUT_INITIAL, key=KEY_OUTPUT)
control_group = [[sg.Input(key='_FILES_'), sg.FilesBrowse(key=KEY_BROWSE_FILES, file_types=IMAGE_FILETYPES, initial_folder=DIR_PATH, disabled=True)],
[sg.OK(button_text=BUTTON_DETECT, key=BUTTON_DETECT, disabled=True),
sg.FileSaveAs(
key=BUTTON_SAVE_FILE,
target=KEY_SAVE_FILE,
file_types=IMAGE_FILETYPES,
default_extension=".jpg",
disabled=True
),
sg.In(key=KEY_SAVE_FILE, enable_events=True, visible=False)],
[sg.Submit(KEY_RERENDER, key=KEY_RERENDER, disabled=True)],
[output_text]]
output_group = [[sg.Output(size=(200, 10))]]
# output_group = [[sg.Text("__")]] # Dummy group for evaluation
layout = [[sg.Column(control_group, key=KEY_CONTROL_GROUP), sg.Column(output_group, key=KEY_OUTPUT_GROUP)],
[image_column]]
window = sg.Window('Example GUI for ML Project', resizable=True, auto_size_text=True, size=(900, 800), finalize=True).Layout(layout)
return window
def create_thread(window:sg.Window, result_values:List[evaluate.MLData], eval_event:threading.Event):
evaluator = evaluate.Evaluator(window)
print("evaluator ready")
while True:
eval_event.wait()
print("starting...")
evaluator.try_evaluate(input_values, result_values)
eval_event.clear()
# Do evaluator clearing action here
def excepthook(args):
print(args)
# TODO: reactivate buttons in main thread when exception happens in other thread
# disable_buttons(window, False)
eval_event.clear()
print("Unknown problem while evaluating. Run inside console to see tensorflow debug messages.")
print("Possibly: Your GPU may not have enough memory to run model. Try running it in a CPU mode.")
threading.excepthook = excepthook
def main() -> None:
# Initialize Window
window = initialize_window()
# Set memory growth for all GPUs to use least VRAM possible
# gpus = tf.config.experimental.list_physical_devices('GPU')
# for gpu in gpus:
# tf.config.experimental.set_memory_growth(gpu, True)
# Initialize evaluator with default window
eval_thread = threading.Thread(target=create_thread, args=(window, ml_datas, eval_event), daemon=True)
eval_thread.start()
# Main loop
while True:
event, values = window.Read()
if event == sg.WIN_CLOSED or event == 'Cancel' or event == BUTTON_EXIT: # if user closes window or clicks cancel
break
if (event == KEY_RERENDER): # if Rerender without evaluating
display_image = draw_on_image(window, ml_datas)
if (event.startswith(BUTTON_DETECT)): # if user presses Detect button
window[KEY_IMAGE_COLUMN].Update(visible = False)
input_values.clear()
input_values.extend(values['_FILES_'].split(';'))
print(input_values)
try:
# physical_devices = tf.config.list_physical_devices('GPU')
# print("GPU Available: ", len(physical_devices))
disable_buttons(window, True)
eval_event.set()
except Exception as e:
eval_event.clear()
disable_buttons(window, False)
print(e)
print("Unknown problem while evaluating. Run inside console to see tensorflow debug messages.")
print("Possibly: Your GPU may not have enough memory to run model. Try running it in a CPU mode.")
if (event.startswith(KEY_SAVE_FILE)): # if user closes file save dialog
print("Try Saving file...")
try:
filename = values['filename']
print(filename)
display_image.convert('RGB').save(filename)
print("Image saved")
except Exception as e:
print(e)
if (event == THREAD_EVENT): # if try_evaluate signals THREAD_EVENT
if values[THREAD_EVENT] == EVAL_READY:
disable_buttons(window, False)
window[KEY_OUTPUT].Update(value=TEXT_EVAL_READY)
if values[THREAD_EVENT] == EVAL_START:
window[KEY_OUTPUT].Update(value=TEXT_EVAL_START)
if values[THREAD_EVENT] == EVAL_COMPLETE:
window[KEY_OUTPUT].Update(value=TEXT_EVAL_COMPLETE)
disable_buttons(window, False)
display_image = draw_on_image(window, ml_datas)
window.close()
def disable_buttons(window, disabled):
window[BUTTON_DETECT].Update(disabled=disabled)
window[KEY_SAVE_FILE].Update(disabled=disabled)
window[KEY_BROWSE_FILES].Update(disabled=disabled)
window[BUTTON_SAVE_FILE].Update(disabled=disabled)
window[KEY_RERENDER].Update(disabled=disabled)
def draw_on_image(window: sg.Window, ml_datas:List[evaluate.MLData]) -> Image.Image:
"""
Draw contents of RESULTS inside WINDOW.
"""
print(f"drawing {len(ml_datas)} image(s)...")
sg_image = window[KEY_CANVAS]
display_image = None
try:
fig, axs = plt.subplots(nrows=len(ml_datas), ncols=1)
for i, ml_data in enumerate(ml_datas):
ax = axs[i] if type(axs) == ndarray else axs
ax.set_anchor('N')
# use drawing function that might come with your ML Package,
# or simple draw image and data on ax using ml_data.
ax.text(0, 0, ml_data.result["key"])
ax.imshow(ml_data.image)
fig.set_dpi(120)
fig.subplots_adjust(left=0, right=1, wspace=0.01, hspace=0.15)
# FIXME: magic number 대신, 추가 legend 비례한 값으로 fig 크기 늘리기
height = 2.3 * len(ml_datas)
fig.set_size_inches(7, height, forward=True)
fig.tight_layout()
display_image = Image.fromarray(utility.figure_to_array(fig))
result_image = ImageTk.PhotoImage(image=display_image)
# display image in main screen
sg_image.update(data=result_image)
window.refresh()
window[KEY_IMAGE_COLUMN].contents_changed()
window[KEY_IMAGE_COLUMN].Update(visible = True)
except ValueError as e:
print(e)
print("length of data detected is ", len(ml_datas))
finally:
return display_image
if __name__ == "__main__":
main()
``` |
{
"source": "2wind/LandmarkDetector",
"score": 3
} |
#### File: 2wind/LandmarkDetector/dataset.py
```python
import os
import pandas as pd
import numpy as np
import cv2
import PIL.Image as Image
import math
import random
import imutils
import torch
from torchvision import datasets, models, transforms
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms.functional as TF
class CustomDataSet(Dataset):
def __init__(self, data_dir: str, image_postfix:str, tsv_postfix:str, landmark_regex:str, landmark_length:int):
self.photo_img_string = image_postfix
self.photo_tsv_string = tsv_postfix
self.data_dir = data_dir
self.landmark_regex = landmark_regex
self.landmark_length = landmark_length
files = os.listdir(self.data_dir)
self.photo_images = [x for x in files if self.photo_img_string in x]
self.photo_tsvs = [x for x in files if self.photo_tsv_string in x]
assert(len(self.photo_images) == len(self.photo_tsvs))
for i in range(len(self.photo_images)):
x, y = self.photo_images[i], self.photo_tsvs[i]
assert(os.path.splitext(x)[0] == os.path.splitext(y)[0])
def __len__(self):
return len(self.photo_tsvs)
# load_tsv: load tsv --> return dataframe with name, x, y column.
def load_tsv(self, name):
# Loading dataframe
df = pd.read_csv(os.path.join(self.data_dir, name), sep='\t')
df = df.iloc[:99, 0:3]
df.columns = ['name', 'X', 'Y']
return df
# load_image: load image --> return plt.Image grayscale.
def load_image(self, name):
image = cv2.imread(os.path.join(self.data_dir, name), flags=cv2.IMREAD_GRAYSCALE)
img = Image.fromarray(image)
return img
# bounding_box: df(name, x, y) --> return top, left, height, width in integer
def bounding_box(self, df):
center = df[df['name'] == '2']
cy, bottom, cx, right = center['Y'].values[0], df['Y'].max(), center['X'].values[0], df['X'].max()
dy, dx = bottom - cy, right - cx
top, left = cy - dy, cx
# print((left, top), (right, bottom))
# creating bounding box
width, height = (right - left), (bottom - top)
# rand_size_bias = random.uniform(0.9, 1.1)
# width, height = width * rand_size_bias, height * rand_size_bias
return int(top), int(left), int(height), int(width)
def add_random_bias(self, top, left, height, width, bias=0.01):
top_bias = int(random.uniform(-height*bias,0))
left_bias = int(random.uniform(-width*bias,0))
height_bias = -top_bias + int(random.uniform(0, height*bias))
width_bias = -left_bias + int(random.uniform(0, width*bias))
top, left, height, width = top + top_bias, left + left_bias, height + height_bias, width + width_bias
return top, left, height, width
def extract_landmarks(self, df, landmark_regex, landmark_length):
# (gathering only needed landmarks)
df = df.loc[df['name'].str.contains(landmark_regex, regex=True), :]
# there are **18** landmarks that is unique and valid among all files
# should we sort df?
df = df.sort_values(by=['name'])
df = df.loc[:, ['X', 'Y']]
df = df.reset_index(drop=True)
# ... and landmark
landmark = df.to_numpy(dtype=np.float32)
return landmark
def rotate(self, img, landmark, angle):
angle = random.uniform(-angle, +angle)
transformation_matrix = torch.tensor([
[+math.cos(math.radians(angle)), -math.sin(math.radians(angle))],
[+math.sin(math.radians(angle)), +math.cos(math.radians(angle))]
])
image = imutils.rotate(np.array(img), angle)
landmark = landmark - 0.5
new_landmarks = np.matmul(landmark, transformation_matrix)
new_landmarks = new_landmarks + 0.5
return Image.fromarray(image), new_landmarks
def crop(self, img, landmark, top, left, height, width):
# Cropping image...
img = TF.crop(img, top, left, height, width)
#oh, ow = np.array(img).shape[0], np.array(img).shape[1]
landmark = torch.tensor(landmark) - torch.tensor([[left, top]])
landmark = landmark / torch.tensor([width, height])
return img, landmark
def normalize(self, img, landmark, height, width):
# normalizing the pixel values
img = TF.to_tensor(img)
img = TF.normalize(img, [0.6945], [0.33497])
landmark -= 0.5
return img, landmark
def __getitem__(self, index):
img_name = self.photo_images[index]
tsv_name = self.photo_tsvs[index]
img = self.load_image(img_name)
df = self.load_tsv(tsv_name)
top, left, height, width = self.bounding_box(df)
top, left, height, width = self.add_random_bias(top, left, height, width, 0.02)
landmark = self.extract_landmarks(df, self.landmark_regex, self.landmark_length)
# rand_top = int(top) + random.randint(-int(height * 0.1), int(height * 0.1))
# rand_left = int(left) + random.randint(0, int(width * 0.2))
img, landmark = self.crop(img, landmark, top, left, height, width)
# resizing image..
img = TF.resize(img, (224, 224))
# packing image
# use dsplit when RGB to make 224x224x3 --> 3x224x224
#img = np.dsplit(img, img.shape[-1])
img, landmark = self.rotate(img, landmark, 5)
img, landmark = self.normalize(img, landmark, height, width)
#arr = arr.flatten('F')
return img, landmark, img_name
``` |
{
"source": "2xB/faultguard",
"score": 3
} |
#### File: 2xB/faultguard/example.py
```python
import faultguard
import numpy as np
def launch(faultguard_data, args):
"""
Demo software main method
:param faultguard_data: Faultguard data dictionary
:param args: Data passed from faultguard.start.
"""
print("Launching demo")
# Some important data
important_data_1 = np.array([1,2,3])
important_data_2 = args[0] + " " + args[1]
# Some dummy important data manipulation
for i in range(10):
important_data_1[i%3] = i
important_data_2 += str(i)
print("important_data_1:", important_data_1)
print("important_data_2:", important_data_2)
# Sending important data to faultguard process
faultguard_data["important_data_1"] = important_data_1
faultguard_data["important_data_2"] = important_data_2
# Generate segfault
if i == 7:
import ctypes
ctypes.string_at(0)
def rescue(faultguard_data, exit_code, args):
"""
Demo rescue handler
:param faultguard_data: Faultguard data dictionary
:param exit_code: Exit code of occured fault.
:param args: Data passed from faultguard.start.
"""
print("Fault occured. Exit code: {}. Rescued data:".format(exit_code))
# Check if fault occurs before data was initialized
if "important_data_1" not in faultguard_data or "important_data_2" not in faultguard_data:
return
# Restore data
important_data_1 = faultguard_data["important_data_1"]
important_data_2 = faultguard_data["important_data_2"]
# You might need to assign the class here by important_data_1.__class__ = ...
print("important_data_1:", important_data_1)
print("important_data_2:", important_data_2)
def main():
faultguard.start(launch, rescue, args=("Hello", "World"))
if __name__ == "__main__":
main()
``` |
{
"source": "2xki/hybridbox-api",
"score": 3
} |
#### File: hybridbox-api/hybridbox/session.py
```python
import requests
import base64
import hashlib
import re
import json
class Session(object):
"""The origin of everything you want to do with the hybridbox-api.
Configuration can be set with keyword arguments when initializing
:py:class:`Session`.
:type username: str
:param username: The login username default: "admin"
:type password: str
:param password: The login password default: ""
:type ip: str
:param ip: The IP-address of the Router interface default: "10.0.0.138"
"""
def __init__(self, username="admin", password="", ip="10.0.0.138"):
self.USERNAME = username
self.PASSWORD = password
self.IP = ip
self.session = requests.Session()
self.csrf_param = ""
self.csrf_token = ""
def _encrypt_password(self):
""" Encrypting the password with the username , csrf_param and csrf_token.
:return: Encrypted password
"""
encrypted_password = self.USERNAME.encode("UTF-8") \
+ base64.b64encode(hashlib.sha256(self.PASSWORD.encode("UTF-8"))
.hexdigest().encode("UTF-8")) \
+ self.csrf_param.encode("UTF-8") \
+ self.csrf_token.encode("UTF-8")
encrypted_password = str(hashlib.sha256(encrypted_password).hexdigest())
return encrypted_password
def _set_csrf(self, page):
"""Every time you send a request to the Routers page the csrf-creds change so you have to set
them on every response. The reason why I'm not only using json is because sometimes we have to
filter the csrf-creds form an html.
:param page: A Json object with the last responses csrf-creds or the whole response html.
"""
if type(page) == dict:
self.csrf_param = page["csrf_param"]
self.csrf_token = page["csrf_token"]
elif page is not None:
self.csrf_param = re.search('(.*csrf_param".{1,9})"(\w{32})"', page.text).group(2)
self.csrf_token = re.search('(.*csrf_token".{1,9})"(\w{32})"', page.text).group(2)
@staticmethod
def _get_json(given_string):
"""Filter and return the json from the response string.
:param given_string: Response string
:return: Json object
"""
json_string = re.search('(.*/\*)(.*)(\*/)', given_string).group(2)
return json.loads(json_string)
def _cleanup(self, r):
"""Setting the last responses csrf-creds and returning the error code
:param r: Last response.
:return: Error code of the last response.
"""
try:
json_response = self._get_json(r.text)
self._set_csrf(json_response)
return json_response["errcode"]
except Exception as error:
print("error: " + repr(error))
def login(self):
"""Initializes an authentication.
:returns: The session. This is useful for jQuery-like command
"""
self.session = requests.Session()
try:
page = self.session.get("http://" + self.IP + "/html/index.html")
if page.ok:
self._set_csrf(page)
encrypted_password = self._encrypt_password()
csrf = dict(csrf_param=self.csrf_param, csrf_token=self.csrf_token)
data = dict(UserName=self.USERNAME, Password=<PASSWORD>, LoginFlag=1)
login_data = dict(csrf=csrf, data=data)
r = self.session.post("http://" + self.IP + "/api/system/user_login", json=login_data)
json_response = self._get_json(r.text)
if json_response["errorCategory"] == "ok":
self._set_csrf(json_response)
else:
raise Exception(json_response["errorCategory"])
return self
except Exception as error:
raise Exception("connection failed")
def logout(self):
"""Log out of session
:returns: The session. This is useful for jQuery-like command
"""
csrf = dict(csrf_param=self.csrf_param, csrf_token=self.csrf_token)
r = self.session.post("http://" + self.IP + "/api/system/user_logout", json=csrf)
self.csrf_param = ""
self.csrf_token = ""
self.session.close()
return self
def turn5goff(self):
"""Disables the 5G wifi
:return: Error code of the response.
"""
csrf = dict(csrf_param=self.csrf_param, csrf_token=self.csrf_token)
config5g = dict(enable="false", ID="InternetGatewayDevice.X_Config.Wifi.Radio.2.")
data = dict(config5g=config5g)
json_data = dict(action="BasicSettings", csrf=csrf, data=data)
r = self.session.post("http://" + self.IP + "/api/ntwk/WlanBasic?showpass=false", json=json_data)
return self._cleanup(r)
def turn5gon(self):
"""Enables the 5G wifi
:return: Error code of the response.
"""
csrf = dict(csrf_param=self.csrf_param, csrf_token=self.csrf_token)
config5g = dict(enable="true", ID="InternetGatewayDevice.X_Config.Wifi.Radio.2.")
data = dict(config5g=config5g)
json_data = dict(action="BasicSettings", csrf=csrf, data=data)
r = self.session.post("http://" + self.IP + "/api/ntwk/WlanBasic?showpass=false", json=json_data)
return self._cleanup(r)
def turn2goff(self):
"""Disables the 2G wifi
:return: Error code of the response.
"""
csrf = dict(csrf_param=self.csrf_param, csrf_token=self.csrf_token)
config2g = dict(enable="false", ID="InternetGatewayDevice.X_Config.Wifi.Radio.1.")
data = dict(config2g=config2g)
json_data = dict(action="BasicSettings", csrf=csrf, data=data)
r = self.session.post("http://" + self.IP + "/api/ntwk/WlanBasic?showpass=false", json=json_data)
return self._cleanup(r)
def turn2gon(self):
"""Enables the 2G wifi
:return: Error code of the response.
"""
csrf = dict(csrf_param=self.csrf_param, csrf_token=self.csrf_token)
config2g = dict(enable="true", ID="InternetGatewayDevice.X_Config.Wifi.Radio.1.")
data = dict(config2g=config2g)
json_data = dict(action="BasicSettings", csrf=csrf, data=data)
r = self.session.post("http://" + self.IP + "/api/ntwk/WlanBasic?showpass=false", json=json_data)
return self._cleanup(r)
def reboot(self):
"""Reboot the Router
:return: Error code of the response.
"""
r = self.session.get("http://" + self.IP + "/html/advance.html#device_mngt")
self._set_csrf(r)
csrf = dict(csrf_param=self.csrf_param, csrf_token=self.csrf_token)
data = dict(csrf=csrf)
self.session.cookies['activeMenuID'] = "maintain_settings"
self.session.cookies['activeSubmenuID'] = "device_mngt"
r = self.session.post("http://" + self.IP + "/api/service/reboot.cgi", json=data)
json_response = json.loads(r.text)
self.csrf_param = ""
self.csrf_token = ""
self.session.close()
return json_response["errcode"]
``` |
{
"source": "2XL/junit-daemon",
"score": 2
} |
#### File: namekos/tests/test_nameko.py
```python
import sys, os
source_path = os.path.dirname(os.path.abspath(__file__))
print source_path
sys.path.insert(0, source_path + '/../')
from nameko.standalone.events import event_dispatcher
from nameko.testing.services import entrypoint_waiter
from namekos import EmulatorService
from namekos.settings import DAEMON_LANGUAGE, SERVICE_NAME
def test_handle_code_challenge_submissions(container_factory, rabbit_config):
container = container_factory(EmulatorService, rabbit_config)
container.start()
dispatch = event_dispatcher(rabbit_config)
# print service received payload before execute
with entrypoint_waiter(
container=container,
method_name='check_challenge'):
# broadcast_event
dispatch(service_name=SERVICE_NAME,
event_type='codechallenge_submitted_{language}'.format(
language=DAEMON_LANGUAGE
),
event_data={
'payload': 'payload_value',
'language': DAEMON_LANGUAGE,
'token': 'token goes here'
})
pass
```
#### File: junit-daemon/pipeline/executor.py
```python
import os
class PipelineExecutor(object):
def __init__(self):
self.queue = []
pass
def load_queue_from_submission(self, code=''):
"""
Initialize pipeline operations
:return:
:: generate java project tree with source from submission payload
"""
file_source = None
file_source_path = None
file_jinja_key = None
do_operation = None
is_source = False
for line in code.split('\n'):
if line.startswith('%%'):
split_line = line.split('%%') # '%%CREATE%%app/models.py%%'
operation = split_line[1]
if operation != 'END':
do_operation = operation
is_source = True
file_source_path = split_line[2]
file_jinja_key = split_line[3]
file_source = [] # start empty file
else:
is_source = False
self.queue.append({
'operation': do_operation,
'file_path': file_source_path,
'file_source': '\n'.join(file_source),
'jinja_key': file_jinja_key
})
elif is_source:
# append source
file_source.append(line)
pass
def list_queue(self):
for source_code in self.queue:
print source_code['file_source']
pass
def apply_queue(self):
for source_code in self.queue:
to_apply = getattr(self, 'do_' + source_code['operation'].lower())
assert to_apply(**source_code)
pass
def do_create(self, file_source, operation, file_path, jinja_key, *args, **kwargs):
"""
# create or replace file with value
%%CREATE%%file_path.file_name.extension%%
value
"""
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
if os.path.isfile(file_path):
pass # just checking
with open(file_path, 'w') as f:
f.write(file_source)
return True
pass
def do_update(self, *args, **kwargs):
"""
# update source from existing file, lookup for a key
%%UPDATE%%file_path.file_name.extension%%jinja.key%%
value
:param args:
:param kwargs:
:return:
"""
raise NotImplemented
return True
pass
def do_put(self, *args, **kwargs):
"""
# append source to existing file or create new
%%PUT%%file_path.file_name.extension%%
value
:param args:
:param kwargs:
:return:
"""
raise NotImplemented
return True
pass
def do_drop(self, *args, **kwargs):
"""
# drop file or secction of a file
%%DROP%%file_path.file_name.extension%%key
%%DROP%%file_path.file_name.extension
:param args:
:param kwargs:
:return:
"""
raise NotImplemented
return True
pass
```
#### File: 2XL/junit-daemon/tasks.py
```python
import base64, json, os, sys, yaml
from invoke import task
from pipeline import executor, reporter, exporter
import kombu # dependency of wait for it
import socket, time
@task
def test(ctx, fixture=None, code=None, assertion=None, lenguage='java', framework='junit'):
from workload_generator.workload_generator import WorkloadGenerator
executor = WorkloadGenerator()
executor.submit(fixture=fixture)
pass
@task
def wait(ctx, host='localhost', port=80, retry_itv=1, max_retry=10):
""" Command to wait for it
example:
invoke wait -h='mq' -p=5673
:param ctx:
:param host:
:param port:
:param retry_itv:
:param max_retry:
:return:
"""
available = False
socket_connector = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while not available and max_retry > 0:
available = socket_connector.connect_ex((host, port)) == 0
if available == 0:
continue
else:
time.sleep(retry_itv)
max_retry = max_retry - 1
print "timeout in: {} {}s".format(available, max_retry * retry_itv)
pass
@task
def wait_amqp(ctx, host='localhost', port=5672, user='guest', password='<PASSWORD>', vhost='/', amqp_url=None,
retry_itv=1, max_retry=10):
"""
Options:
-a STRING, --password=STRING
-h STRING, --host=STRING
-m STRING, --amqp-url=STRING
-p, --port
-r, --retry-itv
-u STRING, --user=STRING
-v STRING, --vhost=STRING
-x, --max-retry
:param ctx:
:param host:
:param port:
:param user:
:param password:
:param vhost:
:param amqp_url:
:param retry_itv:
:param max_retry:
:return:
"""
if amqp_url is not "" and amqp_url is None:
amqp_url = 'amqp://{0}:{1}@{2}:{3}/{4}'.format(user, password, host, port, vhost)
with kombu.Connection(amqp_url) as conn:
while max_retry > 0:
try:
conn.connect()
except socket.error:
print "amqp not running, timeout in: {}s".format(max_retry * retry_itv)
max_retry = max_retry - 1
time.sleep(retry_itv)
except Exception as ex:
print "its running! ", ex.message
break
else:
print "its running && valid credentials!"
break
@task
def clean(ctx):
""" Cleans up generated files, clean all files in source
"""
# cleanup migration
ctx.run('rm -rf src/*')
ctx.run('rm -rf out/*')
ctx.run('rm -rf tests/*')
ctx.run('rm -rf reports/*')
@task
def report(ctx):
"""Generate json reports from execution stdout
:param ctx:
:return:
"""
ctx.run('rm -rf reports/*.json')
report_generator = reporter.ReportGenerator()
report_generator.generate_report()
report_generator.export_json_report()
@task
def export(ctx, source_home='src', test_home='tests', fixture_home='fixture', fixture_name='exported_challenge.yml',
is_correct=False):
"""Generate fixture yml from src and tests
:param ctx:
:param source_home: (default:'src')
:param test_home: (default:'tests')
:param fixture_home: (default:'fixture')
:param fixture_name: (default:'exported_challenge.yml')
:param is_correct: (default:False)
:return:
"""
# need placeholder name which should be equal to the fixture content hash
fixture_exporter = exporter.FixtureExporter(
source_home=source_home,
test_home=test_home,
fixture_home=fixture_home,
is_correct=is_correct,
fixture_name=fixture_name)
fixture_exporter.generate_fixture()
fixture_exporter.export_yml_fixture()
@task(pre=[clean], post=[])
def build(ctx, code=None, language='java', fixture=None, test='junit', case='answer'):
"""Provision the project with the provided payload
# -c answer submitted by the user - expect it to be k/v = relative path to `project`
# -t test framework `default: pytest` # dummy field
# -f assertion_fixture, code - expect it to be k/v = relative path to `project`, json encoded
# -l java `default: java` # dummy field
:param ctx:
:param code:
:param language:
:param fixture:
:param test:
:param case:
:return:
"""
# if __name__ == "__main__":
# case = 'answer'
# code = None
# fixture = None
data = None
# provision submission with option: -f file_path.yml
if fixture is not None:
with open(fixture, 'r') as fixture_stream:
challenge = yaml.load(fixture_stream)
code_submission = '\n'.join(challenge['challenge'][case]['files'])
valid_assertion = '\n'.join(challenge['challenge']['valid_assertion']['files'])
data = '\n'.join([code_submission, valid_assertion])
# provision submission from exported_challenge.json
elif code is None:
# load default bootstrap source for demo
with open('data/exported_challenge.json', 'r') as file_stream:
challenge = json.load(file_stream)
code_submission = '\n'.join(challenge['challenge'][case]['files'])
valid_assertion = '\n'.join(challenge['challenge']['valid_assertion']['files'])
data = '\n'.join([code_submission, valid_assertion])
else:
# provision submission with option: -c "string with source code"
data = code
submission = executor.PipelineExecutor()
submission.load_queue_from_submission(code=data)
# submission.list_queue()
submission.apply_queue()
pass
```
#### File: junit-daemon/workload_generator/workload_generator.py
```python
from nameko.standalone.events import event_dispatcher
import yaml, os, random, string
from namekos.settings import BROKER_URL, DAEMON_LANGUAGE
class WorkloadGenerator(object):
def __init__(self, config=None, language=None):
if config is None:
self.config = {
'AMQP_URI': BROKER_URL
}
if language is None:
self.language = DAEMON_LANGUAGE
self.token = 'hash_key of the submission' # todo
self.submission = None
self.token_size = 16
pass
def _broadcast_event(self, service, event, payload):
dispatcher = event_dispatcher(self.config, use_confirms=False)
dispatcher(service, event, payload)
return True
def submit(self, payload=None, fixture=None, case='answer'):
# broadcast submission to emulator-daemon workers
fixture_payload = None
if fixture is None:
pass # no payload nor fixture for submitted
elif isinstance(fixture, str):
if not os.path.exists(fixture):
fixture = os.path.join('fixture', fixture)
if not os.path.exists(fixture):
exit(4)
with open(fixture, 'r') as fstream:
fixture_payload = yaml.load(fstream)
elif isinstance(fixture, dict):
fixture_payload = fixture
else:
exit(2)
if fixture_payload is not None:
payload = fixture_payload
if payload is None:
exit(1)
self._parse_submission(payload)
self._broadcast_event(
service='school',
event='codechallenge_submitted_{language}'.format(
language=self.language
),
payload=dict(
token=self.token,
snippet=self.submission[case],
language=self.language,
valid_assertion=self.submission['valid_assertion']
)
)
def _parse_submission(self, payload):
self.token = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(self.token_size))
self.submission = payload['challenge']
# if __name__ == "__main__":
# workload_generator = WorkloadGenerator
# workload_generator.submit(fixture=yaml.load(fstream))
pass
``` |
{
"source": "2xra/Felda_Project",
"score": 3
} |
#### File: Felda_Project/code/player.py
```python
import pygame
from settings import *
class Player(pygame.sprite.Sprite):
def __init__(self,pos,groups):
super().__init__(groups)
self.image = pygame.image.load('./graphics/test/player.png').convert_alpha()
self.rect = self.image.get_rect(topleft = pos)
self.direction = pygame.math.Vector2()
``` |
{
"source": "2xR/rr.opt.mcts.simple",
"score": 2
} |
#### File: src/examples/gurobi.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.builtins import object, dict, range
import sys
import math
import gurobipy
import rr.opt.mcts.simple as mcts
random = mcts.random
mcts.config_logging(level="DEBUG")
logger = mcts.logger
info = logger.info
debug = logger.debug
EPS = 1e-9
# utility functions
def is_approx(x, y):
return abs(x - y) <= EPS
def is_integral(x):
return abs(x - round(x)) <= EPS
def is_nonzero(x):
return abs(x) > EPS
def verify_sol(filename, sol):
print("checking", filename)
model = gurobipy.gurobi.read(filename)
model.params.outputFlag = 0
intvars = []
for v in model.getVars():
if v.vType != gurobipy.GRB.CONTINUOUS:
intvars.append(v)
v.vType = gurobipy.GRB.CONTINUOUS
assert {v.VarName for v in intvars} == set(sol.data.keys())
for v in intvars:
x = sol.data[v.VarName]
v.lb = v.ub = x
print("\t{}: {}".format(v.VarName, x))
model.optimize()
assert model.status == gurobipy.GRB.Status.OPTIMAL
assert all(is_integral(v.x) for v in intvars)
assert is_approx(model.objVal, sol.obj)
print("solution passed all checks")
def set_var_bounds(domains):
for vdata, (lb, ub) in domains.items():
var = vdata.var
var.lb = lb
var.ub = ub
def solve_lp(model, domains):
set_var_bounds(domains)
model.optimize()
return model.status == gurobipy.GRB.Status.OPTIMAL
class IntVarData(object):
"""A very basic wrapper around gurobi Var objects. This is necessary because gurobi Vars
redefine comparison operators to simplify creation of expressions, e.g. x == y does not
compare the two variables, but instead creates a LinExpr object. This is nice to create models
in a more readable manner, but it breaks any lists or dicts of Var objects, as any attempt to
remove them will remove the wrong Var object. Namely, it will remove the first var that it
tries to compare to the target var, as it creates a LinExpr object (which when evaluated as a
boolean returns True), therefore wrongly saying the any two Vars are equal."""
def __init__(self, var):
self.var = var
self.name = var.VarName
def __repr__(self):
return "<IntVar {} @{:x}>".format(self.name, id(self))
class MipTreeNode(mcts.TreeNode):
@classmethod
def root(cls, filename):
root = cls()
root.model = gurobipy.gurobi.read(filename) # *shared* gurobi model :: Model
root.model.params.outputFlag = 0
root.domains = {} # *node* variable domains :: {IntVarData: (lb, ub)}
root.relaxed = [] # *node* free vars :: [IntVarData]
root.upper_bound = None # *node* upper bound :: float | Infeasible
root.lower_bound = None # *node* lower bound :: float | Infeasible
# collect integer variables and relax them
for var in root.model.getVars():
if var.vType != gurobipy.GRB.CONTINUOUS:
var.vType = gurobipy.GRB.CONTINUOUS
lb = int(math.ceil(var.lb - EPS))
ub = int(math.floor(var.ub + EPS))
assert lb <= ub
vdata = IntVarData(var)
root.domains[vdata] = (lb, ub)
root.relaxed.append(vdata)
# ensure that list is ordered to maintain determinism
root.relaxed.sort(key=lambda vd: vd.name)
info("model has {} vars ({} int)".format(root.model.NumVars, root.model.NumIntVars))
info("int vars: {}".format([vd.name for vd in root.relaxed]))
assert len(root.domains) == len(root.relaxed) == root.model.NumIntVars
root.propagate() # reduce domains and fix any singleton variables
root.solve_relaxation() # solve root relaxation to determine bound
info("ROOT RELAXATION:")
for vdata in root.domains.keys():
info("\t{}: {}".format(vdata.name, vdata.var.x))
return root
def fixed(self):
return {vd.name: lb for vd, (lb, ub) in self.domains.items() if lb == ub}
def __str__(self):
return "\n".join([
"NODE {:x} INFO:".format(id(self)),
"\tfixed: {}".format(self.fixed()),
"\trelaxed: {}".format([vd.name for vd in self.relaxed]),
"\tupper_bound: {}".format(self.upper_bound),
"\tlower_bound: {}".format(self.lower_bound),
"\tsim_count: {}".format(self.sim_count),
"\tsim_sol: {}".format(self.sim_sol),
"\tsim_best: {}".format(self.sim_best),
])
def copy(self):
clone = mcts.TreeNode.copy(self)
# global data (shallow-copied)
clone.model = self.model
# local data (which must be copied)
clone.domains = dict(self.domains)
clone.relaxed = list(self.relaxed)
clone.upper_bound = self.upper_bound
clone.lower_bound = self.lower_bound
return clone
def branches(self):
if len(self.relaxed) == 0:
return []
vdata = random.choice(self.relaxed)
lb, ub = self.domains[vdata]
return [(vdata, value) for value in range(lb, ub + 1)]
def apply(self, branch):
vdata, value = branch
lb, ub = self.domains[vdata]
assert lb < ub
assert lb <= value <= ub
self.domains[vdata] = (value, value)
self.relaxed.remove(vdata)
self.solve_relaxation()
if self.propagate():
self.solve_relaxation()
else:
assert len(self.relaxed) == 0
def solve_relaxation(self):
# solve linear relaxation to find a lower bound for the node
if solve_lp(self.model, self.domains):
self.lower_bound = self.model.objVal
# if all unfixed variables have integral values, we have a full solution
if all(is_integral(vd.var.x) for vd in self.relaxed):
for vdata in self.relaxed:
self.domains[vdata] = (vdata.var.x, vdata.var.x)
self.relaxed = []
self.upper_bound = self.model.objVal
# otherwise we set an Infeasible as upper bound and make the node a leaf
else:
self.lower_bound = mcts.Infeasible(len(self.relaxed))
self.upper_bound = mcts.Infeasible(len(self.relaxed))
self.relaxed = []
def simulate(self):
# return a solution immediately if this is a leaf node
if len(self.relaxed) == 0:
return mcts.Solution(value=self.upper_bound, data=self.fixed())
node = self.copy()
node.solve_relaxation() # determine variable values in initial LP
while len(node.relaxed) > 0:
vdata = random.choice(node.relaxed)
value = vdata.var.x
if random.random() < value - math.floor(value):
value = int(math.ceil(value))
else:
value = int(math.floor(value))
node.apply((vdata, value))
return mcts.Solution(value=node.upper_bound, data=node.fixed())
def bound(self):
return self.lower_bound
def propagate(self):
set_var_bounds(self.domains)
model = self.model
obj_func = model.getObjective()
obj_sense = model.ModelSense
feasible = True
while True:
fixed = []
for vdata in self.relaxed:
var = vdata.var
model.setObjective(var, gurobipy.GRB.MAXIMIZE)
model.optimize()
if model.status != gurobipy.GRB.Status.OPTIMAL:
feasible = False
break
ub = int(math.floor(var.X + EPS))
model.setObjective(var, gurobipy.GRB.MINIMIZE)
model.optimize()
assert model.status == gurobipy.GRB.Status.OPTIMAL
lb = int(math.ceil(var.X - EPS))
if lb > ub:
feasible = False
break
prev_lb, prev_ub = self.domains[vdata]
assert prev_lb <= lb
assert prev_ub >= ub
self.domains[vdata] = (lb, ub)
var.lb = lb
var.ub = ub
if lb == ub:
fixed.append(vdata)
if not feasible or len(fixed) == 0:
break
self.relaxed = [vd for vd in self.relaxed if vd not in fixed]
if not feasible:
self.upper_bound = mcts.Infeasible(len(self.relaxed))
self.lower_bound = mcts.Infeasible(len(self.relaxed))
self.relaxed = []
model.setObjective(obj_func, obj_sense)
return feasible
def main(instance, niter, seed):
root = MipTreeNode.root(instance)
sols = mcts.run(root, iter_limit=niter, rng_seed=seed)
info("solutions found: {}".format(sols))
info("best found objective: {}".format(sols.best.value))
if sols.best.is_feas:
info("best solution (non-zeros):")
for var, val in sols.best.data.items():
if is_nonzero(val):
info("\t{}:\t{}".format(var, val))
return root, sols
usage = """usage: {prog} filename N seed
where"
- filename is in a format recognized by SCIP (mps, lp, ...)
- N is the number of iterations
- seed initializes the pseudo-random generator""".format(prog=sys.argv[0])
if __name__ == "__main__":
if len(sys.argv) == 4:
instance = sys.argv[1]
niter = int(sys.argv[2])
seed = int(sys.argv[3])
main(instance, niter, seed)
else:
print(usage)
exit(1)
```
#### File: src/examples/partition.py
```python
from math import log
from bisect import insort
from collections import defaultdict
import rr.opt.mcts.simple as mcts
JOIN = 0
SPLIT = 1
def load_instance(filepath):
with open(filepath, "rt") as istream:
return [int(line.strip()) for line in istream]
def objective(discrepancy):
return log(abs(discrepancy)+1, 2)
def karmarkar_karp(labels):
labels = list(labels)
edges = []
sum_remaining = sum(n for n, _ in labels)
for _ in range(len(labels) - 1):
n, i = labels.pop()
m, j = labels.pop()
insort(labels, (n-m, i))
edges.append((i, j, SPLIT))
sum_remaining -= 2 * m
assert len(labels) == 1
assert sum_remaining == labels[0][0]
return edges, sum_remaining
class TreeNode(mcts.TreeNode):
EXPANSION_LIMIT = float("inf")
@classmethod
def root(cls, instance):
if isinstance(instance, str):
instance = load_instance(instance)
assert type(instance) is list # NPP instances are flat lists of positive integers
root = cls()
root.labels = sorted((n, i) for i, n in enumerate(instance)) # vertex labels (nums)
root.edges = [] # [(i, j, EDGE_TYPE<JOIN|SPLIT>)]
root.sum_remaining = sum(instance) # sum of all numbers still unassigned
return root
def copy(self):
clone = mcts.TreeNode.copy(self)
clone.labels = list(self.labels)
clone.edges = list(self.edges)
clone.sum_remaining = self.sum_remaining
return clone
def branches(self):
# If there are only 4 or less items left, KK is optimal (and we've already done it in
# simulate()). We only branch if the largest number does not exceed the sum of the other
# items +1, and that was also already verified in the simulate() method.
return () if len(self.labels) <= 4 else (SPLIT, JOIN)
def apply(self, edge_type):
labels = self.labels
n, i = labels.pop()
m, j = labels.pop()
self.edges.append((i, j, edge_type))
if edge_type == SPLIT:
insort(labels, (n-m, i))
self.sum_remaining -= 2 * m
else:
insort(labels, (n+m, i))
def simulate(self):
edges = self.edges
if len(edges) > 0 and edges[-1][-1] == SPLIT:
# reuse parent solution if this is the differencing child
return self.parent.sim_sol
labels = self.labels
largest, i = labels[-1]
delta = largest - (self.sum_remaining - largest)
if delta >= -1:
# the best solution in this subtree consists of putting the largest element in one
# set and the remaining elements in the other
labels.pop()
for _, j in labels:
edges.append((i, j, SPLIT))
del labels[:] # force next branches() call to return empty branch list
return mcts.Solution(value=objective(delta), data=edges)
else:
kk_edges, diff = karmarkar_karp(self.labels)
return mcts.Solution(value=objective(diff), data=edges+kk_edges)
def make_partition(edges):
adj = {
JOIN: defaultdict(set),
SPLIT: defaultdict(set),
}
for i, j, edge_type in edges:
adj_edge_type = adj[edge_type]
adj_edge_type[i].add(j)
adj_edge_type[j].add(i)
nverts = len(edges) + 1
subset = [None] * nverts
_assign_subset(adj, subset, 0, 0)
return subset
def _assign_subset(adj, subset, i, s):
subset[i] = s
for edge_type in (JOIN, SPLIT):
adj_edge_type = adj[edge_type]
adj_edge_type_i = adj_edge_type.pop(i, ())
s_j = s if edge_type == JOIN else 1 - s
for j in adj_edge_type_i:
adj_edge_type[j].remove(i)
_assign_subset(adj, subset, j, s_j)
mcts.config_logging()
r = TreeNode.root("instances/npp/hard1000.dat")
s = mcts.run(r, iter_limit=1000)
``` |
{
"source": "2xR/statemachine",
"score": 3
} |
#### File: rr/statemachine/dynamicdispatch.py
```python
class DynamicDispatchMixin:
"""A mixin which adds dynamic dispatch of state machine events --- enter state, exit state,
and transition --- to different methods according to the states (and input symbol) involved.
These methods are looked up and, if they're callable, are called with the positional and
keyword arguments that accompany the input symbol (passed to `.input()`).
Note that usage of this mixin makes sense only when states and symbols are representable as
strings, as the name of the method to which an event is dispatched is built from a format
string that, by default, uses the state or transition symbol.
As an example, for a machine in state 'a' receiving symbol 's' as input and moving into state
'b', the following handlers would be called (if they exist and are callable):
on_exit_a()
on_transition_a_s_b()
on_enter_b()
The names of the methods to which these events are dispatched can be customized by redefining
the format strings `enter_handler_name`, `exit_handler_name` and `transition_handler_name`.
"""
enter_handler_name = "on_enter_{0}"
exit_handler_name = "on_exit_{0}"
transition_handler_name = "on_transition_{0.source}_{0.input.symbol}_{0.target}"
def on_enter(self, state, *args, **kwargs):
handler = getattr(self, self.enter_handler_name.format(state), None)
return handler(*args, **kwargs) if callable(handler) else None
def on_exit(self, state):
handler = getattr(self, self.exit_handler_name.format(state), None)
return handler() if callable(handler) else None
def on_transition(self, transition):
handler = getattr(self, self.transition_handler_name.format(transition), None)
return handler() if callable(handler) else None
```
#### File: rr/statemachine/finitestateautomaton.py
```python
import collections
from .statemachine import StateMachine
from .transitiongraph import TransitionGraph, TransitionGraphDrivenMixin
class FiniteStateAutomatonTransitionGraph(TransitionGraph):
def __init__(self, transitions=()):
self.arcs = collections.defaultdict(dict)
self.update(transitions)
def add(self, transition):
source, symbol, target = transition
self.arcs[source][symbol] = target
def target(self, source, symbol):
return self.arcs[source][symbol]
class FiniteStateAutomaton(TransitionGraphDrivenMixin, StateMachine):
# Provide access to the transition graph class that should be used when constructing the
# class or instance's transition graph.
TransitionGraph = FiniteStateAutomatonTransitionGraph
def __init__(self, initial_state=None, transition_graph=None):
TransitionGraphDrivenMixin.__init__(self, transition_graph)
StateMachine.__init__(self, initial_state)
``` |
{
"source": "2xsys/debian-server-tools",
"score": 3
} |
#### File: debian-server-tools/mail/mbox_send2.py
```python
import sys
import os
import time
import mailbox
import email
import smtplib
from optparse import OptionParser, make_option
#---------------------------------------------------------------------------
# Set some defaults
defTo = []
defFrom = None
defChunkSize = 100
defChunkDelay = 30
defSmtpHost = 'localhost'
defSmtpPort = 25
defCount = -1
defStart = -1
# define the command line options
option_list = [
make_option('--to', action='append', dest='toAddresses', default=defTo,
help="The address to send the messages to. May be repeated."),
make_option('--from', dest='fromAddress', default=defFrom,
help="The address to send the messages from."),
make_option('--chunk', type='int', dest='chunkSize', default=defChunkSize,
help='How many messages to send in each batch before pausing, default: %d' % defChunkSize),
make_option('--pause', type='int', dest='chunkDelay', default=defChunkDelay,
help='How many seconds to delay between chunks. default: %d' % defChunkDelay),
make_option('--count', type='int', dest='count', default=defCount,
help='How many messages to send before exiting the tool, default is all messages in the mbox.'),
make_option('--start', type='int', dest='start', default=defStart,
help='Which message number to start with. Defaults to where the tool left off the last time, or zero.'),
make_option('--smtpHost', dest='smtpHost', default=defSmtpHost,
help='Hostname where SMTP server is running'),
make_option('--smtpPort', type='int', dest='smtpPort', default=defSmtpPort,
help='Port number to use for connecting to SMTP server'),
]
smtpPassword = None # implies using TLS
#---------------------------------------------------------------------------
def get_hwm(hwmfile):
if not os.path.exists(hwmfile):
return -1
hwm = int(file(hwmfile).read())
return hwm
def set_hwm(hwmfile, count):
f = file(hwmfile, 'w')
f.write(str(count))
f.close()
def main(args):
if sys.version_info < (2,5):
print "Python 2.5 or better is required."
sys.exit(1)
# Parse the command line args
parser = OptionParser(usage="%prog [options] mbox_file(s)",
description=__doc__,
version="%prog 0.9.1",
option_list=option_list)
options, arguments = parser.parse_args(args)
# ensure we have the required options
if not options.toAddresses:
parser.error('At least one To address is required (use --to)')
if not options.fromAddress:
parser.error('From address is required (use --from)')
if not arguments:
parser.error('At least one mbox file is required')
# process the mbox file(s)
for mboxfile in arguments:
print "Opening %s..." % mboxfile
mbox = mailbox.mbox(mboxfile)
totalInMbox = len(mbox)
print "Total messages in mbox: %d" % totalInMbox
hwmfile = mboxfile + '.hwm'
print 'Storing last message processed in %s' % hwmfile
start = get_hwm(hwmfile)
if options.start != -1:
start = options.start
start += 1
print 'Starting with message #%d' % start
totalSent = 0
current = start
# Outer loop continues until either the whole mbox or options.count
# messages have been sent,
while (current < totalInMbox and
(totalSent < options.count or options.count == -1)):
# Inner loop works one chunkSize number of messages at a time,
# pausing and reconnecting to the SMTP server for each chunk.
print 'Connecting to SMTP(%s, %d)' % (options.smtpHost, options.smtpPort)
smtp = smtplib.SMTP(options.smtpHost, options.smtpPort)
if smtpPassword: # use TLS
smtp.ehlo()
smtp.starttls()
smtp.ehlo()
smtp.login(options.fromAddress, smtpPassword)
chunkSent = 0
while chunkSent < options.chunkSize:
msg = mbox[current]
print 'Processing message %d: %s' % (current, msg['Subject'])
# Here is where we actually send the message
smtp.sendmail(options.fromAddress, options.toAddresses, msg.as_string())
set_hwm(hwmfile, current) # set new 'high water mark'
current += 1
totalSent += 1
chunkSent += 1
if (current >= totalInMbox or
(totalSent >= options.count and options.count != -1)):
break
else:
smtp.close()
del smtp
print "Pausing for %d seconds..." % options.chunkDelay,
time.sleep(options.chunkDelay)
print
print 'Goodbye'
#---------------------------------------------------------------------------
if __name__ == '__main__':
main(sys.argv[1:])
```
#### File: debian-server-tools/monitoring/hangouts-notify.py
```python
from httplib2 import Http
from json import dumps
import sys
CHAT_WEBHOOK = ''
def main(argv):
message_headers = {'Content-Type': 'application/json; charset=UTF-8'}
bot_message = {'text': argv[0]}
http_obj = Http()
(resp_headers, content) = http_obj.request(
uri=CHAT_WEBHOOK,
method='POST',
headers=message_headers,
body=dumps(bot_message)
)
if resp_headers.status == 200:
return 0
return 11
if __name__ == '__main__':
exitcode = main(sys.argv[1:])
sys.exit(exitcode)
```
#### File: debian-server-tools/package/index_gen.py
```python
import os
import os.path
import sys
class SimpleHtmlFilelistGenerator:
# start from this directory
base_dir = None
def __init__(self, dir):
self.base_dir = dir
def print_html_header(self):
print """<!DOCTYPE html><html>
<body>
<code>
""",
def print_html_footer(self):
home = 'https://pythonadventures.wordpress.com/2011/03/26/static-html-filelist-generator/'
name = 'Static HTML Filelist Generator'
print '</code>'
href = "<a href=\"%s\">%s</a>" % (home, name)
print "<p><em><sub>This page was generated with Jabba Laci's %s.</sub></em></p>" % href
print """</body>
</html>
""",
def processDirectory ( self, args, dirname, filenames ):
print '<strong>', dirname + '/', '</strong>', '<br>'
for filename in sorted(filenames):
rel_path = os.path.join(dirname, filename)
if rel_path in [sys.argv[0], './index.html']:
continue # exclude this generator script and the generated index.html
if os.path.isfile(rel_path):
href = "<a href=\"%s\">%s</a>" % (rel_path, filename)
print ' ' * 4, href, '<br>'
def start(self):
self.print_html_header()
os.path.walk( self.base_dir, self.processDirectory, None )
self.print_html_footer()
# class SimpleHtmlFilelistGenerator
if __name__ == "__main__":
base_dir = '.'
if len(sys.argv) > 1:
base_dir = sys.argv[1]
gen = SimpleHtmlFilelistGenerator(base_dir)
gen.start()
``` |
{
"source": "2xx4ever/cmdb",
"score": 2
} |
#### File: cmdb/app01/models.py
```python
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
import datetime, time
# Create your models here.
class Asset(models.Model):
ip_pub = models.GenericIPAddressField(max_length=50, verbose_name=u'外网IP地址')
#ip_prv = models.GenericIPAddressField(max_length=50, verbose_name=u'内网IP地址')
hostname = models.CharField(max_length=50, verbose_name=u'主机名')
os = models.CharField(max_length=50, verbose_name=u'操作系统')
cpu_model = models.CharField(max_length=50, verbose_name=u'CPU型号')
cpu = models.CharField(max_length=50, verbose_name=u'CPU')
mem = models.CharField(max_length=50, verbose_name=u'内存')
disk = models.CharField(max_length=50, verbose_name=u'硬盘')
update_time = models.DateTimeField(auto_now=True, verbose_name=u'更新时间')
class Meta:
verbose_name = u"资产信息表"
verbose_name_plural = verbose_name
ordering = ['ip_pub']
def __unicode__(self):
return self.ip_pub
class Host(models.Model):
ip_pub = models.GenericIPAddressField(max_length=20, verbose_name=u'外网IP地址')
ip_prv = models.GenericIPAddressField(max_length=20, null=True, blank=True, verbose_name=u'内网IP地址')
username = models.CharField(max_length=20, verbose_name=u'用户名')
pwd_root = models.CharField(max_length=100, verbose_name=u'root登录密码')
pwd_user = models.CharField(max_length=100, verbose_name=u'普通用户登录密码')
add_time = models.DateTimeField(auto_now_add=True, verbose_name=u'添加时间')
update_time = models.DateTimeField(auto_now=True, verbose_name=u'上次检测时间')
status = models.CharField(max_length=50, verbose_name=u'添加状态')
class Meta:
verbose_name = u"主机信息表"
verbose_name_plural = verbose_name
ordering = ['ip_pub']
def __unicode__(self):
return self.ip_pub
```
#### File: cmdb/other/transfer.py
```python
def do(password):
pattern = {
'\\': "\\\\",
";": "\;",
"'": "\\'",
"<": "\<",
">": "\>",
'"': '\\"',
"|": "\|",
"(": "\(",
")": "\)",
"&": "\&",
"!": "\!",
"`": "\`",
"$": "\$",
"\n": "",
}
for key, value in pattern.items():
password = password.replace(key, value)
return password
if __name__ == '__main__':
print 'Only Run By Import'
```
#### File: cmdb/other/update_pwd.py
```python
import subprocess
import time
import chk_ping
def do(username, ip, pwd, debug):
try:
res = chk_ping.do(ip, 'root', debug)
if res=='unneed':
cmd = "ssh root@%s -C 'echo %s | passwd --stdin %s'" % (ip, pwd, username)
print u'step:开始更新密码'
run = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(output, error) = run.communicate()
# print 'output:', output, 'error:', error
if u'成功' in output:
info = 'success'
elif 'uccess' in output:
info = 'success'
else:
info = 'failed'
else:
info = 'failed'
print u'step:未认证,无法更新密码'
except Exception as e:
print e
info = 'failed'
return info
if __name__ == '__main__':
print 'Only Run By Import'
``` |
{
"source": "2xyo/client-python-1",
"score": 2
} |
#### File: tests/integration/test_concept.py
```python
import unittest
from grakn.client import GraknClient, DataType
import datetime
import uuid
from grakn.exception.GraknError import GraknError
from tests.integration.base import test_Base, GraknServer
client = None
session = None
class test_concept_Base(test_Base):
""" Sets up DB for use in tests """
@classmethod
def setUpClass(cls):
""" Make sure we have some sort of schema and data in DB, only done once """
super(test_concept_Base, cls).setUpClass()
global client, session
# TODO this is not neat - this is basically emulating a constructor/destructor operation using globals
client = GraknClient("localhost:48555")
keyspace = "test_" + str(uuid.uuid4()).replace("-", "_")[:8]
session = client.session(keyspace)
# temp tx to set up DB, don"t save it
tx = session.transaction().write()
try:
# define parentship roles to test agains
tx.query("define "
"parent sub role; "
"child sub role; "
"mother sub role; "
"son sub role; "
"person sub entity, has age, has gender, plays parent, plays child, plays mother, plays son; "
"age sub attribute, datatype long; "
"gender sub attribute, datatype string; "
"parentship sub relation, relates parent, relates child, relates mother, relates son;")
except GraknError as ce:
print(ce)
answers = list(tx.query("match $x isa person, has age 20; get;"))
if len(answers) == 0:
tx.query("insert $x isa person, has age 20;")
tx.commit()
@classmethod
def tearDownClass(cls):
super(test_concept_Base, cls).tearDownClass()
global session, client
session.close()
# clear the test keyspace
client.keyspaces().delete(session.keyspace)
client.close()
def setUp(self):
global session
self.tx = session.transaction().write()
# functions called by `addCleanup` are reliably called independent of test pass or failure
self.addCleanup(self.cleanupTransaction, self.tx)
def cleanupTransaction(self, tx):
tx.close()
class test_Concept(test_concept_Base):
""" Test methods available on all Concepts """
def test_delete_schema_types(self):
car_type = self.tx.put_entity_type("car")
schema_concept = self.tx.get_schema_concept("car")
self.assertTrue(schema_concept.is_schema_concept())
schema_concept.delete()
none_schema_car = self.tx.get_schema_concept("car")
self.assertIsNone(none_schema_car, msg="Deletion of car schema type failed")
def test_delete_instance(self):
car_type = self.tx.put_entity_type("car")
car = car_type.create()
car.delete()
none_car = self.tx.get_concept(car.id)
self.assertIsNone(none_car, msg="Deletion of car instance failed")
def test_re_delete_instance(self):
car_type = self.tx.put_entity_type("car")
car = car_type.create()
car.delete()
none_car = self.tx.get_concept(car.id)
self.assertIsNone(none_car)
with self.assertRaises(GraknError) as context:
car.delete()
self.assertTrue("FAILED_PRECONDITION" in str(context.exception))
def test_is_deleted(self):
car_type = self.tx.put_entity_type("car")
car = car_type.create()
self.assertFalse(car.is_deleted())
car.delete()
self.assertTrue(car.is_deleted())
car2 = car_type.create()
self.tx.query("match $x isa car; delete $x;")
self.assertTrue(car2.is_deleted)
def test_is_each_schema_type(self):
car_type = self.tx.put_entity_type("car")
car = car_type.create()
self.assertTrue(car.is_entity())
self.assertFalse(car.is_attribute())
self.assertFalse(car.is_relation())
rel_type = self.tx.put_relation_type("owner")
owner = rel_type.create()
self.assertFalse(owner.is_entity())
self.assertFalse(owner.is_attribute())
self.assertTrue(owner.is_relation())
attr_type = self.tx.put_attribute_type("age", DataType.LONG)
age = attr_type.create(50)
self.assertFalse(age.is_entity())
self.assertTrue(age.is_attribute())
self.assertFalse(age.is_relation())
class test_SchemaConcept(test_concept_Base):
""" Test methods available on all SchemaConcepts """
def test_set_label(self):
""" Get and set labels """
with self.subTest(i=0):
# get label
car_schema_type = self.tx.put_entity_type("car")
car_type = self.tx.get_schema_concept("car")
self.assertEqual(car_type.label(), "car")
with self.subTest(i=1):
# set label
car_type = self.tx.get_schema_concept("car")
car_type.label("vehicle")
vehicle_type = self.tx.get_schema_concept("vehicle")
self.assertEqual(vehicle_type.label(), "vehicle")
with self.subTest(i=2):
bike_type = self.tx.get_schema_concept("bike")
with self.assertRaises(AttributeError):
bike_type.label("")
with self.assertRaises(AttributeError):
bike_type.label(100)
self.assertIsNone(bike_type)
def test_is_implicit(self):
""" Test implicit schema concepts """
person = self.tx.get_schema_concept("person")
self.assertFalse(person.is_implicit())
implicit_concept = self.tx.get_schema_concept("@has-age")
self.assertTrue(implicit_concept.is_implicit())
def test_get_sups(self):
""" Test get super types of a schema concept -- recall a type is supertype of itself always """
person = self.tx.get_schema_concept("person")
sups = list(person.sups())
self.assertEqual(len(sups), 2, msg="person does not have 2 sups")
sup_labels = [concept.label() for concept in sups]
self.assertTrue("person" in sup_labels and "entity" in sup_labels)
# check supertype of toplevel schema concepts
schema_entity = self.tx.get_schema_concept("entity")
thing_type = schema_entity.sup() # this is Thing
self.assertEqual(thing_type.base_type, "META_TYPE")
thing_sup = thing_type.sup()
self.assertIsNone(thing_sup)
def test_set_sups(self):
""" Test setting super type of a schema concept """
human_schema_concept = self.tx.put_entity_type("human")
male_schema_concept = self.tx.put_entity_type("male")
human_sup = human_schema_concept.sup()
self.assertEqual(human_sup.base_type, "ENTITY_TYPE")
male_schema_concept.sup(human_schema_concept)
sup = male_schema_concept.sup()
self.assertEqual(sup.label(), "human")
def test_get_subs(self):
""" Test get sub types of schema concept -- recall a type is a subtype of itself always """
entity = self.tx.get_schema_concept("entity")
subs = list(entity.subs())
self.assertEqual(len(subs), 2, msg="entity does not have 2 subs")
subs_labels = [sub.label() for sub in subs]
self.assertTrue('entity' in subs_labels and 'person' in subs_labels)
class test_Type(test_concept_Base):
""" Tests concept API of things common to Type objects """
def test_is_abstract(self):
""" Tests get/set of is_abstract on types """
dog_type = self.tx.put_entity_type("dog")
with self.subTest(i=0):
abstract = dog_type.is_abstract()
self.assertFalse(abstract)
with self.subTest(i=1):
dog_type.is_abstract(True)
abstract = dog_type.is_abstract() #re-retrieve from server
self.assertTrue(abstract)
with self.subTest(i=2):
dog_type.is_abstract(False)
abstract = dog_type.is_abstract()
self.assertFalse(abstract)
def test_plays_methods(self):
""" Test get/set/delete plays ie. roles """
father = self.tx.put_role("father")
with self.subTest(i=0):
person_schema_type = self.tx.get_schema_concept("person")
person_plays = list(person_schema_type.playing())
# by default, they play 4 explicit roles and 2 @has-... roles
self.assertEqual(len(person_plays), 6)
with self.subTest(i=1):
person_schema_type.plays(father)
updated_person_plays = person_schema_type.playing()
labels = [role.label() for role in updated_person_plays]
self.assertEqual(len(labels), 7)
self.assertTrue("father" in labels)
with self.subTest(i=2):
# remove role/plays from person
person_schema_type.unplay(father)
updated_person_plays = person_schema_type.playing()
labels = [role.label() for role in updated_person_plays]
self.assertEqual(len(labels), 6)
self.assertFalse("father" in labels)
def test_attributes_methods(self):
""" Test get/set/delete attributes """
person = self.tx.get_schema_concept("person")
haircolor_attr = self.tx.put_attribute_type("haircolor", DataType.STRING)
with self.subTest(i=0):
# get attrs
current_attrs = person.attributes()
labels = [attr.label() for attr in current_attrs]
self.assertEqual(len(labels), 2) # has age, gender to start with
with self.subTest(i=1):
# add an attr
person.has(haircolor_attr)
new_attrs = person.attributes()
new_labels = [attr.label() for attr in new_attrs]
self.assertEqual(len(new_labels), 3)
self.assertTrue('haircolor' in new_labels)
with self.subTest(i=2):
# delete an attrs
person.unhas(haircolor_attr)
attrs_fewer = person.attributes()
labels_fewer = [attr.label() for attr in attrs_fewer]
self.assertEqual(len(labels_fewer), 2)
self.assertFalse('haircolor' in labels_fewer)
def test_instances(self):
""" Test retrieving instances of a type """
person = self.tx.get_schema_concept("person")
people = list(person.instances())
person_inst = person.create()
people_more = list(person.instances())
self.assertEqual(len(people_more) - len(people), 1)
def test_key(self):
""" Test get/set/delete key on Type """
person_type = self.tx.get_schema_concept("person")
name_attr_type = self.tx.put_attribute_type('name', DataType.STRING)
with self.subTest(i=0):
# check current keys
keys = list(person_type.keys())
self.assertEqual(len(keys), 0, "Person has more than 0 keys already")
with self.subTest(i=1):
# set a key
person_type.key(name_attr_type)
keys = list(person_type.keys())
self.assertEqual(len(keys), 1)
self.assertEqual(keys[0].base_type, "ATTRIBUTE_TYPE")
self.assertEqual(keys[0].label(), 'name')
with self.subTest(i=2):
# remove a key
person_type.unkey(name_attr_type)
keys = list(person_type.keys())
self.assertEqual(len(keys), 0)
class test_EntityType(test_concept_Base):
def test_create(self):
person_type = self.tx.get_schema_concept("person")
person = person_type.create()
self.assertTrue(person.is_entity())
class test_AttributeType(test_concept_Base):
def test_create(self):
str_attr_type = self.tx.put_attribute_type("firstname", DataType.STRING)
john = str_attr_type.create("john")
self.assertTrue(john.is_attribute())
self.assertEqual(john.value(), "john")
bool_attr_type = self.tx.put_attribute_type("employed", DataType.BOOLEAN)
employed = bool_attr_type.create(True)
self.assertEqual(employed.value(), True)
double_attr_type = self.tx.put_attribute_type("length", DataType.DOUBLE)
one = double_attr_type.create(1.0)
self.assertEqual(one.value(), 1.0)
def test_data_type(self):
str_attr_type = self.tx.put_attribute_type("firstname", DataType.STRING)
self.assertEqual(str_attr_type.data_type(), DataType.STRING)
bool_attr_type = self.tx.put_attribute_type("employed", DataType.BOOLEAN)
self.assertEqual(bool_attr_type.data_type(), DataType.BOOLEAN)
double_attr_type = self.tx.put_attribute_type("length", DataType.DOUBLE)
self.assertEqual(double_attr_type.data_type(), DataType.DOUBLE)
long_attr_type = self.tx.put_attribute_type("randomint", DataType.LONG)
self.assertEqual(long_attr_type.data_type(), DataType.LONG)
def test_attribute(self):
""" Test retrieve attribute instances """
name = self.tx.put_attribute_type("name", DataType.STRING)
john = name.create("john")
with self.subTest(i=0):
# retrieve existing attr client
retrieved_john = name.attribute("john")
self.assertEqual(retrieved_john.value(), john.value())
self.assertTrue(retrieved_john.is_attribute())
with self.subTest(i=1):
# retrieve nonexistant attr client
retrieved_none = name.attribute("nobody")
self.assertIsNone(retrieved_none)
def test_regex(self):
""" Test get/set regex """
attr_type = self.tx.put_attribute_type("dogbadness", DataType.STRING)
empty_regex = attr_type.regex()
self.assertEqual(len(empty_regex), 0, msg="Unset regex does not have length 0")
attr_type.regex("(good|bad)-dog")
regex = attr_type.regex()
self.assertEqual(regex, "(good|bad)-dog")
class test_RelationType(test_concept_Base):
def test_create(self):
rel_type = self.tx.put_relation_type("owner")
rel = rel_type.create()
self.assertTrue(rel.is_relation())
self.assertTrue(rel_type.is_relation_type())
def test_relates(self):
""" Test get/relate/unrelate roles for a relation type """
ownership = self.tx.put_relation_type("ownership")
role_owner = self.tx.put_role("owner")
role_owned = self.tx.put_role("owned")
with self.subTest(i=0):
# currently no roles in the new relation
roles = list(ownership.roles())
self.assertEqual(len(roles), 0)
with self.subTest(i=1):
# set roles in relation
ownership.relates(role_owner)
ownership.relates(role_owned)
roles = list(ownership.roles())
self.assertEqual(len(roles), 2)
with self.subTest(i=2):
# unrelate a role
ownership.unrelate(role_owned)
roles = list(ownership.roles())
self.assertEqual(len(roles), 1)
self.assertEqual(roles[0].base_type, "ROLE")
class test_Rule(test_concept_Base):
def test_when_then(self):
""" Test get valid when/then """
label = "genderizedparentship"
when = "{ (parent: $p, child: $c) isa parentship; $c has gender \"male\"; $p has gender \"female\"; };"
then = "{ (mother: $p, son: $c) isa parentship; };"
rule = self.tx.put_rule(label, when, then)
self.assertEqual(rule.get_when(), when)
self.assertEqual(rule.get_then(), then)
def test_none_when_then(self):
""" Test get when/then for rule with null when/then """
rule = self.tx.get_schema_concept('rule')
self.assertIsNone(rule.get_when())
self.assertIsNone(rule.get_then())
class test_Role(test_concept_Base):
def test_relations(self):
""" Test retrieving relations of a role """
# parent role, parentship already exist
result = [ans.get("x") for ans in self.tx.query("match $x type parent; get;")]
parent_role = result[0]
self.assertEqual(parent_role.base_type, "ROLE")
relations = list(parent_role.relations())
self.assertEqual(len(relations), 1)
self.assertEqual(relations[0].base_type, "RELATION_TYPE")
self.assertEqual(relations[0].label(), "parentship")
def test_players(self):
""" Test retrieving entity types playing this role """
result = [ans.get("x") for ans in self.tx.query("match $x type parent; get;")]
parent_role = result[0]
self.assertEqual(parent_role.base_type, "ROLE")
entity_types = list(parent_role.players())
self.assertEqual(len(entity_types), 1)
self.assertEqual(entity_types[0].base_type, "ENTITY_TYPE")
self.assertEqual(entity_types[0].label(), "person")
class test_Thing(test_concept_Base):
def test_is_inferred(self):
person_type = self.tx.get_schema_concept("person")
person = person_type.create()
self.assertFalse(person.is_inferred())
def test_type(self):
person_type = self.tx.get_schema_concept("person")
person = person_type.create()
p_type = person.type()
self.assertEqual(p_type.id, person_type.id) # same schema concept
self.assertTrue(p_type.is_type())
def test_relations(self):
""" Test retrieve relations narrowed optionally by roles """
# create a first relation
sibling_type = self.tx.put_relation_type('sibling')
brother_role = self.tx.put_role("brother")
sibling_type.relates(brother_role)
person = self.tx.get_schema_concept("person")
# create a second relation
ownership_type = self.tx.put_relation_type("ownership")
owner_role = self.tx.put_role("owner")
ownership_type.relates(owner_role)
person.plays(owner_role)
# connect entities/relation instances
sibling = sibling_type.create()
ownership = ownership_type.create()
son = person.create()
sibling.assign(brother_role, son) # assign son to sibling rel
ownership.assign(owner_role, son) # attach son to owner rel
# retrieve all relations
rels = list(son.relations())
self.assertEqual(len(rels), 2)
rel_ids = [rel.id for rel in rels]
self.assertTrue(sibling.id in rel_ids and ownership.id in rel_ids)
# retrieve filtered by only the owner role
filtered_rels = list(son.relations(owner_role))
self.assertEqual(len(filtered_rels), 1)
self.assertEqual(filtered_rels[0].id, ownership.id)
def test_roles(self):
# create a relation
ownership_type = self.tx.put_relation_type("ownership")
owner_role = self.tx.put_role("owner")
ownership_type.relates(owner_role)
person_type = self.tx.get_schema_concept("person")
person_type.plays(owner_role)
# connect entities/relation instances
ownership = ownership_type.create()
person = person_type.create()
ownership.assign(owner_role, person) # attach son to owner rel
roles = list(person.roles())
self.assertEqual(len(roles), 1)
self.assertEqual(roles[0].id, owner_role.id)
def test_has_unhas_attributes(self):
""" Test has/unhas/get attributes """
person_type = self.tx.get_schema_concept("person")
name_attr_type = self.tx.put_attribute_type("name", DataType.STRING)
person_type.has(name_attr_type)
person = person_type.create()
attr_john = name_attr_type.create("john")
person.has(attr_john)
attrs = list(person.attributes())
self.assertEqual(len(attrs), 1)
self.assertEqual(attrs[0].id, attr_john.id)
person.unhas(attr_john)
empty_attrs = list(person.attributes())
self.assertEqual(len(empty_attrs), 0)
def test_attributes(self):
""" Test retrieve attrs optionally narrowed by types """
person_type = self.tx.get_schema_concept("person")
name_attr = self.tx.put_attribute_type("name", DataType.STRING)
foo_attr = self.tx.put_attribute_type("foo", DataType.BOOLEAN)
bar_attr = self.tx.put_attribute_type("bar", DataType.LONG)
person_type.has(name_attr)
person_type.has(foo_attr)
person = person_type.create()
name = name_attr.create("john")
foo = foo_attr.create(False)
person.has(name)
person.has(foo)
attrs = list(person.attributes())
self.assertEqual(len(attrs), 2)
for attr in attrs:
self.assertTrue(attr.is_attribute())
#filtered attrs
attrs = list(person.attributes(name_attr))
self.assertEqual(len(attrs), 1)
self.assertTrue(attrs[0].is_attribute())
self.assertEqual(attrs[0].id, name.id)
attrs = list(person.attributes(name_attr, foo_attr))
self.assertEqual(len(attrs), 2)
#nonexistant filtering
attrs = list(person.attributes(bar_attr)) # not attached
self.assertEqual(len(attrs), 0)
def test_keys(self):
""" Test retrieving keys optionally filtered by attribute types """
person_type = self.tx.get_schema_concept("person")
name_type = self.tx.put_attribute_type("name", DataType.STRING)
surname_type = self.tx.put_attribute_type("surname", DataType.STRING)
person_type.key(name_type)
person_type.has(surname_type)
name = name_type.create("john")
surname = surname_type.create("lennon")
person = person_type.create()
person.has(name)
person.has(surname)
keys = list(person.keys())
self.assertEqual(len(keys), 1)
self.assertEqual(keys[0].id, name.id)
filtered_keys = list(person.keys(name_type, surname_type))
self.assertEqual(len(filtered_keys), 1)
self.assertEqual(filtered_keys[0].id, name.id)
empty_keys = list(person.keys(surname_type))
self.assertEqual(len(empty_keys), 0)
class test_Attribute(test_concept_Base):
def test_value(self):
""" Get attribute value """
double_attr_type = self.tx.put_attribute_type("length", DataType.DOUBLE)
double = double_attr_type.create(43.1)
self.assertEqual(double.value(), 43.1)
def test_get_date_value(self):
date_type = self.tx.put_attribute_type("birthdate", DataType.DATE)
person_type = self.tx.get_schema_concept("person")
person_type.has(date_type)
concepts = [ans.get("x") for ans in self.tx.query("insert $x isa person, has birthdate 2018-08-06;")]
person = concepts[0]
attrs_iter = person.attributes()
for attr_concept in attrs_iter:
# pick out the birthdate
if attr_concept.type().label() == "birthdate":
date = attr_concept.value()
self.assertIsInstance(date, datetime.datetime)
self.assertEqual(date.year, 2018)
self.assertEqual(date.month, 8)
self.assertEqual(date.day, 6)
return
def test_set_date_value(self):
date_type = self.tx.put_attribute_type("birthdate", DataType.DATE)
test_date = datetime.datetime(year=2018, month=6, day=6)
date_attr_inst = date_type.create(test_date)
value = date_attr_inst.value() # retrieve from server
self.assertIsInstance(value, datetime.datetime)
self.assertEqual(value.timestamp(), test_date.timestamp())
def test_owners(self):
""" Test retrieving entities that have an attribute """
person_type = self.tx.get_schema_concept("person")
animal_type = self.tx.put_entity_type("animal")
name_type = self.tx.put_attribute_type("name", DataType.STRING)
person_type.has(name_type)
animal_type.has(name_type)
person = person_type.create()
animal = animal_type.create()
john = name_type.create("john")
person.has(john)
animal.has(john)
owners = list(john.owners())
self.assertEqual(len(owners), 2)
labels = [x.id for x in owners]
self.assertTrue(person.id in labels and animal.id in labels)
class test_Relation(test_concept_Base):
def test_role_players_2_roles_1_player(self):
""" Test role_players_map and role_players with 2 roles and 1 player each """
parentship_type = self.tx.get_schema_concept("parentship")
person_type = self.tx.get_schema_concept("person")
parent_role = self.tx.get_schema_concept("parent")
child_role = self.tx.get_schema_concept("child")
parent = person_type.create()
child = person_type.create()
parentship = parentship_type.create()
parentship.assign(parent_role, parent)
parentship.assign(child_role, child)
role_players_map = parentship.role_players_map()
self.assertEqual(len(role_players_map.keys()), 2)
for role in role_players_map:
players_set = role_players_map[role]
self.assertEqual(len(players_set), 1)
self.assertTrue(role.is_role())
role_players = list(parentship.role_players())
self.assertEqual(len(role_players), 2)
def test_role_players_1_role_2_players(self):
parentship_type = self.tx.get_schema_concept("parentship")
person_type = self.tx.get_schema_concept("person")
parent_role = self.tx.get_schema_concept("parent")
parent = person_type.create()
another_parent = person_type.create()
parentship = parentship_type.create()
parentship.assign(parent_role, parent)
parentship.assign(parent_role, another_parent)
role_players_map = parentship.role_players_map()
self.assertEqual(len(role_players_map.keys()), 1)
for role in role_players_map:
players_set = role_players_map[role]
self.assertEqual(len(players_set), 2)
self.assertTrue(role.is_role())
role_players = list(parentship.role_players())
self.assertEqual(len(role_players), 2)
def test_role_players_2_roles_same_player(self):
parentship_type = self.tx.get_schema_concept("parentship")
person_type = self.tx.get_schema_concept("person")
parent_role = self.tx.get_schema_concept("parent")
child_role = self.tx.get_schema_concept("child")
self_parent = person_type.create()
parentship = parentship_type.create()
parentship.assign(parent_role, self_parent)
parentship.assign(child_role, self_parent)
role_players_map = parentship.role_players_map()
self.assertEqual(len(role_players_map.keys()), 2)
for role in role_players_map:
players_set = role_players_map[role]
self.assertEqual(len(players_set), 1)
self.assertTrue(role.is_role())
role_players = list(parentship.role_players())
self.assertEqual(len(role_players), 1)
self.assertTrue(role_players[0].is_thing())
def test_assign_unassign(self):
parentship_type = self.tx.get_schema_concept("parentship")
person_type = self.tx.get_schema_concept("person")
parent_role = self.tx.get_schema_concept("parent")
person = person_type.create()
parentship = parentship_type.create()
empty_role_players = list(parentship.role_players())
self.assertEqual(len(empty_role_players), 0)
parentship.assign(parent_role, person)
role_players = list(parentship.role_players())
self.assertEqual(len(role_players), 1)
self.assertEqual(role_players[0].id, person.id)
parentship.unassign(parent_role, person)
post_remove_role_players = list(parentship.role_players())
self.assertEqual(len(post_remove_role_players), 0)
def test_role_players_filtered_by_role(self):
parentship_type = self.tx.get_schema_concept("parentship")
person_type = self.tx.get_schema_concept("person")
parent_role = self.tx.get_schema_concept("parent")
child_role = self.tx.get_schema_concept("child")
parent = person_type.create()
child = person_type.create()
parentship = parentship_type.create()
parentship.assign(parent_role, parent)
parentship.assign(child_role, child)
# no filter
role_players = list(parentship.role_players())
self.assertEqual(len(role_players), 2)
# single filter
filtered_role_players = list(parentship.role_players(child_role))
self.assertEqual(len(filtered_role_players), 1)
self.assertEqual(filtered_role_players[0].id, child.id)
# allow both
double_filter_role_players = list(parentship.role_players(child_role, parent_role))
self.assertEqual(len(double_filter_role_players), 2)
if __name__ == "__main__":
with GraknServer():
unittest.main(verbosity=2)
``` |
{
"source": "2xyo/cti-python-stix2",
"score": 3
} |
#### File: stix2/datastore/filters.py
```python
import collections
from datetime import datetime
import six
import stix2.utils
"""Supported filter operations"""
FILTER_OPS = ['=', '!=', 'in', '>', '<', '>=', '<=', 'contains']
"""Supported filter value types"""
FILTER_VALUE_TYPES = (
bool, dict, float, int, list, tuple, six.string_types,
datetime,
)
def _check_filter_components(prop, op, value):
"""Check that filter meets minimum validity.
Note:
Currently can create Filters that are not valid STIX2 object common
properties, as filter.prop value is not checked, only filter.op,
filter value are checked here. They are just ignored when applied
within the DataSource API. For example, a user can add a TAXII Filter,
that is extracted and sent to a TAXII endpoint within TAXIICollection
and not applied locally (within this API).
"""
if op not in FILTER_OPS:
# check filter operator is supported
raise ValueError("Filter operator '%s' not supported for specified property: '%s'" % (op, prop))
if not isinstance(value, FILTER_VALUE_TYPES):
# check filter value type is supported
raise TypeError("Filter value of '%s' is not supported. The type must be a Python immutable type or dictionary" % type(value))
if prop == 'type' and '_' in value:
# check filter where the property is type, value (type name) cannot have underscores
raise ValueError("Filter for property 'type' cannot have its value '%s' include underscores" % value)
return True
class Filter(collections.namedtuple('Filter', ['property', 'op', 'value'])):
"""STIX 2 filters that support the querying functionality of STIX 2
DataStores and DataSources.
Initialized like a Python tuple.
Args:
property (str): filter property name, corresponds to STIX 2 object property
op (str): operator of the filter
value (str): filter property value
Example:
Filter("id", "=", "malware--0f862b01-99da-47cc-9bdb-db4a86a95bb1")
"""
__slots__ = ()
def __new__(cls, prop, op, value):
# If value is a list, convert it to a tuple so it is hashable.
if isinstance(value, list):
value = tuple(value)
_check_filter_components(prop, op, value)
self = super(Filter, cls).__new__(cls, prop, op, value)
return self
def _check_property(self, stix_obj_property):
"""Check a property of a STIX Object against this filter.
Args:
stix_obj_property: value to check this filter against
Returns:
True if property matches the filter,
False otherwise.
"""
# If filtering on a timestamp property and the filter value is a string,
# try to convert the filter value to a datetime instance.
if isinstance(stix_obj_property, datetime) and \
isinstance(self.value, six.string_types):
filter_value = stix2.utils.parse_into_datetime(self.value)
else:
filter_value = self.value
if self.op == "=":
return stix_obj_property == filter_value
elif self.op == "!=":
return stix_obj_property != filter_value
elif self.op == "in":
return stix_obj_property in filter_value
elif self.op == "contains":
if isinstance(filter_value, dict):
return filter_value in stix_obj_property.values()
else:
return filter_value in stix_obj_property
elif self.op == ">":
return stix_obj_property > filter_value
elif self.op == "<":
return stix_obj_property < filter_value
elif self.op == ">=":
return stix_obj_property >= filter_value
elif self.op == "<=":
return stix_obj_property <= filter_value
else:
raise ValueError("Filter operator: {0} not supported for specified property: {1}".format(self.op, self.property))
def apply_common_filters(stix_objs, query):
"""Evaluate filters against a set of STIX 2.0 objects.
Supports only STIX 2.0 common property properties.
Args:
stix_objs (iterable): iterable of STIX objects to apply the query to
query (non-iterator iterable): iterable of filters. Can't be an
iterator (e.g. generator iterators won't work), since this is
used in an inner loop of a nested loop. So we require the ability
to traverse the filters repeatedly.
Yields:
STIX objects that successfully evaluate against the query.
"""
for stix_obj in stix_objs:
clean = True
for filter_ in query:
match = _check_filter(filter_, stix_obj)
if not match:
clean = False
break
# if object unmarked after all filters, add it
if clean:
yield stix_obj
def _check_filter(filter_, stix_obj):
"""Evaluate a single filter against a single STIX 2.0 object.
Args:
filter_ (Filter): filter to match against
stix_obj: STIX object to apply the filter to
Returns:
True if the stix_obj matches the filter,
False if not.
"""
# For properties like granular_markings and external_references
# need to extract the first property from the string.
prop = filter_.property.split('.')[0]
if prop not in stix_obj.keys():
# check filter "property" is in STIX object - if cant be
# applied to STIX object, STIX object is discarded
# (i.e. did not make it through the filter)
return False
if '.' in filter_.property:
# Check embedded properties, from e.g. granular_markings or external_references
sub_property = filter_.property.split('.', 1)[1]
sub_filter = filter_._replace(property=sub_property)
if isinstance(stix_obj[prop], list):
for elem in stix_obj[prop]:
if _check_filter(sub_filter, elem) is True:
return True
return False
else:
return _check_filter(sub_filter, stix_obj[prop])
elif isinstance(stix_obj[prop], list):
# Check each item in list property to see if it matches
for elem in stix_obj[prop]:
if filter_._check_property(elem) is True:
return True
return False
else:
# Check if property matches
return filter_._check_property(stix_obj[prop])
class FilterSet(object):
"""Internal STIX2 class to facilitate the grouping of Filters
into sets. The primary motivation for this class came from the problem
that Filters that had a dict as a value could not be added to a Python
set as dicts are not hashable. Thus this class provides set functionality
but internally stores filters in a list.
"""
def __init__(self, filters=None):
"""
Args:
filters: see FilterSet.add()
"""
self._filters = []
if filters:
self.add(filters)
def __iter__(self):
"""Provide iteration functionality of FilterSet."""
for f in self._filters:
yield f
def __len__(self):
"""Provide built-in len() utility of FilterSet."""
return len(self._filters)
def add(self, filters=None):
"""Add a Filter, FilterSet, or list of Filters to the FilterSet.
Operates like set, only adding unique stix2.Filters to the FilterSet
Note:
method designed to be very accomodating (i.e. even accepting filters=None)
as it allows for blind calls (very useful in DataStore)
Args:
filters: stix2.Filter OR list of stix2.Filter OR stix2.FilterSet
"""
if not filters:
# so add() can be called blindly, useful for
# DataStore/Environment usage of filter operations
return
if not isinstance(filters, (FilterSet, list)):
filters = [filters]
for f in filters:
if f not in self._filters:
self._filters.append(f)
def remove(self, filters=None):
"""Remove a Filter, list of Filters, or FilterSet from the FilterSet.
Note:
method designed to be very accomodating (i.e. even accepting filters=None)
as it allows for blind calls (very useful in DataStore)
Args:
filters: stix2.Filter OR list of stix2.Filter or stix2.FilterSet
"""
if not filters:
# so remove() can be called blindly, useful for
# DataStore/Environemnt usage of filter ops
return
if not isinstance(filters, (FilterSet, list)):
filters = [filters]
for f in filters:
self._filters.remove(f)
```
#### File: test/v21/test_indicator.py
```python
import datetime as dt
import re
import pytest
import pytz
import stix2
from .constants import FAKE_TIME, INDICATOR_ID, INDICATOR_KWARGS
EXPECTED_INDICATOR = """{
"type": "indicator",
"spec_version": "2.1",
"id": "indicator--a740531e-63ff-4e49-a9e1-a0a3eed0e3e7",
"created": "2017-01-01T00:00:01.000Z",
"modified": "2017-01-01T00:00:01.000Z",
"pattern": "[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']",
"pattern_type": "stix",
"pattern_version": "2.1",
"valid_from": "1970-01-01T00:00:01Z"
}"""
EXPECTED_INDICATOR_REPR = "Indicator(" + " ".join("""
type='indicator',
spec_version='2.1',
id='indicator--a740531e-63ff-4e49-a9e1-a0a3eed0e3e7',
created='2017-01-01T00:00:01.000Z',
modified='2017-01-01T00:00:01.000Z',
pattern="[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']",
pattern_type='stix',
pattern_version='2.1',
valid_from='1970-01-01T00:00:01Z'
""".split()) + ")"
def test_indicator_with_all_required_properties():
now = dt.datetime(2017, 1, 1, 0, 0, 1, tzinfo=pytz.utc)
epoch = dt.datetime(1970, 1, 1, 0, 0, 1, tzinfo=pytz.utc)
ind = stix2.v21.Indicator(
type="indicator",
id=INDICATOR_ID,
created=now,
modified=now,
pattern="[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']",
pattern_type="stix",
valid_from=epoch,
)
assert ind.revoked is False
assert str(ind) == EXPECTED_INDICATOR
rep = re.sub(r"(\[|=| )u('|\"|\\\'|\\\")", r"\g<1>\g<2>", repr(ind))
assert rep == EXPECTED_INDICATOR_REPR
def test_indicator_autogenerated_properties(indicator):
assert indicator.type == 'indicator'
assert indicator.spec_version == '2.1'
assert indicator.id == 'indicator--00000000-0000-4000-8000-000000000001'
assert indicator.created == FAKE_TIME
assert indicator.modified == FAKE_TIME
assert indicator.indicator_types == ['malicious-activity']
assert indicator.pattern == "[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']"
assert indicator.valid_from == FAKE_TIME
assert indicator['type'] == 'indicator'
assert indicator['spec_version'] == '2.1'
assert indicator['id'] == 'indicator--00000000-0000-4000-8000-000000000001'
assert indicator['created'] == FAKE_TIME
assert indicator['modified'] == FAKE_TIME
assert indicator['indicator_types'] == ['malicious-activity']
assert indicator['pattern'] == "[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']"
assert indicator['valid_from'] == FAKE_TIME
def test_indicator_type_must_be_indicator():
with pytest.raises(stix2.exceptions.InvalidValueError) as excinfo:
stix2.v21.Indicator(type='xxx', **INDICATOR_KWARGS)
assert excinfo.value.cls == stix2.v21.Indicator
assert excinfo.value.prop_name == "type"
assert excinfo.value.reason == "must equal 'indicator'."
assert str(excinfo.value) == "Invalid value for Indicator 'type': must equal 'indicator'."
def test_indicator_id_must_start_with_indicator():
with pytest.raises(stix2.exceptions.InvalidValueError) as excinfo:
stix2.v21.Indicator(id='my-prefix--', **INDICATOR_KWARGS)
assert excinfo.value.cls == stix2.v21.Indicator
assert excinfo.value.prop_name == "id"
assert excinfo.value.reason == "must start with 'indicator--'."
assert str(excinfo.value) == "Invalid value for Indicator 'id': must start with 'indicator--'."
def test_indicator_required_properties():
with pytest.raises(stix2.exceptions.MissingPropertiesError) as excinfo:
stix2.v21.Indicator()
assert excinfo.value.cls == stix2.v21.Indicator
assert excinfo.value.properties == ["pattern", "pattern_type", "valid_from"]
assert str(excinfo.value) == "No values for required properties for Indicator: (pattern, pattern_type, valid_from)."
def test_indicator_required_property_pattern():
with pytest.raises(stix2.exceptions.MissingPropertiesError) as excinfo:
stix2.v21.Indicator(indicator_types=['malicious-activity'])
assert excinfo.value.cls == stix2.v21.Indicator
assert excinfo.value.properties == ["pattern", "pattern_type", "valid_from"]
def test_indicator_created_ref_invalid_format():
with pytest.raises(stix2.exceptions.InvalidValueError) as excinfo:
stix2.v21.Indicator(created_by_ref='myprefix--12345678', **INDICATOR_KWARGS)
assert excinfo.value.cls == stix2.v21.Indicator
assert excinfo.value.prop_name == "created_by_ref"
def test_indicator_revoked_invalid():
with pytest.raises(stix2.exceptions.InvalidValueError) as excinfo:
stix2.v21.Indicator(revoked='no', **INDICATOR_KWARGS)
assert excinfo.value.cls == stix2.v21.Indicator
assert excinfo.value.prop_name == "revoked"
assert excinfo.value.reason == "must be a boolean value."
def test_cannot_assign_to_indicator_attributes(indicator):
with pytest.raises(stix2.exceptions.ImmutableError) as excinfo:
indicator.valid_from = dt.datetime.now()
assert str(excinfo.value) == "Cannot modify 'valid_from' property in 'Indicator' after creation."
def test_invalid_kwarg_to_indicator():
with pytest.raises(stix2.exceptions.ExtraPropertiesError) as excinfo:
stix2.v21.Indicator(my_custom_property="foo", **INDICATOR_KWARGS)
assert excinfo.value.cls == stix2.v21.Indicator
assert excinfo.value.properties == ['my_custom_property']
assert str(excinfo.value) == "Unexpected properties for Indicator: (my_custom_property)."
def test_created_modified_time_are_identical_by_default():
"""By default, the created and modified times should be the same."""
ind = stix2.v21.Indicator(**INDICATOR_KWARGS)
assert ind.created == ind.modified
@pytest.mark.parametrize(
"data", [
EXPECTED_INDICATOR,
{
"type": "indicator",
"id": INDICATOR_ID,
"created": "2017-01-01T00:00:01Z",
"modified": "2017-01-01T00:00:01Z",
"pattern": "[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']",
"pattern_type": "stix",
"valid_from": "1970-01-01T00:00:01Z",
},
],
)
def test_parse_indicator(data):
idctr = stix2.parse(data, version="2.1")
assert idctr.type == 'indicator'
assert idctr.spec_version == '2.1'
assert idctr.id == INDICATOR_ID
assert idctr.created == dt.datetime(2017, 1, 1, 0, 0, 1, tzinfo=pytz.utc)
assert idctr.modified == dt.datetime(2017, 1, 1, 0, 0, 1, tzinfo=pytz.utc)
assert idctr.valid_from == dt.datetime(1970, 1, 1, 0, 0, 1, tzinfo=pytz.utc)
assert idctr.pattern == "[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']"
def test_invalid_indicator_pattern():
with pytest.raises(stix2.exceptions.InvalidValueError) as excinfo:
stix2.v21.Indicator(
indicator_types=['malicious-activity'],
pattern="file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e'",
pattern_type="stix",
valid_from="2017-01-01T12:34:56Z",
)
assert excinfo.value.cls == stix2.v21.Indicator
assert excinfo.value.prop_name == 'pattern'
assert 'input is missing square brackets' in excinfo.value.reason
with pytest.raises(stix2.exceptions.InvalidValueError) as excinfo:
stix2.v21.Indicator(
indicator_types=['malicious-activity'],
pattern='[file:hashes.MD5 = "d41d8cd98f00b204e9800998ecf8427e"]',
pattern_type="stix",
valid_from="2017-01-01T12:34:56Z",
)
assert excinfo.value.cls == stix2.v21.Indicator
assert excinfo.value.prop_name == 'pattern'
assert 'mismatched input' in excinfo.value.reason
def test_indicator_with_custom_embedded_objs():
now = dt.datetime(2017, 1, 1, 0, 0, 1, tzinfo=pytz.utc)
epoch = dt.datetime(1970, 1, 1, 0, 0, 1, tzinfo=pytz.utc)
ext_ref = stix2.v21.ExternalReference(
source_name="Test",
description="Example Custom Ext Ref",
random_custom_prop="This is a custom property",
allow_custom=True,
)
ind = stix2.v21.Indicator(
type="indicator",
id=INDICATOR_ID,
created=now,
modified=now,
pattern="[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']",
pattern_type="stix",
valid_from=epoch,
indicator_types=['malicious-activity'],
external_references=[ext_ref],
)
assert ind.indicator_types == ['malicious-activity']
assert len(ind.external_references) == 1
assert ind.external_references[0] == ext_ref
def test_indicator_with_custom_embed_objs_extra_props_error():
ext_ref = stix2.v21.ExternalReference(
source_name="Test",
description="Example Custom Ext Ref",
random_custom_prop="This is a custom property",
allow_custom=True,
)
with pytest.raises(stix2.exceptions.ExtraPropertiesError) as excinfo:
stix2.v21.Indicator(external_references=[ext_ref], bad_custom_prop="shouldn't be here", **INDICATOR_KWARGS)
assert excinfo.value.cls == stix2.v21.Indicator
assert excinfo.value.properties == ['bad_custom_prop']
assert str(excinfo.value) == "Unexpected properties for Indicator: (bad_custom_prop)."
def test_indicator_stix20_invalid_pattern():
now = dt.datetime(2017, 1, 1, 0, 0, 1, tzinfo=pytz.utc)
epoch = dt.datetime(1970, 1, 1, 0, 0, 1, tzinfo=pytz.utc)
patrn = "[win-registry-key:key = 'hkey_local_machine\\\\foo\\\\bar'] WITHIN 5 SECONDS WITHIN 6 SECONDS"
with pytest.raises(stix2.exceptions.InvalidValueError) as excinfo:
stix2.v21.Indicator(
type="indicator",
id=INDICATOR_ID,
created=now,
modified=now,
pattern=patrn,
pattern_type="stix",
valid_from=epoch,
indicator_types=['malicious-activity'],
)
assert excinfo.value.cls == stix2.v21.Indicator
assert "FAIL: Duplicate qualifier type encountered: WITHIN" in str(excinfo.value)
ind = stix2.v21.Indicator(
type="indicator",
id=INDICATOR_ID,
created=now,
modified=now,
pattern=patrn,
pattern_type="stix",
pattern_version="2.0",
valid_from=epoch,
indicator_types=['malicious-activity'],
)
assert ind.id == INDICATOR_ID
assert ind.indicator_types == ['malicious-activity']
assert ind.pattern == patrn
assert ind.pattern_type == "stix"
assert ind.pattern_version == "2.0"
``` |
{
"source": "2xyo/cti-stix-elevator",
"score": 2
} |
#### File: cti-stix-elevator/stix2elevator/common.py
```python
from stix2elevator.options import warn
SOCKET_OPTIONS = [
"ip_multicast_if",
"ip_multicast_if2",
"ip_multicast_loop",
"ip_tos",
"so_broadcast",
"so_conditional_accept",
"so_keepalive",
"so_dontroute",
"so_linger",
"so_dontlinger",
"so_oobinline",
"so_rcvbuf",
"so_group_priority",
"so_reuseaddr",
"so_debug",
"so_rcvtimeo",
"so_sndbuf",
"so_sndtimeo",
"so_update_accept_context",
"so_timeout",
"tcp_nodelay"
]
ADDRESS_FAMILY_ENUMERATION = [
"AF_UNSPEC",
"AF_INET",
"AF_IPX",
"AF_APPLETALK",
"AF_NETBIOS",
"AF_INET6",
"AF_IRDA",
"AF_BTH",
]
PDF_DOC_INFO = [
"author",
"creationdate",
"creator",
"keywords",
"producer",
"moddate",
"subject",
"trapped"
]
PDF_DOC_INFO_DICT = {
"author": "Author",
"creationdate": "CreationDate",
"creator": "Creator",
"keywords": "Keywords",
"producer": "Producer",
"moddate": "ModDate",
"subject": "Subject",
"trapped": "Trapped"
}
def determine_socket_address_direction(sock_add_1x, obj1x_id):
if sock_add_1x.ip_address:
if sock_add_1x.ip_address.is_destination and not sock_add_1x.ip_address.is_source:
return "dst"
elif sock_add_1x.ip_address.is_source and not sock_add_1x.ip_address.is_destination:
return "src"
else:
# ((sock_add_1x.ip_address.is_destination and sock_add_1x.ip_address.is_source) or
# (not sock_add_1x.ip_address.is_destination and not sock_add_1x.ip_address.is_source)):
warn("Address direction in %s is inconsistent, using 'src'", 614, obj1x_id)
return "src"
else:
warn("Address direction in %s is not provided, using 'src'", 636, obj1x_id)
return "src"
```
#### File: cti-stix-elevator/stix2elevator/confidence.py
```python
from math import ceil
import sys
# external
from six import text_type
# internal
from stix2elevator.options import warn
if sys.version_info > (3,):
long = int
_NONE_LOW_MED_HIGH = {
"None": 0,
"Low": 15,
"Medium": 50, # from xsi:type="stixVocabs:HighMediumLowVocab-1.0"
"Med": 50,
"High": 85
}
_ADMIRALTY_CREDIBILITY = {
"6 - Truth cannot be judged": None,
"5 - Improbable": 10,
"4 - Doubtful": 30,
"3 - Possibly True": 50,
"2 - Probably True": 70,
"1 - Confirmed by other sources": 90,
}
_WEP = {
"Impossible": 0,
"Highly Unlikely/Almost Certainly Not": 10,
"Unlikely/Probably Not": 30,
"Even Chance": 50,
"Likely/Probable": 70,
"Highly likely/Almost Certain": 90,
"Certain": 100
}
_DNI = {
"Almost No Chance / Remote": 5,
"Very Unlikely / Highly Improbable": 15,
"Unlikely / Improbable": 30,
"Roughly Even Chance / Roughly Even Odds": 50,
"Likely / Probable": 70,
"Very Likely / Highly Probable": 85,
"Almost Certain / Nearly Certain": 95
}
def convert_confidence_string(value):
if value in _NONE_LOW_MED_HIGH:
# check xsi:type?
return _NONE_LOW_MED_HIGH[value]
elif value in _ADMIRALTY_CREDIBILITY:
return _ADMIRALTY_CREDIBILITY[value]
elif value in _WEP:
return _WEP[value]
elif value in _DNI:
return _DNI[value]
else:
warn(
"The confidence value %s is not found on one of the confidence scales from the specification. No confidence can be inferred",
430, value)
return None
def convert_numeric_string(value):
if value.find(".") == -1:
return int(value)
else:
return float(value)
def convert_confidence_value(value, id_of_sdo):
if isinstance(value, (int, long)):
# look for percentage?
if value < 0 or value > 100:
warn(
"The confidence value %s is not between 0 and 100, which is required for STIX 2.1. No confidence can be inferred",
431, value)
return None
else:
warn("The confidence value %s assumed to be a value on a scale between 0 and 100", 723, value)
confidentiality2_1_value = value
elif isinstance(value, float):
if value < 0 or value > 100:
warn(
"The confidence value %s is not between 0 and 100, which is required for STIX 2.1. No confidence can be inferred",
431, value)
return None
else:
warn("The confidence value %s in %s has been converted to an integer so it is valid in STIX 2.1", 724,
value, id_of_sdo)
confidentiality2_1_value = ceil(value)
elif isinstance(value, str):
value = text_type(value)
if value.isnumeric():
confidentiality2_1_value = convert_confidence_value(convert_numeric_string(value), id_of_sdo)
else:
confidentiality2_1_value = convert_confidence_string(value)
elif isinstance(value, object):
confidentiality2_1_value = convert_confidence_value(value.value, id_of_sdo)
else:
warn(
"The confidence value %s cannot be converted", 432, value)
return None
return confidentiality2_1_value
def convert_confidence(confidence1x, id_of_sdo):
# should confidence description be included in a note or opinion?
return convert_confidence_value(confidence1x.value, id_of_sdo)
```
#### File: stix2elevator/test/test_utils.py
```python
import pytest
from stix.indicator import Indicator
# internal
from stix2elevator import convert_stix, utils
from stix2elevator.options import _convert_to_int_list
from stix2elevator.utils import Environment
def test_strftime_with_appropriate_fractional_seconds():
base_timestamp = "2017-03-29T05:05:05.555Z"
mili_expected_timestamp = "2017-03-29T05:05:05.555000Z"
milisecond_timestamp = utils.strftime_with_appropriate_fractional_seconds(base_timestamp, True)
assert base_timestamp == milisecond_timestamp
trunc_timestamp = utils.strftime_with_appropriate_fractional_seconds(base_timestamp, False)
assert mili_expected_timestamp == trunc_timestamp
def test_convert_timestamp_string():
# Create v1 and v2 indicator, test timestamp pre and post convert_timestamp_call
# Maybe take a v1 idiom
# child_timestamp = "2017-03-29T05:05:05.555Z"
parent_timestamp = "2017-03-29T05:09:09.999Z"
env = Environment(timestamp=parent_timestamp)
indicator = Indicator()
indicator_instance = convert_stix.create_basic_object("indicator", indicator, env)
assert indicator_instance is not None
@pytest.mark.parametrize("data", [
[123, 245, 344],
"123,245,344",
["123", "245", 344],
])
def test_convert_int_function(data):
assert _convert_to_int_list(data) == [123, 245, 344]
@pytest.mark.parametrize("data", [
"12 3,245,344",
"212,garbage,33",
"definitely-not,an_int",
234,
])
def test_convert_int_function_bad(data):
with pytest.raises((RuntimeError, ValueError)):
_convert_to_int_list(data)
``` |
{
"source": "2xyo/faup",
"score": 3
} |
#### File: examples/multithreads/thread_faup.py
```python
import threading
class ThreadFaup(threading.Thread):
def __init__(self,list_url,f):
self.list_url=list_url
threading.Thread.__init__(self)
self.f=f
def run(self):
for url in self.list_url:
self.f.decode(url)
print self.f.get()
```
#### File: python/pyfaup/faup.py
```python
import sys
import chardet
from .functions import *
class UrlNotDecoded(Exception):
pass
class Faup(object):
"""
Faup Python Library
"""
def __init__(self):
self.options = faup_options_new()
self.fh = faup_init(self.options)
self.decoded = False
self.retval = {}
self.python_ver = sys.version_info.major
def __del__(self):
faup_terminate(self.fh)
faup_options_free(self.options)
def decode_str(self, string):
if self.python_ver >= 3:
if string:
return string.decode("utf-8")
return string
def decode(self, url):
"""
This function creates a dict of all the url fields.
:param url: The URL to normalize
"""
self._url = None
if self.python_ver >= 3:
self._url = bytes(url.encode('utf-8'))
else:
self._url = bytes(url)
self._url = faup_decode(self.fh, self._url, len(self._url))
self.decoded = True
self.retval = {}
@property
def url(self):
return self._url
def get_version(self):
return faup_get_version()
def _get_param_from_pos_and_size(self, pos, size):
if pos < 0:
return None
else:
return self._url[pos:(pos+size)]
def get_scheme(self):
"""
Get the scheme of the url given in the decode function
:returns: The URL scheme
"""
if not self.decoded:
raise UrlNotDecoded("You must call faup.decode() first")
pos = faup_get_scheme_pos(self.fh)
size = faup_get_scheme_size(self.fh)
return self.decode_str(self._get_param_from_pos_and_size(pos, size))
def get_credential(self):
if not self.decoded:
raise UrlNotDecoded("You must call faup.decode() first")
pos = faup_get_credential_pos(self.fh)
size = faup_get_credential_size(self.fh)
return self.decode_str(self._get_param_from_pos_and_size(pos, size))
def get_subdomain(self):
if not self.decoded:
raise UrlNotDecoded("You must call faup.decode() first")
pos = faup_get_subdomain_pos(self.fh)
size = faup_get_subdomain_size(self.fh)
return self.decode_str(self._get_param_from_pos_and_size(pos, size))
def get_domain(self):
if not self.decoded:
raise UrlNotDecoded("You must call faup.decode() first")
pos = faup_get_domain_pos(self.fh)
size = faup_get_domain_size(self.fh)
return self.decode_str(self._get_param_from_pos_and_size(pos, size))
def get_domain_without_tld(self):
if not self.decoded:
raise UrlNotDecoded("You must call faup.decode() first")
pos = faup_get_domain_without_tld_pos(self.fh)
size = faup_get_domain_without_tld_size(self.fh)
return self.decode_str(self._get_param_from_pos_and_size(pos, size))
def get_host(self):
if not self.decoded:
raise UrlNotDecoded("You must call faup.decode() first")
pos = faup_get_host_pos(self.fh)
size = faup_get_host_size(self.fh)
return self.decode_str(self._get_param_from_pos_and_size(pos, size))
def get_tld(self):
if not self.decoded:
raise UrlNotDecoded("You must call faup.decode() first")
pos = faup_get_tld_pos(self.fh)
size = faup_get_tld_size(self.fh)
return self.decode_str(self._get_param_from_pos_and_size(pos, size))
def get_port(self):
if not self.decoded:
raise UrlNotDecoded("You must call faup.decode() first")
pos = faup_get_port_pos(self.fh)
size = faup_get_port_size(self.fh)
return self.decode_str(self._get_param_from_pos_and_size(pos, size))
def get_resource_path(self):
if not self.decoded:
raise UrlNotDecoded("You must call faup.decode() first")
pos = faup_get_resource_path_pos(self.fh)
size = faup_get_resource_path_size(self.fh)
return self.decode_str(self._get_param_from_pos_and_size(pos, size))
def get_query_string(self):
if not self.decoded:
raise UrlNotDecoded("You must call faup.decode() first")
pos = faup_get_query_string_pos(self.fh)
size = faup_get_query_string_size(self.fh)
return self.decode_str(self._get_param_from_pos_and_size(pos, size))
def get_fragment(self):
if not self.decoded:
raise UrlNotDecoded("You must call faup.decode() first")
pos = faup_get_fragment_pos(self.fh)
size = faup_get_fragment_size(self.fh)
return self.decode_str(self._get_param_from_pos_and_size(pos, size))
def get(self):
self.retval["scheme"] = self.get_scheme()
self.retval["tld"] = self.get_tld()
self.retval["domain"] = self.get_domain()
self.retval["domain_without_tld"] = self.get_domain_without_tld()
self.retval["subdomain"] = self.get_subdomain()
self.retval["host"] = self.get_host()
self.retval["port"] = self.get_port()
self.retval["resource_path"] = self.get_resource_path()
self.retval["query_string"] = self.get_query_string()
self.retval["fragment"] = self.get_fragment()
self.retval["url"] = self.url
return self.retval
```
#### File: tests/benchs/test_regex.py
```python
import sys
import re
regex_1 = "^(http|https|ftp)\://([a-zA-Z0-9\.\-]+(\:[a-zA-Z0-9\.&%\$\-]+)*@)*((25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])|localhost|([a-zA-Z0-9\-]+\.)*[a-zA-Z0-9\-]+\.(com|edu|gov|int|mil|net|org|biz|arpa|info|name|pro|aero|coop|museum|[a-zA-Z]{2}))(\:[0-9]+)*(/($|[a-zA-Z0-9\.\,\?\'\\\+&%\$#\=~_\-]+))*$"
def test_regex(urls_file):
success = 0
failures = 0
pcre = re.compile(regex_1)
for line in urls_file.readlines():
m = pcre.match(line)
if m:
success += 1
# print(m.group(4))
else:
# print("failure with '%s'" % (line))
failures += 1
print("Success:%d;Failures:%d" % (success, failures))
if __name__ == "__main__":
urls_file = open(sys.argv[1], "r")
test_regex(urls_file)
urls_file.close()
```
#### File: tests/benchs/test_urllib.py
```python
import sys
import re
from urllib.parse import urlparse
def test_urllib(urls_file):
for line in urls_file.readlines():
o = urlparse(line)
if __name__ == "__main__":
urls_file = open(sys.argv[1], "r")
test_urllib(urls_file)
urls_file.close()
```
#### File: faup/bin/faup.py
```python
import csv
import sys
import os
import json
import logging, logging.handlers
import platform
#import subprocess
import envoy
import pprint, StringIO
def where_is_faup():
if platform.system() == "Darwin":
return os.environ['SPLUNK_HOME'] + "/etc/apps/faup/opt/faup-darwin"
if platform.system() == "Linux":
return os.environ['SPLUNK_HOME'] + "/etc/apps/faup/opt/faup-linux"
# I don't know, so let's trust the system
return "faup"
faup_bin = where_is_faup()
def setup_logger():
"""
Setup a logger for our lookup
"""
logger = logging.getLogger('faup')
logger.setLevel(logging.DEBUG)
file_handler = logging.handlers.RotatingFileHandler(os.environ['SPLUNK_HOME'] + '/var/log/splunk/faup.log' )
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
def run_faup(logger, url_value):
url_value.replace("'", "''")
url_value.replace('"', '""')
url_value.decode('utf-8', 'ignore')
# known bug:
# urls with '\xAA' pattern in it.
run_command = 'echo -n "%s" |%s -o json' % (url_value, faup_bin)
try:
faup_r = envoy.run(run_command)
json_from_faup = faup_r.std_out
except:
logger.info("Error running the command (url=[%s]: %s" % (url_value, run_command))
return None
if not faup_r.std_err == "":
logger.info("faup_r.std_err=%s" % (faup_r.std_err))
try:
json_tree = json.loads(json_from_faup)
json_modified = {}
for k, v in json_tree.iteritems():
json_modified["url_" + k] = v
#logger.info("json=%s" % json_modified)
return json_modified
except:
logger.info("Error loading Json (url=[%s]): %s" % (url_value, json_from_faup))
return None
# Faup output fields
# scheme credential subdomain domain domain_without_tld host tld port resource_path query_string fragment
def main():
logger = setup_logger()
if( len(sys.argv) != 2 ):
print "Usage: python faup.py url"
sys.exit(0)
header = [
'url', 'url_scheme', 'url_credential', 'url_subdomain', 'url_domain',
'url_domain_without_tld', 'url_host', 'url_tld', 'url_port', 'url_resource_path',
'url_query_string', 'url_fragment'
]
csv_in = csv.DictReader(sys.stdin) # automatically use the first line as header
csv_out = csv.DictWriter(sys.stdout, header)
# write header
csv_out.writerow(dict(zip(header,header)))
for row in csv_in:
json_res = run_faup(logger, row['url'])
if json_res:
row.update(json_res)
csv_out.writerow(row)
if __name__ == "__main__":
main()
``` |
{
"source": "2xyo/msticpy",
"score": 3
} |
#### File: msticpy/nbtools/data_viewer.py
```python
from collections import namedtuple
from typing import Dict, List, Union
import ipywidgets as widgets
import pandas as pd
from bokeh.io import push_notebook, show, output_notebook
from bokeh.models import (
BooleanFilter,
CDSView,
ColumnDataSource,
DataTable,
DateFormatter,
TableColumn,
)
from IPython.display import display
from . import nbwidgets
from .._version import VERSION
__version__ = VERSION
__author__ = "<NAME>"
FilterExpr = namedtuple("FilterExpr", "column, inv, operator, expr")
# pylint: disable=too-many-instance-attributes
class DataViewer:
"""Data viewer class."""
_DEF_HEIGHT = 550
def __init__(
self, data: pd.DataFrame, selected_cols: List[str] = None, debug=False
):
"""
Initialize the DataViewer class.
Parameters
----------
data : pd.DataFrame
The DataFrame to view
selected_cols : List[str], optional
Initial subset of columns to show, by default None (all cols)
debug : bool
Output additional debugging info to std out.
"""
if data.empty:
raise ValueError("No data available in 'data'")
output_notebook(hide_banner=True)
# Drop empty columns
data = data.dropna(axis="columns", how="all")
self.cds = ColumnDataSource(data)
self._columns = _get_cols_from_df(data)
self._dt_columns = list(self._columns.values())
self.data = data
self._debug = debug
self.nb_handle = None
self.data_table = DataTable(
source=self.cds,
columns=self._dt_columns,
view=CDSView(source=self.cds),
height=self._calc_df_height(data),
width_policy="max",
auto_edit=True,
editable=True,
reorderable=True,
)
self.column_chooser = DataTableColumnChooser(data, selected_cols=selected_cols)
self.data_filter = DataTableFilter(data)
if selected_cols is not None:
self._update_columns(btn=None)
self.column_chooser.apply_button.on_click(self._update_columns)
self.data_filter.apply_button.on_click(self._apply_filter)
self.accordion = widgets.Accordion(
children=[self.column_chooser.layout, self.data_filter.layout]
)
self.accordion.set_title(0, "Choose columns")
self.accordion.set_title(1, "Filter data")
self.accordion.selected_index = None
self.layout = self.accordion
@property
def filtered_data(self) -> pd.DataFrame:
"""Return filtered dataframe."""
return self.data_filter.filtered_dataframe[self.column_chooser.selected_columns]
@property
def filters(self) -> Dict[str, FilterExpr]:
"""Return current filters as a dict."""
return self.data_filter.filters
def import_filters(self, filters: Dict[str, FilterExpr]):
"""
Import filter set replacing current filters.
Parameters
----------
filters : Dict[str, FilterExpr]
dict of filter name, FilterExpr
FilterExpr is a tuple of:
column [str], inv [bool], operator [str], expr [str]
"""
self.data_filter.import_filters(filters)
self._apply_filter(btn=None)
def _calc_df_height(self, data):
df_height = 20 + (len(data) * 20)
return min(df_height, self._DEF_HEIGHT)
def show(self):
"""Display the data table control."""
if self._debug:
print("_update_data_table")
self.nb_handle = show(self.data_table, notebook_handle=True)
def _update_data_table(self):
if self._debug:
print("_update_data_table")
print(self.data_filter.filters)
print(len(self.filtered_data))
print(self.filtered_data.iloc[:2])
if self.nb_handle:
push_notebook(handle=self.nb_handle)
def display(self):
"""Display the widget."""
self.show()
display(self.layout)
def _ipython_display_(self):
"""Display in IPython."""
self.display()
def _update_columns(self, btn):
del btn
self.data_table.columns = self.column_chooser.datatable_columns
self._update_data_table()
def _apply_filter(self, btn):
del btn
if self._debug:
print("_apply_filter")
self.data_table.view = CDSView(
source=self.cds, filters=[BooleanFilter(self.data_filter.bool_filters)]
)
self.data_table.height = self._calc_df_height(
self.data_filter.filtered_dataframe
)
self._update_data_table()
class DataTableColumnChooser:
"""DataTableColumnChooser class."""
def __init__(self, data, selected_cols=None):
"""Initialize the DataTableColumnChooser class."""
self.data = data
self._all_col_names = list(data.columns)
self._initial_cols = selected_cols or self._all_col_names
self._col_select = nbwidgets.SelectSubset(
default_selected=self._initial_cols,
source_items=self._all_col_names,
auto_display=False,
)
self.apply_button = widgets.Button(description="Apply columns")
self.layout = widgets.VBox([self._col_select.layout, self.apply_button])
@property
def datatable_columns(self):
"""Return a list of Bokeh column definitions for the DataFrame."""
return list(_get_cols_from_df(self.dataframe_columns).values())
@property
def dataframe_columns(self):
"""Return the selected set of DataFrame columns."""
return self.data[self._reorder_cols(self.selected_columns)]
def _reorder_cols(self, columns):
"""Return column list in original order."""
# order the columns as originally specified (or as the DF)
col_init = [col for col in self._initial_cols if col in columns]
# If any new columns, add them to the end of the list
col_init.extend(list(set(columns) - set(col_init)))
return col_init
def display(self):
"""Display in IPython."""
display(self.layout)
def _ipython_display_(self):
"""Display in IPython."""
self.display()
@property
def selected_columns(self):
"""Return the selected columns."""
return self._reorder_cols(self._col_select.selected_items)
def _layout(width, height=None, desc_width=None, **kwargs):
"""Layout creation for widgets."""
wgt_dict = {}
lo_dict = {"width": width}
if height:
lo_dict["height"] = height
border = kwargs.pop("border", None)
if border:
lo_dict.update(
{
"border": "solid gray 1px",
"margin": "1pt",
"padding": "5pt",
}
)
wgt_dict["layout"] = widgets.Layout(**lo_dict)
style_dict = {}
if desc_width:
style_dict["description_width"] = desc_width
if kwargs:
style_dict.update(kwargs)
if style_dict:
wgt_dict["style"] = style_dict
return wgt_dict
class DataTableFilter:
"""Data filtering class."""
_OPERATORS = {
"string": ["==", "contains", "matches", "in", "between", "query"],
"other": ["==", ">", "<", ">=", "<=", "in", "between", "query"],
}
def __init__(self, data: pd.DataFrame):
"""Initialize the DataTableFilter class."""
self.all_cols = list(data.columns)
self.data = data
# Widgets
self._add_button = widgets.Button(description="Add filter")
self._del_button = widgets.Button(description="Delete filter")
self._upd_button = widgets.Button(description="Update filter")
self._clear_button = widgets.Button(description="Clear all filters")
self.apply_button = widgets.Button(description="Apply filter")
self._col_select = widgets.Dropdown(options=self.all_cols, **(_layout("200px")))
self._oper_sel = widgets.Dropdown(
options=self._col_operators(self.current_col), **(_layout("100px"))
)
self._not_cb = widgets.Checkbox(
description="not", value=False, **(_layout("60px", desc_width="initial"))
)
self._filter_value = widgets.Textarea(
description="Filter value", **(_layout("400px"))
)
self._curr_filters = widgets.Select(description="Filters", **(_layout("500px")))
self._oper_label = widgets.Label(" in ")
self.filters: Dict[str, FilterExpr] = {}
self._curr_filters.observe(self._select_filter, names="value")
self._col_select.observe(self._update_operators, names="value")
self._add_button.on_click(self._add_filter)
self._upd_button.on_click(self._update_filter)
self._del_button.on_click(self._del_filter)
self._clear_button.on_click(self._clear_filters)
filt_help_lbl = widgets.Label(
value="Enter multiple values separated by commas. Strings do not need quotes."
)
top_row = widgets.VBox(
[
filt_help_lbl,
widgets.HBox(
[
self._col_select,
self._not_cb,
self._oper_sel,
self._filter_value,
]
),
]
)
mid_row = widgets.HBox(
[
self._add_button,
self._upd_button,
]
)
curr_filt_lbl = widgets.Label(value="Current filters")
bottom_row = widgets.VBox(
[
curr_filt_lbl,
widgets.HBox(
[
self._curr_filters,
widgets.VBox([self._del_button, self._clear_button]),
]
),
],
**_layout(width="80%", border=True),
)
self.layout = widgets.VBox([top_row, mid_row, bottom_row, self.apply_button])
def display(self):
"""Display in IPython."""
display(self.layout)
def _ipython_display_(self):
"""Display in IPython."""
self.display()
def import_filters(self, filters: Dict[str, FilterExpr]):
"""
Replace the current filters with `filters`.
Parameters
----------
filters : Dict[str, FilterExpr]
dict of filter name, FilterExpr
FilterExpr is a tuple of:
column [str], inv [bool], operator [str], expr [str]
"""
self.filters = {
f_name: FilterExpr(*f_expr) for f_name, f_expr in filters.items()
}
self._curr_filters.options = list(filters.keys())
@property
def bool_filters(self):
"""Return current set of boolean filters."""
df_filt = None
for filt in self.filters.values():
new_filt = self._make_filter(
filt.column, filt.operator, filt.expr, filt.inv
)
new_filt = new_filt.values if isinstance(new_filt, pd.Series) else new_filt
df_filt = new_filt if df_filt is None else df_filt & new_filt
return df_filt if df_filt is not None else self.data.index.isin(self.data.index)
@property
def filtered_dataframe(self) -> pd.DataFrame:
"""Return current filtered DataFrame."""
return self.data[self.bool_filters]
def _select_filter(self, change):
filter_name = change["new"]
if not filter_name:
return
(
self._col_select.value,
self._not_cb.value,
self._oper_sel.value,
self._filter_value.value,
) = self.filters[filter_name]
def _update_operators(self, change):
del change
self._oper_sel.options = self._col_operators(self._col_select.value)
def _add_filter(self, btn):
del btn
if self._curr_filter_name in self.filters:
return
self.filters[self._curr_filter_name] = FilterExpr(
column=self._col_select.value,
inv=self._not_cb.value,
operator=self._oper_sel.value,
expr=self._filter_value.value,
)
curr_opts = list(self._curr_filters.options)
curr_opts.append(self._curr_filter_name)
self._curr_filters.options = curr_opts
def _update_filter(self, btn):
selected_filter = self._curr_filters.value
self._add_filter(btn)
if selected_filter in self.filters:
del self.filters[selected_filter]
self._curr_filters.options = list(self.filters.keys())
def _del_filter(self, btn):
del btn
selected_filter = self._curr_filters.value
if selected_filter in self.filters:
del self.filters[selected_filter]
self._curr_filters.options = list(self.filters.keys())
@property
def _curr_filter_name(self):
not_str = " not " if self._not_cb.value else ""
return (
f"{self._col_select.value} {not_str}{self._oper_sel.value}"
f" '{self._filter_value.value}'"
)
def _clear_filters(self, btn):
del btn
self.filters.clear()
self._curr_filters.options = []
@property
def current_col(self):
"""Return the currently selected column."""
return self._col_select.value
def _col_operators(self, col):
if pd.api.types.is_string_dtype(self.data[col]):
return self._OPERATORS["string"]
return self._OPERATORS["other"]
def _make_filter(self, col, operator, expr, not_true):
if not_true:
return ~self._create_filter(col, operator, expr)
return self._create_filter(col, operator, expr)
# pylint: disable=too-many-return-statements
def _create_filter(self, col: str, operator: str, expr: str) -> pd.Series:
if operator == "query":
return pd.Series(self.data.index.isin(self.data.query(expr).index))
if operator in ("in", "between"):
return self._filter_in_or_between(col, operator, expr)
test_expr = self._conv_expr_type(col, expr)
if operator == "==":
return self.data[col] == test_expr
if operator == "contains":
return self.data[col].str.contains(test_expr)
if operator == "matches":
return self.data[col].str.match(test_expr)
if operator == ">":
return self.data[col] > test_expr
if operator == ">=":
return self.data[col] >= test_expr
if operator == "<":
return self.data[col] < test_expr
if operator == "<=":
return self.data[col] >= test_expr
raise TypeError(
f"Unsupported operator for operator {operator} and column {col}"
)
def _filter_in_or_between(self, col: str, operator: str, expr: str) -> pd.Series:
"""Return filter for `in` and `between` operators."""
test_expr: List[Union[str, int, float]]
if pd.api.types.is_string_dtype(self.data[col]):
test_expr = [item.strip("\"' ") for item in expr.split(",")]
elif pd.api.types.is_numeric_dtype(self.data[col]):
test_expr = [
int(item) if "." not in item else float(item)
for item in expr.split(",")
]
elif pd.api.types.is_datetime64_any_dtype(self.data[col]):
test_expr = [pd.Timestamp(item.strip()) for item in expr.split(",")]
else:
raise TypeError(
f"Unsupported column type {self.data[col].dtype}",
f"for operator {operator} and column {col}",
)
if operator == "in":
return self.data[col].isin(test_expr)
if len(test_expr) != 2:
raise ValueError(
f"Must have two operands for expression {expr}",
f"for operator {operator} and column {col}",
)
return self.data[col].between(test_expr[0], test_expr[1], inclusive=True)
def _conv_expr_type(self, col: str, expr: str):
"""Convert string expression to required type."""
test_expr: Union[str, int, float]
if pd.api.types.is_numeric_dtype(self.data[col]):
test_expr = int(expr) if "." not in expr else float(expr)
elif pd.api.types.is_datetime64_any_dtype(self.data[col]):
test_expr = pd.Timestamp(expr.strip())
elif pd.api.types.is_string_dtype(self.data[col]):
test_expr = expr.strip("\"' ")
else:
raise TypeError(
f"Unsupported column type {self.data[col].dtype}",
f"for column {col}",
)
return test_expr
def _get_col_width(data, col):
if data[col].iloc[:10].dropna().empty:
return 8
if data[col].dtype == "O":
return int(data[col].iloc[:10].str.len().mean())
if pd.api.types.is_datetime64_any_dtype(data[col]):
return 50
return 8
def _get_cols_from_df(data):
"""Get list of TableColumn columns from DataFrame."""
# save the existing column order
col_order = data.columns
dt_cols = data.select_dtypes("datetime").columns
columns = {
col: TableColumn(field=col, title=col, width=_get_col_width(data, col))
for col in data.columns
if col not in dt_cols
}
date_fmt = "%F %T"
dt_columns = {
col: TableColumn(
field=col,
title=col,
formatter=DateFormatter(format=date_fmt),
width=_get_col_width(data, col),
)
for col in dt_cols
}
columns.update(dt_columns)
return {col: columns[col] for col in col_order}
```
#### File: msticpy/nbtools/nbdisplay.py
```python
from typing import Any, Mapping, Union, Tuple, List
import networkx as nx
import pandas as pd
from bokeh.io import output_notebook
from bokeh.plotting import figure, from_networkx, show
from bokeh.models import Circle, HoverTool, Label
from deprecated.sphinx import deprecated
import IPython
from IPython.core.display import HTML, display
from IPython.display import Javascript
from .._version import VERSION
from .security_alert import SecurityAlert
# pylint: disable=unused-import
from .timeline import display_timeline, display_timeline_values # noqa
from .process_tree import build_and_show_process_tree, plot_process_tree # noqa
# pylint: enable=unused-import
from ..common.utility import export
__version__ = VERSION
__author__ = "<NAME>"
@export
def display_alert(
alert: Union[Mapping[str, Any], SecurityAlert], show_entities: bool = False
):
"""
Display a Security Alert.
Parameters
----------
alert : Union[Mapping[str, Any], SecurityAlert]
The alert to display as Mapping (e.g. pd.Series)
or SecurityAlert
show_entities : bool, optional
Whether to display entities (the default is False)
"""
output = format_alert(alert, show_entities)
if not isinstance(output, tuple):
output = [output]
for disp_obj in output:
display(disp_obj)
@export
def format_alert(
alert: Union[Mapping[str, Any], SecurityAlert], show_entities: bool = False
) -> Union[IPython.display.HTML, Tuple[IPython.display.HTML, pd.DataFrame]]:
"""
Get IPython displayable Security Alert.
Parameters
----------
alert : Union[Mapping[str, Any], SecurityAlert]
The alert to display as Mapping (e.g. pd.Series)
or SecurityAlert
show_entities : bool, optional
Whether to display entities (the default is False)
Returns
-------
Union[IPython.display.HTML, Tuple[IPython.display.HTML, pd.DataFrame]]
Single or tuple of displayable IPython objects
Raises
------
ValueError
If the alert object is in an unknown format
"""
if isinstance(alert, SecurityAlert):
return HTML(alert.to_html(show_entities=show_entities))
# Display subset of raw properties
if isinstance(alert, pd.Series):
entity = alert["CompromisedEntity"] if "CompromisedEntity" in alert else ""
title = f"""
<h3>Selected Alert: '{alert["AlertDisplayName"]}'</h3>
<b>Alert_time:</b> {alert["StartTimeUtc"]},
<b>Compr_entity:</b> {entity},
<b>Alert_id:</b> {alert["SystemAlertId"]}
<br/>
"""
return HTML(title), pd.DataFrame(alert)
raise ValueError("Unrecognized alert object type " + str(type(alert)))
@export
def display_process_tree(process_tree: pd.DataFrame):
"""
Display process tree data frame. (Deprecated).
Parameters
----------
process_tree : pd.DataFrame
Process tree DataFrame
The display module expects the columns NodeRole and Level to
be populated. NoteRole is one of: 'source', 'parent', 'child'
or 'sibling'. Level indicates the 'hop' distance from the 'source'
node.
"""
build_and_show_process_tree(process_tree)
@export
def exec_remaining_cells():
"""Execute all cells below currently selected cell."""
Javascript("Jupyter.notebook.execute_cells_below()")
@deprecated(
reason=(
"Matplotlib version 'draw_alert_entity_graph' "
"no longer supported - use 'plot_entity_graph'"
),
version="0.3.2",
)
@export
# pylint: disable=too-many-arguments
def draw_alert_entity_graph(
nx_graph: nx.Graph,
font_size: int = 12,
height: int = 8,
width: int = 8,
margin: float = 0.3,
scale: int = 1,
):
"""
Draw networkX graph with matplotlib.
Parameters
----------
nx_graph : nx.Graph
The NetworkX graph to draw
font_size : int, optional
base font size (the default is 12)
height : int, optional
Image height (the default is 8)
width : int, optional
Image width (the default is 8)
margin : float, optional
Image margin (the default is 0.3)
scale : int, optional
Position scale (the default is 1)
"""
del margin
return plot_entity_graph(
entity_graph=nx_graph,
font_size=font_size,
height=height * 100,
width=width * 100,
scale=scale * 2,
)
def plot_entity_graph(
entity_graph: nx.Graph,
node_size: int = 25,
font_size: Union[int, str] = 10,
height: int = 800,
width: int = 800,
scale: int = 2,
hide: bool = False,
) -> figure:
"""
Plot entity graph with Bokeh.
Parameters
----------
entity_graph : nx.Graph
The entity graph as a networkX graph
node_size : int, optional
Size of the nodes in pixels, by default 25
font_size : int, optional
Font size for node labels, by default 10
Can be an integer (point size) or a string (e.g. "10pt")
width : int, optional
Width in pixels, by default 800
height : int, optional
Image height (the default is 800)
scale : int, optional
Position scale (the default is 2)
hide : bool, optional
Don't show the plot, by default False. If True, just
return the figure.
Returns
-------
bokeh.plotting.figure
The network plot.
"""
output_notebook()
font_pnt = f"{font_size}pt" if isinstance(font_size, int) else font_size
node_attrs = {
node: attrs.get("color", "green")
for node, attrs in entity_graph.nodes(data=True)
}
nx.set_node_attributes(entity_graph, node_attrs, "node_color")
plot = figure(
title="Alert Entity graph",
x_range=(-3, 3),
y_range=(-3, 3),
width=width,
height=height,
)
plot.add_tools(
HoverTool(
tooltips=[
("node_type", "@node_type"),
("name", "@name"),
("description", "@description"),
]
)
)
graph_renderer = from_networkx(
entity_graph, nx.spring_layout, scale=scale, center=(0, 0)
)
graph_renderer.node_renderer.glyph = Circle(
size=node_size, fill_color="node_color", fill_alpha=0.5
)
# pylint: disable=no-member
plot.renderers.append(graph_renderer)
# Create labels
for name, pos in graph_renderer.layout_provider.graph_layout.items():
label = Label(
x=pos[0],
y=pos[1],
x_offset=5,
y_offset=5,
text=name,
text_font_size=font_pnt,
)
plot.add_layout(label)
# pylint: enable=no-member
if not hide:
show(plot)
return plot
# Constants for Windows logon
_WIN_LOGON_TYPE_MAP = {
0: "Unknown",
2: "Interactive",
3: "Network",
4: "Batch",
5: "Service",
7: "Unlock",
8: "NetworkCleartext",
9: "NewCredentials",
10: "RemoteInteractive",
11: "CachedInteractive",
}
_WINDOWS_SID = {
"S-1-0-0": "Null SID",
"S-1-5-18": "LOCAL_SYSTEM",
"S-1-5-19": "LOCAL_SERVICE",
"S-1-5-20": "NETWORK_SERVICE",
}
_ADMINISTRATOR_SID = "500"
_GUEST_SID = "501"
_DOM_OR_MACHINE_SID = "S-1-5-21"
@export
def display_logon_data(
logon_event: pd.DataFrame, alert: SecurityAlert = None, os_family: str = None
):
"""
Display logon data for one or more events as HTML table.
Parameters
----------
logon_event : pd.DataFrame
Dataframe containing one or more logon events
alert : SecurityAlert, optional
obtain os_family from the security alert
(the default is None)
os_family : str, optional
explicitly specify os_family (Linux or Windows)
(the default is None)
Notes
-----
Currently only Windows Logon events.
"""
display(format_logon(logon_event, alert, os_family))
@export
def format_logon(
logon_event: Union[pd.DataFrame, pd.Series],
alert: SecurityAlert = None,
os_family: str = None,
) -> IPython.display.HTML:
"""
Return logon data for one or more events as HTML table.
Parameters
----------
logon_event : Union[pd.DataFrame, pd.Series]
Dataframe containing one or more logon events
or Series containing a single logon event.
alert : SecurityAlert, optional
obtain os_family from the security alert
(the default is None)
os_family : str, optional
explicitly specify os_family (Linux or Windows)
(the default is None)
Returns
-------
IPython.display.HTML :
HTML display object
"""
if not os_family:
os_family = alert.os_family if alert else "Windows"
logon_output = []
if isinstance(logon_event, pd.DataFrame):
for _, logon_row in logon_event.iterrows():
logon_record = _fmt_single_row(logon_row, os_family)
logon_output.append(
"<tr class='cell_logon'><td class='cell_logon'>"
+ f"{'<br>'.join(logon_record)}</td></tr>"
)
elif isinstance(logon_event, pd.Series):
logon_record = _fmt_single_row(logon_event, os_family)
logon_output.append(
"<tr class='cell_logon'><td class='cell_logon'>"
+ f"{'<br>'.join(logon_record)}</td></tr>"
)
t_style = """
<style>
.table_logon {border-collapse: collapse; width: 50%;}
.cell_logon {border: 1px solid #ddd !important;
text-align: left !important; padding: 15px !important;}
</style>
"""
return HTML(f"{t_style}<table class='table_logon'>{''.join(logon_output)}</table>")
def _fmt_single_row(logon_row: pd.Series, os_family: str) -> List[str]:
"""Format a pandas series logon record."""
logon_record = []
logon_record.append(f"<b>Account: </b>{logon_row['TargetUserName']}")
logon_record.append(f"<b>Account Domain: </b>{logon_row['TargetDomainName']}")
logon_record.append(f"<b>Logon Time: </b>{logon_row['TimeGenerated']}")
if os_family == "Windows":
logon_type = logon_row["LogonType"]
logon_desc_idx = logon_type
if logon_type not in _WIN_LOGON_TYPE_MAP:
logon_desc_idx = 0
logon_record.append(
f"<b>Logon type: </b>{logon_type}"
+ f"({_WIN_LOGON_TYPE_MAP[logon_desc_idx]})"
)
account_id = logon_row.TargetUserSid
logon_record.append(f"<b>User Id/SID: </b>{account_id}")
if os_family == "Windows":
logon_record.extend(_format_sid_info(account_id))
else:
logon_record.append(f"<b>Audit user: </b>{logon_row['audit_user']}")
session_id = logon_row["TargetLogonId"]
sess_id = f"<b>Session id: </b>'{session_id}'"
if session_id in ["0x3e7", "-1"]:
sess_id += "System logon session"
logon_record.append("")
domain = logon_row["SubjectDomainName"]
if not domain:
subj_account = logon_row.SubjectUserName
else:
subj_account = f"{domain}/{logon_row.SubjectUserName}"
logon_record.append(f"<b>Subject (source) account: </b>{subj_account}")
logon_record.append(f"<b>Logon process: </b>{logon_row['LogonProcessName']}")
logon_record.append(
f"<b>Authentication: </b>{logon_row['AuthenticationPackageName']}"
)
logon_record.append(f"<b>Source IpAddress: </b>{logon_row['IpAddress']}")
logon_record.append(f"<b>Source Host: </b>{logon_row['WorkstationName']}")
logon_record.append(f"<b>Logon status: </b>{logon_row['Status']}")
logon_record.append("")
return logon_record
def _format_sid_info(sid):
sid_info = []
if not sid:
return sid_info
if sid in _WINDOWS_SID:
sid_info.append(f" SID {sid} is {_WINDOWS_SID[sid]}")
elif sid.endswith(_ADMINISTRATOR_SID):
sid_info.append(f" SID {sid} is administrator")
elif sid.endswith(_GUEST_SID):
sid_info.append(f" SID {sid} is guest")
if sid.startswith(_DOM_OR_MACHINE_SID):
sid_info.append(f" SID {sid} is local machine or domain account")
return sid_info
```
#### File: tests/common/test_azure_auth_core.py
```python
import pytest
import pytest_check as check
from msrestazure import azure_cloud
from msticpy.common.azure_auth_core import AzureCloudConfig, default_auth_methods
from ..unit_test_lib import custom_mp_config, get_test_data_path
__author__ = "<NAME>"
# pylint: disable=redefined-outer-name
@pytest.fixture(scope="module")
def mp_config_file():
"""Fixture_docstring."""
return get_test_data_path().joinpath("msticpyconfig.yaml")
def test_default_auth_methods(mp_config_file):
"""Test default auth methods function."""
with custom_mp_config(mp_config_file):
check.is_in("env", default_auth_methods())
check.is_in("msi", default_auth_methods())
check.is_in("cli", default_auth_methods())
check.is_in("interactive", default_auth_methods())
def test_azure_cloud_config(mp_config_file):
"""Test the Azure cloud config."""
with custom_mp_config(mp_config_file):
az_config = AzureCloudConfig()
check.equal(az_config.cloud, "global")
check.is_in("env", az_config.auth_methods)
check.is_in("msi", az_config.auth_methods)
check.is_in("cli", az_config.auth_methods)
check.is_in("interactive", az_config.auth_methods)
glob_rm_uri = azure_cloud.AZURE_PUBLIC_CLOUD.endpoints.resource_manager
check.equal(f"{glob_rm_uri}.default", az_config.token_uri)
``` |
{
"source": "2xyo/stix-shifter",
"score": 2
} |
#### File: stix_shifter_modules/cloudsql/entry_point.py
```python
from stix_shifter_utils.utils.base_entry_point import BaseEntryPoint
import json
class EntryPoint(BaseEntryPoint):
def __init__(self, connection={}, configuration={}, options={}):
super().__init__(options)
if connection:
self.setup_transmission_simple(connection, configuration)
``` |
{
"source": "2xyo/tram-1",
"score": 2
} |
#### File: management/commands/attackdata.py
```python
import json
from django.conf import settings
from django.core.management.base import BaseCommand
from tram.models import AttackTechnique
LOAD = 'load'
CLEAR = 'clear'
class Command(BaseCommand):
help = 'Machine learning pipeline commands'
def add_arguments(self, parser):
sp = parser.add_subparsers(title='subcommands',
dest='subcommand',
required=True)
sp_load = sp.add_parser(LOAD, help='Load ATT&CK Data into the Database') # noqa: F841
sp_clear = sp.add_parser(CLEAR, help='Clear ATT&CK Data from the Database') # noqa: F841
def clear_attack_data(self):
AttackTechnique.objects.all().delete()
def load_attack_data(self, filepath):
num_revoked = 0
num_saved = 0
with open(filepath, 'r') as f:
attack_json = json.load(f)
assert attack_json['spec_version'] == '2.0'
assert attack_json['type'] == 'bundle'
for obj in attack_json['objects']:
if obj.get('revoked', False): # Skip revoked objects
num_revoked += 1
continue
if obj['type'] != 'attack-pattern': # Skip non-attack patterns
continue
t = AttackTechnique()
t.name = obj['name']
t.stix_id = obj['id']
for external_reference in obj['external_references']:
if external_reference['source_name'] not in ('mitre-attack', 'mitre-pre-attack', 'mitre-mobile-attack'):
continue
t.attack_id = external_reference['external_id']
t.attack_url = external_reference['url']
t.matrix = external_reference['source_name']
assert t.attack_id is not None
assert t.attack_url is not None
assert t.matrix is not None
t.save()
num_saved += 1
def handle(self, *args, **options):
subcommand = options['subcommand']
if subcommand == LOAD:
self.load_attack_data(settings.DATA_DIRECTORY / 'attack/enterprise-attack.json')
self.load_attack_data(settings.DATA_DIRECTORY / 'attack/mobile-attack.json')
self.load_attack_data(settings.DATA_DIRECTORY / 'attack/pre-attack.json')
elif subcommand == CLEAR:
self.clear_attack_data()
``` |
{
"source": "2xyo/vt-py",
"score": 3
} |
#### File: vt-py/examples/url_feed.py
```python
import argparse
import json
import os
import vt
def main():
parser = argparse.ArgumentParser(
description='Get URLs from the VirusTotal feed. '
'For each file in the feed a <url_id>.json file is created in the output '
'directory containing information about the file.')
parser.add_argument('--apikey',
required=True, help='your VirusTotal API key')
parser.add_argument('--output',
default='./url-feed', help='path to output directory')
parser.add_argument('--cursor',
required=False,
help='cursor indicating where to start')
args = parser.parse_args()
if not os.path.exists(args.output):
os.makedirs(args.output)
with vt.Client(args.apikey) as client:
# Iterate over the file feed, one file at a time. This loop doesn't
# finish, when the feed is consumed it will keep waiting for more files.
for url_obj in client.feed(vt.FeedType.URLS, cursor=args.cursor):
# Write the file's metadata into a JSON-encoded file.
url_path = os.path.join(args.output, url_obj.id)
with open(url_path + '.json', mode='w') as f:
f.write(json.dumps(url_obj.to_dict()))
print(url_obj.id)
if __name__ == '__main__':
main()
``` |
{
"source": "2yangk23/Plex-Scanner-Logger",
"score": 2
} |
#### File: 2yangk23/Plex-Scanner-Logger/Logger.py
```python
import os
import sys
import inspect
import traceback
import logging
from logging.handlers import RotatingFileHandler
# Set Plex root directory 2 levels above Scanners/SCANNER_TYPE
PLEX_ROOT = os.path.abspath(os.path.join(os.path.dirname(
inspect.getfile(inspect.currentframe())), "..", ".."))
PLEX_LOGS = os.path.join(PLEX_ROOT, 'Logs')
# Logger class imitates LogKit from Plex Plug-in Framework to be used with scanners
class Logger():
def __init__(self, filename, logLevel=logging.DEBUG):
handler = RotatingFileHandler(
os.path.join(PLEX_LOGS, filename + '.log'),
mode='w',
maxBytes=1024 * 1024, # 1MB
backupCount=5
)
formatter = logging.Formatter(
'%(asctime)s %(levelname)s - %(message)s')
handler.setFormatter(formatter)
self.logger = logging.getLogger(filename)
self.logger.addHandler(handler)
self.logger.setLevel(logLevel)
def Debug(self, msg, *args, **kwargs):
self.logger.debug(msg, *args, **kwargs)
def Info(self, msg, *args, **kwargs):
self.logger.info(msg, *args, **kwargs)
def Warn(self, msg, *args, **kwargs):
self.logger.warn(msg, *args, **kwargs)
def Error(self, msg, *args, **kwargs):
self.logger.error(msg, *args, **kwargs)
def Critical(self, msg, *args, **kwargs):
self.logger.critical(msg, *args, **kwargs)
def Exception(self, msg, *args, **kwargs):
self.logger.exception(msg, *args, **kwargs)
def Stack(self):
stack = ''
lines = traceback.format_stack()[3:-3]
for line in lines:
if sys.prefix not in line:
stack += ' %s\n' % line.strip()
self.Debug("Current stack:\n" + stack)
def __call__(self, msg, *args, **kwargs):
self.Info(msg, *args, **kwargs)
``` |
{
"source": "2yar2/Combin",
"score": 2
} |
#### File: 2yar2/Combin/test_combin_main_page.py
```python
from pages.product_page import ProductPage
from selenium import webdriver
import conftest
class TestUserWhatIsIncluded:
def test_open_page(self, browser):
product_page = ProductPage(browser, 'https://www.combin.com/product/instagram-growth/')
product_page.open()
product_page.cookies_close()
product_page.click_to_growth()
product_page.click_to_advanced()
product_page.click_to_gender()
product_page.click_to_machine()
product_page.click_to_audience()
product_page.click_to_repetitive()
product_page.click_to_multiple()
``` |
{
"source": "2ykwang/django-import-export",
"score": 3
} |
#### File: core/tests/test_mixins.py
```python
from unittest import mock
from unittest.mock import MagicMock
from core.models import Book, Category
from django.http import HttpRequest
from django.test.testcases import TestCase
from django.urls import reverse
from import_export import admin, formats, forms, mixins
class ExportViewMixinTest(TestCase):
class TestExportForm(forms.ExportForm):
cleaned_data = dict()
def setUp(self):
self.url = reverse('export-category')
self.cat1 = Category.objects.create(name='Cat 1')
self.cat2 = Category.objects.create(name='Cat 2')
self.form = ExportViewMixinTest.TestExportForm(formats.base_formats.DEFAULT_FORMATS)
self.form.cleaned_data["file_format"] = "0"
def test_get(self):
response = self.client.get(self.url)
self.assertContains(response, self.cat1.name, status_code=200)
self.assertEqual(response['Content-Type'], 'text/html; charset=utf-8')
def test_post(self):
data = {
'file_format': '0',
}
response = self.client.post(self.url, data)
self.assertContains(response, self.cat1.name, status_code=200)
self.assertTrue(response.has_header("Content-Disposition"))
self.assertEqual(response['Content-Type'], 'text/csv')
def test_get_response_raises_TypeError_when_content_type_kwarg_used(self):
"""
Test that HttpResponse is instantiated using the correct kwarg.
"""
content_type = "text/csv"
class TestMixin(mixins.ExportViewFormMixin):
def __init__(self):
self.model = MagicMock()
self.request = MagicMock(spec=HttpRequest)
self.model.__name__ = "mockModel"
def get_queryset(self):
return MagicMock()
m = TestMixin()
with mock.patch("import_export.mixins.HttpResponse") as mock_http_response:
# on first instantiation, raise TypeError, on second, return mock
mock_http_response.side_effect = [TypeError(), mock_http_response]
m.form_valid(self.form)
self.assertEqual(content_type, mock_http_response.call_args_list[0][1]["content_type"])
self.assertEqual(content_type, mock_http_response.call_args_list[1][1]["mimetype"])
def test_implements_get_filterset(self):
"""
test that if the class-under-test defines a get_filterset()
method, then this is called as required.
"""
class TestMixin(mixins.ExportViewFormMixin):
mock_get_filterset_call_count = 0
mock_get_filterset_class_call_count = 0
def __init__(self):
self.model = MagicMock()
self.request = MagicMock(spec=HttpRequest)
self.model.__name__ = "mockModel"
def get_filterset(self, filterset_class):
self.mock_get_filterset_call_count += 1
return MagicMock()
def get_filterset_class(self):
self.mock_get_filterset_class_call_count += 1
return MagicMock()
m = TestMixin()
res = m.form_valid(self.form)
self.assertEqual(200, res.status_code)
self.assertEqual(1, m.mock_get_filterset_call_count)
self.assertEqual(1, m.mock_get_filterset_class_call_count)
class BaseImportMixinTest(TestCase):
def test_get_import_formats(self):
class Format(object):
def __init__(self, id, can_import):
self.id = id
self.val = can_import
def can_import(self):
return self.val
class CanImportFormat(Format):
def __init__(self):
super().__init__(1, True)
class CannotImportFormat(Format):
def __init__(self):
super().__init__(2, False)
m = mixins.BaseImportMixin()
m.formats = [CanImportFormat, CannotImportFormat]
formats = m.get_import_formats()
self.assertEqual(1, len(formats))
self.assertEqual('CanImportFormat', formats[0].__name__)
class MixinModelAdminTest(TestCase):
"""
Tests for regression where methods in ModelAdmin with BaseImportMixin / BaseExportMixin
do not get called.
see #1315.
"""
request = MagicMock(spec=HttpRequest)
class BaseImportModelAdminTest(mixins.BaseImportMixin):
call_count = 0
def get_resource_class(self):
self.call_count += 1
def get_resource_kwargs(self, request, *args, **kwargs):
self.call_count += 1
class BaseExportModelAdminTest(mixins.BaseExportMixin):
call_count = 0
def get_resource_class(self):
self.call_count += 1
def get_resource_kwargs(self, request, *args, **kwargs):
self.call_count += 1
def test_get_import_resource_class_calls_self_get_resource_class(self):
admin = self.BaseImportModelAdminTest()
admin.get_import_resource_class()
self.assertEqual(1, admin.call_count)
def test_get_import_resource_kwargs_calls_self_get_resource_kwargs(self):
admin = self.BaseImportModelAdminTest()
admin.get_import_resource_kwargs(self.request)
self.assertEqual(1, admin.call_count)
def test_get_export_resource_class_calls_self_get_resource_class(self):
admin = self.BaseExportModelAdminTest()
admin.get_export_resource_class()
self.assertEqual(1, admin.call_count)
def test_get_export_resource_kwargs_calls_self_get_resource_kwargs(self):
admin = self.BaseExportModelAdminTest()
admin.get_export_resource_kwargs(self.request)
self.assertEqual(1, admin.call_count)
class BaseExportMixinTest(TestCase):
class TestBaseExportMixin(mixins.BaseExportMixin):
def get_export_resource_kwargs(self, request, *args, **kwargs):
self.args = args
self.kwargs = kwargs
return super().get_resource_kwargs(request, *args, **kwargs)
def test_get_data_for_export_sets_args_and_kwargs(self):
"""
issue 1268
Ensure that get_export_resource_kwargs() handles the args and kwargs arguments.
"""
request = MagicMock(spec=HttpRequest)
m = self.TestBaseExportMixin()
m.model = Book
target_args = (1,)
target_kwargs = {"a": 1}
m.get_data_for_export(request, Book.objects.none(), *target_args, **target_kwargs)
self.assertEqual(m.args, target_args)
self.assertEqual(m.kwargs, target_kwargs)
def test_get_export_formats(self):
class Format(object):
def __init__(self, can_export):
self.val = can_export
def can_export(self):
return self.val
class CanExportFormat(Format):
def __init__(self):
super().__init__(True)
class CannotExportFormat(Format):
def __init__(self):
super().__init__(False)
m = mixins.BaseExportMixin()
m.formats = [CanExportFormat, CannotExportFormat]
formats = m.get_export_formats()
self.assertEqual(1, len(formats))
self.assertEqual('CanExportFormat', formats[0].__name__)
class ExportMixinTest(TestCase):
class TestExportMixin(admin.ExportMixin):
def __init__(self, export_form) -> None:
super().__init__()
self.export_form = export_form
def get_export_form(self):
return self.export_form
class TestExportForm(forms.ExportForm):
pass
def test_get_export_form(self):
m = admin.ExportMixin()
self.assertEqual(forms.ExportForm, m.get_export_form())
def test_get_export_form_with_custom_form(self):
m = self.TestExportMixin(self.TestExportForm)
self.assertEqual(self.TestExportForm, m.get_export_form())
``` |
{
"source": "2yz/MMdnn",
"score": 2
} |
#### File: conversion/caffe/common_graph.py
```python
from six import string_types as _string_types
from mmdnn.conversion.caffe.errors import ConversionError
from mmdnn.conversion.common.IR.graph_pb2 import GraphDef, NodeDef, TensorShape
from mmdnn.conversion.caffe.utils import get_real_name
def assign_attr_value(attr, val):
'''Assign value to AttrValue proto according to data type.'''
if isinstance(val, bool):
attr.b = val
elif isinstance(val, int):
attr.i = val
elif isinstance(val, float):
attr.f = val
elif isinstance(val, str):
attr.s = val.encode('utf-8')
elif isinstance(val, TensorShape):
attr.shape.MergeFromString(val.SerializeToString())
elif isinstance(val, list):
if len(val) == 0: return
if isinstance(val[0], int):
attr.list.i.extend(val)
elif isinstance(val[0], TensorShape):
attr.list.shape.extend(val)
else:
raise NotImplementedError('AttrValue cannot be of %s %s' % (type(val), type(val[0])))
else:
raise NotImplementedError('AttrValue cannot be of %s' % type(val))
def fetch_attr_value(attr):
'''Fetch valid value from AttrValue proto.'''
field = attr.WhichOneof('value')
val = getattr(attr, field) if field else None
return val.decode('utf-8') if isinstance(val, bytes) else val
class Node(object):
'''An intermediate representation for DL operations.'''
def __init__(self, node_pb2):
assert isinstance(node_pb2, NodeDef)
self.node_pb2 = node_pb2
self.output = []
@staticmethod
def create(op, **kwargs):
node_pb2 = NodeDef()
node_pb2.op = op
for k, v in kwargs.items():
assign_attr_value(node_pb2.attr[k], v)
return Node(node_pb2)
@property
def op(self):
return self.node_pb2.op
@property
def name(self):
return self.node_pb2.name
@name.setter
def name(self, value):
assert isinstance(value, _string_types)
self.node_pb2.name = value
@property
def input(self):
return self.node_pb2.input
@property
def attr(self):
return self.node_pb2.attr.items()
class Graph(object):
'''An intermediate representation for DL graph.'''
def __init__(self, name, node_list, version=0):
if node_list and len(node_list):
assert isinstance(node_list[0], Node)
self.node_dict = {node.name: node for node in node_list}
else:
self.node_dict = {}
self.name = name
self.version = version
def topologically_sorted(self):
visited = set()
sorted_nodes = []
def topo_sort_dfs(node, visited, sorted_nodes):
if node in visited:
return
visited.add(node)
for n in self.get_input(node):
topo_sort_dfs(n, visited, sorted_nodes)
sorted_nodes.append(node)
for node in self.node_dict.values():
topo_sort_dfs(node, visited, sorted_nodes)
return sorted_nodes
def get_node(self, name):
return self.node_dict[name]
def add_node(self, node):
assert node.name not in self.node_dict
self.node_dict[node.name] = node
def remove_node(self, name):
return self.node_dict.pop(name)
def get_input(self, node):
input_nodes = []
for name in node.input:
name = get_real_name(name)
if name in self.node_dict:
input_nodes.append(self.get_node(name))
return input_nodes
def as_graph_def(self):
graph_pb2 = GraphDef()
graph_pb2.version = self.version
graph_pb2.node.extend([node.node_pb2 for node in self.node_dict.values()])
return graph_pb2
```
#### File: conversion/caffe/network.py
```python
import numpy as np
DEFAULT_PADDING = 'SAME'
def layer(op):
'''Decorator for composable network layers.'''
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
assert len(args) >= 1
if len(args) == 1:
layer_inputs = args[0]
else:
layer_inputs = list(args)
layer_output = op(self, layer_inputs, **kwargs)
# print('op: %s shape: %s' % (op, layer_output._keras_shape))
# print('op: %s shape: %s' % (op, layer_output.get_shape().as_list()))
# Add to layer LUT.
self.layers[name] = layer_output
self.output = layer_output
return layer_output
return layer_decorated
class Network(object):
def __init__(self, trainable=False):
self.output = None
self.layers = {}
self.trainable = trainable
self.setup()
def setup(self):
raise NotImplementedError('Must be implemented by the subclass')
def load(self, data_path, session, ignore_missing=False):
raise NotImplementedError('Must be implemented by the subclass')
def input(self, shape, name):
raise NotImplementedError('Must be implemented by the subclass')
def get_output(self):
raise NotImplementedError('Must be implemented by the subclass')
def get_unique_name(self, prefix):
raise NotImplementedError('Must be implemented by the subclass')
@layer
def conv(self, input, k_h, k_w, c_o, s_h, s_w, p_h, p_w, name, group=1, biased=True):
raise NotImplementedError('Must be implemented by the subclass')
@layer
def deconv(self, input, c_o, k_h, k_w, s_h, s_w, p_h, p_w, name):
raise NotImplementedError('Must be implemented by the subclass')
@layer
def relu(self, input, name):
raise NotImplementedError('Must be implemented by the subclass')
@layer
def sigmoid(self, input, name):
raise NotImplementedError('Must be implemented by the subclass')
@layer
def max_pool(self, input, k_h, k_w, s_h, s_w, p_h, p_w, name):
raise NotImplementedError('Must be implemented by the subclass')
@layer
def avg_pool(self, input, k_h, k_w, s_h, s_w, p_h, p_w, name):
raise NotImplementedError('Must be implemented by the subclass')
@layer
def lrn(self, input, local_size, alpha, beta, name, bias=1):
raise NotImplementedError('Must be implemented by the subclass')
@layer
def concat(self, inputs, axis, name):
raise NotImplementedError('Must be implemented by the subclass')
@layer
def add(self, inputs, name):
raise NotImplementedError('Must be implemented by the subclass')
@layer
def fc(self, input, num_out, name):
raise NotImplementedError('Must be implemented by the subclass')
@layer
def softmax(self, input, name):
raise NotImplementedError('Must be implemented by the subclass')
@layer
def batch_normalization(self, input, name, epsilon=0.00001, scale_offset=True):
raise NotImplementedError('Must be implemented by the subclass')
@layer
def dropout(self, input, keep_prob, name):
raise NotImplementedError('Must be implemented by the subclass')
@layer
def crop(self, inputs, offset, name):
raise NotImplementedError('Must be implemented by the subclass')
```
#### File: conversion/caffe/writer.py
```python
import base64
from google.protobuf import json_format
from importlib import import_module
import json
import numpy as np
import os
import sys
from mmdnn.conversion.caffe.errors import ConversionError
from mmdnn.conversion.caffe.common_graph import fetch_attr_value
from mmdnn.conversion.caffe.utils import get_lower_case, get_upper_case, get_real_name
class JsonFormatter(object):
'''Dumpt a DL graph into a Json file.'''
def __init__(self, graph):
self.graph_def = graph.as_graph_def()
def dump(self, json_path):
json_txt = json_format.MessageToJson(self.graph_def)
parsed = json.loads(json_txt)
formatted = json.dumps(parsed, indent=4, sort_keys=True)
with open(json_path, 'w') as f:
f.write(formatted)
class PyWriter(object):
'''Dumpt a DL graph into a Python script.'''
def __init__(self, graph, data, target):
self.graph = graph
self.data = data
self.tab = ' ' * 4
self.prefix = ''
target = target.lower()
if target == 'tensorflow':
self.target = target
self.net = 'TensorFlowNetwork'
elif target == 'keras':
self.target = target
self.net = 'KerasNetwork'
elif target == 'caffe':
self.target = target
self.net = 'CaffeNetwork'
else:
raise ConversionError('Target %s is not supported yet.' % target)
def indent(self):
self.prefix += self.tab
def outdent(self):
self.prefix = self.prefix[:-len(self.tab)]
def statement(self, s):
return self.prefix + s + '\n'
def emit_imports(self):
return self.statement('from dlconv.%s import %s\n' % (self.target, self.net))
def emit_class_def(self, name):
return self.statement('class %s(%s):' % (name, self.net))
def emit_setup_def(self):
return self.statement('def setup(self):')
def emit_node(self, node):
'''Emits the Python source for this node.'''
def pair(key, value):
return '%s=%s' % (key, value)
args = []
for input in node.input:
input = input.strip().split(':')
name = ''.join(input[:-1])
idx = int(input[-1])
assert name in self.graph.node_dict
parent = self.graph.get_node(name)
args.append(parent.output[idx])
#FIXME:
output = [node.output[0]]
# output = node.output
for k, v in node.attr:
if k == 'cell_type':
args.append(pair(k, "'" + fetch_attr_value(v) + "'"))
else:
args.append(pair(k, fetch_attr_value(v)))
args.append(pair('name', "'" + node.name + "'")) # Set the node name
args = ', '.join(args)
return self.statement('%s = self.%s(%s)' % (', '.join(output), node.op, args))
def dump(self, code_output_dir):
if not os.path.exists(code_output_dir):
os.makedirs(code_output_dir)
file_name = get_lower_case(self.graph.name)
code_output_path = os.path.join(code_output_dir, file_name + '.py')
data_output_path = os.path.join(code_output_dir, file_name + '.npy')
with open(code_output_path, 'w') as f:
f.write(self.emit())
with open(data_output_path, 'wb') as f:
np.save(f, self.data)
return code_output_path, data_output_path
def emit(self):
# Decompose DAG into chains
chains = []
for node in self.graph.topologically_sorted():
attach_to_chain = None
if len(node.input) == 1:
parent = get_real_name(node.input[0])
for chain in chains:
if chain[-1].name == parent: # Node is part of an existing chain.
attach_to_chain = chain
break
if attach_to_chain is None: # Start a new chain for this node.
attach_to_chain = []
chains.append(attach_to_chain)
attach_to_chain.append(node)
# Generate Python code line by line
source = self.emit_imports()
source += self.emit_class_def(self.graph.name)
self.indent()
source += self.emit_setup_def()
self.indent()
blocks = []
for chain in chains:
b = ''
for node in chain:
b += self.emit_node(node)
blocks.append(b[:-1])
source += '\n\n'.join(blocks)
return source
class ModelSaver(object):
def __init__(self, code_output_path, data_output_path):
self.code_output_path = code_output_path
self.data_output_path = data_output_path
def dump(self, model_output_dir):
'''Return the file path containing graph in generated model files.'''
if not os.path.exists(model_output_dir):
os.makedirs(model_output_dir)
sys.path.append(os.path.dirname(self.code_output_path))
file_name = os.path.splitext(os.path.basename(self.code_output_path))[0]
module = import_module(file_name)
class_name = get_upper_case(file_name)
net = getattr(module, class_name)
return net.dump(self.data_output_path, model_output_dir)
class GraphDrawer(object):
def __init__(self, toolkit, meta_path):
self.toolkit = toolkit.lower()
self.meta_path = meta_path
def dump(self, graph_path):
if self.toolkit == 'tensorflow':
from dlconv.tensorflow.visualizer import TensorFlowVisualizer
if self._is_web_page(graph_path):
TensorFlowVisualizer(self.meta_path).dump_html(graph_path)
else:
raise NotImplementedError('Image format or %s is unsupported!' % graph_path)
elif self.toolkit == 'keras':
from dlconv.keras.visualizer import KerasVisualizer
png_path, html_path = (None, None)
if graph_path.endswith('.png'):
png_path = graph_path
elif self._is_web_page(graph_path):
png_path = graph_path + ".png"
html_path = graph_path
else:
raise NotImplementedError('Image format or %s is unsupported!' % graph_path)
KerasVisualizer(self.meta_path).dump_png(png_path)
if html_path:
self._png_to_html(png_path, html_path)
os.remove(png_path)
else:
raise NotImplementedError('Visualization of %s is unsupported!' % self.toolkit)
def _is_web_page(self, path):
return path.split('.')[-1] in ('html', 'htm')
def _png_to_html(self, png_path, html_path):
with open(png_path, "rb") as f:
encoded = base64.b64encode(f.read()).decode('utf-8')
source = """<!DOCTYPE>
<html>
<head>
<meta charset="utf-8">
<title>Keras</title>
</head>
<body>
<img alt="Model Graph" src="data:image/png;base64,{base64_str}" />
</body>
</html>""".format(base64_str=encoded)
with open(html_path, 'w', encoding='utf-8') as f:
f.write(source)
```
#### File: examples/mxnet/extract_model.py
```python
import argparse
from six import text_type as _text_type
import mxnet as mx
from mmdnn.conversion.examples.imagenet_test import TestKit
from mmdnn.conversion.common.utils import download_file
from collections import namedtuple
Batch = namedtuple('Batch', ['data'])
network_name_key = ['resnet', 'vgg19', 'squeezenet', 'inception-bn', 'resnext']
_base_model_url = 'http://data.mxnet.io/models/'
_default_model_info = {
'imagenet1k-inception-bn' : {'symbol' : _base_model_url+'imagenet/inception-bn/Inception-BN-symbol.json',
'params' : _base_model_url+'imagenet/inception-bn/Inception-BN-0126.params',
'image_size' : 224},
'imagenet1k-resnet-18' : {'symbol' : _base_model_url+'imagenet/resnet/18-layers/resnet-18-symbol.json',
'params' : _base_model_url+'imagenet/resnet/18-layers/resnet-18-0000.params',
'image_size' : 224},
'imagenet1k-resnet-34' : {'symbol' : _base_model_url+'imagenet/resnet/34-layers/resnet-34-symbol.json',
'params' : _base_model_url+'imagenet/resnet/34-layers/resnet-34-0000.params',
'image_size' : 224},
'imagenet1k-resnet-50' : {'symbol' : _base_model_url+'imagenet/resnet/50-layers/resnet-50-symbol.json',
'params' : _base_model_url+'imagenet/resnet/50-layers/resnet-50-0000.params',
'image_size' : 224},
'imagenet1k-resnet-101' : {'symbol' : _base_model_url+'imagenet/resnet/101-layers/resnet-101-symbol.json',
'params' : _base_model_url+'imagenet/resnet/101-layers/resnet-101-0000.params',
'image_size' : 224},
'imagenet1k-resnet-152' : {'symbol' : _base_model_url+'imagenet/resnet/152-layers/resnet-152-symbol.json',
'params' : _base_model_url+'imagenet/resnet/152-layers/resnet-152-0000.params',
'image_size' : 224},
'imagenet1k-resnext-50' : {'symbol' : _base_model_url+'imagenet/resnext/50-layers/resnext-50-symbol.json',
'params' : _base_model_url+'imagenet/resnext/50-layers/resnext-50-0000.params',
'image_size' : 224},
'imagenet1k-resnext-101' : {'symbol' : _base_model_url+'imagenet/resnext/101-layers/resnext-101-symbol.json',
'params' : _base_model_url+'imagenet/resnext/101-layers/resnext-101-0000.params',
'image_size' : 224},
'imagenet1k-resnext-101-64x4d' : {'symbol' : _base_model_url+'imagenet/resnext/101-layers/resnext-101-64x4d-symbol.json',
'params' : _base_model_url+'imagenet/resnext/101-layers/resnext-101-64x4d-0000.params',
'image_size' : 224},
'imagenet11k-resnet-152' : {'symbol' : _base_model_url+'imagenet-11k/resnet-152/resnet-152-symbol.json',
'params' : _base_model_url+'imagenet-11k/resnet-152/resnet-152-0000.params',
'image_size' : 224},
'imagenet11k-place365ch-resnet-152' : {'symbol' : _base_model_url+'imagenet-11k-place365-ch/resnet-152-symbol.json',
'params' : _base_model_url+'imagenet-11k-place365-ch/resnet-152-0000.params',
'image_size' : 224},
'imagenet11k-place365ch-resnet-50' : {'symbol' : _base_model_url+'imagenet-11k-place365-ch/resnet-50-symbol.json',
'params' : _base_model_url+'imagenet-11k-place365-ch/resnet-50-0000.params',
'image_size' : 224},
'vgg19' : {'symbol' : _base_model_url+'imagenet/vgg/vgg19-symbol.json',
'params' : _base_model_url+'imagenet/vgg/vgg19-0000.params',
'image_size' : 224},
'vgg16' : {'symbol' : _base_model_url+'imagenet/vgg/vgg16-symbol.json',
'params' : _base_model_url+'imagenet/vgg/vgg16-0000.params',
'image_size' : 224},
'squeezenet_v1.0' : {'symbol' : _base_model_url+'imagenet/squeezenet/squeezenet_v1.0-symbol.json',
'params' : _base_model_url+'imagenet/squeezenet/squeezenet_v1.0-0000.params',
'image_size' : 224},
'squeezenet_v1.1' : {'symbol' : _base_model_url+'imagenet/squeezenet/squeezenet_v1.1-symbol.json',
'params' : _base_model_url+'imagenet/squeezenet/squeezenet_v1.1-0000.params',
'image_size' : 224}
}
def _search_preprocess_key(original_network_name):
import re
for key in network_name_key:
if re.search(key, original_network_name):
return key
raise ValueError('preprocess module cannot support [{}]'.format(original_network_name))
def _main():
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--network', type=_text_type, help='Model Type', required=True,
choices=_default_model_info.keys())
parser.add_argument('-i', '--image', default=None,
type=_text_type, help='Test Image Path')
parser.add_argument('-o', '--output_dir', default='./',
type=_text_type, help='Tensorflow Checkpoint file name')
args = parser.parse_args()
if not download_file(_default_model_info[args.network]['symbol'], directory=args.output_dir):
return -1
if not download_file(_default_model_info[args.network]['params'], directory=args.output_dir):
return -1
print("Model {} saved.".format(args.network))
file_name = _default_model_info[args.network]['params'].split('/')[-1]
prefix, epoch_num = file_name[:-7].rsplit('-', 1)
sym, arg_params, aux_params = mx.model.load_checkpoint(args.output_dir + prefix, int(epoch_num))
model = mx.mod.Module(symbol=sym)
model.bind(for_training=False,
data_shapes=[('data', (1, 3, _default_model_info[args.network]['image_size'],
_default_model_info[args.network]['image_size']))])
model.set_params(arg_params, aux_params, allow_missing=True, allow_extra=True)
if args.image:
import numpy as np
# need to be updated
network = _search_preprocess_key(args.network)
func = TestKit.preprocess_func['mxnet'][network]
img = func(args.image)
img = np.swapaxes(img, 0, 2)
img = np.swapaxes(img, 1, 2)
img = np.expand_dims(img, axis=0)
model.forward(Batch([mx.nd.array(img)]))
predict = model.get_outputs()[0].asnumpy()
predict = np.squeeze(predict)
top_indices = predict.argsort()[-5:][::-1]
result = [(i, predict[i]) for i in top_indices]
print(result)
return 0
if __name__ == '__main__':
_main()
``` |
{
"source": "2z1c/Community-document",
"score": 3
} |
#### File: 2z1c/Community-document/auto_creat_toc.py
```python
import argparse
from argparse import RawTextHelpFormatter
import sys
from loguru import logger
import os
import yaml
import subprocess
import fnmatch
import glob
def find_file(dir_path, filename):
if os.path.isdir(dir_path) is not True:
logger.error("{0} 不是正确的文件路径".format(dir_path))
return None
# 去除路径最后一个 /
file_path = os.path.dirname(dir_path) + '/' + filename
if os.path.exists(file_path) is not True:
logger.error("在 {0} 中没有找到 {1} 文件".format(dir_path, filename))
return None
return file_path
def creat_toc(args):
# 1. 找到需要 url 字符串
dir_list = []
with open(args.input_file, 'r', encoding='utf-8') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.SafeLoader)
List = cfg["navbar"]["items"][1]['items']
for _ in List:
# 去除第一个字符
dir_list.append(_["url"][1:])
# 2. 根据 url 找到 某一个目录下所有的指定文件
List_file_path = []
for _ in dir_list:
r = find_file("docs/" + _, "sidebar.yaml")
if r is not None:
List_file_path.append(r)
logger.debug("找到了 {}".format(List_file_path))
# 3. 最后开始合并yml 文件
if os.path.exists(args.out_file) is True:
logger.debug("删除 {}".format(args.out_file))
os.remove(args.out_file)
for _ in List_file_path:
cmd = "cat {0} >> {1}".format(_, args.out_file)
logger.debug("执行 {} 命令".format(cmd))
os.system(cmd)
pass
def copy_file(args):
for f_path in glob.iglob('docs/**/' + os.path.basename(args.input_file),
recursive=True):
# logger.debug("复制 {0} 到 {1}".format(args.input_file, f_path))
subprocess.run(["cp", args.input_file, f_path, "-v"])
# 找到所有的 sidebar.yaml
def main(argv):
parser = argparse.ArgumentParser(description="自动生成目录",
formatter_class=RawTextHelpFormatter)
parser.add_argument("--out_file",
"-o",
type=str,
default="Quecpython_toc.yml",
help='输出目录 (eg: -o Quecpython_toc.yml)')
parser.add_argument(
"--input_file",
"-i",
type=str,
default="docs/Quecpython_intro/zh/config.json",
help='输出目录 (eg: -i docs/Quecpython_intro/zh/config.json)')
parser.add_argument("--action",
type=str,
default="toc",
help="""指定动作
--action {toc|copy}
toc 生成总目录
copy 将 {input_file} 文件同时复制到其他的路径下面去""")
args = parser.parse_args(args=argv)
logger.debug(args.out_file)
logger.debug(args.input_file)
if os.path.isfile(args.input_file) is False:
logger.error("错误的 --input_file 参数, 没有找到 {0}".format(args.input_file))
exit(1)
if args.action == "toc":
creat_toc(args)
elif args.action == "copy":
copy_file(args)
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: QuecPythonCloud/code/TencentCloud.py
```python
from umqtt import MQTTClient
import modem
CLIENT_ID = b'X3Z30XABBU001'
SERVER = b'X3Z30XABBU.iotcloud.tencentdevices.com'
PORT = 1883
USER = b'X3Z30XABBU001;12010126;M8STP;1647306844'
PASSWORD = b'<PASSWORD>'
IMEI = None # modem.getDevImei()
SUB_TOPIC = 'X3Z30XABBU/{}/data'
PUB_TOPIC = SUB_TOPIC
def GetDevImei():
global IMEI
# IMEI = modem.getDevImei()
IMEI= '001'
print(IMEI)
state = 0
def sub_cb(topic, msg):
global state
print(
"Subscribe Recv: Topic={},Msg={}".format(
topic.decode(),
msg.decode()))
state = 1
def MQTT_Init():
# 创建一个mqtt实例
c = MQTTClient(
client_id=CLIENT_ID,
server=SERVER,
port=PORT,
user=USER,
password=PASSWORD,
keepalive=30) # 必须要 keepalive=30 ,否则连接不上
# 设置消息回调
c.set_callback(sub_cb)
# 建立连接
try:
c.connect()
except Exception as e:
print('!!!,e=%s' % e)
return
# c.connect()
# 订阅主题
c.subscribe(SUB_TOPIC.format(IMEI))
# 发布消息
Payload = '{"DeviceName":"{}","msg":"test publish"}'.format(IMEI)
c.publish(PUB_TOPIC.format(IMEI), Payload)
while True:
c.wait_msg()
if state == 1:
break
# 关闭连接
c.disconnect()
def main():
GetDevImei()
MQTT_Init()
if __name__ == "__main__":
main()
```
#### File: QuecPythonWirelessNetwork/code/sim_base.py
```python
import sim
import utime as time
import urandom as random
# 打印所有通讯录
def print_sim_phonebook():
for i in range(1, 1000):
# 一次读一个
info = sim.readPhonebook(9, i, i+1, "")
if info == -1:
print("read has error")
break
else:
print(info)
time.sleep_ms(5)
# 生成随机名字
def CreatRandomStr(length):
# The limit for the extended ASCII Character set
MAX_LENGTH = 16
random_string = ''
if length > MAX_LENGTH:
length = length % MAX_LENGTH
if length == 0:
length = random.randint(1, MAX_LENGTH)
for _ in range(length):
# 0 ~ z
random_integer = random.randint(48, 122)
# Keep appending random characters using chr(x)
random_string += (chr(random_integer))
return random_string
def CreatRandomPhoneNum(count=8):
pre_lst = ["130", "131", "132", "133", "134", "135", "136", "137", "138", "139", "147", "150",
"151", "152", "153", "155", "156", "157", "158", "159", "186", "187", "188"]
# 生成8个随机数个位数
tail_str = [str(random.randint(0, 9)) for i in range(count)]
# 将其转化为字符串
tail_str = ''.join(tail_str)
return random.choice(pre_lst) + tail_str
pass
def write_random_sim_phonebook():
for i in range(1, 10):
# 一次写一个
name = CreatRandomStr(random.randint(4, 6))
number = CreatRandomPhoneNum()
sim.writePhonebook(9, i, name, number)
def test_sim_base():
# check sim statsu
ret = sim.getStatus()
if ret == 1:
write_random_sim_phonebook()
print_sim_phonebook()
else:
# 状态不对
print("sim status has error , value is {0}".format(ret))
print("test_sim_base has exited")
if __name__ == "__main__":
test_sim_base()
```
#### File: QuecPythonTest/code/03_pm.py
```python
import pm
import utime
def main():
lpm_fd = pm.create_wakelock("test_lock", len("test_lock")) # 创建wake_lock锁
pm.autosleep(1) # 自动休眠模式控制
while True:
print("sleep")
utime.sleep(5) # 延时 并 休眠5秒钟
res = pm.wakelock_lock(lpm_fd) # 加锁 禁止进入休眠状态
print(res)
print("ql_lpm_idlelock_lock, g_c1_axi_fd = %d" % lpm_fd)
print("not sleep")
utime.sleep(5) # 只延时,不休眠
res = pm.wakelock_unlock(lpm_fd) # 解锁 继续 自动休眠模式
print(res)
print("ql_lpm_idlelock_unlock, g_c1_axi_fd = %d" % lpm_fd)
num = pm.get_wakelock_num() # 获取已创建锁的数量
print(num) # 打印已创建锁的数量
if __name__ == "__main__":
main()
```
#### File: QuecPythonTest/code/External_interrupt.py
```python
from machine import ExtInt
import utime as time
'''
EC600SCN平台引脚对应关系如下:
GPIO1 – 引脚号71
GPIO2 – 引脚号72
GPIO3 – 引脚号73
GPIO4 – 引脚号74
GPIO5 – 引脚号75
GPIO6 – 引脚号76
GPIO7 – 引脚号77
'''
# 参考自 http://qpy.quectel.com/wiki/#/zh-cn/api/?id=extint
state = 2
def callBack(args):
global state
print("###interrupt %d ###" % args)
state = state - 1
def main():
# 映射GPIO71的下降沿触发回调函数
extint = ExtInt(ExtInt.GPIO1, ExtInt.IRQ_FALLING, ExtInt.PULL_PU, callBack)
# 等待按键按下,触发
while state:
time.sleep_ms(10)
pass
# 停止映射外部中断
extint.disable()
print("The main function has exited")
if __name__ == "__main__":
main()
```
#### File: QuecPythonTest/code/i2c_aht10.py
```python
import log
from machine import I2C
import utime as time
"""
1. calibration
2. Trigger measurement
3. read data
"""
# API 手册 http://qpy.quectel.com/wiki/#/zh-cn/api/?id=i2c
# AHT10 说明书
# https://server4.eca.ir/eshop/AHT10/Aosong_AHT10_en_draft_0c.pdf
class aht10class():
i2c_log = None
i2c_dev = None
i2c_addre = None
# Initialization command
AHT10_CALIBRATION_CMD = 0xE1
# Trigger measurement
AHT10_START_MEASURMENT_CMD = 0xAC
# reset
AHT10_RESET_CMD = 0xBA
def write_data(self, data):
self.i2c_dev.write(self.i2c_addre,
bytearray(0x00), 0,
bytearray(data), len(data))
pass
def read_data(self, length):
r_data = [0x00 for i in range(length)]
r_data = bytearray(r_data)
self.i2c_dev.read(self.i2c_addre,
bytearray(0x00), 0,
r_data, length,
0)
return list(r_data)
def aht10_init(self, addre=0x38, Alise="Ath10"):
self.i2c_log = log.getLogger(Alise)
self.i2c_dev = I2C(I2C.I2C1, I2C.STANDARD_MODE) # 返回i2c对象
self.i2c_addre = addre
self.sensor_init()
pass
def aht10_transformation_temperature(self, data):
r_data = data
# 根据数据手册的描述来转化温度
humidity = (r_data[0] << 12) | (
r_data[1] << 4) | ((r_data[2] & 0xF0) >> 4)
humidity = (humidity/(1 << 20)) * 100.0
print("current humidity is {0}%".format(humidity))
temperature = ((r_data[2] & 0xf) << 16) | (
r_data[3] << 8) | r_data[4]
temperature = (temperature * 200.0 / (1 << 20)) - 50
print("current temperature is {0}°C".format(temperature))
def sensor_init(self):
# calibration
self.write_data([self.AHT10_CALIBRATION_CMD, 0x08, 0x00])
time.sleep_ms(300) # at last 300ms
pass
def ath10_reset(self):
self.write_data([self.AHT10_RESET_CMD])
time.sleep_ms(20) # at last 20ms
def Trigger_measurement(self):
# Trigger data conversion
self.write_data([self.AHT10_START_MEASURMENT_CMD, 0x33, 0x00])
time.sleep_ms(200) # at last delay 75ms
# check has success
r_data = self.read_data(6)
# check bit7
if (r_data[0] >> 7) != 0x0:
print("Conversion has error")
else:
self.aht10_transformation_temperature(r_data[1:6])
def i2c_aht10_test():
ath_dev = aht10class()
ath_dev.aht10_init()
# 测试十次
for i in range(10):
ath_dev.Trigger_measurement()
time.sleep(1)
if __name__ == "__main__":
i2c_aht10_test()
```
#### File: QuecPythonTest/code/Photoresistor.py
```python
from misc import ADC
import utime as time
import _thread
# unit as Ω
def Voltage_to_Resistance(Volt):
#
Va = 2 * Volt
resistance = (2 * 4700 * 40200 * Va)/(2 * 4700 * (3300 - Va) - (40200 * Va))
return resistance
def Photoresistor_thread(delay, retryCount):
# creat a adc device
AdcDevice = ADC()
while retryCount:
retryCount = retryCount - 1
# get ADC.ADC0 value
adcvalue = AdcDevice.read(ADC.ADC0)
print("get ADC.ADC0 Voltage value as {0}mv".format(adcvalue))
# Converted to resistance
resistance = Voltage_to_Resistance(adcvalue)
print("Photoresistor resistance as {0}Ω".format(resistance))
time.sleep(delay)
pass
if __name__ == "__main__":
# creat a thread Convert ADC to Voltage
_thread.start_new_thread(Photoresistor_thread, (1, 10))
print("creent main thread has exit")
```
#### File: QuecPythonTest/code/record.py
```python
import utime
import checkNet
import audio
from machine import Pin
'''
下面两个全局变量是必须有的,用户可以根据自己的实际项目修改下面两个全局变量的值,
在执行用户代码前,会先打印这两个变量的值。
'''
PROJECT_NAME = "QuecPython_Record_example"
PROJECT_VERSION = "1.0.0"
checknet = checkNet.CheckNetwork(PROJECT_NAME, PROJECT_VERSION)
'''
外接喇叭播放录音文件,参数选择0
'''
aud = audio.Audio(0)
tts = audio.TTS(0)
'''
外接喇叭播放录音文件,需要下面这一句来使能
'''
audio_EN = Pin(Pin.GPIO11, Pin.OUT, Pin.PULL_PD, 1)
def record_callback(args):
print('file_name:{}'.format(args[0]))
print('file_size:{}'.format(args[1]))
print('record_sta:{}'.format(args[2]))
record_sta = args[2]
if record_sta == 3:
print('The recording is over, play it')
tts.play(1, 0, 2, '录音结束,准备播放录音文件')
aud.play(1, 0, record.getFilePath())
elif record_sta == -1:
print('The recording failure.')
tts.play(1, 0, 2, '录音失败')
if __name__ == '__main__':
'''
手动运行本例程时,可以去掉该延时,如果将例程文件名改为main.py,希望开机自动运行时,需要加上该延时,
否则无法从CDC口看到下面的 poweron_print_once() 中打印的信息
'''
# utime.sleep(5)
checknet.poweron_print_once()
'''
如果用户程序包含网络相关代码,必须执行 wait_network_connected() 等待网络就绪(拨号成功);
如果是网络无关代码,可以屏蔽 wait_network_connected()
'''
# checknet.wait_network_connected()
# 用户代码
'''######################【User code star】###################################################'''
print('the recording will begin in 2 seconds. Please be ready!')
utime.sleep(2)
print('start recording!')
record = audio.Record('recordfile.wav', record_callback)
record.start(10)
'''######################【User code end 】###################################################'''
```
#### File: QuecPythonTest/code/uart_demo1.py
```python
from machine import UART
import utime as time
"""
端口号
EC100YCN平台与EC600SCN平台,UARTn作用如下:
UART0 - DEBUG PORT
UART1 – BT PORT
UART2 – MAIN PORT
UART3 – USB CDC PORT
"""
def main():
"""
config uart Baud rate as 115200,data bits as 8bit, Do not use parity,
Stop bit as 0bit,Do not use Flow control,
UART(UART.UARTn, buadrate, databits, parity, stopbits, flowctl)
"""
uart = UART(UART.UART2, 115200, 8, 0, 1, 0)
# write string
delay = 100
for i in range(2):
# write string
uart.write("hello world\r\n")
# write string and & integer
uart.write("delay num as {0}ms\r\n".format(delay))
# write float
uart.write("π as {0}\r\n".format(3.14159))
# read something
read_btyes = 6
uart.write("please input {0} bytes:\r\n".format(read_btyes))
while True:
if uart.any() > read_btyes:
break
else:
time.sleep_ms(10)
# !!! Before reading buffer, please make sure there is data in buffer
input_date = uart.read(read_btyes)
uart.write("The data you entered is {0}\r\n".format(input_date))
time.sleep_ms(delay)
if __name__ == "__main__":
main()
``` |
{
"source": "2ZeroSix/magic-ping",
"score": 3
} |
#### File: magic-ping/magicPing/utils.py
```python
import shutil
def carry_around_add(a, b):
"""
дополняющая сумма
:param a: первое слагаемое
:param b: второе слагаемое
:return: дополняющая сумма a и b
"""
c = a + b
return (c & 0xFFFF) + (c >> 16)
def checksum(msg, avoid_range=range(0)):
"""
обратный код 16 битной дополняющей суммы елементов msg
:param msg: сообщения для подсчёта контрольной суммы
:param avoid_range: диапазон индексов елементов, которые не должны учавствовать в подсчёте
:return:
"""
s = 0
for i in range(1, len(msg), 2):
a, b = 0, 0
if i - 1 not in avoid_range:
a = int(msg[i])
if i not in avoid_range:
b = int(msg[i - 1])
w = a | (b << 8)
s = carry_around_add(s, w)
if len(msg) % 2 == 1:
w = 0
if len(msg) - 1 not in avoid_range:
w = int(msg[len(msg) - 1]) << 8
s = carry_around_add(s, w)
return ~s & 0xFFFF
def print_progress_bar(iteration: int, total: int, prefix: str = '', suffix: str = '',
decimals: int = 1, length: int = None, fill: str = '█') -> None:
"""
вывод строки состояния для работы в цикле
:param iteration: текущая итерация
:param total: общее число итераций
:param prefix: префикс
:param suffix: суффикс
:param decimals: кол-во знаков после запятой
:param length: длина строки состояния
:param fill: заполняющий символ
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
if length is None:
length = max(shutil.get_terminal_size()[0] - len(prefix) - len(suffix) - len(percent) - 5, 10)
filled_length = int(length * iteration // total)
bar = fill * filled_length + '-' * (length - filled_length)
print('\r%s|%s| %s%% %s' % (prefix, bar, percent, suffix), end='\r')
if iteration == total:
print()
``` |
{
"source": "300000kms/mapModern",
"score": 2
} |
#### File: mapModern/01_getdata/02 getbookdata.py
```python
import sqlite3
import pprint
import requests
from pyld import jsonld
import json
from lxml import etree
pp = pprint.PrettyPrinter(indent=4)
# la conversion de lis_adv.csv a sqlite ha sido manual
DB= 'db.sqlite'
########################################################################################################################
########################################################################################################################
########################################################################################################################
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def getBooksList(id ):
conn = sqlite3.connect(DB)
conn.row_factory = dict_factory
c = conn.cursor()
c.execute('''select * from llista1 where rowid >'''+ str(id))
results = c.fetchall()
return results
def getDataBook(oclc):
print 'book : http://www.worldcat.org/title/fabulas-selectas/oclc/%s' %(oclc)
url = 'http://experiment.worldcat.org/oclc/%s.jsonld' %(oclc)
r = requests.get(url)
#pp.pprint(r.json())
#js = jsonld.expand(url)
# pp.pprint ( js )
# for j in js:
# print j['@type'], j.keys()
return r.text
def biblios(oclc, tt, n=1):
print 'tt:', tt
if tt == 'ed':
url = 'https://www.worldcat.org/wcpa/servlet/org.oclc.lac.ui.ajax.ServiceServlet?wcoclcnum=%s&ht=edition&start_holding=%s&serviceCommand=holdingsdata' % (oclc,n)
elif tt == 'all':
url = 'https://www.worldcat.org/wcpa/servlet/org.oclc.lac.ui.ajax.ServiceServlet?wcoclcnum=%s&start_holding=%s&serviceCommand=holdingsdata' % (oclc,n)
print url
r = requests.get(url)
page = r.text
tree = etree.HTML(page)
nbiblios = 0
nediciones = 0
if len( tree.xpath('.//*[@class="lib"]') ) > 0:
#numero biblios
nb = int(tree.xpath('//*[@class="libsdisplay"]/strong[2]/text()')[0])
#numero ediciones
ne = int(tree.xpath('//*[@class="libsdisplay"]/strong[3]/text()')[0].split(' ')[0]) if tree.xpath('//*[@class="libsdisplay"]/strong[3]/text()') else ''
print nb, ne
l=[]
tr = tree.xpath('//*[@class="name"]')
for t in tr:
name = t.xpath('.//*[@class="lib"]/a/text()')[0]
av = t.xpath('.//*[@class="lib"]/a/@onclick')[0].split(',')[1].replace("'",'') if len(t.xpath('.//*[@class="lib"]/a/@onclick'))>0 else None
loc = t.xpath('.//*[@class="geoloc"]/text()')[0]
l.append([name, av, loc])
if n+6 >= nb:
return {'ed':ne, 'nb':nb, 'bib':l }
else:
return {'ed':ne, 'nb':nb, 'bib':l+biblios(oclc, tt, n+6)['bib'] }
else:
return {'ed': None, 'nb':None, 'bib':[''] }
def saveBookData(id, js, bib_all_bib, bib_all_ne, bib_all_nb, bib_ed_bib, bib_ed_nb):
conn = sqlite3.connect(DB)
c = conn.cursor()
#c.execute("update llista1 set description = '%s', bib_all = '%s', bib_ed= '%s' where rowid is %s" %(js, bib_all, bib_ed, id))
c.execute("update llista1 set description = ?, bib_all_bib = ?, bib_all_ne= ? , bib_all_nb= ? , bib_ed_bib= ? , bib_ed_nb= ? where rowid is ?" , (js, bib_all_bib, bib_all_ne, bib_all_nb, bib_ed_bib, bib_ed_nb, id))
conn.commit()
return
def createFields(fields):
conn = sqlite3.connect(DB)
c = conn.cursor()
for f in fields:
try:
c.execute ('ALTER TABLE llista1 ADD COLUMN %s' %(f))
conn.commit()
except Exception as e:
print e
conn.close()
return
def getLastId():
try:
with open('logbooks', 'r') as f:
n = f.read()
except:
with open('logbooks', 'w+') as f:
n = f.read()
if n == None:
n = 0
return n
def saveId(id):
with open('logbooks', 'w+') as f:
f.write(str(id))
return
########################################################################################################################
########################################################################################################################
########################################################################################################################
'''
https://www.worldcat.org/wcpa/servlet/org.oclc.lac.ui.ajax.ServiceServlet?serviceCommand=spotlightLibrarySearch&oclcnum=912043880&_=1513351180160
https://www.worldcat.org/wcpa/servlet/org.oclc.lac.ui.ajax.ServiceServlet?wcoclcnum=431915731&ht=edition&serviceCommand=holdingsdata
https://www.worldcat.org/wcpa/servlet/org.oclc.lac.ui.ajax.ServiceServlet?wcoclcnum=431915731&serviceCommand=holdingsdata
https://www.worldcat.org/wcpa/servlet/org.oclc.lac.ui.ajax.ServiceServlet?wcoclcnum=431915731&ht=edition&serviceCommand=holdingsdata
'''
createFields(['description', 'bib_all_bib', 'bib_all_ne', 'bib_all_nb', 'bib_ed_bib', 'bib_ed_nb'])
#saveId(0)
def scrap():
id = getLastId()
books = getBooksList(id)
t = {}
for b in books:
id = str(b['PK_UID'])
b = str(int(b['oclc']))
print id, '|', b
print 1
js = unicode(getDataBook(b))
print 2
bib_all = biblios(b, 'all')
bib_all_ne = unicode(bib_all['ed'])
bib_all_nb = unicode(bib_all['nb'])
bib_all_bib = unicode(bib_all['bib'])
print 3
bib_ed = biblios(b, 'ed')
bib_ed_nb = unicode(bib_ed['nb'])
bib_ed_bib = unicode(bib_ed['bib'])
print 'saving...'
#for e in [id, js, bib_all_bib, bib_all_ne, bib_all_nb, bib_ed_bib, bib_ed_nb]:
# print e
saveBookData(id, js, bib_all_bib, bib_all_ne, bib_all_nb, bib_ed_bib, bib_ed_nb)
saveId(id)
return
################################################################################################
out =''
while out != None:
try:
scrap()
except Exception as e:
print e
```
#### File: mapModern/old/06 getAuthors_op2.py
```python
import sqlite
'''
este codigo coge todos los fields que contiene la mencion creator o autor o contributor o ilustrator
y entonces alamcena el contenido de todos estos campos para poder hacer un listado que permita conocer todos los autores unicos
dados que esto parte de la extraccion que se realizaó inicial nos encontramos con nombres yuxtapuestos, sobretodo en el campo controbutors
por ello de aqui tendremos que ir hacia el codigo getauthor_op3 para limpiar esto
'''
def getCols(x):
sql = 'select name from booksfull_fields where name like \'%'+x+'%\''
fields = sqlite.sql(db, sql)
af = []
for f in fields:
#print f
af.append(f['name'])
return af
################################################
db = 'books2.sqlite'
table = 'booksfull'
authors = getCols('author')
creators = getCols('creator')
contri = getCols('contributor')
illus =getCols('illustrator')
sql = ''
for a in authors:
sql+= 'select %s as autor, \'%s\' as tipo from booksfull ' %(a, 'authors')
sql += ' union '
for a in creators:
sql+= 'select %s, \'%s\' from booksfull ' %(a, 'creators')
sql += ' union '
for a in contri:
sql+= 'select %s, \'%s\' from booksfull ' %(a, 'contri')
sql += ' union '
for a in illus:
sql+= 'select %s, \'%s\' from booksfull ' %(a, 'illus')
sql += ' union '
sql = sql[0:-6]
print sql
autores = sqlite.sql(db, sql)
sqlite.dict2sqlite('authors2.sqlite', 'authors', autores)
## ahora toca navegar en cada link y buscar su viaf o su denominacion unica para encontrar a los unicos
'''
select schema__Book_author, schema__CreativeWork_author, schema__Person_name
from booksfull
schema__book_creator
schema__creativework_creator
select rowid, schema__book_creator,
schema__creativework_illustrator,
schema__book_illustrator,
schema__creativework_creator,
schema__creativework_contributor,
schema__Book_author, schema__CreativeWork_author, schema__Person_name
from booksfull
where schema__book_creator is null
'''
```
#### File: mapModern/old/utils.py
```python
import sqlite
def getCols(x):
db = 'books2.sqlite'
table = 'booksfull'
'''selecciona las columnas ocn uncontenido x'''
sql = 'select name from booksfull_fields where name like \'%'+x+'%\''
fields = sqlite.sql(db, sql)
af = []
for f in fields:
#print f
af.append(f['name'])
return af
``` |
{
"source": "3005200317/pythonbirds",
"score": 3
} |
#### File: pythonbirds/oo/teste_carro.py
```python
from unittest import \
def TestCase(self):
from oo.carro import Motor
class CarroTestCase(testCase) :
def teste_velocidade_inicial(self):
motor = Motor()
self.assertEqual(0, motor.velocidade)
``` |
{
"source": "300bps/threadify",
"score": 4
} |
#### File: 300bps/threadify/examples.py
```python
import time
from threadify import Threadify
import queue
def task_dots(storage):
"""
Task that prints dots forever.
"""
print(".", sep=" ", end="", flush=True)
time.sleep(.25)
return True
def task_symbols(storage):
"""
Task that prints first character of contents of storage["symbol"] forever.
"""
sym = storage.get("symbol", ".")
print(sym[0], sep=" ", end="", flush=True)
time.sleep(.25)
return True
def task_storage(storage):
"""
Demonstrates a periodic task accessing and modifying storage.
"""
# Values in storage persist from call to call. Local variables do not
# In case storage values haven't been passed in, set defaults
storage.setdefault("a", 1)
storage.setdefault("b", "A")
# Do work
print(storage)
# Operate on storage
storage["a"] += 1
# Fetch from storage, operate
tmp = storage["b"]
tmp = chr((ord(tmp) - ord("A") + 1) % 26 + ord("A"))
# Update storage
# Note: for value to persist, it must be assigned back to storage
storage["b"] = tmp
# Sleep allows other threads to run
time.sleep(1)
return True
def task_run_5s(storage):
"""
Demonstrates self-terminating task.
Use storage to pass in a start time so that task can decide when to self-terminate.
"""
# Get start time from storage
start = storage.get("start_time")
# Compute elapsed time and print
delta = time.time() - start
print("Elapsed time: {:4.2f}".format(delta))
# Time to die?
if delta >= 5:
print("Stopping after {:4.2f} seconds".format(delta))
# Signal thread to terminate
return False
# Sleep allows other threads to run
time.sleep(0.5)
# Signal thread to keep running
return True
def exception_task(storage):
# Get start time from storage
start = storage.get("start_time")
# Compute elapsed time and print
delta = time.time() - start
print("Elapsed time: {:4.2f}".format(delta))
# Sleep allows other threads to run
time.sleep(1)
# Time to kill?
if delta >= 5:
print("Raising exception after {:4.2f} seconds".format(delta))
# Signal thread to terminate
raise Exception("Exception thrown from task!")
# Signal thread to keep running
return True
def task_checkqueue(storage):
"""
Task that watches a queue for messages and acts on them when received.
"""
# Get the queue object from the storage dictionary
thequeue = storage.get("queue")
try:
# Use a timeout so it blocks for at-most 0.5 seconds while waiting for a message. Smaller values can be used to
# increase the cycling of the task and responsiveness to Threadify control signals (like pause) if desired.
msg = thequeue.get(block=True, timeout=.5)
except queue.Empty:
print("_", end="")
else:
if msg == "QUIT":
return False
# Print received message
print("{:s}".format(msg), end="")
return True
def task_dosomething(storage):
"""
Task that gets launched to handle something in the background until it is completed and then terminates. Note that
this task doesn't return until it is finished, so it won't be listening for Threadify pause or kill requests.
"""
# An important task that we want to run in the background.
for i in range(10):
print(i, end="")
time.sleep(1)
return False
# Sequentially run through a series of examples
if __name__ == "__main__":
# Enable debug outputs
Threadify.ENABLE_DEBUG_OUTPUT = True
# ** EXAMPLE 1) Simplest example - built-in task displays '.' to the screen each 0.25 seconds. **
print("\nEX 1) Print '.' approximately every 0.25 seconds.")
t1 = Threadify(start=True)
# Main program sleeps here while task continues to run
time.sleep(5)
# Send kill request and wait for it to complete
t1.kill(wait_until_dead=True)
print("Done")
# ** EXAMPLE 2) Demonstrate two tasks running with one being paused and later continued. **
print("\nEX 2) Starting 2 tasks - one will be paused and later continued while the first runs continuously.")
# Pass initial value of "symbol" via the storage dictionary to each task
t1 = Threadify(task_symbols, {"symbol": "X"})
t2 = Threadify(task_symbols, {"symbol": "O"})
# Start tasks manually (could also have been automatically started by using the start parameter)
t1.start()
t2.start()
time.sleep(5.1)
print("\nPausing 'X' for 5 seconds.")
t1.pause(True)
time.sleep(5)
print("\nUnpausing 'X' for 5 seconds.")
t1.unpause()
time.sleep(5)
t1.kill()
t2.kill()
t1.join()
t2.join()
print("Done")
# ** EXAMPLE 3) Demonstrate a task that self-terminates after 5 seconds. **
print("\nEX 3) Demonstrate a task that self-terminates after 5 seconds.")
t1 = Threadify(task_run_5s, {"start_time": time.time()}, daemon=False, start=True)
# Join instructs main program to wait on t1 to complete before continuing
t1.join()
print("Done")
# ** EXAMPLE 4) Demonstrate communication with a task via a queue passed in through storage. **
print("\nEX 4) Demonstrate communication with a task via a queue passed in through storage.")
# Create a thread-safe queue for message passing
q = queue.Queue()
# This instance REQUIRES deep_copy=FALSE since Queue is not pickleable.
t1 = Threadify(task_checkqueue, {"queue": q}, deep_copy=False, start=True)
# Wait 3 seconds - then send some messages with varying delays interspersed
time.sleep(3)
q.put("HE")
q.put("LLO")
q.put(" WORLD")
time.sleep(2)
q.put("1")
time.sleep(1)
q.put("2")
time.sleep(2)
q.put("3")
time.sleep(3)
# Send the QUIT message to have task kill itself and then wait for it to die
q.put("QUIT")
t1.join()
print("Done.")
# ** EXAMPLE 5) Fire and forget. Launch a function in a separate thread and have it run to completion. **
print("\nEX 5) Fire and forget. Launch a function in a separate thread and have it run to completion.")
t1 = Threadify(task_dosomething, start=True)
# Join instructs main program to wait on t1 to complete before continuing
t1.join()
print("Done")
# Additional examples to be explored
"""
# ## EXAMPLE) Example of a periodic task accessing and modifying persistent storage ##
print("\nExample of a periodic task accessing and modifying persistent storage.")
t1 = Threadify(task_storage, storage={"a": 10, "b": "A"}, start=True)
time.sleep(10)
t1.kill(wait_until_dead=True)
print("Done")
# ## Exceptions thrown from task body and not ignored ##
t1 = Threadify(exception_task, {"start_time": time.time()}, ignore_task_exceptions=False)
t1.daemon = False
t1.start()
print("\nTask raises exceptions after 5 seconds and doesn't dispose.")
time.sleep(8)
t1.kill()
t1.join()
print("Done")
# ## Exceptions thrown from task body and ignored ##
t1 = Threadify(exception_task, {"start_time": time.time()}, ignore_task_exceptions=True)
t1.daemon = False
t1.start()
print("\nTask raises exceptions after 5 seconds; disposes of them an continues for 3 more seconds.")
time.sleep(8)
t1.kill()
t1.join()
print("Done")
# ## kill a paused thread ##
t1 = Threadify(task_dots, {"a": 123, "b": "M"}, daemon=False)
t1.start()
print("\nStarting a thread that will be paused and then killed.")
time.sleep(5)
t1.pause(True)
print("\nPaused for 5 seconds.")
time.sleep(5)
print("kill paused thread.")
t1.kill()
t1.join()
print("Done")
"""
```
#### File: threadify/threadify/threadify.py
```python
import copy
import threading
import time
from typing import Callable, Optional
class Threadify(threading.Thread):
"""
Extend the builtin python 'threading.Thread' class to add cooperative pause, unpause, and kill
capability to python threads.
"""
VERSION = "1.0.0"
# Enable printing debug info
ENABLE_DEBUG_OUTPUT = False
def __init__(self, task: Optional[Callable] = None, storage: Optional[dict] = None, *, name: str = None,
daemon: bool = True, deep_copy: bool = True, ignore_task_exceptions: bool = False,
start: bool = False):
"""
:param task: The callable to be repeatedly executed by the thread in the thread's context.
:param storage: Dictionary containing data for 'task'. It is persistent and mutable across invocations of 'task'
by the thread for the life of the thread. The task can access, modify, and add variables to
the dictionary and have them persist across each task invocation (which happens repeatedly).
:param name: Name for the thread or None for an autogenerated default name.
:param daemon: True - Run as a daemon thread (ie: if main program exits, thread exits);
False - Thread continues to run even if the main program that created it exits.
:param deep_copy: True - Make independent, deep copy of storage for use by thread; False - Shallow copy. A
deep copy may require less programmer care since independent copies are made. A shallow
copy is potentially faster, but requires the programmer to be careful not to create
data contention between various threads of execution. Items that can't be pickled
can't be deep-copied.
:param ignore_task_exceptions: True - Ignore unhandled exceptions raised in task and continue;
False - Re-raise task exception thereby terminating the thread.
:param start: True - Automatically start thread after construction; False - Thread must be manually started
by calling its builtin 'start' method.
"""
super().__init__(name=name, daemon=daemon)
# If no storage passed, create default empty dict
if not storage:
storage = {}
deep_copy = False
# Deep copy requires less programmer care since truly independent copies are made; shallow copy is potentially
# faster, but requires application programmer not to create data contention.
# Note: Items that can't be pickled can't be deep-copied.
if deep_copy:
try:
self.task_storage = copy.deepcopy(storage)
except TypeError as ex:
raise TypeError("Storage contains an item that cannot be deep-copied.") from ex
else:
self.task_storage = copy.copy(storage)
# Select task that the thread continuously executes
if task:
if callable(task):
# User-supplied task
self.task = task
else:
raise TypeError("The 'task' parameter must be a callable or None.")
else:
# Default do-nothing-but-sleep task
self.task = Threadify.task
# Specify how to handle task exceptions
self.ignore_task_exceptions = ignore_task_exceptions
# ## Create and configure thread controls ##
# Control for terminating the thread
self._killthread_event = threading.Event()
self._killthread_event.clear()
# Control for cooperative pausing/running of thread: start with set so thread runs as soon as start is called
self._do_run_event = threading.Event()
self._do_run_event.set()
# Flag indicates when pause has taken effect (thread is actually paused instead of just been told to pause)
self._is_paused_event = threading.Event()
self._is_paused_event.clear()
# Automatically start the thread?
if start:
self.start()
def pause(self, wait_until_paused: bool = False, timeout_secs: Optional[int] = None):
"""
Use to cooperatively pause the thread. Note that unless 'wait_until_paused' is True, this
method can return before the pause has taken effect since thread pausing is affected by the responsiveness
of and the blocking in the user task.
:param wait_until_paused: True - wait until thread has paused before returning; False - return immediately.
:param timeout_secs: 'None' or maximum number of seconds to wait for thread to pause when
'wait_until_paused' is True; None means ignore timeout and wait as long as required
for thread to pause.
:return: True - Thread paused before return; False - Thread not yet paused before return
"""
# Clear run event to signal a pause request to the run function
self._do_run_event.clear()
# If wait desired, wait until pause has occurred (or timed out if timeout_secs is not None)
timeout = (time.time() + timeout_secs) if timeout_secs else time.time()
while all([wait_until_paused,
not self._is_paused_event.is_set(),
((timeout_secs is None) or (time.time() < timeout))]):
time.sleep(0.010)
return self._is_paused_event.is_set()
def unpause(self):
"""
Unpause a paused thread.
:return: None
"""
# Set event to wake up the paused thread
self._do_run_event.set()
def is_paused(self) -> bool:
"""
Indicate if the thread is currently paused. This represents the current actual state of the thread - not
whether or not a pause was requested.
:return: True - Thread is currently paused; False - Thread is not paused
"""
return self._is_paused_event.is_set()
def kill(self, wait_until_dead: bool = False, timeout_secs: Optional[int] = None):
"""
Cooperatively end execution of the thread.
:param wait_until_dead: True - Wait with timeout for thread to terminate; False - Return immediately
:param timeout_secs: 'None' or maximum number of seconds to wait for termination when
'wait_until_dead' is True; None means ignore timeout and wait as long as required
for thread to terminate.
:return: True - Thread terminated before return; False - Thread not yet terminated before returning
"""
# Signal thread to terminate
self._killthread_event.set()
# If thread was paused, wake it so it can proceed with termination
if not self._do_run_event.is_set():
self._do_run_event.set()
# If waiting for termination, sleep until killed (or timed out if timeout_secs is not None)
timeout = (time.time() + timeout_secs) if timeout_secs else time.time()
while all([wait_until_dead, self.is_alive(), ((timeout_secs is None) or (time.time() < timeout))]):
time.sleep(0.010)
return not self.is_alive()
def run(self):
"""
The template callable executed by the thread. It implements cooperative pause/restart and kill features.
It acts as the superloop that repeatedly calls the user task.
:return: None
"""
if self.ENABLE_DEBUG_OUTPUT:
print("<{:s} Setup>".format(self.name), flush=True)
try:
# Loop until killed
while not self._killthread_event.is_set():
try:
# If cooperative pause is signaled, handle it here
was_paused = self._handle_pause_request()
# If resuming from a pause, proceed back to top of loop to test the kill condition to
# allow terminating a paused thread.
if was_paused:
continue
# ** Execute Task **
task_running = True
try:
task_running = self.task(self.task_storage)
except Exception as ex:
if self.ENABLE_DEBUG_OUTPUT:
print("Exception in task: "+str(ex), flush=True)
# If not ignoring task exceptions, raise and kill thread
if not self.ignore_task_exceptions:
raise Exception("Exception raised from task with 'ignore_task_exceptions' False.") from ex
# Check if task requests thread termination
if not task_running:
# Directly set kill event
self._killthread_event.set()
except Exception as ex:
raise
finally:
if self.ENABLE_DEBUG_OUTPUT:
print("\n<{:s} Cleanup>".format(self.name), flush=True)
@staticmethod
def task(storage: dict) -> bool:
"""
The periodic work to be done by the thread. This stub routine is replaced by the task callable passed
by the user when the initial object is created. Blocking affects the responsiveness to cooperative
pause and kill signals; however, at least some small sleep delay (ex: time.sleep(0.010) ) or IO blocking
should be included to allow opportunities for context-switches for other threads. Note that changes made to
'storage' persist across each invocation of task for the life of the thread.
:param storage: Dict to provide persistent, mutable task variable storage.
:returns: True - continue to run; False - kill thread
"""
# **********************************
# ** This is an example task body **
# **********************************
# Demonstrate the use of the 'storage' parameter
symbol = storage.get("symbol", ".")[0] # Get symbol if passed during thread creation, otherwise use '.'
count = storage.get("count", 0) # Demonstrate persistent variable storage across calls to task
# Do something observable
print(symbol, sep=" ", end="", flush=True)
# Line wrap after 25 symbols and update 'count' value in storage
if count > 24:
storage["count"] = 0
print()
else:
storage["count"] = count + 1
# Sleep allows other threads to execute so as not to hog the processor, but it also controls the responsiveness
# of this thread to commands like pause and kill.
time.sleep(.25)
# True signals thread to continue running
return True
def _handle_pause_request(self) -> bool:
"""
Called from run function to include/implement cooperative pause functionality.
:return: True - a pause occurred; False - no pause occurred
"""
# Test for cooperative pause and sleep here if pause indicated
pause_occurred = False
if not self._do_run_event.is_set():
pause_occurred = True
self._is_paused_event.set() # Set flag indicating that cooperative pause is taking effect now
self._do_run_event.wait() # Sleeps here until '_do_run_event' is set
self._is_paused_event.clear() # After thread is re-awakened, indicate no longer paused
return pause_occurred
``` |
{
"source": "300wFtpRider/hw1-decision-trees",
"score": 3
} |
#### File: hw1-decision-trees/tests/test_metrics.py
```python
import numpy as np
from .test_utils import make_fake_data
def test_accuracy():
from sklearn.metrics import accuracy_score
from code import accuracy
y_true, y_pred = make_fake_data()
_actual = accuracy_score(y_true, y_pred)
_est = accuracy(y_true, y_pred)
assert np.allclose(_actual, _est)
def test_f1_measure():
from sklearn.metrics import f1_score
from code import f1_measure
y_true, y_pred = make_fake_data()
_actual = f1_score(y_true, y_pred)
_est = f1_measure(y_true, y_pred)
assert np.allclose(_actual, _est)
def test_precision_and_recall():
from sklearn.metrics import precision_score, recall_score
from code import precision_and_recall
y_true, y_pred = make_fake_data()
_actual = [precision_score(y_true, y_pred), recall_score(y_true, y_pred)]
_est = precision_and_recall(y_true, y_pred)
assert np.allclose(_actual, _est)
def test_confusion_matrix():
from sklearn.metrics import confusion_matrix as ref_confusion_matrix
from code import confusion_matrix as est_confusion_matrix
y_true, y_pred = make_fake_data()
_actual = ref_confusion_matrix(y_true, y_pred)
_est = est_confusion_matrix(y_true, y_pred)
assert np.allclose(_actual, _est)
``` |
{
"source": "3013216027/live_count",
"score": 3
} |
#### File: live_count/live_count/panda.py
```python
import json
import requests
from settings import DEBUG
class Panda(object):
"""
检查熊猫TV全平台观众人数
"""
@staticmethod
def fetch(url):
"""
=.=
"""
html_data = requests.get(url)
html_data = html_data.content
html_data = json.loads(html_data)
return html_data['data']
@staticmethod
def work():
"""
o.o
"""
url = 'http://www.panda.tv/ajax_sort?token=6f53df41fbc278569c8dfe4e7f5b2b09&pageno={pageno}&pagenum=120&classification={cla}&_=1484831614936'
classifications = ['lol', 'yzdr', 'overwatch', 'hwzb', 'hearthstone', 'zhuji', 'deadbydaylight', 'starve', 'dota2', 'war3', 'dnf', 'cf', 'wow', 'csgo', 'diablo3', 'heroes', 'spg', 'mc', 'ftg', 'kof97', 'jxol3', 'tymyd', 'liufang', 'hjjd', 'pokemon', 'popkart', 'foreigngames', 'starcraft', 'wy', 'music', 'shoot', 'pets', 'kingglory', 'ro', 'yys', 'mobilegame', 'fishes', 'clashroyale', 'qipai', 'boardgames', 'cartoon', 'technology', 'finance']
sum_fans = 0
for cla in classifications:
if DEBUG:
print('check PandaTV.%s' % cla)
total = int(Panda.fetch(url.format(cla=cla, pageno=1))['total'])
cur = 0
pageno = 1
while cur < total:
data = Panda.fetch(url.format(cla=cla, pageno=pageno))
items = data['items']
# print(json.dumps(items, ensure_ascii=False, indent=2))
for room in items:
# print(room['id'], room['person_num'])
sum_fans += int(room['person_num'])
pageno += 1
cur += 120
return sum_fans
if __name__ == '__main__':
print('PandaTV = %s' % Panda.work())
``` |
{
"source": "3016203099/tacotron",
"score": 2
} |
#### File: 3016203099/tacotron/hparams.py
```python
from text import symbols
class Hparams:
def __init__(self):
################################
# Experiment Parameters #
################################
self.epochs = 500
self.iters_per_checkpoint = 1000
self.iters_per_validation = 1000
self.seed = 1234
self.dynamic_loss_scaling = True
self.fp16_run = False
self.distributed_run = False
self.cudnn_enabled = True
self.cudnn_benchmark = False
self.ignore_layers = ["embedding.weight"]
################################
# Data Parameters #
################################
self.training_files = "DATASET/train.csv.txt"
self.validation_files = "DATASET/val.csv.txt"
self.text_cleaners = ["basic_cleaners"]
self.symbols_lang = "en" # en: English characters; py: Chinese Pinyin symbols
################################
# Model Parameters #
################################
self.tacotron_version = "2" # 1: Tacotron; 2: Tacotron-2
self.tacotron_config = "tacotron2.json"
self.num_symbols = len(symbols(self.symbols_lang))
self.symbols_embed_dim = 512
self.mel_dim = 80
self.r = 3
self.max_decoder_steps = 1000
self.stop_threshold = 0.5
################################
# Optimization Hyperparameters #
################################
self.use_saved_learning_rate = False
self.learning_rate = 1e-3
self.weight_decay = 1e-6
self.grad_clip_thresh = 1.0
self.batch_size = 32
self.mask_padding = True # set model's padded outputs to padded values
def __str__(self):
return "\n".join(
["Hyper Parameters:"]
+ ["{}:{}".format(key, getattr(self, key, None)) for key in self.__dict__]
)
def create_hparams():
"""Create model hyperparameters. Parse nondefault from object args."""
return Hparams()
``` |
{
"source": "303sec/git-fingerprint",
"score": 2
} |
#### File: 303sec/git-fingerprint/set_repo_path.py
```python
import argparse
import os
# define the inputs and help for the findextensions command
set_repo_path_parser = argparse.ArgumentParser()
set_repo_path_parser.add_argument("path", help='local path to target git repository')
# return the argparse where required
def get_argparse():
return set_repo_path_parser
```
#### File: 303sec/git-fingerprint/utils.py
```python
import subprocess
import hashlib
import os
import requests
import colorama
import click
import tempfile
import shutil
import itertools
from colorama import Fore, Back, Style
from tqdm import tqdm
from operator import itemgetter
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import as_completed
# My classes
import globalvars
# Print error messages in RED. Brutal style
def print_error(msg):
print(Fore.RED + msg + Style.RESET_ALL)
# return the sha1 hash of a file
def get_sha1_of_file(filename):
h = hashlib.sha1()
with open(filename, 'rb', buffering=0) as f:
for b in iter(lambda : f.read(128*1024), b''):
h.update(b)
return h.hexdigest()
# Excute an OS command and get the output as a string
# Supresses stderr.
def exec_cmd_get_stdout(cmd):
with open(os.devnull,"w") as devnull:
output = subprocess.run(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=devnull,
universal_newlines=True)
answer = output.stdout.strip()
return answer
# Find the commit version of the file if possible
def find_commit_version(file_list, tmpdir):
file = file_list[0]
commit_count= file_list[1]
(local_folder, filename) = os.path.split(file)
(meh, file_extension) = os.path.splitext(filename)
# Ignore this file
if file_extension in globalvars.ignore_extensions:
return None
tqdm.write("[*] Checking " + file + " : " + str(commit_count))
#### TODO MEGA HACKY, this makes my relative path ../ go away to make it work
# This only works if the local folder path is one directory higher than where git-version is
# Mega horrible.
folder = local_folder[3:]
if "/" in folder:
folder = folder[folder.find("/"):len(folder)]
folder = folder + "/"
#print("target_url: " + globalvars.target_url)
url = ""
# If the file is in the root of the git folder there was a bug meaning the wrong URL was accessed
if folder.count("/") == 1:
url = globalvars.target_url + "/" + filename
else:
url = globalvars.target_url + "/" + folder + filename
tmpfolder = tmpdir + "/" + folder
# Make all folders in path if they don't already exist
if os.path.exists(tmpfolder) == False:
os.makedirs(tmpfolder)
tmpfile = tmpfolder + filename
tqdm.write("[*] Trying URL: " + url)
r = requests.get(url, verify=False)
if r.status_code != 200:
tqdm.write(Fore.RED + "[*] Failed to fetch file with error code: " + str(r.status_code) + Style.RESET_ALL)
#tqdm.write("Ignoring files with extension '" + file_extension + "'" )
#if file_extension not in globalvars.ignore_extensions and file_extension != "":
# globalvars.ignore_extensions.append(file_extension)
#if click.confirm("Error code for file with extension '" + file_extension + "'. Ignore all files with that extension? "):
# globalvars.ignore_extensions.append(file_extension)
else:
open(tmpfile, 'wb').write(r.content)
#print("[*] 200 OK, content saved to: " + tmpfile)
# This is the sha1 of the file contents of the downloaded file
downloaded_sha1 = get_sha1_of_file(tmpfile)
# cmd is the full shell command to get the list of SHA1 hashes for commits
cmd = "(cd " + local_folder + " ; git log " + filename + " | grep '^commit ' | cut -d \" \" -f 2)"
# answer is a list of SHA1 hashes one per line
answer = exec_cmd_get_stdout(cmd)
lines = answer.split()
count = 0
found = False
# We need to loop through the commit history, checkout each version, and compare hashes.
for commit_sha1 in lines:
count = count + 1
real_repo_path = ""
# If the file is in the root of the git folder there was a bug meaning the wrong folder is used in this command
if folder.count("/") == 1:
real_repo_path = filename
else:
real_repo_path = folder + filename
# command reverts a file back to a pevious commit
cmd = "(cd " + globalvars.repo_path + " ; git checkout " + commit_sha1 + " ./" + real_repo_path + ")"
#print("\t" + cmd)
# answer is not important here. If it errors stderr will be displayed. No errors == chill.
answer = exec_cmd_get_stdout(cmd)
# Coping with files in the root of the repo which triggered a bug
repofile = ""
if folder.count("/") == 1:
repofile = globalvars.repo_path + "/" + filename
else:
repofile = globalvars.repo_path + folder + filename
# calculate SHA1 of file from the repo
repo_sha1 = get_sha1_of_file(repofile)
# Check if we have a match
if downloaded_sha1 == repo_sha1:
# Get commit date
cmd = "(cd " + globalvars.repo_path + "; git show -s --format=%cd " + commit_sha1 + " --date=format:'%Y-%b-%d %H:%M:%S' )"
answer = exec_cmd_get_stdout(cmd)
msg = (Fore.GREEN if count == 1 else Fore.RED) + "MATCH FOUND [" + str(count) + " of " + str(len(lines)) + "] commited on: " + answer + Style.RESET_ALL
tqdm.write( msg )
# We have a matched file which is outdated so add it to the evidence
#if count != 1:
globalvars.outdated_files.append([file, commit_sha1, count, len(lines), answer])
found = True
#return None
if found == False:
tqdm.write( "[*] File match NOT found. Possibly modified by user." )
``` |
{
"source": "30440r/Macmoji",
"score": 3
} |
#### File: Macmoji/scripts/update-output.py
```python
from __future__ import unicode_literals
import binascii
import io
import json
import os
import plistlib
import sys
import unicodedata
import zipfile
def make_random_uid():
return binascii.b2a_hex(os.urandom(15))
def titlecase_phrase(phrase):
return ' '.join([word.title() for word in phrase])
def make_emoji_name(emoji, shortcut):
try:
return titlecase_phrase(unicodedata.name(emoji).split())
except (ValueError, TypeError):
return titlecase_phrase(shortcut.strip(':').split('_'))
def fill_single_file_templates(settings, emoji_substitutions, scripts_directory):
formatted_data = {}
for item in emoji_substitutions:
emoji, placeholder = item['phrase'], item['shortcut']
for setting in settings:
if setting['name'] in formatted_data:
formatted_data[setting['name']] += setting['data_format'].format(
placeholder=placeholder, emoji=emoji)
else:
formatted_data[setting['name']] = setting['data_format'].format(
placeholder=placeholder, emoji=emoji)
# Fill out each template
for setting in settings:
template_file = os.path.join(
scripts_directory,
'../templates/{}.template'.format(setting['name'])
)
with io.open(template_file, 'r', encoding='utf8') as s:
template = s.read()
with io.open('../' + setting['name'], 'w', encoding='utf8') as f:
f.write(template.format(formatted_data[setting['name']]))
def generate_alfred_snippets(emoji_substitutions):
alfred_outfile = '../Emoji.alfredsnippets'
with zipfile.ZipFile(alfred_outfile, 'w') as snippets:
for item in emoji_substitutions:
phrase, shortcut = item['phrase'], item['shortcut']
uid = make_random_uid()
name = make_emoji_name(phrase, shortcut)
snippets.writestr(
'{} [{}].json'.format(name, uid),
json.dumps({
"alfredsnippet" : {
"snippet" : phrase,
"uid" : uid,
"name" : name,
"keyword" : shortcut
}
})
)
def main():
# Gets the script's directory so we can access files relative to it
scripts_directory = os.path.dirname(os.path.realpath(__file__))
settings_file_dir = os.path.join(scripts_directory, 'settings.json')
with open(settings_file_dir) as settings_file:
settings = json.load(settings_file)
# Load in the canonical emoji substitutions
emoji_substitutions_file = os.path.join(scripts_directory, '../emojiSubstitutions.plist')
emoji_substitutions = plistlib.readPlist(emoji_substitutions_file)
generate_alfred_snippets(emoji_substitutions)
fill_single_file_templates(settings, emoji_substitutions, scripts_directory)
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "305120262/ArcGISServerManageTools",
"score": 3
} |
#### File: 305120262/ArcGISServerManageTools/updatesoe.py
```python
from __future__ import print_function
# 3rd-party library: requests
# http://docs.python-requests.org/en/latest/
# pip install requests
import requests
PROTOCOL = "https://"
HOST = "www.seapc.com"
USER = "portaladmin"
PASSWORD = "<PASSWORD>"
# note that services are suffixed by type when passed to admin REST API
SERVICES = [r"MajorCity.MapServer"]
# path to rebuilt SOE file
SOE_FILE = r"D:\市场活动\2016广州研讨会\数据及源码\时态地图\NetTimeServiceSOI\bin\Debug\NetTimeServiceSOI.soe"
class AGSRestError(Exception): pass
class ServerError(Exception): pass
def _validate_response(response):
""" Tests response for HTTP 200 code, tests that response is json,
and searches for typical AGS error indicators in json.
Raises an exception if response does not validate successfully.
"""
if not response.ok:
raise ServerError("Server Error: {}".format(response.text))
try:
response_json = response.json()
if "error" in response_json:
raise AGSRestError(response_json["error"])
if "status" in response_json and response_json["status"] != "success":
error = response_json["status"]
if "messages" in response_json:
for message in response_json["messages"]:
error += "\n" + message
raise AGSRestError(error)
except ValueError:
print(response.text)
raise ServerError("Server returned HTML: {}".format(response.text))
def _get_token(username, password):
""" Returns token from server """
token_url = "{protocol}{host}/ags/tokens/".format(
protocol=PROTOCOL, host=HOST)
data = { "f": "json",
"username": username,
"password": password,
"client": "requestip",
"expiration": 5 }
response = requests.post(token_url, data)
_validate_response(response)
token = response.json()['token']
return token
def _upload_soe_file(soe_path, token):
""" Uploads .soe file to ArcGIS Server and returns itemID from
uploaded file
"""
upload_url = "{protocol}{host}/ags/admin/uploads/upload?f=json".format(
protocol=PROTOCOL, host=HOST)
with open(soe_path, 'rb') as soe_file:
files = {'itemFile': soe_file}
data = {
"token": token
}
response = requests.post(upload_url, data, files=files)
_validate_response(response)
response_json = response.json()
item_id = response_json['item']['itemID']
return item_id
def _update_soe(item_id, token):
""" Updates SOE based on uploaded files itemID """
update_url = "{protocol}{host}/arcgis/admin/services/types/extensions/update".format(
protocol=PROTOCOL, host=HOST)
data = {
"f": "json",
"token": token,
"id": item_id
}
response = requests.post(update_url, data)
_validate_response(response)
def _start_services(services, token):
""" starts ArcGIS Server services """
start_services_url = "{protocol}{host}/arcgis/admin/services/{service}/start"
for service in services:
url = start_services_url.format(protocol=PROTOCOL,
host=HOST,
service=service)
print("Starting {}".format(service))
data = {
"f": "json",
"token": token,
}
response = requests.post(url, data)
_validate_response(response)
print("Started!")
if __name__ == "__main__":
print("Retrieving token...")
token = _get_token(USER, PASSWORD)
print("Retrieved: {}".format(token))
print("Uploading SOE...")
item_id = _upload_soe_file(SOE_FILE, token)
print("Uploaded: {}".format(item_id))
print("Updating SOE...")
_update_soe(item_id, token)
print("Updated!")
print("Starting services...")
_start_services(SERVICES, token)
``` |
{
"source": "307509256/alphagozero",
"score": 2
} |
#### File: 307509256/alphagozero/engine.py
```python
from symmetry import random_symmetry_predict
from math import sqrt
import numpy as np
import numpy.ma as ma
from numpy.ma.core import MaskedConstant
from conf import conf
from play import (
legal_moves, index2coord, make_play,
coord2index,
)
SIZE = conf['SIZE']
MCTS_BATCH_SIZE = conf['MCTS_BATCH_SIZE']
DIRICHLET_ALPHA = conf['DIRICHLET_ALPHA']
DIRICHLET_EPSILON = conf['DIRICHLET_EPSILON']
RESIGNATION_PERCENT = conf['RESIGNATION_PERCENT']
RESIGNATION_ALLOWED_ERROR = conf['RESIGNATION_ALLOWED_ERROR']
COLOR_TO_PLAYER = {'B': 1, 'W': -1}
Cpuct = 1
def isplane(x,y,z):
if x==0 or x==SIZE-1:
return True
if y==0 or y==SIZE-1:
return True
if z==0 or z==SIZE-1:
return True
return False
def new_subtree(policy, board, parent, add_noise=False):
leaf = {}
# We need to check for legal moves here because MCTS might not have expanded
# this subtree
mask = legal_moves(board)
policy = ma.masked_array(policy, mask=mask)
# Add Dirichlet noise.
tmp = policy.reshape(-1)
if add_noise:
noise = np.random.dirichlet([DIRICHLET_ALPHA for i in range(tmp.shape[0])])
tmp = (1 - DIRICHLET_EPSILON) * tmp + DIRICHLET_EPSILON * noise
parent_move = parent['move']
parent_player = board[0, 0, 0, 0, -1]
assert (parent_player == 1 and parent_move % 2 == 1) or (parent_player == -1 and parent_move % 2 == 0)
move = parent_move + 1
for action, p in enumerate(tmp):
if isinstance(p, MaskedConstant):
continue
leaf[action] = {
'count': 0,
'value': 0,
'mean_value': 0,
'p': p,
'subtree':{},
'parent': parent,
'move': move,
}
return leaf
def top_n_actions(subtree, top_n):
total_n = sqrt(sum(dic['count'] for dic in subtree.values()))
if total_n == 0:
total_n = 1
# Select exploration
max_actions = []
for a, dic in subtree.items():
u = Cpuct * dic['p'] * total_n / (1. + dic['count'])
v = dic['mean_value'] + u
if len(max_actions) < top_n or v > max_actions[-1]['value']:
max_actions.append({'action': a, 'value': v, 'node': dic})
max_actions.sort(key=lambda x: x['value'], reverse=True)
if len(max_actions) > top_n:
max_actions = max_actions[:-1]
return max_actions
def simulate(node, board, model, mcts_batch_size, original_player):
node_subtree = node['subtree']
max_actions = top_n_actions(node_subtree, mcts_batch_size)
max_a = max_actions[0]['action']
selected_action = max_a
selected_node = node_subtree[selected_action]
if selected_node['subtree'] == {}:
# This is a leaf
boards = np.zeros((mcts_batch_size, SIZE, SIZE, SIZE, 17), dtype=np.float32)
for i, dic in enumerate(max_actions):
action = dic['action']
if dic['node']['subtree'] != {}:
# already expanded
tmp_node = dic['node']
tmp_action = action
tmp_board = np.copy(board)
x, y, z = index2coord(tmp_action)
if isplane(x, y, z) == False:
continue
tmp_board, _ = make_play(x, y, z, tmp_board)
while tmp_node['subtree'] != {}:
tmp_max_actions = top_n_actions(tmp_node['subtree'], mcts_batch_size)
tmp_d = tmp_max_actions[0]
tmp_node = tmp_d['node']
tmp_action = tmp_d['action']
# The node for this action is the leaf, this is where the
# update will start, working up the tree
dic['node'] = tmp_node
x, y, z = index2coord(tmp_action)
if isplane(x, y, z) == False:
print ("while: ", x, y, z)
make_play(x, y, z, tmp_board)
boards[i] = tmp_board
else:
tmp_board = np.copy(board)
x, y, z = index2coord(action)
if isplane(x, y, z) == True:
tmp_board, _ = make_play(x, y, z, tmp_board)
boards[i] = tmp_board
# The random symmetry will changes boards, so copy them before hand
presymmetry_boards = np.copy(boards)
policies, values = random_symmetry_predict(model, boards)
for i, (policy, v, tmp_board, action) in enumerate(zip(policies, values, presymmetry_boards, max_actions)):
shape = tmp_board.shape
tmp_board = tmp_board.reshape([1] + list(shape))
player = tmp_board[0,0,0,0,-1]
if player == 0:
continue
# Inverse value if we're looking from other player perspective
value = v[0] if player == original_player else -v[0]
leaf_node = action['node']
subtree = new_subtree(policy, tmp_board, leaf_node)
move = leaf_node['move']
assert (player == 1 and move % 2 == 1) or (player == -1 and move % 2 == 0)
leaf_node['subtree'] = subtree
current_node = leaf_node
while True:
current_node['count'] += 1
current_node['value'] += value
current_node['mean_value'] = current_node['value'] / float(current_node['count'])
if current_node['parent']:
current_node = current_node['parent']
else:
break
else:
x, y, z = index2coord(selected_action)
make_play(x, y, z, board)
simulate(selected_node, board, model, mcts_batch_size, original_player)
def mcts_decision(policy, board, mcts_simulations, mcts_tree, temperature, model):
for i in range(int(mcts_simulations/MCTS_BATCH_SIZE)):
test_board = np.copy(board)
original_player = board[0,0,0,0,-1]
simulate(mcts_tree, test_board, model, MCTS_BATCH_SIZE, original_player)
if temperature == 1:
total_n = sum(dic['count'] for dic in mcts_tree['subtree'].values())
moves = []
ps = []
for move, dic in mcts_tree['subtree'].items():
n = dic['count']
if not n:
continue
p = dic['count'] / float(total_n)
moves.append(move)
ps.append(p)
selected_a = np.random.choice(moves, size=1, p=ps)[0]
elif temperature == 0:
_, _, selected_a = max((dic['count'], dic['mean_value'], a) for a, dic in mcts_tree['subtree'].items())
return selected_a
def select_play(policy, board, mcts_simulations, mcts_tree, temperature, model):
mask = legal_moves(board)
policy = ma.masked_array(policy, mask=mask)
index = mcts_decision(policy, board, mcts_simulations, mcts_tree, temperature, model)
x, y, z = index2coord(index)
return index
class Tree(object):
def __init__(self):
self.tree = None
def new_tree(self, policy, board, move=1, add_noise=False):
mcts_tree = {
'count': 0,
'value': 0,
'mean_value': 0,
'p': 1,
'subtree':{},
'parent': None,
'move': move,
}
subtree = new_subtree(policy, board, mcts_tree, add_noise=add_noise)
mcts_tree['subtree'] = subtree
self.tree = mcts_tree
return mcts_tree
def play(self, index):
if self.tree and index in self.tree['subtree']:
self.tree = self.tree['subtree'][index]
self.tree['parent'] = None # Cut the tree
else:
self.tree = None
class ModelEngine(object):
def __init__(self, model, mcts_simulations, board, resign=None, temperature=0, add_noise=False):
self.model = model
self.mcts_simulations = mcts_simulations
self.resign = resign
self.temperature = temperature
self.board = board
self.player = board[0, 0, 0, 0, -1]
self.add_noise = add_noise
self.tree = Tree()
self.move = 1
def set_temperature(self, temperature):
self.temperature = temperature
def play(self, color, x, y, z, update_tree=True):
index = coord2index(x, y, z)
if update_tree:
self.tree.play(index)
self.board, self.player = make_play(x, y, z, self.board)
self.move += 1
return self.board, self.player
def genmove(self, color):
announced_player = COLOR_TO_PLAYER[color]
assert announced_player == self.player
policies, values = self.model.predict_on_batch(self.board)
policy = policies[0]
value = values[0]
if self.resign and value <= self.resign:
x = 0
y = 0
z = SIZE + 1
return x, y, z, policy, value, self.board, self.player, policy
# Start of the game mcts_tree is None, but it can be {} if we selected a play that mcts never checked
if not self.tree.tree or not self.tree.tree['subtree']:
self.tree.new_tree(policy, self.board, move=self.move, add_noise=self.add_noise)
index = select_play(policy, self.board, self.mcts_simulations, self.tree.tree, self.temperature, self.model)
x, y, z= index2coord(index)
policy_target = np.zeros(SIZE*SIZE*SIZE + 1)
for _index, d in self.tree.tree['subtree'].items():
policy_target[_index] = d['p']
self.board, self.player = self.play(color, x, y, z)
return x, y, z, policy_target, value, self.board, self.player, policy
``` |
{
"source": "30bohdan/tencompl",
"score": 2
} |
#### File: 30bohdan/tencompl/config.py
```python
import numpy as np
from functools import partial
from models import ALS_NN, LM_completion
from utils import read_yuv2rgb, read_yuv2gray
def generate_config(
dataset, ranks, n_frames,
dim_y, dim_z, max_iter, lambda_=1,
n_entries=None, portions=None,
n_val_entries=10000, n_test_entries=10000,
methods="Kron-Altmin-LiuMoitra",
predict_frames=None, noisy=None,
randominit=True, true_rank=None,
fix_mu=False, momentum=None
):
if portions is None and n_enries is None:
raise Exception("Config is unvalid")
# if p is not None:
# n_entries = [int(dim_x*dim_y*dim_z*p) for p in portions]
config = {
"dataset": dataset,
"ranks": ranks,
"n_frames": n_frames,
"dim_y": dim_y,
"dim_z": dim_z,
"portions": portions,
"n_val_entries": n_val_entries,
"n_test_entries": n_test_entries,
"predict_frames": predict_frames,
"methods": methods,
"max_iter": max_iter,
"noisy": noisy,
"randominit": randominit,
"lambda": lambda_,
"fix_mu": fix_mu,
"momentum": momentum,
}
return config
datasets = {
"artificial":None,
"akiyo": read_yuv2gray(
height=144,
width=176,
n_frames=300,
file_name='akiyo_qcif.yuv',
file_dir='data/'
),
"bus": read_yuv2gray(
height=288,
width=352,
n_frames=150,
file_name='bus_cif.yuv',
file_dir='data/'
),
"bridge": read_yuv2gray(
height=144,
width=176,
n_frames=2001,
file_name='bridge-close_qcif.yuv',
file_dir='data/'
),
}
experiment_configs = {
"experiment1": generate_config(
dataset="akiyo",
methods=["ALS_NN"],
ranks=[(5, None), (8, None)],
n_frames=[50, 70],
dim_y=144,
dim_z=176,
portions=[0.02, 0.03, 0.05, 0.075],
n_val_entries=5000,
n_test_entries=10000,
predict_frames=[0, 10, 20],
max_iter=50,
noisy=None,
randominit=True,
lambda_=1
),
"experiment2": generate_config(
dataset="akiyo",
methods=["Kron-Altmin-LiuMoitra"],
ranks=[(5, None), (8, None)],
n_frames=[50, 70],
dim_y=144,
dim_z=176,
portions=[0.02, 0.03, 0.05, 0.075],
n_val_entries=5000,
n_test_entries=10000,
predict_frames=[0, 10, 20],
max_iter=50,
noisy=None,
randominit=True,
lambda_=1
),
"experiment3": generate_config(
dataset="akiyo",
methods=["ALS_NN"],
ranks=[(5, None), (8, None)],
n_frames=[50, 70],
dim_y=144,
dim_z=176,
portions=[0.02, 0.03, 0.05, 0.075],
n_val_entries=5000,
n_test_entries=10000,
predict_frames=[0, 10, 20],
max_iter=50,
noisy=None,
randominit=False,
lambda_=1
),
"experiment4": generate_config(
dataset="akiyo",
methods=["Kron-Altmin-LiuMoitra", "ALS_NN"],
ranks=[(5, None), (8, None)],
n_frames=[50, 70],
dim_y=144,
dim_z=176,
portions=[0.02, 0.03, 0.05, 0.075],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0, 10, 20],
max_iter=50,
noisy=None,
randominit=False,
lambda_=1
),
"target1": generate_config(
dataset="akiyo",
methods=["Kron-Altmin-LiuMoitra", "ALS_NN", "ALS"],
ranks=[(5, None), (8, None)],
n_frames=[50, 70],
dim_y=144,
dim_z=176,
portions=[0.02, 0.03, 0.05, 0.075, 0.1],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0, 20, -20, -1],
max_iter=50,
noisy=None,
randominit=False,
lambda_=1
),
"target2": generate_config(
dataset="artificial",
methods=["Kron-Altmin-LiuMoitra", "ALS_NN", "ALS"],
ranks=[(10, 10), (15, 10), (20, 10)],
n_frames=[100],
dim_y=100,
dim_z=100,
portions=[0.02, 0.025, 0.03, 0.035, 0.04],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0, 20, -20, -1],
max_iter=50,
noisy=None,
randominit=False,
lambda_=1
),
"target3": generate_config(
dataset="artificial",
methods=["ALS_NN", "ALS"],
ranks=[(15, 15), (20, 15), (25, 15)],
n_frames=[100],
dim_y=100,
dim_z=100,
portions=[0.02, 0.025, 0.03, 0.035, 0.04],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0, 20, -20, -1],
max_iter=50,
noisy=None,
randominit=False,
lambda_=1
),
"target4": generate_config(
dataset="akiyo",
methods=["ALS_NN", "ALS"],
ranks=[(15, None), (20, None), (25, None)],
n_frames=[50, 70],
dim_y=144,
dim_z=176,
portions=[0.05, 0.075, 0.1, 0.125, 0.15, 0.175, 0.2, 0.3],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0, 20, -20, -1],
max_iter=50,
noisy=None,
randominit=False,
lambda_=1
),
"target5": generate_config(
dataset="artificial",
methods=["ALS_NN", "ALS"],
ranks=[(15, 10), (20, 10)],
n_frames=[100],
dim_y=100,
dim_z=100,
portions=[0.02, 0.025, 0.03, 0.035, 0.04],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0, 20, -20, -1],
max_iter=50,
noisy=None,
randominit=True,
lambda_=1
),
"target6": generate_config(
dataset="artificial",
methods=["ALS_NN", "ALS"],
ranks=[(15, 15), (20, 20), (20, 15), (25, 20)],
n_frames=[200],
dim_y=150,
dim_z=100,
portions=[0.03, 0.035, 0.04, 0.045, 0.05],
n_val_entries=5000,
n_test_entries=15000,
predict_frames=[0],
max_iter=50,
noisy=None,
randominit=False,
lambda_=1
),
"target7": generate_config(
dataset="artificial",
methods=["ALS_NN", "ALS"],
ranks=[(70, 70), (80, 70), (70, 60)],
n_frames=[50],
dim_y=50,
dim_z=50,
portions=[0.15, 0.2, 0.225, 0.25, 0.3],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0],
max_iter=50,
noisy=None,
randominit=True,
lambda_=1
),
#TODO
"target8": generate_config(
dataset="artificial",
methods=["ALS_NN", "ALS"],
ranks=[(10, 10), (15, 10), (20, 20), (25, 20)],
n_frames=[100],
dim_y=100,
dim_z=100,
portions=[0.05, 0.075, 0.1, 0.125, 0.15, 0.175],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0],
max_iter=100,
noisy=0.1,
randominit=True,
lambda_=1
),
"target9": generate_config(
dataset="artificial",
methods=["Kron-Altmin-LiuMoitra"],
ranks=[(10, 10), (15, 10)],
n_frames=[100],
dim_y=100,
dim_z=100,
portions=[0.05, 0.075, 0.1, 0.125, 0.15, 0.175],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0],
max_iter=100,
noisy=0.1,
randominit=True,
lambda_=1
),
#TODO
"target10": generate_config(
dataset="artificial",
methods=["ALS_NN", "ALS"],
ranks=[(70, 70), (80, 70), (70, 60)],
n_frames=[50],
dim_y=50,
dim_z=50,
portions=[0.15, 0.2, 0.225, 0.25, 0.3],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0],
max_iter=100,
noisy=0.5,
randominit=True,
lambda_=1
),
#TODO
"target11": generate_config(
dataset="bus",
methods=["ALS_NN", "ALS"],
ranks=[(15, None), (20, None), (25, None)],
n_frames=[50, 70],
dim_y=288,
dim_z=352,
portions=[0.01, 0.02, 0.03, 0.05, 0.075, 0.1],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0, 20, -20, -1],
max_iter=75,
noisy=None,
randominit=False,
lambda_=1
),
"target12": generate_config(
dataset="bus",
methods=["ALS_NN", "ALS"],
ranks=[(30, None), (40, None), (50, None)],
n_frames=[50, 70],
dim_y=288,
dim_z=352,
portions=[0.2, 0.3, 0.4],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0, 20, -20, -1],
max_iter=75,
noisy=None,
randominit=False,
lambda_=1
),
"target13": generate_config(
dataset="bridge",
methods=["Kron-Altmin-LiuMoitra", "ALS_NN", "ALS"],
ranks=[(5, None), (8, None)],
n_frames=[50, 70],
dim_y=144,
dim_z=176,
portions=[0.02, 0.03, 0.05, 0.075, 0.1],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0, 20, -20, -1],
max_iter=50,
noisy=None,
randominit=False,
lambda_=1
),
"target14": generate_config(
dataset="bridge",
methods=["ALS_NN", "ALS"],
ranks=[(20, None), (25, None), (30, None)],
n_frames=[70],
dim_y=144,
dim_z=176,
portions=[0.05, 0.075, 0.15, 0.2, 0.25, 0.275, 0.3],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0, 20, -20, -1],
max_iter=50,
noisy=None,
randominit=False,
lambda_=1
),
"target15": generate_config(
dataset="bridge",
methods=["ALS_NN", "ALS"],
ranks=[(40, None), (50, None)],
n_frames=[70],
dim_y=144,
dim_z=176,
portions=[0.05, 0.075, 0.15, 0.2, 0.25, 0.275, 0.3],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0, 20, -20, -1],
max_iter=50,
noisy=None,
randominit=False,
lambda_=1
),
"target16": generate_config(
dataset="artificial",
methods=["ALS_NN", "ALS"],
ranks=[(100, 100), (150, 150), (110, 100), (160, 150)],
n_frames=[70],
dim_y=70,
dim_z=70,
portions=[0.2, 0.3, 0.4],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0],
max_iter=100,
noisy=0.5,
randominit=True,
lambda_=1
),
"target17": generate_config(
dataset="artificial",
methods=["ALS_NN", "ALS"],
ranks=[(110, 100), (160, 150)],
n_frames=[70],
dim_y=70,
dim_z=70,
portions=[0.2, 0.3, 0.4],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0],
max_iter=100,
noisy=None,
randominit=True,
lambda_=1
),
"target18": generate_config(
dataset="akiyo",
methods=["ALS_NN", "ALS"],
ranks=[(25, None), (30, None)],
n_frames=[70],
dim_y=144,
dim_z=176,
portions=[0.025, 0.05, 0.075],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0],
max_iter=50,
noisy=None,
randominit=True,
lambda_=1,
fix_mu=True
),
"target19": generate_config(
dataset="akiyo",
methods=["ALS_NN", "ALS"],
ranks=[(25, None), (30, None)],
n_frames=[70],
dim_y=144,
dim_z=176,
portions=[0.025, 0.05, 0.075],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0],
max_iter=50,
noisy=None,
randominit=True,
lambda_=1,
fix_mu=False,
momentum=0.8
),
"final": generate_config(
dataset="akiyo",
methods=["ALS_NN", "ALS"],
ranks=[(25, None), (30, None)],
n_frames=[70],
dim_y=144,
dim_z=176,
portions=[0.025, 0.05, 0.075],
n_val_entries=5000,
n_test_entries=5000,
predict_frames=[0],
max_iter=50,
noisy=None,
randominit=True,
lambda_=1,
fix_mu=False,
momentum=0.8
),
}
solvers = {
"Kron-Altmin-LiuMoitra": LM_completion,
"ALS_NN": ALS_NN,
"ALS": ALS_NN,
}
pyten_configs = {
"target1": {
"dataset": "akiyo",
"ranks": [(20, None)],
"portions": [0.05],
"init": ["eigs"],
"n_frames": [70],
"dim_y": 144,
"dim_z": 176,
"n_val_entries": 5000,
"n_test_entries": 5000,
"predict_frames": [0],
"max_iter": 100,
},
"test1": {
"dataset": "akiyo",
"ranks": [(25, None)],
"portions": [0.025, 0.05, 0.075],
"init": "random",
"n_frames": [70],
"dim_y": 144,
"dim_z": 176,
"n_val_entries": 5000,
"n_test_entries": 5000,
"predict_frames": [0],
"max_iter": 100,
},
}
```
#### File: 30bohdan/tencompl/main.py
```python
import random
import fire
import numpy as np
import wandb
from utils import get_tensor_entries
import config
from config import experiment_configs
import pdb
def main(experiment="experiment1", seed=13):
experiment_config = experiment_configs[experiment]
methods = experiment_config["methods"]
max_iter = experiment_config["max_iter"]
dataset_name = experiment_config["dataset"]
ranks = experiment_config["ranks"]
portions = experiment_config["portions"]
noisy = experiment_config["noisy"]
randominit = experiment_config["randominit"]
n_frames = experiment_config["n_frames"]
dim_y = experiment_config["dim_y"]
dim_z = experiment_config["dim_z"]
lambda_ = experiment_config.get("lambda", None)
fix_mu = experiment_config.get("fix_mu", False)
momentum = experiment_config.get("momentum", None)
n_val_entries = experiment_config["n_val_entries"]
n_test_entries = experiment_config["n_test_entries"]
predict_frames = experiment_config["predict_frames"]
dataset_full = config.datasets[dataset_name]
for dim_x in n_frames:
if dataset_full is not None:
dataset = dataset_full[:dim_x]
else:
dataset = None
for rank, true_rank in ranks:
for portion in portions:
for method in methods:
np.random.seed(seed)
random.seed(seed)
if method=="ALS":
lambda_ = 0
else:
lambda_ = experiment_config.get("lambda", None)
n = (dim_x, dim_y, dim_z)
n_entries = int(dim_x * dim_y * dim_z * portion)
# Set logger config
wandb_configs = {
"methods": method,
"num_frames": dim_x,
"height": dim_y,
"width": dim_z,
"dataset": dataset_name,
"rank": rank,
"true_rank": true_rank,
"portion": portion,
"n_entries": n_entries,
"lambda": lambda_,
"n_val_entries": n_val_entries,
"n_test_entries": n_test_entries,
"predict_frames": predict_frames,
"noisy": noisy,
"randominit": randominit,
"fix_mu": fix_mu,
"momentum": momentum,
}
group_name = f"Dim-{dim_x}x{dim_y}x{dim_z} dataset-{dataset_name} rank-{rank} portion-{portion}"
if dataset is None:
group_name += f" true_rank-{true_rank}"
logger = wandb.init(project='tensor-completion', entity='tensor-completion', group=group_name, reinit=True)
logger.config.update(wandb_configs)
run_name = "method: {}; randinit:{}; noisy:{}".format(
method, randominit, noisy
)
logger.name = run_name
entries_arr = get_tensor_entries(dataset, size=n_entries)
val_entries = get_tensor_entries(dataset, size=n_val_entries)
test_entries = get_tensor_entries(dataset, size=n_test_entries)
solver = config.solvers[method]
#init data
solver = solver(
n=n, rank=rank, n_entries=n_entries,
entries_arr=entries_arr, noisy=noisy,
randominit=randominit, true_rank=true_rank
)
if dataset is None:
val_entries = solver.get_entries(n_val_entries)
test_entries = solver.get_entries(n_test_entries)
solution = solver.fit(
test_entries=test_entries,
val_entries=val_entries,
logger=logger, max_iter=max_iter,
lam=lambda_, fix_mu=fix_mu,
momentum=momentum
)
pred = solver.predict(solution, predict_frames)
images = []
for image, idx_frame in zip(pred, predict_frames):
images.append(
wandb.Image(
image, caption=f"Frame #{idx_frame}; method: {method}; rank: {rank}; portion:{portion};"))
logger.log({"Visualize prediction:": images})
logger.finish()
if __name__=="__main__":
fire.Fire(main)
```
#### File: 30bohdan/tencompl/utils.py
```python
import os, sys, time
import random, functools, itertools
import cv2
import numpy as np
import matplotlib.pyplot as plt
def read_yuv2rgb(height, width, n_frames, file_name, file_dir=""):
file_path = os.path.join(file_dir, file_name)
yuv_data = np.fromfile(file_path, dtype='uint8')
yuv_data = yuv_data.reshape((n_frames, height*3//2, width))
rgb_data = np.empty((n_frames, height, width, 3), dtype=np.uint8)
for i in range(n_frames):
yuv_frame = yuv_data[i]
rgb_frame = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2RGB_I420)
rgb_data[i] = rgb_frame
return rgb_data
def read_yuv2gray(height, width, n_frames, file_name, file_dir=""):
file_path = os.path.join(file_dir, file_name)
yuv_data = np.fromfile(file_path, dtype='uint8')
yuv_data = yuv_data.reshape((n_frames, height*3//2, width))
gray_data = np.empty((n_frames, height, width), dtype=np.float)
for i in range(n_frames):
yuv_frame = yuv_data[i]
gray_frame = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2GRAY_I420)
gray_data[i] = gray_frame
return gray_data
def elapsed(last_time=[time.time()]):
""" Returns the time passed since elapsed() was last called. """
current_time = time.time()
diff = current_time - last_time[0]
last_time[0] = current_time
return diff
def get_tensor_entries(tensor, size, seed=None):
if tensor is None: return None
if seed is not None:
np.random.seed(seed)
random.seed(seed)
nx, ny, nz = tensor.shape
samples = np.random.choice(nx*ny*nz, size, replace=False)
x_coords = samples%nx
y_coords = ((samples - x_coords) // nx) % ny
z_coords = ((samples - nx*y_coords - x_coords) // (nx*ny)) % nz
val = tensor[x_coords, y_coords, z_coords]
return np.vstack((x_coords, y_coords, z_coords, val))
def sample_triples(size, nx, ny, nz):
samples = np.random.choice(nx*ny*nz, size, replace=False)
x_coords = samples%nx
y_coords = ((samples - x_coords) // nx) % ny
z_coords = ((samples - nx*y_coords - x_coords) // (nx*ny)) % nz
return (x_coords, y_coords, z_coords)
def normalize(v):
v_norm = v / np.linalg.norm(v)
return v_norm
def orthonormalize(v, inplace=False):
m = len(v)
n = len(v[0])
if not inplace:
v_new = np.copy(v)
else:
v_new = v
for i in range(m):
for j in range(i):
v_new[i] = v_new[i] - np.dot(v_new[i], v_new[j])*v_new[j]
v_new[i] = normalize(v_new[i])
return v_new
def compute_rse(pred, target, entries=None):
if entries is not None:
new_pred = pred[entries[0].astype(np.int), entries[1].astype(np.int), entries[2].astype(np.int)]
new_target = target[entries[0].astype(np.int), entries[1].astype(np.int), entries[2].astype(np.int)]
else:
new_pred = pred
new_target = target
error = np.linalg.norm(new_pred-new_target) / np.linalg.norm(new_target)
return error
class Logger():
def __init__(self):
self.logs = {}
def logs(self, log_dict, step=None):
for key, value in log_dict:
if key not in self.logs:
self.logs[key]['x'] = []
self.logs[key]['y'] = []
self.logs[key]['y'].append(value)
if step is not None:
idx = step
else:
idx = self.logs[key]['x'][-1] + 1
self.logs[key]['x'].append(idx)
def reset(self, log_name):
self.logs[log_name]['x'] = []
self.logs[log_name]['y'] = []
def plot(self, log_name, title=None, xlabel=None, ylabel=None):
title = title or ''
xlabel = xlabel or 'step'
ylabel = ylabel or log_name
plt.title(log_name)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
y = self.logs[log_name]['y']
x = self.logs[log_name]['x']
plt.plot(x, y)
plt.show()
``` |
{
"source": "30emeciel/auth0-firebase-token-exchange",
"score": 3
} |
#### File: 30emeciel/auth0-firebase-token-exchange/main.py
```python
import os
import requests
from box import Box
from core import firestore_client
from firebase_admin import auth
from google.api_core.exceptions import NotFound
from google.cloud.firestore_v1 import SERVER_TIMESTAMP
ERROR_REPORTING_API_KEY = os.environ["ERROR_REPORTING_API_KEY"]
db = firestore_client.db()
def from_request(request):
"""Responds to any HTTP request.
Args:
request (flask.Request): HTTP request object.
Returns:
The response text or any set of values that can be turned into a
Response object using
`make_response <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>`.
"""
# Set CORS headers for preflight requests
if request.method == 'OPTIONS':
# Allows GET requests from any origin with
# Authorization header
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'POST',
'Access-Control-Allow-Headers': 'content-type',
'Access-Control-Max-Age': '3600',
}
return '', 204, headers
from flask import abort
if request.method != 'POST':
return abort(405)
request_data = Box(request.get_json())
access_token = request_data.access_token
firebase_token = convert_auth0_token_to_firebase_token(access_token)
headers = {
'Access-Control-Allow-Origin': '*',
}
return ({
"firebase_token": firebase_token,
"error_reporting_api_key": ERROR_REPORTING_API_KEY,
}, 200, headers)
def convert_auth0_token_to_firebase_token(auth0_token):
# valid auth0_token and get user_profile
up = get_user_profile(auth0_token)
assert up.sub is not None
pax_id = up.pop("sub")
upset_user_profile_in_firestore(pax_id, up)
return create_firebase_token(pax_id)
def upset_user_profile_in_firestore(pax_id, user_profile):
user_profile_dict = user_profile.to_dict()
user_doc_ref = db.collection("pax").document(pax_id)
try:
user_doc_ref.update(user_profile_dict)
except NotFound:
user_profile_dict.update({
"created": SERVER_TIMESTAMP,
"state": "AUTHENTICATED",
})
user_doc_ref.set(user_profile_dict, merge=True)
def create_firebase_token(pax_id):
custom_token = auth.create_custom_token(pax_id)
return str(custom_token, "utf-8")
def get_user_profile(token):
headers = {
"Authorization": f"Bearer {token}"
}
req = requests.get("https://paxid.eu.auth0.com/userinfo", headers=headers)
req.raise_for_status()
resp = req.json()
obj = Box(resp)
if "email" in obj and obj.name == obj.email: # auth0 weird name
obj.name = obj.get("nickname") or obj.email
return obj
``` |
{
"source": "30emeciel/freshdesk-token-exchange",
"score": 3
} |
#### File: 30emeciel/freshdesk-token-exchange/main.py
```python
from datetime import datetime, timedelta
from os import environ
import jwt
import requests
from dotmap import DotMap
FRESHDESK_SHARED_SECRET_KEY = environ["FRESHDESK_SHARED_SECRET_KEY"]
def from_request(request):
"""Responds to any HTTP request.
Args:
request (flask.Request): HTTP request object.
Returns:
The response text or any set of values that can be turned into a
Response object using
`make_response <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>`.
"""
# Set CORS headers for preflight requests
if request.method == 'OPTIONS':
# Allows GET requests from any origin with
# Authorization header
headers = {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Methods': 'POST',
'Access-Control-Allow-Headers': 'content-type',
'Access-Control-Max-Age': '3600',
}
return '', 204, headers
from flask import abort
if request.method != 'POST':
return abort(405)
request_data = DotMap(request.get_json())
access_token = request_data.access_token
freshdesk_token = convert_auth0_token_to_freshdesk_token(access_token)
headers = {
'Access-Control-Allow-Origin': '*',
}
return ({
"freshdesk_token": freshdesk_token
}, 200, headers)
def convert_auth0_token_to_freshdesk_token(auth0_token):
# valid auth0_token and get user_profile
up = get_user_profile(auth0_token)
return get_freshdesk_token(up.name, up.email)
def get_freshdesk_token(name, email):
payload = {
"name": name,
"email": email,
"exp": int((datetime.now() + timedelta(hours=2)).timestamp())
}
token = jwt.encode(payload, FRESHDESK_SHARED_SECRET_KEY, algorithm='HS256')
return token
def get_user_profile(token):
headers = {
"Authorization": f"Bearer {token}"
}
req = requests.get("https://paxid.eu.auth0.com/userinfo", headers=headers)
req.raise_for_status()
resp = req.json()
return DotMap(resp)
``` |
{
"source": "30emeciel/py-push-preregistration-completed",
"score": 2
} |
#### File: 30emeciel/py-push-preregistration-completed/main.py
```python
import logging
from box import Box
from core import firestore_client
from core.slack_message import SlackSender
from core.tpl import render
log = logging.getLogger(__name__)
db = firestore_client.db()
slack_sender = SlackSender()
def from_firestore(event, context):
"""Triggered by a change to a Firestore document.
Args:
event (dict): Event payload.
context (google.cloud.functions.Context): Metadata for the event.
"""
resource_string = context.resource
# print out the resource string that triggered the function
log.debug(f"Function triggered by change to: {resource_string}.")
# now print out the entire event object
# print(str(data))
push_new_account_to_slack(resource_string, Box(event))
def push_new_account_to_slack(docpath, event):
if "state" not in event.updateMask.fieldPaths:
log.info("state hasn't changed, ignoring")
return
pax_ref = db.document(docpath)
pax_doc = pax_ref.get()
assert pax_doc.exists
pax = Box(pax_doc.to_dict())
if pax.state != "REGISTERED":
log.info(f"pax {pax_ref.id} has 'state' set to REGISTERED, ignoring")
return
data = {
"pax": pax
}
slack_message = render("preregistration_completed_fr.txt", data)
slack_sender.send_slack_message(slack_message)
``` |
{
"source": "30emeciel/trigger-on-update-pax",
"score": 2
} |
#### File: 30emeciel/trigger-on-update-pax/main.py
```python
import logging
from box import Box
from core import firestore_client
from core.mailer import Mailer
from core.rst_to_html import to_html
from core.tpl import render
log = logging.getLogger(__name__)
db = firestore_client.db()
mailer = Mailer()
def from_firestore(event, context):
"""Triggered by a change to a Firestore document.
Args:
event (dict): Event payload.
context (google.cloud.functions.Context): Metadata for the event.
"""
resource_string = context.resource
# print out the resource string that triggered the function
log.info(f"Function triggered by change to: {resource_string}.")
# now print out the entire event object
log.debug(str(event))
trigger_on_update_pax(resource_string, Box(event))
def trigger_on_update_pax(doc_path, event):
if "state" not in event.updateMask.fieldPaths:
log.info("state hasn't changed, ignoring")
return
pax_ref = db.document(doc_path)
pax_doc = pax_ref.get()
assert pax_doc.exists
pax = Box(pax_doc.to_dict())
if pax.state != "CONFIRMED":
log.info(f"pax 'state' != CONFIRMED, ignoring")
return
if "email" not in pax:
log.warning(f"No email field. ignoring pax with id {pax_ref.id} name={pax.name}...")
return
data = Box({
"pax": pax
})
html = to_html(render("confirmed_pax_fr.rst", data))
title = render("confirmed_pax_title_fr.txt", data)
mailer.send_mail(pax.name, pax.email, title, html)
``` |
{
"source": "30emeciel/trigger-on-update-reservation-request",
"score": 2
} |
#### File: 30emeciel/trigger-on-update-reservation-request/test_main.py
```python
import dotenv
import pytest
from box import Box
@pytest.fixture(autouse=True)
def env():
dotenv.load_dotenv()
def test_trigger_on_update_reservation_request(when):
resource = "pax/google-oauth2|107336710838050909583/requests/KHQpVVu1H7Hvn3jLwX01"
event = {
"oldValue": {
"createTime": "2021-03-05T19:54:37.991311Z",
"fields": {
"arrival_date": {"timestampValue": "2021-03-15T23:00:00Z"},
"created": {"timestampValue": "2021-03-05T19:54:37.978Z"},
"departure_date": {"timestampValue": "2021-03-19T23:00:00Z"},
"kind": {"stringValue": "COLIVING"},
"number_of_nights": {"integerValue": "4"},
"state": {"stringValue": "PENDING_REVIEW"}
},
"name": f"projects/trentiemeciel/databases/(default)/documents/{resource}",
"updateTime": "2021-03-05T21:56:55.248879Z"
},
"updateMask": {"fieldPaths": ["state"]},
"value": {
"createTime": "2021-03-05T19:54:37.991311Z",
"fields": {
"arrival_date": {"timestampValue": "2021-03-15T23:00:00Z"},
"created": {"timestampValue": "2021-03-05T19:54:37.978Z"},
"departure_date": {"timestampValue": "2021-03-19T23:00:00Z"},
"kind": {"stringValue": "COLIVING"},
"number_of_nights": {"integerValue": "4"},
"state": {"stringValue": "CANCELED"}
},
"name": f"projects/trentiemeciel/databases/(default)/documents/{resource}",
"updateTime": "2021-03-05T21:56:55.248879Z"
}
}
# from core.firestore_client import db
# sub_sub_ref_mock = mock({"get": lambda: {}}, spec=DocumentReference)
# sub_ref_mock = mock({"parent": sub_sub_ref_mock}, spec=DocumentReference)
# ref_mock = mock({"parent": sub_ref_mock}, spec=DocumentReference)
# when(db).document(resource).thenReturn(ref_mock)
# pax_doc_mock = mock(spec=DocumentSnapshot)
# request_doc_mock = mock(spec=DocumentSnapshot)
# when(sub_sub_ref_mock).get().thenReturn(pax_doc_mock)
# when(ref_mock).get().thenReturn(request_doc_mock)
# when(pax_doc_mock).exists().thenReturn(True)
# when(request_doc_mock).exists().thenReturn(True)
from main import trigger_on_update_reservation_request
trigger_on_update_reservation_request(resource, Box(event))
``` |
{
"source": "30hours/rgbee",
"score": 3
} |
#### File: rgbee/script/static-fade.py
```python
import sys
import time
import pigpio
def main(color, brightness = 1):
pi = pigpio.pi()
# get initial rgb
initial_red = pi.get_PWM_dutycycle(17)
initial_green = pi.get_PWM_dutycycle(27)
initial_blue = pi.get_PWM_dutycycle(22)
rgb = hex_to_rgb(color)
red = rgb[0]
green = rgb[1]
blue = rgb[2]
red = red * brightness
green = green * brightness
blue = blue * brightness
diff_red = red - initial_red
diff_green = green - initial_green
diff_blue = blue - initial_blue
for factor in range(0, 101):
pi.set_PWM_dutycycle(17, initial_red + diff_red*factor/100)
pi.set_PWM_dutycycle(27, initial_green + diff_green*factor/100)
pi.set_PWM_dutycycle(22, initial_blue + diff_blue*factor/100)
time.sleep(0.002)
def hex_to_rgb(hex_string):
r_hex = hex_string[0:2]
g_hex = hex_string[2:4]
b_hex = hex_string[4:6]
return int(r_hex, 16), int(g_hex, 16), int(b_hex, 16)
if len(sys.argv) == 2:
color = sys.argv[1]
main(color)
if len(sys.argv) == 3:
color = sys.argv[1]
brightness = float(sys.argv[2])
main(color, brightness/100)
```
#### File: rgbee/script/sweep_blue.py
```python
import sys
import time
import pigpio
import math
def main(brightness = 1):
pi = pigpio.pi()
# get initial rgb
initial_red = pi.get_PWM_dutycycle(17)
initial_green = pi.get_PWM_dutycycle(27)
initial_blue = pi.get_PWM_dutycycle(22)
fade = 0.5
# fade to start
start_blue = brightness*255
start_red = brightness*fade*(255/2)*(1+math.sin(math.radians(0)))
start_green = brightness*fade*(255/2)*(1+math.sin(math.radians(180)))
diff_red = start_red - initial_red
diff_green = start_green - initial_green
diff_blue = start_blue - initial_blue
for factor in range(0, 101):
pi.set_PWM_dutycycle(17, initial_red + diff_red*factor/100)
pi.set_PWM_dutycycle(27, initial_green + diff_green*factor/100)
pi.set_PWM_dutycycle(22, initial_blue + diff_blue*factor/100)
time.sleep(0.01)
while True:
nMax = 1024
nMin = 0
for i in range(nMin, nMax):
arg = 2*math.pi*i/nMax
blue = 255
red = int(fade*(255/2)*(1+math.sin(math.radians(0)+arg)))
green = int(fade*(255/2)*(1+math.sin(math.radians(180)+arg)))
pi.set_PWM_dutycycle(17, brightness*red)
pi.set_PWM_dutycycle(27, brightness*green)
pi.set_PWM_dutycycle(22, brightness*blue)
time.sleep(0.05)
if len(sys.argv) == 1:
main()
if len(sys.argv) == 2:
brightness = float(sys.argv[1])
main(brightness/100)
``` |
{
"source": "30ideas-Software-Factory/readIT",
"score": 3
} |
#### File: readIT/models/baseModel.py
```python
import models
from uuid import uuid4
from sqlalchemy import Column, String
from sqlalchemy.ext.declarative import declarative_base
# import engine
# from engine.dbStorage import DBStorage
Base = declarative_base()
class BaseModel:
"""Class that defines all common attributes/methods
for other classes will inherit"""
def __str__(self):
"""String representation of the BaseModel class"""
Id = 'Id' + self.__class__.__name__
return "[{:s}] ({:s}) {}".format(self.__class__.__name__,
eval('self.{}'.format(Id)),
self.__dict__)
def to_dict(self):
"""Returns a dictionary containing all keys/values of the instance"""
new_dict = self.__dict__.copy()
new_dict["Class"] = self.__class__.__name__
return new_dict
```
#### File: tests/test_models/test_users.py
```python
from models.users import User
from models.baseModel import BaseModel
import unittest
class TestUser(unittest.TestCase):
"""Test the User class"""
def test_is_subclass(self):
"""Test that User is a subclass of BaseModel"""
user = User()
self.assertIsInstance(user, BaseModel)
def test_init_(self):
user = User()
self.assertTrue(hasattr(user, "IdUser"))
self.assertTrue(hasattr(user, "FirstName"))
self.assertTrue(hasattr(user, "LastName"))
self.assertTrue(hasattr(user, "Phone"))
self.assertTrue(hasattr(user, "Mail"))
self.assertTrue(hasattr(user, "Password"))
self.assertTrue(hasattr(user, "City"))
def test_to_dict_values(self):
"""test that values in dict returned from to_dict are correct"""
eva = {'FirstName': 'Eva',
'LastName': 'DaughterOfGod',
'Mail': '<EMAIL>',
'Password': '<PASSWORD>',
'Phone': '59899101010',
'City': 'Paradise'}
u = User(**eva)
new_d = u.to_dict()
self.assertEqual(new_d["Class"], "User")
self.assertEqual(type(new_d["IdUser"]), str)
self.assertEqual(type(new_d["FirstName"]), str)
self.assertEqual(type(new_d["LastName"]), str)
self.assertEqual(type(new_d["Phone"]), str)
self.assertEqual(type(new_d["Mail"]), str)
self.assertEqual(type(new_d["Password"]), str)
self.assertEqual(type(new_d["City"]), str)
def test_str(self):
"""test that the str method has the correct output"""
user = User()
string = "[User] ({}) {}".format(user.IdUser, user.__dict__)
self.assertEqual(string, str(user))
``` |
{
"source": "30mb1/conference-scrapper",
"score": 2
} |
#### File: conference_scrapper/conference/admin.py
```python
from django.contrib import admin
from conference_scrapper.conference.models import Conference, ConferenceGraphEdge
from django.contrib.postgres import fields
from django_json_widget.widgets import JSONEditorWidget
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.html import format_html
from django.db.models import Subquery, OuterRef
def link(link, inner_text, target_blank=True):
target = 'target="_blank"' if target_blank else ''
return format_html(f'<a href="{link}" {target}>{inner_text}</a>')
@admin.register(Conference)
class ConferenceAdmin(admin.ModelAdmin):
list_display = ['id', 'title', 'url', 'key_words']
search_fields = ['title', 'url', 'slug', 'source']
formfield_overrides = {
fields.JSONField: {'widget': JSONEditorWidget},
}
@admin.register(ConferenceGraphEdge)
class ConferenceEdgeAdmin(admin.ModelAdmin):
list_display = ['id', 'get_conf_1', 'get_conf_2', 'matches']
search_fields = ['conf_1', 'conf_2', 'matches_len', 'matches', 'source']
def get_queryset(self, request):
qs = super().get_queryset(request)
subqs1 = Conference.objects.filter(slug=OuterRef('conf_1')).values('id')
subqs2 = Conference.objects.filter(slug=OuterRef('conf_2')).values('id')
qs = qs.annotate(
conf_1_id=Subquery(subqs1[:1]),
conf_2_id=Subquery(subqs2[:1])
)
return qs
def get_conf_1(self, obj):
link_str = link(reverse('admin:conference_conference_change', args=(obj.conf_1_id,)), obj.conf_1)
return mark_safe(link_str)
def get_conf_2(self, obj):
link_str = link(reverse('admin:conference_conference_change', args=(obj.conf_2_id,)), obj.conf_2)
return mark_safe(link_str)
get_conf_1.short_description = 'Conference 1'
get_conf_2.short_description = 'Conference 2'
```
#### File: conference_scrapper/conference/filters.py
```python
from django_filters import CharFilter, FilterSet, NumberFilter
from datetime import datetime
from conference_scrapper.conference.models import Conference
class ConferenceFilter(FilterSet):
query = CharFilter(method='filter_query')
date = NumberFilter(method='filter_date')
source = CharFilter(method='filter_source')
class Meta:
model = Conference
fields = ['source', 'date', 'query']
def filter_query(self, queryset, name, value):
filter_type = self.data.get('filter_type', 'tags')
if filter_type == 'tags':
return self.filter_tags(queryset, name, value)
elif filter_type == 'title':
return self.filter_title(queryset, name, value)
elif filter_type == 'url':
return self.filter_url(queryset, name, value)
else:
return queryset
def filter_date(self, queryset, name, value):
from_date = datetime(year=value, month=1, day=1)
to_date = datetime(year=value, month=12, day=31)
queryset = queryset.filter(start_date__gte=from_date, end_date__lte=to_date)
return queryset
def filter_title(self, queryset, name, value):
queryset = queryset.filter(title__iexact=value)
return queryset
def filter_tags(self, queryset, name, value):
tags = [x.strip() for x in value.split(',')]
use_all_tags = self.data.get('use_all_tags', 'true') == 'true'
if use_all_tags:
queryset = queryset.filter(key_words__contains=tags)
else:
queryset = queryset.filter(key_words__overlap=tags)
return queryset
def filter_url(self, queryset, name, value):
queryset = queryset.filter(url=value)
return queryset
def filter_source(self, queryset, name, value):
if value == 'all':
return queryset
else:
return queryset.filter(source=value)
``` |
{
"source": "30Meridian/RozumneMistoSnapshot",
"score": 2
} |
#### File: RozumneMistoSnapshot/digest/helpers.py
```python
import hashlib
from datetime import datetime
from django.core.mail import send_mail
from django.template.loader import render_to_string
from weunion.settings import DEFAULT_FROM_EMAIL
from weunion.views import live_stream_list
from weunion.models import User
from .models import UserSubscriptions
class DigestMail(object):
local_life_stream_cache = {}
def send(self, recipients=None):
"""
Sending letters to all in dispatch list
:param recipients: queryset of users for whom will be sent a letter
:return: None
"""
if recipients is None:
recipients = User.objects
day = datetime.now().date()
for user in recipients.all():
try:
user_subscription = UserSubscriptions.objects.get(user=user)
filter_list = user_subscription.filter_list
except UserSubscriptions.DoesNotExist:
filter_list = {'News', 'Petitions', 'Polls', 'Defects'}
if len(filter_list) == 0:
continue
context = self.get_context(filter_list)
context['unsubscribe'] = str(user.id) + '?token=' + hashlib.md5(user.email.encode()).hexdigest()
if len(context['value']) > 1:
send_mail(
'Дайджест подій на ' + str(day),
render_to_string('emails/digest.email', context),
DEFAULT_FROM_EMAIL,
[user.email],
html_message=render_to_string('emails/digest.email', context),
)
self.local_life_stream_cache.clear()
def get_context(self, filter_list):
filter_list_like_string = str(set(filter_list))
if filter_list_like_string in self.local_life_stream_cache:
return self.local_life_stream_cache[filter_list_like_string]
else:
context = live_stream_list(datetime.now(), filter_list, 7)
self.local_life_stream_cache[filter_list_like_string] = context
return context
```
#### File: RozumneMistoSnapshot/mvs_wanted/models.py
```python
from weunion.models import User, Town
import uuid
from stdimage.models import StdImageField
from django.db import models
#генерируем псевдоуникальный файлнейм для загружаемых изображений
def get_file_path(instance, filename):
ext = filename.split('.')[-1]
filename = "%s.%s" % (uuid.uuid4(), ext)
return 'mvs_wanted/%s' % filename
class MvsWanted(models.Model):
name = models.CharField(max_length=255)
birth_date = models.DateField()
image = StdImageField(blank=True, upload_to=get_file_path, variations={
'large': (600, 400),
'thumbnail': {"width": 100, "height": 100, "crop": True}
})
text = models.CharField(max_length=2000)
category = models.ForeignKey('MvsWantedCategories', db_column='category_id')
create_date = models.DateTimeField(auto_now_add=True)
owner_user = models.ForeignKey(User, db_column='owner_user')
town = models.ForeignKey(Town, db_column='town')
class Meta:
managed = False
db_table = 'mvs_wanted'
class MvsWantedCategories(models.Model):
category = models.CharField(max_length=50)
class Meta:
managed = False
db_table = 'mvs_wanted_categories'
```
#### File: RozumneMistoSnapshot/news/views.py
```python
import base64
from datetime import datetime
from django.shortcuts import render, redirect, get_object_or_404
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.views.generic.edit import FormView
from django.db.models import Q
from django.core.mail import send_mail, EmailMessage
from pure_pagination import Paginator, EmptyPage, PageNotAnInteger
from allauth.account.decorators import verified_email_required
from weunion.settings import DEFAULT_FROM_EMAIL
from weunion.models import Town
from .models import News
from .forms import NewsAdd, SuggestNews
# Добавить новость
@verified_email_required
def add(request, townslug):
if (request.user.is_authenticated() and request.user.is_active):
if request.user.isAllowedToModerate(request.session["town"], 'News'):
if request.method == 'POST':
town = Town.objects.get(pk=request.session["town"])
form = NewsAdd(request.POST or None, request.FILES)
# fill a model from a POST
if form.is_valid():
instance = form.save(commit=False)
instance.town = town
instance.mainimg = form.cleaned_data['mainimg']
instance.author = request.user
instance.publish = 0
instance.save()
return redirect('../news/%s' % instance.id)
else:
return render(request, 'add_news.html', {'form': form})
else:
form = NewsAdd()
return render(request, 'add_news.html', {'form': form})
else:
raise PermissionDenied("Доступ заборонено")
else:
raise PermissionDenied("Доступ заборонено")
# Отобразить список новостей
def list(request, townslug):
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
if 'town' in request.session:
if request.user.is_authenticated() and request.user.is_active and request.user.isAllowedToModerate(
request.session["town"], 'News'):
allowed = True
news_list = News.objects.filter(town=request.session["town"]).all().order_by('-datetime_publish', 'publish')
else:
allowed = False
news_list = News.objects.filter(town=request.session["town"], publish=1).all().order_by('-datetime_publish')
p = Paginator(news_list, 10, request=request)
articles = p.page(page)
return render(request, 'articles_list.html', {'articles': articles, 'allowed': allowed, 'townslug': townslug})
else:
return redirect(reverse('regions'))
# Отобразить новость
def article(request, id, townslug):
article = get_object_or_404(News, id=id)
if not (request.session.has_key('town')):
request.session['town'] = article.town.id
request.session['town_name'] = article.town.name
allowed = False
if request.user.is_authenticated() and request.user.is_active:
allowed = request.user.isAllowedToModerate(article.town.id, 'News')
if (article.publish == False and allowed == False):
raise PermissionDenied("Доступ заборонено")
return render(request, 'article.html', {'article': article, 'allowed': allowed, })
# Удалить новость
def delete(request, id, townslug):
if request.user.is_authenticated() and request.user.is_active and request.user.isAllowedToModerate(
request.session["town"], 'News'):
news = get_object_or_404(News, pk=id)
news.delete()
return redirect('../../news')
# Выставить статус опубликована
def publish(request, id, townslug):
if request.user.is_authenticated() and request.user.is_active and request.user.isAllowedToModerate(
request.session["town"], 'News'):
news = get_object_or_404(News, pk=id)
news.publish = 1
if not (news.datetime_publish):
news.datetime_publish = datetime.now()
news.save()
return redirect('../../news/' + id)
# Выставить статус снята с опубликации
def unpublish(request, id, townslug):
if request.user.is_authenticated() and request.user.is_active and request.user.isAllowedToModerate(
request.session["town"], 'News'):
news = get_object_or_404(News, pk=id)
news.publish = 0
news.save()
return redirect('../../news/' + id)
# Редактировать новость
def edit(request, id, townslug):
if (request.user.is_authenticated() and request.user.is_active):
if request.user.isAllowedToModerate(request.session["town"], 'News'):
news = get_object_or_404(News, pk=id)
form = NewsAdd(instance=news)
if request.method == 'POST':
form = NewsAdd(request.POST, request.FILES, instance=news)
if form.is_valid():
instance = form.save(commit=False)
instance.mainimg = form.cleaned_data['mainimg']
instance.author = request.user
instance.publish = 0
instance.save()
return redirect('../../news/%s' % instance.id)
else:
return render(request, 'edit_news.html', {'form': form, 'id': id})
else:
return render(request, 'edit_news.html', {'form': form, 'id': id})
else:
raise PermissionDenied("Доступ заборонено")
else:
raise PermissionDenied("Доступ заборонено")
class SuggestNewsView(FormView):
form_class = SuggestNews
template_name = 'suggest_news.html'
def get_success_url(self):
reverse('news:list', kwargs={'townslug': self.kwargs.get('townslug')})
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
files = request.FILES.getlist('file_field')
if form.is_valid():
recipients = []
town = Town.objects.filter(slug=self.kwargs.get('townslug')).first()
users = town.user_set.filter(
Q(groups__name='Moderator_news') | Q(groups__name='Moderator') | Q(groups__name='Control_news'))
for user in users:
recipients.append(user.email)
subject = 'Запропонована новина від {0} {1}'.format(request.user.first_name, request.user.last_name)
text = form.cleaned_data['text'] + '\nEmail: ' + str(request.user.email)
mail = EmailMessage(subject, text, DEFAULT_FROM_EMAIL, recipients)
for file in files:
mail.attach(file.name, file.read(), file.content_type)
mail.send()
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
return render(self.request, 'success_suggestion.html', {'townslug': self.kwargs.get('townslug')})
suggest_news = SuggestNewsView.as_view()
```
#### File: RozumneMistoSnapshot/petitions/helper.py
```python
from .models import *
from django.shortcuts import get_object_or_404, redirect
from ipware.ip import get_ip
from defects.models import Town
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.db.models import Q
from weunion.settings import DEFAULT_FROM_EMAIL
from email.header import Header
class Activity:
'''Клас реализует запись всех процессов происходящих над петицией'''
def add(self,request, petition_id, act_text):
activity = PetitionsActivity()
user = get_object_or_404(User, id =request.user.id)
petition = get_object_or_404(Petitions, id = petition_id)
try:
activity.user = user
activity.petition = petition
activity.activity = act_text
activity.ip = self._ip(request)
activity.save()
return redirect('petitions:petition', arg=(petition_id))
except:
return False
def add_robot(self,request, petition_id, act_text):
activity = PetitionsActivity()
user = get_object_or_404(User, id =request.user.id)
petition = get_object_or_404(Petitions, id = petition_id)
try:
activity.user = user
activity.petition = petition
activity.activity = act_text
activity.ip = self._ip(request)
activity.save()
return True
except:
return False
def _ip(self,request):
'''Получаем IP клиента с request'''
ip = get_ip(request)
if ip is not None:
return ip
else:
return 'local'
class ActivityMail:
'''Отправляем почту на основе темплейтов'''
def sendToModers(request, subject, template_name, town, context,email=None, module='Petitions'):
recipients = []
if(email == None):
recipients = []
users = Town.objects.get(pk=int(town)).user_set.filter(Q(groups__name='Moderator_%s' % module ) | Q(groups__name='Moderator'))
for user in users:
recipients.append(user.email)
else:
recipients = email
template_html = render_to_string('emails/'+template_name, context)
template_plain = render_to_string('emails/'+template_name, context)
send_mail(
subject,
template_plain,
DEFAULT_FROM_EMAIL,
recipients,
html_message=template_html,
)
def get_town_type(town):
text = ''
type = str(town.town_type)
if type == 'місто':
text = 'міської ради м. {}'.format(town)
elif type == 'смт.':
text = 'селищної ради смт. {}'.format(town)
elif type == 'село':
text = 'сільської ради с. {}'.format(town)
elif type == 'район':
text = 'районної ради р. "{}"'.format(town)
return text
def get_recipient_of_petition(town):
petition_to = ''
try:
petition_to = town.additions.get(type='petition_to').body
if not petition_to:
petition_to = get_town_type(town)
except:
petition_to = get_town_type(town)
return petition_to
```
#### File: RozumneMistoSnapshot/petitions/views.py
```python
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
from .forms import *
from .models import PetitionsStatuses,Petitions, PetitionsVoices
from defects.models import Town, User
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from .helper import Activity, ActivityMail, get_recipient_of_petition
from pure_pagination import Paginator, EmptyPage, PageNotAnInteger
import datetime
import time
from ipware.ip import get_ip
from weunion.settings import CRON_SECRET, KARMA
from allauth.account.decorators import verified_email_required
from weunion.helper import Karma
#если админ или модер - показываем на главной петиции на модерации
def index(request, townslug):
if(request.user.is_authenticated() and request.user.is_active):
if request.user.isAllowedToModerate(request.session["town"], 'Petitions'):
return redirect('../petitions/moderate')
else:
return redirect('../petitions/status/2')
return redirect('../petitions/status/2')
#добавляем петицию (показываем форму)
@verified_email_required
def add(request, townslug):
petition_to = get_recipient_of_petition(Town.objects.get(id=request.user.towns.all()[0].id))
if(request.user.is_authenticated() and request.user.is_active):
if request.method == 'POST':
form = PetitionAdd(request.POST, request.FILES)
#fill a model from a POST
if form.is_valid():
user = request.user
instance = form.save(commit=False)
instance.owner_user = user
instance.town = Town.objects.get(id=request.user.towns.all()[0].id)
instance.status = PetitionsStatuses.objects.get(pk=1)
instance.image = form.cleaned_data['image']
instance.save()
Activity().add_robot(request, instance.id, 'Петиція створена')
ActivityMail.sendToModers(request,'Нова петиція на модерацію', 'petition_new.email', instance.town.id, {'petition_id':instance.id, 'title': instance.title,'townslug':townslug})
ActivityMail.sendToModers(request,'Ваша петиція направлена на модерацію', 'petition_change_status.email', instance.town.id, {'petition_id':instance.id, 'title': instance.title, 'status':'На модерації','townslug':townslug}, [instance.owner_user.email] )
return render(request,'thank_you.html')
else:
return render(request, 'add_petition.html', {'form': form, 'petition_to': petition_to})
else:
form = PetitionAdd()
return render(request, 'add_petition.html', {'form': form, 'petition_to': petition_to})
else:
redirect('accounts/signup')
#проверяем кроном просроченые петиции и переносим в архивные
def checktimeout(request, secret, townslug):
if(secret == CRON_SECRET):
petitions = Petitions.objects.filter(status=2)
count = 0
status = PetitionsStatuses.objects.get(pk=5)
for petition in petitions:
need_votes = petition.town.votes
pet_days = petition.town.pet_days
date_start = petition.when_approve
days_end = date_start + datetime.timedelta(days=pet_days)
day_now = datetime.datetime.now()
days_left = days_end - day_now
if(days_left.days < 0):
petition.status= status # делаем архивной если у петиции кончилось время сбора подписей и она не набрала нужного количества голосов
petition.save()
Activity().add(request, petition.id, 'Автоматична зміна статусу на "Архівна"')
ActivityMail.sendToModers(request,'Петиція НЕ набрала необхідну кількість голосів і переміщена в архів.', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status':'Архівна','townslug':townslug})
ActivityMail.sendToModers(request,'Ваша петиція НЕ набрала необхідну кількість голосів і переміщена в архів', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status':'Архівна','townslug':townslug}, [petition.owner_user.email])
count +=1
if(count):
return HttpResponse('Done! Find: '+str(count)+' petition(-s)')
else:
return HttpResponse("Not found any petitions that mutch enddate!")
else:
raise PermissionDenied('Досуп заборонено.')
#Отображаем петицию
def petition(request, petition_id, townslug):
petition = get_object_or_404(Petitions,id=petition_id)
town = Town.objects.get(id=petition.town.id)
petition_to = get_recipient_of_petition(town)
need_votes = town.votes
pet_days = town.pet_days
petition_number = town.pet_number_templ % (petition_id)
allowed = False
activities = None
if not(request.session.has_key('town')):
request.session['town'] = petition.town.id
request.session['town_name'] = petition.town.name
if request.user.is_authenticated():
allowed = request.user.isAllowedToModerate(petition.town.id, 'Petitions')
else:
allowed = False
if(petition.status.id == 1 and not allowed):
raise PermissionDenied("Перегляд петицій, що знаходяться на модерації не дозволено!")
if(allowed):
activities = petition.petitionsactivity_set.all().order_by("-id")
form_chagestatus = PetitionChangeStatus()
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
vpage = 1
p = Paginator(PetitionsVoices.objects.filter(petition=petition_id).exclude(block=1).all(),9, request=request)
votes = p.page(page)
votes_count = PetitionsVoices.objects.filter(petition=petition_id).exclude(block=1).all().count()
fullname = get_object_or_404(User,id = petition.owner_user.id).get_full_name_atall()
end_date = False
if(petition.status.id == 5):
date_start = petition.when_approve
end_date = date_start + datetime.timedelta(days=pet_days)
if(petition.status.id == 2):
can_vote = False
if (request.user.is_authenticated() and request.user.is_active) and \
petition.town in request.user.towns.all():
can_vote = True
date_start = petition.when_approve
days_end = date_start + datetime.timedelta(days=pet_days+1)
day_now = datetime.datetime.now()
days_left = days_end - day_now
return render(request, 'petition.html',{
'petition': petition,
'days_left': days_left.days,
'pet_days':pet_days,
'needvotes':need_votes,
'petition_number': petition_number,
'fullname': fullname,
'votes': votes,
'votes_count': votes_count,
'allowed': allowed,
'form_chagestatus': form_chagestatus,
'getusersign': _getusersign(request, petition_id),
'activities': activities,
'end_date': end_date,
'can_vote': can_vote,
'petition_to': petition_to,
})
else:
return render(request, 'petition.html',{
'petition': petition,
'pet_days':pet_days,
'needvotes':need_votes,
'fullname': fullname,
'petition_number': petition_number,
'allowed': allowed,
'form_chagestatus': form_chagestatus,
'votes': votes,
'votes_count': votes_count,
'getusersign': _getusersign(request,petition_id),
'activities':activities,
'end_date':end_date,
'petition_to': petition_to,
})
# Показываем список петиций
def list(request, status, townslug):
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
if 'town' in request.session:
p = Paginator(Petitions.objects.filter(town = request.session["town"], status = status).order_by("-id"), 25, request=request)
petitions = p.page(page)
if status == '2':
return render(request, 'petitions_list_2.html',{'petitions': petitions})
elif status == '3':
return render(request, 'petitions_list_3.html',{'petitions': petitions})
elif status == '4':
return render(request, 'petitions_list_4.html',{'petitions': petitions})
elif status == '5':
return render(request, 'petitions_list_5.html',{'petitions': petitions})
elif status == '6':
return render(request, 'petitions_list_6.html',{'petitions': petitions})
elif status == '8':
return render(request, 'petitions_list_8.html',{'petitions': petitions})
else:
return redirect(reverse('regions'))
# Показываем список петиций
def moderate(request, townslug):
try:
page = request.GET.get('page', 1)
except PageNotAnInteger:
page = 1
if 'town' in request.session:
petitions_moder = Petitions.objects.filter(town = request.session["town"]).exclude(status__in=[2, 3, 4, 5, 6, 7]).all().order_by('status')
petitions_hidden = Petitions.objects.filter(town = request.session["town"]).exclude(status__in=[1, 2, 3, 4, 5, 6, 8]).all().order_by('status')
if(request.user.is_authenticated() and request.user.is_active):
if request.user.isAllowedToModerate(request.session["town"], 'Petitions'):
return render(request, 'moderate.html',{'petitions_moder': petitions_moder, 'petitions_hidden': petitions_hidden})
else:
raise PermissionDenied('Доступ заборонено')
else:
raise PermissionDenied('Доступ заборонено')
else:
return redirect(reverse('regions'))
#Правила петиций
def rules(request, townslug):
return render(request, 'rules.html')
#Помощь
def help(request, townslug):
return render(request, 'help_pet.html')
#отдать cвой голос
@verified_email_required
def vote(request, petition_id, townslug):
if(request.user.is_authenticated() and request.user.is_active):
petition = get_object_or_404(Petitions, id=petition_id)
if petition.town in request.user.towns.all():
vote = PetitionsVoices()
vote.petition_id = petition_id
vote.user = request.user
vote.ip = get_ip(request)
vote.save()
Karma.add(request.user,KARMA['PETITION_VOTE'],"Голосування за петицію", "Петиції")
votes_count = PetitionsVoices.objects.filter(petition=petition_id).exclude(block=1).all().count()
if (votes_count >= petition.town.votes):
status = PetitionsStatuses.objects.get(pk=8)
petition.status = status
petition.save()
Activity().add(request, petition_id, 'Автоматична зміна статусу на "На перевірці голосів"')
ActivityMail.sendToModers(request,'Петиція набрала необхідну кількість голосів. Перевірте підписи.', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status':'На перевірці голосів','townslug':townslug})
ActivityMail.sendToModers(request,'Ваша петиція набрала необхідну кількість голосів і знаходиться на перевірці підписів', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status':'На перевірці голосів','townslug':townslug}, [petition.owner_user.email])
return redirect('../../petitions/%s' % petition_id)
else:
# raise PermissionDenied('Доступ заборонено')
return render(request, '403.html', {'exception': "Ви намагаєтесь підписати петицію іншого населеного пункту, у якому Ви не зареєстровані."})
else:
raise PermissionDenied('Доступ заборонено')
#отдать/забрать свой голос
@verified_email_required
def disvote(request, petition_id, townslug):
if(request.user.is_authenticated() and request.user.is_active):
petition = get_object_or_404(Petitions, id=petition_id)
if petition.town in request.user.towns.all():
vote = get_object_or_404(PetitionsVoices,petition=petition_id, user= request.user.id)
vote.delete()
Karma.add(request.user,KARMA['PETITION_DISVOTE'],"Забрано голос з петиції", "Петиції")
return redirect('../../petitions/%s' % petition_id)
else:
raise PermissionDenied('Доступ заборонено')
else:
raise PermissionDenied('Доступ заборонено')
#проверяем голосовал ли пользователь за петицию
def _getusersign(request, petition_id):
if(request.user in [ p.user for p in PetitionsVoices.objects.filter(petition = petition_id)]):
return True
else:
return False
#Изменение статусов
def approve(request,petition_id, townslug):
"""Апруваем петицию"""
petition = get_object_or_404(Petitions,id=petition_id)
if request.user.isAllowedToModerate(petition.town.id, 'Petitions'):
petition = get_object_or_404(Petitions,id=petition_id)
status = get_object_or_404(PetitionsStatuses,id=2)
try:
petition.status = status
petition.when_approve = time.strftime('%Y-%m-%d %H:%M:%S')
petition.save()
Karma.add(petition.owner_user,KARMA['PETITION_WAS_APPROVE'],"Створення власної петиції, що пройшла модерацію", "Петиції")
Activity().add(request, petition_id, 'Зміна статусу на Опублікована')
ActivityMail.sendToModers(request,'Петиція пройшла модерацію і опублікована', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status':'Триває збір підписів','townslug':townslug})
ActivityMail.sendToModers(request,'Ваша петиція пройшла модерацію і опублікована', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status':'Триває збір підписів','townslug':townslug}, [petition.owner_user.email])
return redirect('../../petitions/%s' % petition_id)
except:
pass
else:
raise PermissionDenied('Доступ заборонено')
def onconsideration(request,petition_id, townslug):
"""Перемещаем петицию в статус 'Расматривается' """
petition = get_object_or_404(Petitions,id=petition_id)
if request.user.isAllowedToModerate(petition.town.id, 'Petitions'):
petition = get_object_or_404(Petitions,id=petition_id)
status = get_object_or_404(PetitionsStatuses,id=6)
try:
petition.status = status
petition.when_approve = time.strftime('%Y-%m-%d %H:%M:%S')
petition.save()
Karma.add(petition.owner_user,KARMA['PETITION_ONCONSIDERATION'],"Ваша петиція набрала необхідну кількість голосів і пройшла їх перевірку", "Петиції")
Activity().add(request, petition_id, 'Зміна статусу на "Розглядається"')
ActivityMail.sendToModers(request,'Петиція пройшла перевірку голосів і тепер знаходиться на розгляді', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status':'Розглядається','townslug':townslug})
ActivityMail.sendToModers(request,'Ваша петиція пройшла перевірку голосів і тепер знаходиться на розгляді', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status':'Розглядається','townslug':townslug}, [petition.owner_user.email])
return redirect('../../petitions/%s' % petition_id)
except:
pass
else:
raise PermissionDenied('Доступ заборонено')
def tomoderate(request, petition_id, townslug):
"""Возвращеем петициию на модерацию"""
petition = get_object_or_404(Petitions,id=petition_id)
if request.user.isAllowedToModerate(petition.town.id, 'Petitions'):
petition = get_object_or_404(Petitions,id=petition_id)
status = get_object_or_404(PetitionsStatuses,id=1)
try:
petition.status = status
petition.when_approve = time.strftime('%Y-%m-%d %H:%M:%S')
petition.save()
Activity().add(request, petition_id, 'Зміна статусу на "Повернуто на домодерацію"')
ActivityMail.sendToModers(request,'Петиція повернута на домодерацію', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status':'Повернуто на домодерацію','townslug':townslug})
ActivityMail.sendToModers(request,'Ваша петиція повернута на домодерацію', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status':'Повернуто на домодерацію','townslug':townslug}, [petition.owner_user.email])
return redirect('../../petitions/%s' % petition_id)
except:
pass
else:
raise PermissionDenied('Доступ заборонено')
def returntoactive(request, townslug):
"""возвращаем петицию на сбор подписей"""
if request.POST:
petition_id = request.POST['petition_id']
form = PetitionChangeStatus(request.POST)
petition = get_object_or_404(Petitions,id=petition_id)
if request.user.isAllowedToModerate(petition.town.id, 'Petitions'):
town_slug = Town.objects.get(id=request.session['town']).slug
status = get_object_or_404(PetitionsStatuses,id=2) #Статус: Активна
if form.is_valid():
petition.status = status
petition.resolution = form.cleaned_data['resolution']
petition.save()
Activity().add(request, petition_id, 'Зміна статусу на "Повернута на збір підписів. Причина: '+petition.resolution+'"')
ActivityMail.sendToModers(request,'Петиція повернута на добір підписів зі статусу "На розгляді"', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status': 'Розглядається', 'resolution': 'Причина: '+petition.resolution})
ActivityMail.sendToModers(request,'Ваша петиція повернута модератором на добір підписів', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status': 'Повернено на збір підписів', 'resolution': 'Причина: '+petition.resolution}, [petition.owner_user.email])
return redirect(reverse('petitions:petition', args=(town_slug, petition_id,)))
else:
return HttpResponse("Помилка. Зверніть увагу на поле резолюція, воно має бути заповнене")
else:
raise PermissionDenied('Досуп заборонено')
else:
raise PermissionDenied('Досуп заборонено. POST')
def disapprove(request, townslug):
"""Отклоняем петицию"""
if request.POST:
petition_id = request.POST['petition_id']
form = PetitionChangeStatus(request.POST)
petition = get_object_or_404(Petitions,id=petition_id)
if request.user.isAllowedToModerate(petition.town.id, 'Petitions'):
status = get_object_or_404(PetitionsStatuses,id=3) #Статус: Відхилена модератором
if form.is_valid():
petition.status = status
petition.resolution = form.cleaned_data['resolution']
petition.save()
Activity().add(request, petition_id, 'Зміна статусу на "Відхилена модератором Причина: '+petition.resolution+'"')
ActivityMail.sendToModers(request,'Петиція відхилена модератором', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status': 'Відхилена модератором', 'resolution': 'Причина: '+petition.resolution,'townslug':townslug})
ActivityMail.sendToModers(request,'Ваша петиція відхилена модератором', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status': 'Відхилена модератором', 'resolution': 'Причина: '+petition.resolution,'townslug':townslug}, [petition.owner_user.email])
return redirect('../../petitions/%s' % petition_id)
else:
return HttpResponse("Помилка. Зверніть увагу на поле резолюція, воно має бути заповнене")
else:
raise PermissionDenied('Досуп заборонено')
else:
raise PermissionDenied('Досуп заборонено. POST')
def hidepetition(request, townslug):
"""Прячим петицию петицию"""
if request.POST:
petition_id = request.POST['petition_id']
form = PetitionChangeStatus(request.POST)#
petition = get_object_or_404(Petitions,id=petition_id)
if request.user.isAllowedToModerate(petition.town.id, 'Petitions'):
status = get_object_or_404(PetitionsStatuses,id=7) #Статус: Схована модератором
if form.is_valid():
petition.status = status
petition.resolution = form.cleaned_data['resolution']
petition.save()
Activity().add(request, petition_id, 'Зміна статусу на "Модератором відхилив і приховав петицію Причина: '+petition.resolution+'"')
ActivityMail.sendToModers(request,'Петиція відхилена модератором і їй надано спец.статус "Прихована"', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status': 'Відхилена модератором і прихована', 'resolution': 'Причина: '+petition.resolution,'townslug':townslug})
ActivityMail.sendToModers(request,'Ваша петиція відхилена модератором і їй надано спец.статус "Прихована"', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status': 'Відхилена модератором і прихована', 'resolution': 'Причина: '+petition.resolution,'townslug':townslug}, [petition.owner_user.email])
return redirect('../../petitions/%s' % petition_id)
else:
return HttpResponse("Помилка. Зверніть увагу на поле резолюція, воно має бути заповнене")
else:
raise PermissionDenied('Досуп заборонено')
else:
raise PermissionDenied('Досуп заборонено. POST')
def done(request, townslug):
"""Петицию рассмотрено"""
if request.POST:
petition_id = request.POST['petition_id']
form = PetitionChangeStatus(request.POST)
petition = get_object_or_404(Petitions,id=petition_id)
if request.user.isAllowedToModerate(petition.town.id, 'Petitions'):
status = get_object_or_404(PetitionsStatuses,id=4) #Статус: Розглянута
if form.is_valid():
petition.status = status
petition.resolution = form.cleaned_data['resolution']
petition.save()
Activity().add(request, petition_id, 'Зміна статусу на "Петиція розглянута Резолюція: '+petition.resolution+'"')
ActivityMail.sendToModers(request,'Петиція розглянута', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status': 'Розглянута', 'resolution': 'Причина: '+petition.resolution,'townslug':townslug})
ActivityMail.sendToModers(request,'Ваша петиція розглянута', 'petition_change_status.email', petition.town.id, {'petition_id':petition.id, 'title': petition.title, 'status': 'Розглянута', 'resolution': 'Причина: '+petition.resolution,'townslug':townslug}, [petition.owner_user.email])
return redirect('../../petitions/%s' % petition_id)
else:
return HttpResponse("Помилка. Зверніть увагу на поле резолюція, воно має бути заповнене.")
else:
raise PermissionDenied('Досуп заборонено.')
else:
raise PermissionDenied('Досуп заборонено.')
#бан модератором пользователя и его голоса согласно регламенту
def ban(request,user_id, petition_id, vote_id, townslug):
'''забанить подпись пользователя и его самого. Если пользователь уже забанен
забанить только подпись'''
from django.db.models import Q
if(request.user.is_authenticated() and request.user.is_active):
petition = get_object_or_404(Petitions,id=petition_id)
if request.user.isAllowedToModerate(petition.town.id, 'Petitions'):
if user_id and petition_id and vote_id:
votes = [p.petitionsvoices_set.filter(user=user_id) for p in Petitions.objects.filter(Q(status=2) | Q(status=8)).all()]
block_voices_number = 0
for vote in votes:
for v in vote:
v.block = 1
v.save()
block_voices_number += 1
user = get_object_or_404(User, id=user_id)
if user.is_active == 0:
return redirect('../../../../petitions/%s' % petition_id)
else:
user.is_active = 0
user.save()
Activity().add(request, petition_id, 'Заблокований користувач '+user_id)
return redirect('../../../../petitions/%s' % petition_id)
else:
raise PermissionDenied('Досуп заборонено.')
else:
raise PermissionDenied('Досуп заборонено.')
else:
raise PermissionDenied('Досуп заборонено.')
#распечатать петицию
def print(request, petition_id, townslug):
petition = get_object_or_404(Petitions,id=petition_id)
town = Town.objects.get(id=request.session["town"])
petition_to = get_recipient_of_petition(town)
petition_number = town.pet_number_templ % (petition_id)
town = Town.objects.get(id=request.session["town"])
need_votes = town.votes
pet_days = town.pet_days
if request.user.is_authenticated():
allowed = request.user.isAllowedToModerate(petition.town.id, 'Petitions')
else:
allowed = False
days_end = False
if (petition.status.id == 5):
date_start = petition.when_approve
days_end = date_start + datetime.timedelta(days=pet_days)
day_now = datetime.datetime.now()
if allowed:
votes = PetitionsVoices.objects.filter(petition=petition_id).exclude(block=1).all()
owner = get_object_or_404(User,id = petition.owner_user.id)
return render(request, 'petition_print.html',{'petition': petition,'needvotes': need_votes, 'owner': owner,'petition_number': petition_number,'allowed': allowed, 'votes': votes, 'end_date':days_end, 'petition_to': petition_to})
else:
raise PermissionDenied('Досуп заборонено.')
def test(request, townslug):
ActivityMail.sendToModers(request,'Тестовый имейл', 'new_petition.email', 1, {'url':'www.google.com','title': 'Сайт гугла'})
return HttpResponse("Done!")
```
#### File: RozumneMistoSnapshot/smartapi/views.py
```python
import json
from django.http import JsonResponse, HttpResponse
from django.shortcuts import get_object_or_404, get_list_or_404, render
from weunion.models import User,Town,Regions
from .models import AuthUserApiKeys
from django.core.exceptions import PermissionDenied
from django.views.decorators.csrf import csrf_exempt
#@csrf_exempt - привинтивный декоратор для подавления проверки csrf-подписи
@csrf_exempt
def router(request):
if(request.method == "POST"):
if request.POST:
try:
allowed = get_object_or_404(AuthUserApiKeys, apikey=request.POST['apikey'])
except:
return JsonResponse({'error': 'Вкажіть, будь ласка, вірний ключ API'})
if('command' in request.POST):
data = '' #сущности, которые мы должны веруть клиенту
command = request.POST['command']
if(command == 'gettowns'):
data = _getLocation()
elif(command == 'gettownsbyreg'):
try:
regid=request.POST['regid']
except:
return JsonResponse({'error': "Акажіть ID регіону (regid)"})
data = _getTownsByReg(regid)
elif(command == 'gettownbyid'):
try:
townid=request.POST['townid']
except:
return JsonResponse({'error': "Акажіть ID міста (townid)"})
data = _getTownById(request.POST['townid'])
elif(command == 'getuserbyid'):
try:
townid=request.POST['uid']
except:
return JsonResponse({'error': "Акажіть ID користувача (uid)"})
data = _getUserById(request.POST['uid'])
elif(command == 'getuserbyemail'):
try:
townid=request.POST['email']
except:
return JsonResponse({'error': "Акажіть email користувача (email)"})
data = _getUserByEmail(request.POST['email'])
elif(command == 'getlastpetitions'):
try:
count = request.POST['count']
statusid = request.POST['statusid']
except:
count = None
statusid = None
data = _getLastPetitions(request.POST['townid'], count, statusid)
elif(command == 'getlastdefects'):
try:
count = request.POST['count']
except:
count = None
data = _getLastDefects(request.POST['townid'], count)
elif(command == 'getlastnews'):
try:
count = request.POST['count']
except:
count = None
data = _getLastNews(request.POST['townid'], count)
elif(command == 'getlastpolls'):
try:
count = request.POST['count']
except:
count = None
data = _getLastPolls(request.POST['townid'], count)
else:
return JsonResponse({'error': "Команда не розпізнана"})
return JsonResponse(data)#возвращаем конечному пользователю данные
else:
return JsonResponse({'error': 'Виберіть, будь ласка, комaнду для API'})
else:
clients_post = json.loads(request.body.decode("UTF-8"))
try:
allowed = get_object_or_404(AuthUserApiKeys, apikey=clients_post['apikey'])
except:
return JsonResponse({'error': 'Вкажіть, будь ласка, вірний ключ API'})
if ('command' in clients_post):
data = '' # сущности, которые мы должны веруть клиенту
command = clients_post['command']
if (command == 'gettowns'):
data = _getLocation()
elif (command == 'gettownsbyreg'):
try:
regid = clients_post['regid']
except:
return JsonResponse({'error': "Акажіть ID регіону (regid)"})
data = _getTownsByReg(regid)
elif (command == 'gettownbyid'):
try:
townid = clients_post['townid']
except:
return JsonResponse({'error': "Акажіть ID міста (townid)"})
data = _getTownById(clients_post['townid'])
elif (command == 'getuserbyid'):
try:
townid = clients_post['uid']
except:
return JsonResponse({'error': "Акажіть ID користувача (uid)"})
data = _getUserById(clients_post['uid'])
elif (command == 'getuserbyemail'):
try:
townid = clients_post['email']
except:
return JsonResponse({'error': "Акажіть email користувача (email)"})
data = _getUserByEmail(clients_post['email'])
elif (command == 'getlastpetitions'):
try:
count = clients_post['count']
statusid = clients_post['statusid']
except:
count = None
statusid = None
data = _getLastPetitions(clients_post['townid'], count, statusid)
elif (command == 'getlastdefects'):
try:
count = clients_post['count']
except:
count = None
data = _getLastDefects(clients_post['townid'], count)
elif (command == 'getlastnews'):
try:
count = clients_post['count']
except:
count = None
data = _getLastNews(clients_post['townid'], count)
elif (command == 'getlastpolls'):
try:
count = clients_post['count']
except:
count = None
data = _getLastPolls(clients_post['townid'], count)
else:
return JsonResponse({'error': "Команда не розпізнана"})
return JsonResponse(data) # возвращаем конечному пользователю данные
else:
return JsonResponse({'error': 'Виберіть, будь ласка, комaнду для API'})
else:
return render(request,'smartapi/api.html',_page(request))
#Возвращеет JSON опитування
def _getLastPolls(townid, count=5):
if(townid):
if(count):
count = int(count)
from polls.models import Poll
polls = {'polls':[]}
for poll in Poll.objects.filter(active=1,archive=0, town=townid).order_by('id')[:count]:
poll = {'id': poll.id, 'question': poll.question,'description': poll.description, 'date_end': poll.date_end}
polls['polls'].append(poll)
return polls
#Возвращеет JSON новостей
def _getLastNews(townid, count=5):
if(townid):
if(count):
count = int(count)
from news.models import News
news = {'news':[]}
for n in News.objects.filter(town=townid, publish=1).order_by('-datetime_publish')[:count]:
n = {'id':n.id,'title':n.title,'date_publish':n.datetime_publish,'description': n.shortdesc,'image':'http://rozumnemisto.org/medial/'+n.mainimg.name}
news['news'].append(n)
return news
#Возвращеет JSON дефектов
def _getLastDefects(townid, count=5):
if(townid):
if(count):
count = int(count)
from defects.models import Issues
defects = {'defects':[]}
for defect in [i for i in Issues.objects.filter(parent_task_ref=None, town_ref=townid).order_by('-id') if i.last_issue().status !=0 and i.last_issue().status !=3][:count]:
defect = {'id':defect.id,'title':defect.title,'description': defect.description,'address': defect.address, 'status':defect.last_issue().status}
defects['defects'].append(defect)
return defects
#Возвращеет JSON петиций
def _getLastPetitions(townid, count=5, statusid=2):
from petitions.models import Petitions
if(townid):
if(count):
count = int(count)
if (statusid):
statusid = int(statusid)
if statusid == 1 or statusid == 3 or statusid == 7:
petitions = {'petitions': []}
for petition in Petitions.objects.filter():
petition = {'Error your statusid is inadmissible': statusid}
petitions['petitions'].append(petition)
return petitions
else:
petitions = {'petitions':[]}
for petition in Petitions.objects.filter(town=townid, status=statusid).order_by('-id')[:count]:
petition = {'id':petition.id, 'votes': petition.vote_count(), 'votes_needs': petition.town.votes,'title':petition.title,'status': statusid ,'text':petition.text,'claim':petition.claim,\
'date':petition.when_approve,'image': 'http://rozumnemisto.org/media/%s' % petition.image,\
'url':'http://rozumnemisto.org/%s/petitions/%s' % (petition.town.slug, petition.id)}
petitions['petitions'].append(petition)
return petitions
#Возвращает JSON локации в ассортименте
def _getLocation():
towns = {'towns':[]} #инициализируем словарик городов
for town in get_list_or_404(Town, is_active=1):
town = {'id':town.id,'name':town.name,'type': town.town_type.title,'region': town.region_ref.name }
towns['towns'].append(town)
return towns
def _getTownById(townid):
if(townid):
try:
town = get_object_or_404(Town,pk=townid, is_active=1)
town_dict = {'name': town.name, 'type': town.town_type.title, 'region': town.region_ref.name,
'url':'http://rozumnemisto.org/town/%s' % town.slug}
return town_dict
except:
return _sendError("Місто з ID: %s не знайдене!" % townid)
def _getTownsByReg(regid):
if(regid):
try:
towns_objects = Regions.objects.get(pk=regid).town_set.filter(is_active=1)
except:
return _sendError("Регіон з ID: %s не знайдений!" % regid)
towns = {'towns':[]} #инициализируем словарик городов
for town in towns_objects:
town = {'id':town.id,'name':town.name,'type': town.town_type.title,'url':'http://rozumnemisto.org/town/%s' % town.slug}
towns['towns'].append(town)
return towns
#возвращаем JSON с юзером в ассортименте
def _getUserById(uid):
if(uid):
try:
user = get_object_or_404(User, pk=uid)
user_dict = {'first_name': user.first_name, 'middle_name': user.middle_name, 'last_name': user.last_name,
'phone': user.phone,'email': user.email,'town': user.towns.all()[0].name}
return user_dict
except:
return _sendError("Користувач не знайдений")
else:
return _sendError("Вкажіть коректний user ID")
#возвращаем JSON с юзером в ассортименте
def _getUserByEmail(email):
if(email):
try:
user = get_object_or_404(User, email=email)
user_dict = {'first_name': user.first_name, 'middle_name': user.middle_name, 'last_name': user.last_name,
'phone': user.phone,'email': user.email,'town': user.towns.all()[0].name}
return user_dict
except:
return _sendError("Користувач не знайдений")
else:
return _sendError("Вкажіть коректний Email")
#Аутендифицируем пользователя
def _loginUser(request, email, password):
pass
#Возвращалочка ошибки когда пользователь не задал команду для интерфейса
def _sendError(message):
return {'error':message}
@csrf_exempt
def apitest(request, message):
return JsonResponse({'error':message},safe=False)
#Генерируем ключик API
def _generateApiKey(request):
import random
import string
from .models import AuthUserApiKeys
if(request.user.is_authenticated() and request.user.is_active):
key = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(30))
userapikey = AuthUserApiKeys()
userapikey.user_ref = request.user
userapikey.apikey = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(30))
userapikey.isblock = 0
userapikey.save()
return userapikey.apikey
else:
return None
#Страничка API для пользователя
def _page(request):
if(request.user.is_authenticated() and request.user.is_active):
try:
userkey = request.user.authuserapikeys_set.all()[0]
except:
userkey = None
if(userkey):
apikey = userkey.apikey
else:
apikey = _generateApiKey(request)
else:
apikey = None
regions = Regions.objects.all()
towns = Town.objects.all()
return {'apikey': apikey,'regions': regions, 'towns': towns}
```
#### File: RozumneMistoSnapshot/weunion/admin.py
```python
import nested_admin
from django.contrib import admin
from weunion.settings import GOOGLE_API_KEY
from django.template.loader import render_to_string
from django.core.exceptions import ValidationError
from django.forms.models import BaseInlineFormSet
from django.contrib.auth.models import Group
from django.contrib.sites.models import Site
from allauth.account.models import EmailAddress
from .forms import UserAdminForm, TownAdminForm,\
TownAdditionsInlineForm, TownBudgetForm
from .models import User, Town, Regions, TownBanners, \
TownEdrpou, TownsAdditions, TownGromada, \
TownsGromadasVillages, TownBudgets, TownAllowedModules
from .admin_help import modules_values, allowed_defects, allowed_petitions, moders_content
class UserAdmin(admin.ModelAdmin):
form = UserAdminForm
list_display = ('id', 'get_full_name_atall', 'email', 'get_towns')
exclude = ('password',)
search_fields = ('last_name', 'email', 'id')
filter_horizontal = ('groups', 'towns', 'work_for', 'user_permissions')
fields = ('first_name', 'last_name', 'middle_name', 'username',
'email', 'phone', 'last_login', 'date_joined', 'verified_email',
'is_active', 'is_staff', 'is_superuser',
'groups', 'towns', 'work_for', 'user_permissions')
list_display_links = ('id', 'get_full_name_atall', 'email')
def get_form(self, request, obj=None, **kwargs):
form = super(UserAdmin, self).get_form(request, obj, **kwargs)
try:
account_email = EmailAddress.objects.get(user=obj.id)
if account_email.verified:
form.verified = True
else:
form.verified = False
except:
form.verified = False
return form
def get_towns(self, obj):
towns = obj.towns.all().values_list('name', flat=True)
return ', '.join(towns)
get_towns.short_description = 'Міста'
get_towns.admin_order_field = 'towns'
def has_add_permission(self, request):
return False
def save_model(self, request, obj, form, change):
try:
EmailAddress.objects.filter(user=obj.id).exclude(email=form.cleaned_data['email']).delete()
account_email = EmailAddress.objects.get(user=obj.id, email=form.cleaned_data['email'])
except:
account_email = False
if form.cleaned_data['verified_email']:
if account_email:
account_email.verified = True
account_email.save()
else:
EmailAddress.objects.create(user=obj, email=form.cleaned_data['email'], verified=1, primary=1)
else:
if account_email:
account_email.verified = False
account_email.save()
obj.save()
class RegionsAdmin(admin.ModelAdmin):
def has_add_permission(self, request):
return False
class TownBannerInline(admin.TabularInline):
model = TownBanners
max_num = 1
# class TownEdrpouInline(admin.TabularInline):
# model = TownEdrpou
# min_num = 0
# extra = 0
class TownsGromadasVillagesInline(nested_admin.NestedTabularInline):
model = TownsGromadasVillages
fk_name = 'gromada_ref'
min_num = 1
extra = 0
class TownsGromadaInline(nested_admin.NestedStackedInline):
inlines = [TownsGromadasVillagesInline]
model = TownGromada
fk_name = 'main_town_ref'
min_num = 0
extra = 0
max_num = 1
class TownBudgetInline(admin.TabularInline):
form = TownBudgetForm
model = TownBudgets
min_num = 0
max_num = 1
exclude = ('year',)
def has_delete_permission(self, request, obj=None):
return False
class TonwAdditionsFormset(BaseInlineFormSet):
def clean(self):
super(TonwAdditionsFormset, self).clean()
for field in self.forms:
if field.cleaned_data:
if ((field.cleaned_data['type'] in ['igov', 'donor']) and
not field.cleaned_data['body']):
raise ValidationError("Обов'язково заповнити \"Донорство крові\" та \"Електронні послуги\" ")
class TonwAdditionsInline(admin.TabularInline):
model = TownsAdditions
formset = TonwAdditionsFormset
form = TownAdditionsInlineForm
fields = ('title', 'type', 'body', 'description')
min_num = 6
extra = 7
# max_num = 14
# def has_delete_permission(self, request, obj=None):
# return False
class TownAllowedModulesFormset(BaseInlineFormSet):
def clean(self):
super(TownAllowedModulesFormset, self).clean()
list_of_modules = []
for field in self.forms:
if field.cleaned_data:
if field.cleaned_data['module'] in list_of_modules:
raise ValidationError('Було обрано два однакових модулі. Залиште лише один з них.')
list_of_modules.append(field.cleaned_data['module'])
class TownAllowedModulesInline(admin.TabularInline):
model = Town.modules.through
formset = TownAllowedModulesFormset
min_num = 0
extra = 13
max_num = 13
class TownAdmin(nested_admin.NestedModelAdmin):
form = TownAdminForm
list_display = ('name', 'koatuu', 'town_type')
search_fields = ('name', 'koatuu', 'slug', 'id')
inlines = [TownBannerInline, TownAllowedModulesInline, TownBudgetInline, TonwAdditionsInline, TownsGromadaInline]
add_fields = ('name', 'slug', 'region_ref', 'koatuu', 'is_active', 'town_type', 'votes', 'pet_days', 'pet_number_templ', ('map_lon', 'map_lat', 'zoom'), )
change_fields = ('name', 'slug', 'region_ref', 'koatuu', 'is_active', 'town_type', 'votes', 'pet_days', 'pet_number_templ', ('map_lon', 'map_lat', 'zoom'), 'menu')
def __init__(self, *args, **kwargs):
super(TownAdmin, self).__init__(*args, **kwargs)
self.town_slug = ''
self.menu_html = render_to_string('menu_for_town.html')
def add_view(self, request, form_url='', extra_context=None):
self.fields = getattr(self, 'add_fields', ())
extra_context = extra_context or {}
extra_context['api_key'] = GOOGLE_API_KEY
gmap = {'lon': 50.27, 'lat': 30.31, 'zoom': 6}
extra_context['gmap'] = gmap
return super(TownAdmin, self).add_view(
request, form_url=form_url, extra_context=extra_context)
def change_view(self, request, object_id, form_url='', extra_context=None):
self.fields = getattr(self, 'change_fields', ())
extra_context = extra_context or {}
extra_context['api_key'] = GOOGLE_API_KEY
town = Town.objects.get(id=object_id)
gmap = {'lon': town.map_lon, 'lat': town.map_lat, 'zoom': town.zoom}
extra_context['gmap'] = gmap
return super(TownAdmin, self).change_view(
request, object_id=object_id, form_url=form_url, extra_context=extra_context)
def save_model(self, request, obj, form, change):
self.town_slug = obj.slug
obj.slug = str(obj.slug).lower()
modules_list= '''{edata} {igov} {prozorro} {donor} {news}
{defects} {petitions} {polls} {openbudget}
{medicine} {flats} {smartroads} {mvs_wanted}'''
obj.menu = self.menu_html.format(town_slug=obj.slug, town_name=obj.name,
additional_items='{additional_items}',
moders='{moders}',
modules = modules_list
)
obj.save()
def save_formset(self, request, form, formset, change):
super(TownAdmin, self).save_formset(request, form, formset, change)
town = Town.objects.get(slug=self.town_slug)
if formset.model == TownAllowedModules:
dict_of_modules = {
'edata':['/{town_slug}/edata'.format(town_slug=town.slug), '/{town_slug}/edata'.format(town_slug=town.slug), 'Відкриті фінанси'],
'igov':['/{town_slug}/igov'.format(town_slug=town.slug), '/{town_slug}/igov'.format(town_slug=town.slug), 'Електронні послуги'],
'prozorro':['/{town_slug}/prozorro'.format(town_slug=town.slug), '/{town_slug}/prozorro'.format(town_slug=town.slug), 'Електронні закупівлі'],
'donor':['/{town_slug}/donor'.format(town_slug=town.slug), '/{town_slug}/donor'.format(town_slug=town.slug), 'Донорство крові'],
'polls':['/{town_slug}/polls'.format(town_slug=town.slug), '/polls', 'Опитування'],
'defects':['/{town_slug}/defects'.format(town_slug=town.slug), '/defects/', 'Дефекти ЖКГ'],
'petitions':['/{town_slug}/petitions'.format(town_slug=town.slug), '/petitions/', 'Петиції'],
'news':['/{town_slug}/news'.format(town_slug=town.slug), '/news/', 'Новини міста'],
'openbudget':['{openbudget_url}', '/openbudget/', 'Відкритий бюджет'],
'medicine':['{medicine_url}', '/medicines/', 'Реєстр ліків'],
'flats':['{flats_url}', '/flats/', 'Черги на житло'],
'mvs_wanted':['/{town_slug}/mvs_wanted/'.format(town_slug=town.slug), '/mvs_wanted/', 'Розшук поліції'],
'smartroads':['/{town_slug}/smartroads'.format(town_slug=town.slug), '/smartroads/', 'Розумні дороги'],
}
list_of_modules = [module[2] for module in dict_of_modules.values()]
dict_of_values = {}
for fields in formset:
if fields.cleaned_data:
module = str(fields.cleaned_data['module'])
to_delete = fields.cleaned_data['DELETE']
#
if module in list_of_modules and not to_delete:
title = [str(title) for title, value in dict_of_modules.items() if value[2] == module][0]
if fields.cleaned_data['active']:
dict_of_values[title] = modules_values[title].format(url=dict_of_modules[title][0] ,
allowed_petitions=allowed_petitions.format(town_slug=town.slug),
allowed_defects=allowed_defects.format(town_slug=town.slug))
else:
dict_of_values[title] = modules_values[title].format(url=dict_of_modules[title][1],
allowed_petitions='</a>',
allowed_defects='</a>')
elif module in list_of_modules and to_delete:
title = [str(title) for title, value in dict_of_modules.items() if value[2] == module][0]
dict_of_values[title] = ''
for name in dict_of_modules.keys():
if name not in dict_of_values.keys():
dict_of_values[name] = ''
town.menu = town.menu.format(
edata=dict_of_values['edata'],
igov=dict_of_values['igov'],
prozorro=dict_of_values['prozorro'],
donor=dict_of_values['donor'],
news=dict_of_values['news'],
defects=dict_of_values['defects'],
petitions=dict_of_values['petitions'],
polls=dict_of_values['polls'],
openbudget=dict_of_values['openbudget'],
medicine=dict_of_values['medicine'],
flats=dict_of_values['flats'],
smartroads=dict_of_values['smartroads'],
mvs_wanted=dict_of_values['mvs_wanted'],
additional_items = '{additional_items}',
moders='{moders}'
)
town.save()
if formset.model == TownBudgets:
budget = ''
for fields in formset:
if fields.cleaned_data:
if fields.cleaned_data['body']:
budget = '/{town_slug}/budget'.format(town_slug=town.slug)
else:
budget = '/openbudget/'
else:
budget = '/openbudget/'
town.menu = town.menu.format(
openbudget_url=budget,
medicine_url='{medicine_url}',
flats_url='{flats_url}',
additional_items='{additional_items}',
moders = '{moders}',
)
town.save()
#
if formset.model == TownsAdditions:
list_of_names = ['moders', 'flats', 'medicines', 'additional_items']
dict_of_values = {}
additional_items =''
for fields in formset:
if fields.cleaned_data:
if fields.cleaned_data['type'] == 'additional_items':
dict_of_values['additional_items'] = fields.cleaned_data['body']
elif fields.cleaned_data['type'] == 'moderators':
if fields.cleaned_data['body']:
dict_of_values['moders'] = moders_content.format(town_slug=town.slug)
else:
dict_of_values['moders'] = ''
elif fields.cleaned_data['type'] == 'flats':
if fields.cleaned_data['body']:
dict_of_values['flats'] = '/{town_slug}/info/flats'.format(town_slug=town.slug)
else:
dict_of_values['flats'] = '/flats/'
elif fields.cleaned_data['type'] == 'medicines':
if fields.cleaned_data['body']:
dict_of_values['medicines'] = '/{town_slug}/info/medicines'.format(town_slug=town.slug)
else:
dict_of_values['medicines'] = '/medicines/'
elif fields.cleaned_data['type'] == 'edata' and not fields.cleaned_data['body']:
edata = fields.save(commit=False)
edata.body = 7
edata.save()
town.menu = town.menu.format(
medicine_url=dict_of_values['medicines'],
flats_url=dict_of_values['flats'],
additional_items= dict_of_values['additional_items'],
moders=dict_of_values['moders']
)
town.save()
def get_form(self, request, obj=None, **kwargs):
form = super(TownAdmin, self).get_form(request, obj, **kwargs)
if obj is None:
form.add_obj = True
else:
form.add_obj = False
return form
class TownEdrpouAdmin(admin.ModelAdmin):
# def has_change_permission(self, request, obj=None):
# return False
fields = ('code', 'title', 'koatuu')
search_fields = ('koatuu', 'title', 'code')
list_display = ('code', 'title', 'koatuu')
list_display_links = ('code', 'name')
admin.site.register(Town, TownAdmin)
admin.site.register(User, UserAdmin)
admin.site.register(Regions, RegionsAdmin)
admin.site.register(TownEdrpou, TownEdrpouAdmin)
admin.site.unregister(Group)
admin.site.unregister(Site)
```
#### File: RozumneMistoSnapshot/weunion/statistics.py
```python
import datetime
from weunion.models import User, Town
from news.models import News
from petitions.models import Petitions
from defects.models import Issues
from polls.models import Poll
def city_stats():
users = []
for town in Town.objects.all():
count = User.objects.filter(towns=town).count()
defects = Issues.objects.filter(town_ref=town, parent_task_ref=None).count()
petitions = Petitions.objects.filter(town=town).count()
news = News.objects.filter(town=town).count()
polls = Poll.objects.filter(town=town).count()
if count > 4 or defects > 0 or petitions > 0 or news > 0 or polls > 0:
users.append((town.name, count, petitions, defects, news, polls))
return users
# Нижче даний модуль містить функції які використовуються для генерації статистики
# Кожна функція має однаковий набір параметрів date_from - date_to, розміщених для зручності використання у зворотному
# порядку
def users_registered(date_to=datetime.datetime.now(), date_from=datetime.datetime.min):
return User.objects.filter(date_joined__gte=date_from, date_joined__lte=date_to).count()
def news_published(date_to=datetime.datetime.now(), date_from=datetime.datetime.min):
return News.objects.filter(datetime_publish__gte=date_from, datetime_publish__lte=date_to).count()
def petitions_created(date_to=datetime.datetime.now(), date_from=datetime.datetime.min):
return Petitions.objects.filter(create_date__gte=date_from, create_date__lte=date_to).count()
def defects_created(date_to=datetime.datetime.now(), date_from=datetime.datetime.min):
return Issues.objects.filter(created__gte=date_from, created__lte=date_to, parent_task_ref=None).count()
def polls_created(date_to=datetime.datetime.now(), date_from=datetime.datetime.min):
return Poll.objects.filter(date_start__gte=date_from, date_start__lte=date_to).count()
``` |
{
"source": "30percent/PythonVariableMath",
"score": 3
} |
#### File: PythonVariableMath/effbotscript/tdop-4.py
```python
import sys
import re
try:
# test binding for python's built-in tokenizer; see
# http://svn.effbot.org/public/stuff/sandbox/pytoken
import pytoken
except ImportError:
pytoken = None
if 1:
# symbol (token type) registry
symbol_table = {}
class symbol_base(object):
id = None
value = None
first = second = third = None
def nud(self):
raise SyntaxError("Syntax error (%r)." % self.id)
def led(self, left):
raise SyntaxError("Unknown operator (%r)." % self.id)
def __repr__(self):
if self.id == "(name)" or self.id == "(literal)":
return "(%s %s)" % (self.id[1:-1], self.value)
out = [self.id, self.first, self.second, self.third]
out = map(str, filter(None, out))
return "(" + " ".join(out) + ")"
def symbol(id, bp=0):
try:
s = symbol_table[id]
except KeyError:
class s(symbol_base):
pass
s.__name__ = "symbol-" + id # for debugging
s.id = id
s.value = None
s.lbp = bp
symbol_table[id] = s
else:
s.lbp = max(bp, s.lbp)
return s
# helpers
def infix(id, bp):
def led(self, left):
self.first = left
self.second = expression(bp)
return self
symbol(id, bp).led = led
def infix_r(id, bp):
def led(self, left):
self.first = left
self.second = expression(bp-1)
return self
symbol(id, bp).led = led
def prefix(id, bp):
def nud(self):
self.first = expression(bp)
return self
symbol(id).nud = nud
def advance(id=None):
global token
if id and token.id != id:
raise SyntaxError("Expected %r" % id)
token = next()
def method(s):
# decorator
assert issubclass(s, symbol_base)
def bind(fn):
setattr(s, fn.__name__, fn)
return bind
# python expression syntax
symbol("lambda", 20)
symbol("if", 20); symbol("else") # ternary form
infix_r("or", 30); infix_r("and", 40); prefix("not", 50)
infix("in", 60); infix("not", 60) # not in
infix("is", 60);
infix("<", 60); infix("<=", 60)
infix(">", 60); infix(">=", 60)
infix("<>", 60); infix("!=", 60); infix("==", 60)
infix("|", 70); infix("^", 80); infix("&", 90)
infix("<<", 100); infix(">>", 100)
infix("+", 110); infix("-", 110)
infix("*", 120); infix("/", 120); infix("//", 120)
infix("%", 120)
prefix("-", 130); prefix("+", 130); prefix("~", 130)
infix_r("**", 140)
symbol(".", 150); symbol("[", 150); symbol("(", 150)
# additional behaviour
symbol("(name)").nud = lambda self: self
symbol("(literal)").nud = lambda self: self
symbol("(end)")
symbol(")")
@method(symbol("("))
def nud(self):
# parenthesized form; replaced by tuple former below
expr = expression()
advance(")")
return expr
symbol("else")
@method(symbol("if"))
def led(self, left):
self.first = left
self.second = expression()
advance("else")
self.third = expression()
return self
@method(symbol("."))
def led(self, left):
if token.id != "(name)":
SyntaxError("Expected an attribute name.")
self.first = left
self.second = token
advance()
return self
symbol("]")
@method(symbol("["))
def led(self, left):
self.first = left
self.second = expression()
advance("]")
return self
symbol(")"); symbol(",")
@method(symbol("("))
def led(self, left):
self.first = left
self.second = []
if token.id != ")":
while 1:
self.second.append(expression())
if token.id != ",":
break
advance(",")
advance(")")
return self
symbol(":"); symbol("=")
@method(symbol("lambda"))
def nud(self):
self.first = []
if token.id != ":":
argument_list(self.first)
advance(":")
self.second = expression()
return self
def argument_list(list):
while 1:
if token.id != "(name)":
SyntaxError("Expected an argument name.")
list.append(token)
advance()
if token.id == "=":
advance()
list.append(expression())
else:
list.append(None)
if token.id != ",":
break
advance(",")
# constants
def constant(id):
@method(symbol(id))
def nud(self):
self.id = "(literal)"
self.value = id
return self
constant("None")
constant("True")
constant("False")
# multitoken operators
@method(symbol("not"))
def led(self, left):
if token.id != "in":
raise SyntaxError("Invalid syntax")
advance()
self.id = "not in"
self.first = left
self.second = expression(60)
return self
@method(symbol("is"))
def led(self, left):
if token.id == "not":
advance()
self.id = "is not"
self.first = left
self.second = expression(60)
return self
# displays
@method(symbol("("))
def nud(self):
self.first = []
comma = False
if token.id != ")":
while 1:
if token.id == ")":
break
self.first.append(expression())
if token.id != ",":
break
comma = True
advance(",")
advance(")")
if not self.first or comma:
return self # tuple
else:
return self.first[0]
symbol("]")
@method(symbol("["))
def nud(self):
self.first = []
if token.id != "]":
while 1:
if token.id == "]":
break
self.first.append(expression())
if token.id != ",":
break
advance(",")
advance("]")
return self
symbol("}")
@method(symbol("{"))
def nud(self):
self.first = []
if token.id != "}":
while 1:
if token.id == "}":
break
self.first.append(expression())
advance(":")
self.first.append(expression())
if token.id != ",":
break
advance(",")
advance("}")
return self
# python tokenizer
def tokenize_python(program):
import tokenize
from cStringIO import StringIO
type_map = {
tokenize.NUMBER: "(literal)",
tokenize.STRING: "(literal)",
tokenize.OP: "(operator)",
tokenize.NAME: "(name)",
}
for t in tokenize.generate_tokens(StringIO(program).next):
try:
yield type_map[t[0]], t[1]
except KeyError:
if t[0] == tokenize.NL:
continue
if t[0] == tokenize.ENDMARKER:
break
else:
raise SyntaxError("Syntax error")
yield "(end)", "(end)"
def tokenize(program):
if isinstance(program, list):
source = program
else:
source = tokenize_python(program)
for id, value in source:
if id == "(literal)":
symbol = symbol_table[id]
s = symbol()
s.value = value
else:
# name or operator
symbol = symbol_table.get(value)
if symbol:
s = symbol()
elif id == "(name)":
symbol = symbol_table[id]
s = symbol()
s.value = value
else:
raise SyntaxError("Unknown operator (%r)" % id)
yield s
# parser engine
def expression(rbp=0):
global token
t = token
token = next()
left = t.nud()
while rbp < token.lbp:
t = token
token = next()
left = t.led(left)
return left
def parse(program):
global token, next
next = tokenize(program).next
token = next()
return expression()
def test(program):
print ">>>", program
print parse(program)
# taken from the python FAQ
program = """(lambda Ru,Ro,Iu,Io,IM,Sx,Sy:reduce(lambda x,y:x+y,map(lambda y,Iu=Iu,Io=Io,Ru=Ru,Ro=Ro,Sy=Sy,L=lambda yc,Iu=Iu,Io=Io,Ru=Ru,Ro=Ro,i=IM,Sx=Sx,Sy=Sy:reduce(lambda x,y:x+y,map(lambda x,xc=Ru,yc=yc,Ru=Ru,Ro=Ro,i=i,Sx=Sx,F=lambda xc,yc,x,y,k,f=lambda xc,yc,x,y,k,f:(k<=0)or (x*x+y*y>=4.0) or 1+f(xc,yc,x*x-y*y+xc,2.0*x*y+yc,k-1,f):f(xc,yc,x,y,k,f):chr(64+F(Ru+x*(Ro-Ru)/Sx,yc,0,0,i)),range(Sx))):L(Iu+y*(Io-Iu)/Sy),range(Sy))))(-2.1, 0.7, -1.2, 1.2, 30, 80, 24)"""
# program = program + "+" + program
# program = program + "+" + program
# program = program + "+" + program
if "--benchmark" in sys.argv:
def custom_tokenize_python(program):
# simplified tokenizer for this expression
pattern = r"\s*(?:(<=|>=|\W)|([a-zA-Z]\w*)|(\d+(?:\.\d*)?))"
for operator, name, literal in re.findall(pattern, program):
if operator:
yield "(operator)", operator
elif name:
yield "(name)", name
elif literal:
yield "(literal)", literal
else:
raise SyntaxError
yield "(end)", "(end)"
import time
print len(program), "bytes"
print len(list(tokenize(program))), "tokens"
def bench(name, func):
t0 = time.clock()
for i in xrange(1000):
func(program)
print name, time.clock() - t0
import parser, compiler
program_list = list(tokenize_python(program))
bench("topdown", parse)
bench("topdown pretokenized", lambda program: parse(program_list))
tokenize_python = custom_tokenize_python
bench("custom topdown", parse)
if pytoken:
tokenize_python = pytoken.token_list
bench("built-in topdown", parse)
print
bench("built-in compile", lambda program: compile(program, "", "eval"))
bench("parser.parse", lambda program: parser.st2tuple(parser.expr(program)))
print
bench("compiler.parse", lambda program: compiler.parse(program, "eval"))
bench("compiler.compile", lambda program: compiler.compile(program, "", "eval"))
sys.exit(0)
# samples
test("1")
test("+1")
test("-1")
test("1+2")
test("1+2+3")
test("1+2*3")
test("(1+2)*3")
print
print list(tokenize("1 not in 2"))
``` |
{
"source": "30sectomars/psas_testbot",
"score": 2
} |
#### File: psas_testbot/src/controller_pid_with_anti_windup.py
```python
import math
# Ros libsSIMULATION:
import rospy
# Ros messages
from std_msgs.msg import Float64
from std_msgs.msg import Float32MultiArray
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Twist
#Gravity
G = 9.81
FILTER_SIZE = 20
# IMU offset in real world
if rospy.has_param('/use_simulation'):
SIMULATION = rospy.get_param('/use_simulation')
if SIMULATION:
OFFSET_Y = 0.0
else:
OFFSET_Y = 0.134
else:
SIMULATION = False
OFFSET_Y = 0.134
# get v_max
if rospy.has_param('/v_max'):
V_MAX = rospy.get_param('/v_max')
else:
V_MAX = 0.05
# get loop rate in hz
if rospy.has_param('/loop_rate_in_hz'):
LOOP_RATE_IN_HZ = rospy.get_param('/loop_rate_in_hz')
else:
LOOP_RATE_IN_HZ = 100
class Controller:
def __init__(self):
self.connected = False
self.gyro_x = 0.0
self.gyro_y = 0.0
self.gyro_z = 0.0
self.accel_x = 0.0
self.accel_y = 0.0
self.accel_z = 0.0
self.ref = 0.0
self.e_sum = 0.0
self.e = [0.0, 0.0]
self.y = 0.0
self.y_list = [0.0] * FILTER_SIZE
self.u_pre = 0.0
self.u = [0.0, 0.0, 0.0]
self.diff_u = 0.0
self.umax = 0.116
self.umin = -0.116
self.Kp = 4.0
self.Ki = 0.1
self.Kd = 0.5
self.dt = 1.0 / LOOP_RATE_IN_HZ
self.delta1 = 0.0
if SIMULATION:
self.imu_sub = rospy.Subscriber('/imu', Imu, self.imu_callback)
else:
self.imu_sub = rospy.Subscriber('/testbot/imu', Float32MultiArray, self.imu_callback)
self.delta1_pub = rospy.Publisher('/testbot/delta1', Float64, queue_size=10)
self.e_pub = rospy.Publisher('/controller/e', Float64, queue_size=10)
self.y_avg_pub = rospy.Publisher('/controller/y_avg', Float64, queue_size=10)
self.y_pub = rospy.Publisher('/controller/y', Float64, queue_size=10)
self.u_pub = rospy.Publisher('/controller/u', Float64, queue_size=10)
self.u_pre_pub = rospy.Publisher('/controller/u_pre', Float64, queue_size=10)
self.u_pub = rospy.Publisher('/controller/u', Float64, queue_size=10)
self.diff_u_pub = rospy.Publisher('/controller/diff_u', Float64, queue_size=10)
self.e_sum_pub = rospy.Publisher('/controller/e_sum', Float64, queue_size=10)
self.vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
rospy.on_shutdown(self.shutdown)
def control(self):
self.diff_u = 0.0
self.y = sum(self.y_list)/len(self.y_list)
# insert new error in list and pop oldest value
self.e.insert(0, self.ref - self.y)
del self.e[-1]
self.e_sum += self.e[0]
I_anteil = 0.0
D_anteil = (self.e[0] - self.e[1]) / self.dt
self.u_pre = self.Kp * self.e[0] + self.Ki * I_anteil + self.Kd * D_anteil
if self.u_pre > self.umax:
self.diff_u = self.umax - self.u_pre
if self.u_pre < self.umin:
self.diff_u = self.umin - self.u_pre
if self.diff_u != 0:
I_anteil = (1.0 / self.Ki) * self.diff_u + self.e[0]
if (self.accel_y/G <= 1.0) & (self.accel_y/G > -1.0) & self.connected:
self.y_list.insert(0, math.asin(self.accel_y/G) - OFFSET_Y)
del self.y_list[-1]
self.u.insert(0,self.Kp * self.e[0] + self.Ki * I_anteil + self.Kd * D_anteil)
del self.u[-1]
self.delta1 = -math.tan(0.015 / V_MAX * self.u[0]) * 180 / math.pi
if SIMULATION:
self.delta1 = -self.delta1
def publish_all(self):
#self.delta1_pub.publish(self.delta1)
self.e_pub.publish(self.e[0])
self.y_pub.publish(self.y_list[0])
self.y_avg_pub.publish(self.y)
self.u_pre_pub.publish(self.u_pre)
self.u_pub.publish(self.u[0])
self.diff_u_pub.publish(self.diff_u)
self.e_sum_pub.publish(self.e_sum)
msg = Twist()
msg.linear.x = V_MAX
msg.angular.z = self.delta1
self.vel_pub.publish(msg)
def imu_callback(self, msg):
self.connected = True
if SIMULATION:
self.gyro_x = msg.angular_velocity.x
self.gyro_y = -msg.angular_velocity.y
self.gyro_z = -msg.angular_velocity.z
self.accel_x = msg.linear_acceleration.x
self.accel_y = -msg.linear_acceleration.y
self.accel_z = -msg.linear_acceleration.z
else:
self.gyro_x = msg.data[0]
self.gyro_y = msg.data[1]
self.gyro_z = msg.data[2]
self.accel_x = msg.data[3]
self.accel_y = msg.data[4]
self.accel_z = msg.data[5]
def shutdown(self):
msg = Twist()
msg.linear.x = 0.0
msg.angular.z = 0.0
self.vel_pub.publish(msg)
#rospy.loginfo("Controller is shut down")
def talker():
rospy.init_node('controller', anonymous=True)
ctrl = Controller()
rate = rospy.Rate(LOOP_RATE_IN_HZ)
while not rospy.is_shutdown():
ctrl.control()
ctrl.publish_all()
rate.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException: pass
```
#### File: psas_testbot/src/controller_zustandsregler_mit_beobachter.py
```python
import math
# Ros libs
import rospy
# Ros messages
from std_msgs.msg import Float64
from std_msgs.msg import Float32MultiArray
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Twist
#Gravity
G = 9.81
FILTER_SIZE = 20
if rospy.has_param('/use_simulation'):
SIMULATION = rospy.get_param('/use_simulation')
if SIMULATION:
OFFSET_Y = 0.0
else:
OFFSET_Y = 0.134
else:
SIMULATION = False
OFFSET_Y = 0.134
# get v_max
if rospy.has_param('/v_max'):
V_MAX = rospy.get_param('/v_max')
else:
V_MAX = 0.05
# get loop rate in hz
if rospy.has_param('/loop_rate_in_hz'):
LOOP_RATE_IN_HZ = rospy.get_param('/loop_rate_in_hz')
else:
LOOP_RATE_IN_HZ = 100
class Controller:
def __init__(self):
self.connected = False
self.gyro_x = 0.0
self.gyro_y = 0.0
self.gyro_z = 0.0
self.accel_x = 0.0
self.accel_y = 0.0
self.accel_z = 0.0
self.l1 = 0.59
self.l2 = 17.4
self.k1 = 0.2752
self.k2 = 0.0707
self.alpha = 0.0
self.alpha_list = [0.0] * FILTER_SIZE
self.u = 0.0
self.alphaB = [0.0, 0.0]
self.psiB = [0.0, 0.0]
self.delta1 = 0.0
if SIMULATION:
self.imu_sub = rospy.Subscriber('/imu', Imu, self.imu_callback)
else:
self.imu_sub = rospy.Subscriber('/testbot/imu', Float32MultiArray, self.imu_callback)
self.delta1_pub = rospy.Publisher('/testbot/delta1', Float64, queue_size=10)
self.u_pub = rospy.Publisher('/controller/u', Float64, queue_size=10)
self.alphaB_pub = rospy.Publisher('/controller/alphaB', Float64, queue_size=10)
self.psiB_pub = rospy.Publisher('/controller/psiB', Float64, queue_size=10)
self.alpha_pub = rospy.Publisher('/controller/alpha_avg', Float64, queue_size=10)
self.alpha_list_pub = rospy.Publisher('/controller/alpha', Float64, queue_size=10)
self.vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
rospy.on_shutdown(self.shutdown)
def control(self):
if (self.accel_y/G <= 1.0) & (self.accel_y/G > -1.0) & self.connected:
self.alpha_list.insert(0, math.asin(self.accel_y/G) - OFFSET_Y)
del self.alpha_list[-1]
self.alpha = sum(self.alpha_list)/len(self.alpha_list)
self.alphaB.insert(0, (self.alphaB[0] + 0.005 * self.psiB[0] + self.l1 * (self.alpha - self.alphaB[0])))
del self.alphaB[-1]
# verschobener Index bei alphaB weil vorher insert an stelle 0
self.psiB.insert(0, (self.psiB[0] + 0.01 * self.u + self.l2 * (self.alpha - self.alphaB[1])))
del self.psiB[-1]
self.u = -self.k1 * self.alphaB[1] - self.k2 * self.psiB[1]
self.delta1 = -math.tan(0.015 / V_MAX * self.u) * 180 / math.pi
if SIMULATION:
self.delta1 = -self.delta1
def publish_all(self):
#self.delta1_pub.publish(self.delta1)
self.u_pub.publish(self.u)
self.alphaB_pub.publish(self.alphaB[1])
self.psiB_pub.publish(self.psiB[1])
self.alpha_pub.publish(self.alpha)
self.alpha_list_pub.publish(self.alpha_list[0])
msg = Twist()
msg.linear.x = V_MAX
msg.angular.z = self.delta1
self.vel_pub.publish(msg)
def imu_callback(self, msg):
self.connected = True
if SIMULATION:
self.gyro_x = msg.angular_velocity.x
self.gyro_y = -msg.angular_velocity.y
self.gyro_z = -msg.angular_velocity.z
self.accel_x = msg.linear_acceleration.x
self.accel_y = -msg.linear_acceleration.y
self.accel_z = -msg.linear_acceleration.z
else:
self.gyro_x = msg.data[0]
self.gyro_y = msg.data[1]
self.gyro_z = msg.data[2]
self.accel_x = msg.data[3]
self.accel_y = msg.data[4]
self.accel_z = msg.data[5]
def shutdown(self):
msg = Twist()
msg.linear.x = 0.0
msg.angular.z = 0.0
self.vel_pub.publish(msg)
def talker():
rospy.init_node('controller', anonymous=True)
ctrl = Controller()
rate = rospy.Rate(LOOP_RATE_IN_HZ)
while not rospy.is_shutdown():
ctrl.control()
ctrl.publish_all()
rate.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException: pass
``` |
{
"source": "30x/orgs",
"score": 3
} |
#### File: orgs/test/test.py
```python
import requests
import base64
import json
from os import environ as env
from urlparse import urljoin
EXTERNAL_SCHEME = env['EXTERNAL_SCHEME']
BASE_URL = '%s://%s:%s' % (EXTERNAL_SCHEME, env['EXTERNAL_SY_ROUTER_HOST'], env['EXTERNAL_SY_ROUTER_PORT']) if 'EXTERNAL_SY_ROUTER_PORT' in env else '%s://%s' % (EXTERNAL_SCHEME, env['EXTERNAL_SY_ROUTER_HOST'])
def b64_decode(data):
missing_padding = (4 - len(data) % 4) % 4
if missing_padding:
data += b'='* missing_padding
return base64.decodestring(data)
if 'APIGEE_TOKEN1' in env:
TOKEN1 = env['APIGEE_TOKEN1']
else:
with open('token.txt') as f:
TOKEN1 = f.read()
claims = json.loads(b64_decode(TOKEN1.split('.')[1]))
USER1 = claims['iss'] + '#' + claims['sub']
if 'APIGEE_TOKEN2' in env:
TOKEN2 = env['APIGEE_TOKEN2']
else:
with open('token2.txt') as f:
TOKEN2 = f.read()
claims = json.loads(b64_decode(TOKEN2.split('.')[1]))
USER2 = claims['iss'] + '#' + claims['sub']
if 'APIGEE_TOKEN3' in env:
TOKEN3 = env['APIGEE_TOKEN3']
else:
with open('token3.txt') as f:
TOKEN3 = f.read()
claims = json.loads(b64_decode(TOKEN2.split('.')[1]))
USER2 = claims['iss'] + '#' + claims['sub']
def main():
print 'sending requests to %s' % BASE_URL
# GET orgs;ayesha
orgs_url = urljoin(BASE_URL, '/orgs;ayesha')
headers = {'Authorization': 'Bearer %s' % TOKEN1, 'Accept': 'application/json'}
r = requests.get(orgs_url, headers=headers)
if r.status_code == 200:
print 'correctly received org information %s' % r.content
else:
print 'failed to get org information for url %s %s %s' % (orgs_url, r.status_code, r.text)
return
return
if __name__ == '__main__':
main()
``` |
{
"source": "310224954/Fitapplication",
"score": 2
} |
#### File: Main/Food/models.py
```python
from django.db import models
from django.contrib import messages
from django.db.models import Sum
from django.urls import reverse
from django.template.defaultfilters import slugify
from datetime import date
class Products(models.Model):
"""
Class created for single, raw products contains
all nutrition information.
"""
name = models.CharField(max_length=50)
protein = models.FloatField()
carbohydrates = models.FloatField()
fat = models.FloatField()
description = models.TextField(blank=True)
quantity = models.IntegerField(blank=True)
price = models.DecimalField(max_digits=100, decimal_places=2, blank=True)
date = models.TimeField(auto_now_add=True)
food_type = models.CharField(max_length=6, choices=(
("1", "Meat"),
("2", "Fruit"),
("3", "Vegetable"),
("4", "SeaFood"),
("5", "Nuts"),
("6", "Grains"),
("7", "Diary")
)
)
slug = models.CharField(
max_length=20,
null= True,
unique=True,
)
class Meta:
ordering = ["name"]
def save(self, *args, **kwargs):
self.fat = round(self.fat, 1)
self.protein = round(self.protein, 1)
self.carbohydrates = round(self.carbohydrates, 1)
if not self.slug:
self.slug = slugify(self.name)
self.name = self.name.lower()
super(Products, self).save(*args, **kwargs)
def __str__(self):
return f"{self.name}"
@property
def name_type(self):
return "{} {}".format(self.name + self.food_type)
@property
def short_description(self):
description = self.description
short_description = ""
for i,element in enumerate(description):
if(i > 30 and element==" "):
break
short_description += element
short_description += " ..."
return short_description
def get_absolute_url(self):
return reverse("prod_desc", kwargs={"pk":self.pk})
class Meals(models.Model):
"""
Class for more complex food items, like whole dishes, meals.
Data about their nurtition is pulled from Products class objects.
"""
name = models.CharField(max_length=40)
ingredient = models.ManyToManyField(Products, related_name="products")
description = models.TextField(blank=True, max_length=750)
ingredients_weights = models.CharField(max_length=40, blank=True)
slug = models.CharField(
max_length=81,
blank= True,
unique=True,
)
@property
def weight_as_list(self):
return self.ingredients_weights.split(",")
def save(self, *args, **kwargs):
self.name = self.name.lower()
if not self.slug:
self.slug = slugify(self.name)
super(Meals, self).save(*args, **kwargs)
def get_all_ingredients(self):
return self.ingredient.all()
def __str__(self):
return self.name
@property
def protein(self):
protein_sum =0
for i, ing in enumerate(self.ingredient.all()):
protein_sum += ing.protein * int(self.ingredients_weights.split(",")[i]) / 100
return protein_sum
#return self.ingredient.aggregate(Sum("protein"))["protein__sum"]
@property
def carbohydrates(self):
return self.ingredient.aggregate(Sum("carbohydrates"))["carbohydrates__sum"]
@property
def fat(self):
return self.ingredient.aggregate(Sum("fat"))["fat__sum"]
@property
def quantity(self):
return self.ingredient.aggregate(Sum("quantity"))["quantity__sum"]
@property
def diet_category(self):
diet_types = "vegan, vegeterian, Keto, Paleo, Gluten-free"
food_types = ""
for ing in self.ingredient.all():
food_types += ing.food_type
if "1" in food_types:
diet_types = diet_types.replace("vegan, ", "").replace(" vegeterian,", "")
if "7" in food_types or "4" in food_types:
diet_types = diet_types.replace("vegan,", "")
if "6" in food_types:
diet_types = diet_types.replace(" Keto,", "").replace(" Paleo,", "").replace(" Gluten-free", "")
if "2" in food_types:
diet_types = diet_types.replace(" Keto,", "")
return (diet_types + " | " + food_types)
def get_absolute_url(self):
return reverse("meal_desc", kwargs={"pk":self.pk})
```
#### File: Main/Home/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
def home_view(request):
#return HttpResponse("<h1>Welcome to my fit django website</h1>")
context = {}
return render(request, "Home/home.html", context)
def contact_view(request):
return render(request, "Home/contact.html", {})
#return HttpResponse("<h1>Please call me</h1>")
def ciriculum_vitae_view(request):
pass
def socai_media(request):
pass
``` |
{
"source": "3110/uiflow-custom-block-generator",
"score": 2
} |
#### File: uiflow-custom-block-generator/uiflow_custom_block_generator/uiflow_custom_block_generator.py
```python
import json
import os
import re
DEFAULT_ENCODING = "utf-8"
DEFAULT_PYTHON_CODE_INDENT = 4
EXT_M5B = "m5b"
EXT_PY = "py"
FIELD_LABEL = "field_label"
FIELD_INPUT = "field_input"
FIELD_NUMBER = "field_number"
INPUT_VALUE = "input_value"
VALUE = "value"
KEY_ARGS = "args"
KEY_BLOCKS = "blocks"
KEY_CATEGORY = "category"
KEY_CODE = "code"
KEY_COLOR = "color"
KEY_COLOUR = "colour" # for m5b
KEY_JSCODE = "jscode"
KEY_MESSAGE = "message"
KEY_NAME = "name"
KEY_OUTPUT = "output"
KEY_PARAMS = "params"
KEY_PREVIOUS_STATEMENT = "previousStatement"
KEY_NEXT_STATEMENT = "nextStatement"
KEY_SPELL_CHECK = "spellcheck"
KEY_TEXT = "text"
KEY_TYPE = "type"
KEY_VALUE = VALUE
BLOCK_PARAM_TYPE_LABEL = "label"
BLOCK_PARAM_TYPE_STRING = "string"
BLOCK_PARAM_TYPE_NUMBER = "number"
BLOCK_PARAM_TYPE_VARIABLE = "variable"
BLOCK_PARAM_TYPES = [
BLOCK_PARAM_TYPE_LABEL,
BLOCK_PARAM_TYPE_STRING,
BLOCK_PARAM_TYPE_NUMBER,
BLOCK_PARAM_TYPE_VARIABLE,
]
BLOCK_TYPE_VALUE = VALUE
BLOCK_TYPE_EXECUTE = "execute"
BLOCK_NAME_FORMAT = "__{category}_{name}"
BLOCK_SETTING_REQUIRED_KEYS = [KEY_CATEGORY, KEY_COLOR, KEY_BLOCKS]
BLOCK_REQUIRED_KEYS = [KEY_NAME, KEY_TYPE]
BLOCK_PARAM_REQUIRED_KEYS = [KEY_NAME, KEY_TYPE]
BLOCK_TYPE_PARAMS = {
BLOCK_TYPE_VALUE: {
KEY_OUTPUT: None,
},
BLOCK_TYPE_EXECUTE: {
KEY_PREVIOUS_STATEMENT: None,
KEY_NEXT_STATEMENT: None,
},
}
TEMPLATE_FILENAME = "{root}.{ext}"
TEMPLATE_BLOCK_COMMENT = "// Block {block_name}"
TEMPLATE_BLOCK_CODE_VARIABLE = (
"var {var_name} = Blockly.Python.valueToCode(block, '{var_name}', Blockly.Python.ORDER_NONE);"
)
TEMPLATE_BLOCK_CODE_FIELD_VALUE = "var {var_name} = block.getFieldValue('{var_name}');"
TEMPLATE_BLOCK_JSON_CODE = "var {block_name}_json = {json};"
TEMPLATE_BLOCK_DEFINITION = '''window['Blockly'].Blocks['{block_name}'] = {{
init: function() {{
this.jsonInit({block_name}_json);
}}
}};
'''
TEMPLATE_BLOCK_CODE = {
BLOCK_TYPE_VALUE: '''window['Blockly'].Python['{block_name}'] = function(block) {{
{vars}return [`{python_code}`, Blockly.Python.ORDER_CONDITIONAL]
}};
''',
BLOCK_TYPE_EXECUTE: '''window['Blockly'].Python['{block_name}'] = function(block) {{
{vars}return `{python_code}\n\n`
}};
''',
}
def to_camel(s):
w = re.split(r"[\s_-]", s.lower())
return "".join([v if p == 0 else v.capitalize() for p, v in enumerate(w)])
def validate_argument(arg, t):
if not isinstance(arg, t):
raise UiFlowCustomBlockGeneratorError("Illegal Argument: expected: {}, actual: {}".format(type(t), type(arg)))
def validate_required_keys(target, required_keys, where):
for k in required_keys:
if k not in target.keys():
raise MissingRequiredKey(k, where)
class UiFlowCustomBlockGeneratorError(Exception):
pass
class MissingRequiredKey(UiFlowCustomBlockGeneratorError):
def __init__(self, key, where):
super().__init__(f"{key} in {where}")
class LabelParameterGenerator:
def generate_args(self, pos, name):
return {f"{KEY_MESSAGE}{pos}": "%1", f"{KEY_ARGS}{pos}": [{KEY_TYPE: FIELD_LABEL, KEY_TEXT: str(name)}]}
def generate_vars(self, name):
return ""
class StringParameterGenerator:
def generate_args(self, pos, name):
return {
f"{KEY_MESSAGE}{pos}": "%1 %2",
f"{KEY_ARGS}{pos}": [
{KEY_TYPE: FIELD_LABEL, KEY_TEXT: str(name)},
{KEY_TYPE: FIELD_INPUT, KEY_TEXT: "", KEY_SPELL_CHECK: False, KEY_NAME: str(name)},
],
}
def generate_vars(self, name):
return TEMPLATE_BLOCK_CODE_FIELD_VALUE.format(var_name=name)
class NumberParameterGenerator:
def generate_args(self, pos, name):
return {
f"{KEY_MESSAGE}{pos}": "%1 %2",
f"{KEY_ARGS}{pos}": [
{KEY_TYPE: FIELD_LABEL, KEY_TEXT: str(name)},
{KEY_TYPE: FIELD_NUMBER, KEY_VALUE: 0, KEY_NAME: str(name)},
],
}
def generate_vars(self, name):
return TEMPLATE_BLOCK_CODE_FIELD_VALUE.format(var_name=name)
class VariableParameterGenerator:
def generate_args(self, pos, name):
return {
f"{KEY_MESSAGE}{pos}": "%1 %2",
f"{KEY_ARGS}{pos}": [
{KEY_TYPE: FIELD_LABEL, KEY_TEXT: str(name)},
{KEY_TYPE: INPUT_VALUE, KEY_NAME: name},
],
}
def generate_vars(self, name):
return TEMPLATE_BLOCK_CODE_VARIABLE.format(var_name=name)
class ParameterGenerator:
def __init__(self):
self.generators = {
BLOCK_PARAM_TYPE_LABEL: LabelParameterGenerator(),
BLOCK_PARAM_TYPE_STRING: StringParameterGenerator(),
BLOCK_PARAM_TYPE_NUMBER: NumberParameterGenerator(),
BLOCK_PARAM_TYPE_VARIABLE: VariableParameterGenerator(),
}
def generate_args(self, params):
args = {}
for pos, p in enumerate(params):
args.update(self.generators[p[KEY_TYPE]].generate_args(pos, p[KEY_NAME]))
return args
def generate_vars(self, params):
return [self.generators[p[KEY_TYPE]].generate_vars(p[KEY_NAME]) for p in params]
class BlockGenerator:
def __init__(self, base_dir, logger=None):
self.logger = logger
self.base_dir = base_dir
self.param_generator = ParameterGenerator()
def validate_parameter_types(self, block_name, params):
for pos, p in enumerate(params):
if not p[KEY_TYPE] in BLOCK_PARAM_TYPES:
raise UiFlowCustomBlockGeneratorError(
"Illegal Parameter Type: {type} (#{pos} parameter in the block \"{name}\")".format(
type=p[KEY_TYPE], pos=pos + 1, name=block_name
)
)
def validate(self, block):
validate_argument(block, dict)
validate_required_keys(block, BLOCK_REQUIRED_KEYS, "block")
self.validate_parameter_types(block[KEY_NAME], block.get(KEY_PARAMS, []))
def load(self, name, encoding=DEFAULT_ENCODING):
file_path = os.path.normpath(os.path.join(self.base_dir, TEMPLATE_FILENAME.format(root=name, ext=EXT_PY)))
self.logger.debug(f"Block Code: {file_path}")
with open(file_path, "r", encoding=encoding) as f:
return "\n".join([line.rstrip() for line in f.readlines()])
def generate(self, category, color, block):
self.validate(block)
name = block[KEY_NAME]
block_name = BLOCK_NAME_FORMAT.format(category=category, name=to_camel(name))
params = block[KEY_PARAMS]
args = {}
args.update(BLOCK_TYPE_PARAMS[block[KEY_TYPE]])
args.update(self.param_generator.generate_args(params))
args.update({KEY_COLOUR: color})
vs = self.param_generator.generate_vars(params)
result = [
TEMPLATE_BLOCK_COMMENT.format(block_name=block_name),
TEMPLATE_BLOCK_JSON_CODE.format(
block_name=block_name, json=json.dumps(args, ensure_ascii=False, indent=DEFAULT_PYTHON_CODE_INDENT)
),
TEMPLATE_BLOCK_DEFINITION.format(block_name=block_name),
TEMPLATE_BLOCK_CODE[block[KEY_TYPE]].format(
block_name=block_name, vars="".join([v + "\n" for v in vs]), python_code=self.load(name)
),
]
return result
class UiFlowCustomBlockGenerator:
def __init__(self, config, target_dir=None, logger=None):
self.logger = logger
with config:
self.config = json.load(config)
validate_required_keys(self.config, BLOCK_SETTING_REQUIRED_KEYS, "setting")
self.base_dir = os.path.abspath(os.path.dirname(config.name))
self.target_dir = os.path.abspath(target_dir) if target_dir else self.base_dir
self.filename = os.path.splitext(os.path.basename(config.name))[0]
self.block_generator = BlockGenerator(self.base_dir, logger)
def generate(self):
category = self.config[KEY_CATEGORY]
color = self.config[KEY_COLOR]
m5b = {KEY_CATEGORY: category, KEY_COLOR: color, KEY_BLOCKS: [], KEY_JSCODE: ""}
for block in self.config[KEY_BLOCKS]:
m5b[KEY_BLOCKS].append(BLOCK_NAME_FORMAT.format(category=category, name=to_camel(block[KEY_NAME])))
m5b[KEY_JSCODE] += "\n".join(self.block_generator.generate(category, color, block))
self.dump(m5b)
def dump(self, data, encoding=DEFAULT_ENCODING):
file_path = os.path.normpath(
os.path.join(self.target_dir, TEMPLATE_FILENAME.format(root=self.filename, ext=EXT_M5B))
)
self.logger.debug("Write M5B: " + file_path)
with open(file_path, "w", encoding=encoding) as f:
json.dump(data, f, ensure_ascii=False)
``` |
{
"source": "311devs/clickhouse-sqlalchemy",
"score": 2
} |
#### File: src/drivers/base.py
```python
import re
import six
from sqlalchemy import schema, types as sqltypes, exc, util as sa_util
from sqlalchemy.engine import default, reflection
from sqlalchemy.sql import compiler, expression, type_api, crud
from sqlalchemy.types import DATE, DATETIME, INTEGER, VARCHAR, FLOAT
from .. import types
# Column spec
colspecs = {}
# Type converters
ischema_names = {
'Int64': INTEGER,
'Int32': INTEGER,
'Int16': INTEGER,
'Int8': INTEGER,
'UInt64': INTEGER,
'UInt32': INTEGER,
'UInt16': INTEGER,
'UInt8': INTEGER,
'Date': DATE,
'DateTime': DATETIME,
'Float64': FLOAT,
'Float32': FLOAT,
'String': VARCHAR,
'FixedString': VARCHAR,
'Enum8': types.Enum8,
'Enum16': types.Enum16,
'Array': types.Array
}
class ClickHouseIdentifierPreparer(compiler.IdentifierPreparer):
def quote_identifier(self, value):
# Never quote identifiers.
return self._escape_identifier(value)
def quote(self, ident, force=None):
return ident
class ClickHouseCompiler(compiler.SQLCompiler):
def visit_count_func(self, fn, **kw):
# count accepts zero arguments.
return 'count%s' % self.process(fn.clause_expr, **kw)
def visit_case(self, clause, **kwargs):
text = 'CASE '
if clause.value is not None:
text += clause.value._compiler_dispatch(self, **kwargs) + " "
for cond, result in clause.whens:
text += 'WHEN ' + cond._compiler_dispatch(
self, **kwargs
) + ' THEN ' + result._compiler_dispatch(
self, **kwargs) + " "
if clause.else_ is None:
raise exc.CompileError('ELSE clause is required in CASE')
text += 'ELSE ' + clause.else_._compiler_dispatch(
self, **kwargs
) + ' END'
return text
def visit_if__func(self, func, **kw):
return "(%s) ? (%s) : (%s)" % (
self.process(func.clauses.clauses[0], **kw),
self.process(func.clauses.clauses[1], **kw),
self.process(func.clauses.clauses[2], **kw)
)
def limit_clause(self, select, **kw):
text = ''
if select._limit_clause is not None:
text += ' \n LIMIT '
if select._offset_clause is not None:
text += self.process(select._offset_clause, **kw) + ', '
text += self.process(select._limit_clause, **kw)
else:
if select._offset_clause is not None:
raise exc.CompileError('OFFSET without LIMIT is not supported')
return text
def visit_extract(self, extract, **kw):
field = self.extract_map.get(extract.field, extract.field)
column = self.process(extract.expr, **kw)
if field == 'year':
return 'toYear(%s)' % column
elif field == 'month':
return 'toMonth(%s)' % column
elif field == 'day':
return 'toDayOfMonth(%s)' % column
else:
return column
def _compose_select_body(
self, text, select, inner_columns, froms, byfrom, kwargs):
text += ', '.join(inner_columns)
if froms:
text += " \nFROM "
if select._hints:
text += ', '.join(
[f._compiler_dispatch(self, asfrom=True,
fromhints=byfrom, **kwargs)
for f in froms])
else:
text += ', '.join(
[f._compiler_dispatch(self, asfrom=True, **kwargs)
for f in froms])
else:
text += self.default_from()
sample_clause = getattr(select, '_sample_clause', None)
if sample_clause is not None:
text += self.sample_clause(select, **kwargs)
if select._whereclause is not None:
t = select._whereclause._compiler_dispatch(self, **kwargs)
if t:
text += " \nWHERE " + t
if select._group_by_clause.clauses:
text += self.group_by_clause(select, **kwargs)
if select._having is not None:
t = select._having._compiler_dispatch(self, **kwargs)
if t:
text += " \nHAVING " + t
if select._order_by_clause.clauses:
text += self.order_by_clause(select, **kwargs)
if (select._limit_clause is not None or
select._offset_clause is not None):
text += self.limit_clause(select, **kwargs)
if select._for_update_arg is not None:
text += self.for_update_clause(select, **kwargs)
return text
def sample_clause(self, select, **kw):
return " \nSAMPLE " + self.process(select._sample_clause, **kw)
def group_by_clause(self, select, **kw):
text = ""
group_by = select._group_by_clause._compiler_dispatch(
self, **kw)
if group_by:
text = " GROUP BY " + group_by
if getattr(select, '_with_totals', False):
text += " WITH TOTALS"
return text
def visit_join(self, join, asfrom=False, **kwargs):
prefix = "ALL" # standard SQL behaviour
if join.full:
join_type = "FULL JOIN "
elif join.isouter:
join_type = "LEFT JOIN"
else:
join_type = "INNER JOIN"
return ' '.join(
(self.process(join.left, asfrom=True, **kwargs),
prefix,
join_type,
self.process(join.right, asfrom=True, **kwargs),
"ON", self.process(join.onclause, asfrom=True, **kwargs)
))
def visit_update(self, update_stmt, asfrom=False, **kw):
text = 'ALTER TABLE '
table_text = self.update_tables_clause(update_stmt, update_stmt.table, [], **kw)
text += table_text
text += ' UPDATE '
crud_params = crud._setup_crud_params(
self, update_stmt, crud.ISUPDATE, include_table=False, **kw)
text += ', '.join(
c[0]._compiler_dispatch(self,
include_table=False) +
'=' + c[1] for c in crud_params
)
if update_stmt._whereclause is not None:
t = update_stmt._whereclause._compiler_dispatch(self, include_table=False)
if t:
text += " WHERE " + t
return text
class ClickHouseDDLCompiler(compiler.DDLCompiler):
def visit_create_column(self, create, **kw):
column = create.element
nullable = column.nullable
# All columns including synthetic PKs must be 'nullable'
column.nullable = True
rv = super(ClickHouseDDLCompiler, self).visit_create_column(
create, **kw
)
column.nullable = nullable
return rv
def visit_primary_key_constraint(self, constraint):
# Do not render PKs.
return ''
def visit_engine(self, engine):
compiler = self.sql_compiler
def compile_param(expr):
if not isinstance(expr, expression.ColumnClause):
if not hasattr(expr, 'self_group'):
# assuming base type (int, string, etc.)
return six.text_type(expr)
else:
expr = expr.self_group()
return compiler.process(
expr, include_table=False, literal_binds=True
)
engine_params = engine.get_params()
engine_partition_by = engine.get_partition_by()
engine_order_by = engine.get_order_by()
engine_sample_by = engine.get_sample_by()
engine_settings = engine.get_settings()
text = engine.name()
if engine_params:
text += '('
compiled_params = []
for param in engine_params:
if isinstance(param, tuple):
compiled = (
'(' +
', '.join(compile_param(p) for p in param) +
')'
)
else:
compiled = compile_param(param)
compiled_params.append(compiled)
text += ', '.join(compiled_params)
text += ')'
if engine_partition_by is not None:
text += "\nPARTITION BY " + compile_param(engine_partition_by)
if engine_order_by is not None:
text += "\nORDER BY (" + ', '.join([compile_param(p) for p in engine_order_by]) + ")"
if engine_sample_by is not None:
text += "\nSAMPLE BY " + compile_param(engine_sample_by)
if engine_settings is not None:
text += "\nSETTINGS " + ', '.join([k + '=' + compile_param(v) for k, v in engine_settings.items()])
return text
def post_create_table(self, table):
engine = getattr(table, 'engine', None)
if not engine:
raise exc.CompileError("No engine for table '%s'" % table.name)
return ' ENGINE = ' + self.process(engine)
class ClickHouseTypeCompiler(compiler.GenericTypeCompiler):
def visit_string(self, type_, **kw):
if type_.length is None:
return 'String'
else:
return 'FixedString(%s)' % type_.length
def visit_array(self, type_, **kw):
item_type = type_api.to_instance(type_.item_type)
return "Array(%s)" % self.process(item_type, **kw)
def visit_nullable(self, type_, **kw):
nested_type = type_api.to_instance(type_.nested_type)
return "Nullable(%s)" % self.process(nested_type, **kw)
def visit_int8(self, type_, **kw):
return 'Int8'
def visit_uint8(self, type_, **kw):
return 'UInt8'
def visit_int16(self, type_, **kw):
return 'Int16'
def visit_uint16(self, type_, **kw):
return 'UInt16'
def visit_int32(self, type_, **kw):
return 'Int32'
def visit_uint32(self, type_, **kw):
return 'UInt32'
def visit_int64(self, type_, **kw):
return 'Int64'
def visit_uint64(self, type_, **kw):
return 'UInt64'
def visit_date(self, type_, **kw):
return 'Date'
def visit_float32(self, type_, **kw):
return 'Float32'
def visit_float64(self, type_, **kw):
return 'Float64'
def _render_enum(self, db_type, type_, **kw):
choices = (
"'%s' = %d" %
(x.name.replace("'", "\\'"), x.value) for x in type_.enum_type
)
return "%s(%s)" % (db_type, ', '.join(choices))
def visit_enum8(self, type_, **kw):
return self._render_enum('Enum8', type_, **kw)
def visit_enum16(self, type_, **kw):
return self._render_enum('Enum16', type_, **kw)
def visit_DATETIME(self, type_, **kw):
return 'DateTime'
def visit_numeric(self, type_, **kw):
return 'Decimal(%s, %s)' % (type_.precision, type_.scale)
class ClickHouseExecutionContextBase(default.DefaultExecutionContext):
@sa_util.memoized_property
def should_autocommit(self):
return False # No DML supported, never autocommit
class ClickHouseDialect(default.DefaultDialect):
name = 'clickhouse'
supports_cast = True
supports_unicode_statements = True
supports_unicode_binds = True
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
supports_native_decimal = True
supports_native_boolean = False
supports_alter = True
supports_sequences = False
supports_native_enum = True # Do not render check constraints on enums.
supports_multivalues_insert = True
max_identifier_length = 127
default_paramstyle = 'pyformat'
colspecs = colspecs
ischema_names = ischema_names
convert_unicode = True
returns_unicode_strings = True
description_encoding = None
postfetch_lastrowid = False
preparer = ClickHouseIdentifierPreparer
type_compiler = ClickHouseTypeCompiler
statement_compiler = ClickHouseCompiler
ddl_compiler = ClickHouseDDLCompiler
construct_arguments = [
(schema.Table, {
"data": []
})
]
def _execute(self, connection, sql):
raise NotImplementedError
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
return self.get_table_names(connection, schema, **kw)
def has_table(self, connection, table_name, schema=None):
query = 'EXISTS TABLE {}'.format(table_name)
for r in self._execute(connection, query):
if r.result == 1:
return True
return False
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
query = 'DESCRIBE TABLE {}'.format(table_name)
rows = self._execute(connection, query)
columns = []
for name, type_, default_type, default_expression in rows:
# Get only type without extra modifiers.
type_ = re.search(r'^\w+', type_).group(0)
try:
type_ = ischema_names[type_]
except KeyError:
type_ = sqltypes.NullType
columns.append({
'name': name,
'type': type_,
'nullable': True,
'default': None,
})
return columns
@reflection.cache
def get_schema_names(self, connection, **kw):
# No support for schemas.
return []
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
# No support for foreign keys.
return []
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
# No support for primary keys.
return []
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
# No support for indexes.
return []
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
return [row.name for row in self._execute(connection, 'SHOW TABLES')]
def do_rollback(self, dbapi_connection):
# No support for transactions.
pass
def do_executemany(self, cursor, statement, parameters, context=None):
cursor.executemany(statement, parameters, context=context)
def do_execute(self, cursor, statement, parameters, context=None):
cursor.execute(statement, parameters, context=context)
def _check_unicode_returns(self, connection, additional_tests=None):
return True
def _check_unicode_description(self, connection):
return True
```
#### File: src/sql/selectable.py
```python
from sqlalchemy.sql.selectable import Select as StandardSelect
from ..ext.clauses import sample_clause
__all__ = ('Select', 'select')
class Select(StandardSelect):
_with_totals = False
_sample_clause = None
def with_totals(self, _with_totals=True):
self._with_totals = _with_totals
return self
def sample(self, sample):
self._sample_clause = sample_clause(sample)
return self
select = Select
``` |
{
"source": "312shan/FastHMM",
"score": 3
} |
#### File: FastHMM/FastHMM/non_rec_viterbi.py
```python
import math
from collections import deque
from functools import reduce
from typing import List, Union, Set, Dict, DefaultDict, Tuple
class Viterbi(object):
def __init__(
self,
A: Union[Dict[str, Dict[str, float]], DefaultDict[str, Dict[str, float]]],
B: Union[Dict[str, Dict[str, float]], DefaultDict[str, Dict[str, float]]],
PI: Union[Dict[str, float], DefaultDict[str, float]],
STATE: Union[Set[str], List[str]],
very_small_probability: float = 1e-32
):
self._A = A
self._B = B
self._PI = PI
self._STATE = set(STATE)
# TODO: find out what is the best value and why?
self._MINI_FOR_ZERO = math.log(very_small_probability)
def predict_state(self, word_list):
# type: (List[str]) -> Tuple[List[str], float]
return self._viterbi(word_list)
def p_aij(self, i, j):
if not self._A.get(i):
return self._MINI_FOR_ZERO
return self._A[i].get(j, self._MINI_FOR_ZERO)
def p_bik(self, i, k):
if not self._B.get(i):
return self._MINI_FOR_ZERO
return self._B[i].get(k, self._MINI_FOR_ZERO)
def p_pi(self, i):
return self._PI.get(i, self._MINI_FOR_ZERO)
def _viterbi(self, obs_init):
# type: (List[str]) -> Tuple[List[str], float]
"""
Viterbi decode algorithm
Uses queues to store calculated candidate sequences and their probabilities
:param obs_init: Observation sequence
:return: Hidden state sequences and probability scores
"""
q = deque()
q.append((obs_init, {}, []))
while q:
obs, val_pre, qseq_pre = q.popleft()
if len(obs) == 0:
val_temp = [(qseq_pre[q_] + [q_], val_pre[q_]) for q_ in self._STATE]
max_q_seq = reduce(lambda x1, x2: x2 if x2[1] > x1[1] else x1, val_temp)
seq, val = max_q_seq
return seq, val
val = {}
qseq = {}
for cur_state in self._STATE:
if len(val_pre) == 0:
val.update({cur_state: self.p_pi(cur_state) + self.p_bik(cur_state, obs[0])})
qseq.update({cur_state: []})
else:
# transition probability of (pre_tag->cur_tag) * Output probability of (cur_tag->obs[0])
val_temp = [(qseq_pre[q_pre] + [q_pre],
val_pre[q_pre] + self.p_aij(q_pre, cur_state) + self.p_bik(cur_state, obs[0]))
for q_pre in self._STATE]
# gain tuple with max probability
max_q_seq = reduce(lambda x1, x2: x2 if x2[1] > x1[1] else x1, val_temp)
val.update({cur_state: max_q_seq[1]})
qseq.update({cur_state: max_q_seq[0]})
q.append((obs[1:], val, qseq))
if __name__ == "__main__":
STATE = ['A', 'B', 'C']
PI = {'A': .8}
A = {'A': {'A': 0.1, 'B': .7, 'C': .2, }, 'B': {'A': .1, 'B': 0.1, 'C': .8}, 'C': {'A': .1, 'B': 0.1, 'C': .8}}
B = {'A': {'你': 0.5, '我': 0.5},
'B': {'是': 0.4, '打': 0.6},
'C': {'人': 0.5, '中国人': 0.5}}
viterbi = Viterbi(A, B, PI, STATE)
trace = viterbi.predict_state(["我", "打", "中国人"])
print(trace)
state_sequence = viterbi.predict_state(["我", "是", "中国人"])
print(state_sequence)
``` |
{
"source": "31337H4X0R/crab-tracker",
"score": 3
} |
#### File: crab-tracker/GUI/crab.py
```python
class Crab:
def __init__(self, crab_id, sex, species, color, damage, carapace, mass, epibiont, molt):
self.id = crab_id
self.sex = sex
self.species = species
self.color = color
self.damage = damage
self.carapace = carapace
self.mass = mass
self.epibiont = epibiont
self.molt = molt
def get_tuple(self):
return self.id, self.sex, self.species, self.color, self.damage, self.carapace, self.mass, self.epibiont, self.molt
``` |
{
"source": "31337mbf/MLAlgorithms",
"score": 3
} |
#### File: MLAlgorithms/mla/gaussian_mixture.py
```python
import random
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import multivariate_normal
from mla.base import BaseEstimator
from mla.kmeans import KMeans
class GaussianMixture(BaseEstimator):
"""Gaussian Mixture Model: clusters with Gaussian prior.
Finds clusters by repeatedly performing Expectation–Maximization (EM) algorithm
on the dataset. GMM assumes the datasets is distributed in multivariate Gaussian,
and tries to find the underlying structure of the Gaussian, i.e. mean and covariance.
E-step computes the "responsibility" of the data to each cluster, given the mean
and covariance; M-step computes the mean, covariance and weights (prior of each
cluster), given the responsibilities. It iterates until the total likelihood
changes less than the tolerance.
Parameters
----------
K : int
The number of clusters into which the dataset is partitioned.
max_iters: int
The maximum iterations of assigning points to the perform EM.
Short-circuited by the assignments converging on their own.
init: str, default 'random'
The name of the method used to initialize the first clustering.
'random' - Randomly select values from the dataset as the K centroids.
'kmeans' - Initialize the centroids, covariances, weights with KMeams's clusters.
tolerance: float, default 1e-3
The tolerance of difference of the two latest likelihood for convergence.
"""
y_required = False
def __init__(self, K=4, init="random", max_iters=500, tolerance=1e-3):
self.K = K
self.max_iters = max_iters
self.init = init
self.assignments = None
self.likelihood = []
self.tolerance = tolerance
def fit(self, X, y=None):
"""Perform Expectation–Maximization (EM) until converged."""
self._setup_input(X, y)
self._initialize()
for _ in range(self.max_iters):
self._E_step()
self._M_step()
if self._is_converged():
break
def _initialize(self):
"""Set the initial weights, means and covs (with full covariance matrix).
weights: the prior of the clusters (what percentage of data does a cluster have)
means: the mean points of the clusters
covs: the covariance matrix of the clusters
"""
self.weights = np.ones(self.K)
if self.init == "random":
self.means = [self.X[x] for x in random.sample(range(self.n_samples), self.K)]
self.covs = [np.cov(self.X.T) for _ in range(self.K)]
elif self.init == "kmeans":
kmeans = KMeans(K=self.K, max_iters=self.max_iters // 3, init="++")
kmeans.fit(self.X)
self.assignments = kmeans.predict()
self.means = kmeans.centroids
self.covs = []
for i in np.unique(self.assignments):
self.weights[int(i)] = (self.assignments == i).sum()
self.covs.append(np.cov(self.X[self.assignments == i].T))
else:
raise ValueError("Unknown type of init parameter")
self.weights /= self.weights.sum()
def _E_step(self):
"""Expectation(E-step) for Gaussian Mixture."""
likelihoods = self._get_likelihood(self.X)
self.likelihood.append(likelihoods.sum())
weighted_likelihoods = self._get_weighted_likelihood(likelihoods)
self.assignments = weighted_likelihoods.argmax(axis=1)
weighted_likelihoods /= weighted_likelihoods.sum(axis=1)[:, np.newaxis]
self.responsibilities = weighted_likelihoods
def _M_step(self):
"""Maximization (M-step) for Gaussian Mixture."""
weights = self.responsibilities.sum(axis=0)
for assignment in range(self.K):
resp = self.responsibilities[:, assignment][:, np.newaxis]
self.means[assignment] = (resp * self.X).sum(axis=0) / resp.sum()
self.covs[assignment] = (self.X - self.means[assignment]).T.dot(
(self.X - self.means[assignment]) * resp
) / weights[assignment]
self.weights = weights / weights.sum()
def _is_converged(self):
"""Check if the difference of the latest two likelihood is less than the tolerance."""
if (len(self.likelihood) > 1) and (self.likelihood[-1] - self.likelihood[-2] <= self.tolerance):
return True
return False
def _predict(self, X):
"""Get the assignments for X with GMM clusters."""
if not X.shape:
return self.assignments
likelihoods = self._get_likelihood(X)
weighted_likelihoods = self._get_weighted_likelihood(likelihoods)
assignments = weighted_likelihoods.argmax(axis=1)
return assignments
def _get_likelihood(self, data):
n_data = data.shape[0]
likelihoods = np.zeros([n_data, self.K])
for c in range(self.K):
likelihoods[:, c] = multivariate_normal.pdf(data, self.means[c], self.covs[c])
return likelihoods
def _get_weighted_likelihood(self, likelihood):
return self.weights * likelihood
def plot(self, data=None, ax=None, holdon=False):
"""Plot contour for 2D data."""
if not (len(self.X.shape) == 2 and self.X.shape[1] == 2):
raise AttributeError("Only support for visualizing 2D data.")
if ax is None:
_, ax = plt.subplots()
if data is None:
data = self.X
assignments = self.assignments
else:
assignments = self.predict(data)
COLOR = "bgrcmyk"
cmap = lambda assignment: COLOR[int(assignment) % len(COLOR)]
# generate grid
delta = 0.025
margin = 0.2
xmax, ymax = self.X.max(axis=0) + margin
xmin, ymin = self.X.min(axis=0) - margin
axis_X, axis_Y = np.meshgrid(np.arange(xmin, xmax, delta), np.arange(ymin, ymax, delta))
def grid_gaussian_pdf(mean, cov):
grid_array = np.array(list(zip(axis_X.flatten(), axis_Y.flatten())))
return multivariate_normal.pdf(grid_array, mean, cov).reshape(axis_X.shape)
# plot scatters
if assignments is None:
c = None
else:
c = [cmap(assignment) for assignment in assignments]
ax.scatter(data[:, 0], data[:, 1], c=c)
# plot contours
for assignment in range(self.K):
ax.contour(
axis_X,
axis_Y,
grid_gaussian_pdf(self.means[assignment], self.covs[assignment]),
colors=cmap(assignment),
)
if not holdon:
plt.show()
```
#### File: neuralnet/tests/test_activations.py
```python
import sys
import numpy as np
from mla.neuralnet.activations import *
def test_softplus():
# np.exp(z_max) will overflow
z_max = np.log(sys.float_info.max) + 1.0e10
# 1.0 / np.exp(z_min) will overflow
z_min = np.log(sys.float_info.min) - 1.0e10
inputs = np.array([0.0, 1.0, -1.0, z_min, z_max])
# naive implementation of np.log(1 + np.exp(z_max)) will overflow
# naive implementation of z + np.log(1 + 1 / np.exp(z_min)) will
# throw ZeroDivisionError
outputs = np.array([np.log(2.0), np.log1p(np.exp(1.0)), np.log1p(np.exp(-1.0)), 0.0, z_max])
assert np.allclose(outputs, softplus(inputs))
``` |
{
"source": "31337mbf/yapo",
"score": 3
} |
#### File: yapo/cifrum/_instance.py
```python
from typing import List, Dict, Union, Optional
import numpy as np
import pandas as pd
from contracts import contract
from ._portfolio.currency import PortfolioCurrencyFactory, PortfolioCurrency
from ._portfolio.portfolio import Portfolio, PortfolioAsset, PortfolioItemsFactory
from ._search import _Search
from ._sources.registries import FinancialSymbolsRegistry
from .common.enums import Currency, SecurityType
from .common.financial_symbol import FinancialSymbol
from .common.financial_symbol_id import FinancialSymbolId
class Cifrum:
def __init__(self,
financial_symbols_registry: FinancialSymbolsRegistry,
portfolio_currency_factory: PortfolioCurrencyFactory,
portfolio_items_factory: PortfolioItemsFactory,
search: _Search):
self.portfolio_currency_factory = portfolio_currency_factory
self.portfolio_items_factory = portfolio_items_factory
self.financial_symbols_registry = financial_symbols_registry
self.__search = search
self.__period_lowest = '1900-1'
self.__period_highest = lambda: str(pd.Period.now(freq='M'))
def information(self, **kwargs) -> Union[Optional[FinancialSymbol], List[Optional[FinancialSymbol]]]:
"""
Fetches financial symbol information based on internal ID.
The information includes ISIN, short and long
names, exchange, currency, etc.
:param kwargs:
either `name` or `names` should be defined
name (str): name of a financial symbol
names (List[str]): names of financial symbols
:returns: financial symbol information
"""
if 'name' in kwargs:
name = kwargs['name']
financial_symbol_id = FinancialSymbolId.parse(name)
finsym_info = self.financial_symbols_registry.get(financial_symbol_id)
return finsym_info
elif 'names' in kwargs:
names = kwargs['names']
finsym_infos: List[Optional[FinancialSymbol]] = []
for name in names:
finsym_info1 = self.information(name=name)
if not (finsym_info1 is None or isinstance(finsym_info1, FinancialSymbol)):
raise ValueError('Unexpected type of financial symbol information')
finsym_infos.append(finsym_info1)
return finsym_infos
else:
raise Exception('Unexpected state of kwargs')
def portfolio_asset(self,
currency: str = None,
start_period: str = None, end_period: str = None,
**kwargs) -> Union[PortfolioAsset, List[PortfolioAsset], None]:
if start_period is None:
start_period = self.__period_lowest
if end_period is None:
end_period = self.__period_highest()
if 'name' in kwargs:
start_period = pd.Period(start_period, freq='M')
end_period = pd.Period(end_period, freq='M')
name: str = kwargs['name']
finsym_info = self.information(name=name)
if finsym_info is None:
return None
if not isinstance(finsym_info, FinancialSymbol):
raise ValueError('Unexpected type of financial symbol information')
if currency is None:
currency_enum: Currency = finsym_info.currency
else:
currency_enum = Currency.__dict__[currency.upper()] # type: ignore
allowed_security_types = {SecurityType.STOCK_ETF, SecurityType.MUT,
SecurityType.CURRENCY, SecurityType.INDEX}
assert finsym_info.security_type in allowed_security_types
a = self.portfolio_items_factory.new_asset(symbol=finsym_info,
start_period=start_period, end_period=end_period,
currency=currency_enum)
return a
elif 'names' in kwargs:
names: List[str] = kwargs['names']
assets: List[PortfolioAsset] = []
for name in names:
pa = self.portfolio_asset(name=name,
start_period=start_period, end_period=end_period,
currency=currency)
if pa is None:
continue
if not isinstance(pa, PortfolioAsset):
raise ValueError('Unexpected type of portfolio asset')
assets.append(pa)
return assets
else:
raise ValueError('Unexpected state of `kwargs`. Either `name`, or `names` should be given')
pass
@contract(
assets='dict[N](str: float|int,>0), N>0',
)
def portfolio(self,
assets: Dict[str, float],
currency: str,
start_period: str = None, end_period: str = None) -> Portfolio:
"""
:param assets: list of RostSber IDs. Supported security types: stock/ETF, MUT, Currency
:param start_period: preferred period to start
:param end_period: preferred period to end
:param currency: common currency for all assets
:return: returns instance of portfolio
"""
if start_period is None:
start_period = self.__period_lowest
if end_period is None:
end_period = self.__period_highest()
names = list(assets.keys())
assets_resolved = \
self.portfolio_asset(names=names,
start_period=str(pd.Period(start_period, freq='M') - 1),
end_period=end_period,
currency=currency)
if not isinstance(assets_resolved, list):
raise ValueError('`assets_resolved` should be list')
asset2weight_dict: Dict[PortfolioAsset, float] = \
{a: assets[a.symbol.identifier.format()] for a in assets_resolved}
weights_sum: float = \
np.abs(np.fromiter(asset2weight_dict.values(), dtype=float, count=len(asset2weight_dict)).sum())
if np.abs(weights_sum - 1.) > 1e-3:
asset2weight_dict = {a: (w / weights_sum) for a, w in asset2weight_dict.items()}
start_period = pd.Period(start_period, freq='M')
end_period = pd.Period(end_period, freq='M')
currency_enum: Currency = Currency.__dict__[currency.upper()] # type: ignore
portfolio_instance = \
self.portfolio_items_factory.new_portfolio(assets_to_weight=asset2weight_dict,
start_period=start_period, end_period=end_period,
currency=currency_enum)
return portfolio_instance
def available_names(self, **kwargs):
"""
Returns the list of registered financial symbols names
:param kwargs:
either `namespace`, or `namespaces`, or nothing should be provided
namespace (str): namespace of financial symbols
namespaces (List[str]): a list of namespaces of financial symbols
DEFAULT: returns the list of all registered namespaces
:returns: (List[str]) list of financial symbols full names
"""
if 'namespace' in kwargs:
namespace = kwargs['namespace']
return self.financial_symbols_registry.get_all_infos(namespace)
elif 'namespaces' in kwargs:
namespaces = kwargs['namespaces']
assert isinstance(namespaces, list)
return [name
for namespace in namespaces
for name in self.available_names(namespace=namespace)]
else:
return self.financial_symbols_registry.namespaces()
def search(self, query: str, top=10):
return self.__search.perform(query, top)
def inflation(self, currency: str, kind: str,
end_period: str = None,
start_period: str = None, years_ago: int = None):
currency_enum: Currency = Currency.__dict__[currency.upper()] # type: ignore
pc = self.portfolio_currency_factory.new(currency=currency_enum)
if start_period:
start_period = pd.Period(start_period, freq='M')
elif years_ago is None:
start_period = pc.period_min
end_period = pd.Period(end_period, freq='M') if end_period else pc.period_max
inflation_ts = pc.inflation(kind=kind,
start_period=start_period, end_period=end_period,
years_ago=years_ago)
return inflation_ts
def currency(self, currency: str) -> PortfolioCurrency:
currency_enum: Currency = Currency.__dict__[currency.upper()] # type: ignore
pc = self.portfolio_currency_factory.new(currency=currency_enum)
return pc
```
#### File: cifrum/_portfolio/portfolio.py
```python
import copy
import datetime as dtm
from textwrap import dedent
from typing import List, Dict, Optional
import dateutil.relativedelta
import numpy as np
import pandas as pd
from contracts import contract
from .._portfolio.currency import PortfolioCurrency, PortfolioCurrencyFactory
from .._settings import _MONTHS_PER_YEAR
from .._sources.registries import CurrencySymbolsRegistry
from ..common.enums import Currency, Period
from ..common.financial_symbol import FinancialSymbol
from ..common.time_series import TimeSeries, TimeSeriesKind
class PortfolioAsset:
def __init__(self,
currency_symbols_registry: CurrencySymbolsRegistry,
portfolio_items_factory: 'PortfolioItemsFactory',
symbol: FinancialSymbol,
start_period: pd.Period, end_period: pd.Period, currency: PortfolioCurrency,
portfolio: Optional['Portfolio'],
weight: Optional[float]):
if (end_period - start_period).n < 2:
raise ValueError('period range should be at least 2 months')
self.portfolio_items_factory = portfolio_items_factory
self.symbol = symbol
self.currency = currency
self.currency_symbols_registry = currency_symbols_registry
self._portfolio = portfolio
self._weight = weight
datetime_now = dtm.datetime.now()
if (datetime_now + dtm.timedelta(days=1)).month == datetime_now.month:
datetime_now -= dateutil.relativedelta.relativedelta(months=1)
period_now = pd.Period(datetime_now, freq='M')
self._period_min = max(
pd.Period(self.symbol.start_period, freq='M'),
self.currency.period_min,
start_period,
)
self._period_max = min(
pd.Period(self.symbol.end_period, freq='M'),
period_now,
self.currency.period_max,
end_period,
)
if self._period_min >= self._period_max:
raise ValueError('`self._period_min` must not be >= `self._period_max`')
currency_conversion_rate = \
self.__currency_conversion_rate(currency_to=self.currency.value)
self._period_min = max(self._period_min, currency_conversion_rate.start_period)
self._period_max = min(self._period_max, currency_conversion_rate.end_period)
self.__values = self.__transform_values_according_to_period()
@property
def portfolio(self):
return self._portfolio
@property
def weight(self):
return self._weight
def __transform_values_according_to_period(self):
vals = self.symbol.values(start_period=self._period_min, end_period=self._period_max)
if len(vals['period']) > 0:
self._period_min = max(self._period_min, vals['period'].min())
self._period_max = min(self._period_max, vals['period'].max())
# TODO: okama_dev-98
if self.symbol.period == Period.DECADE:
ts = TimeSeries(values=vals['rate'].values,
start_period=self._period_min, end_period=self._period_max,
kind=TimeSeriesKind.DIFF)
else:
ts = TimeSeries(values=vals['close'].values,
start_period=self._period_min, end_period=self._period_max,
kind=TimeSeriesKind.VALUES)
currency_conversion_rate = self.__currency_conversion_rate(currency_to=self.currency.value)
ts = ts * currency_conversion_rate
return ts
def __currency_conversion_rate(self, currency_to: Currency):
currency_from = self.symbol.currency
currency_rate = self.currency_symbols_registry \
.convert(currency_from=currency_from,
currency_to=currency_to,
start_period=self._period_min,
end_period=self._period_max)
currency_rate = TimeSeries(values=currency_rate['close'].values,
start_period=currency_rate['period'].min(),
end_period=currency_rate['period'].max(),
kind=TimeSeriesKind.CURRENCY_RATE)
return currency_rate
def close(self):
return copy.deepcopy(self.__values)
def get_return(self, kind='values', real=False):
if kind not in ['values', 'cumulative', 'ytd']:
raise ValueError('`kind` is not in expected values')
if kind == 'ytd':
ror = self.get_return(kind='values', real=real)
ror_ytd = ror.ytd()
return ror_ytd
ror = self.close().pct_change()
if real:
inflation = self.inflation(kind='values')
ror = (ror + 1.) / (inflation + 1.) - 1.
if kind == 'cumulative':
ror = (ror + 1.).cumprod() - 1.
return ror
def risk(self, period='year'):
"""
Returns risk of the asset
:param period:
month - returns monthly risk
year - returns risk approximated to yearly value
"""
p = self.portfolio_items_factory.new_portfolio(assets_to_weight={self: 1.},
start_period=self._period_min,
end_period=self._period_max,
currency=self.currency.value)
return p.risk(period=period)
@contract(
years_ago='int,>0|None|list[int,>0]',
real='bool',
)
def cagr(self, years_ago=None, real=False):
p = self.portfolio_items_factory.new_portfolio(assets_to_weight={self: 1.},
start_period=self._period_min,
end_period=self._period_max,
currency=self.currency.value)
return p.cagr(years_ago=years_ago, real=real)
def inflation(self, kind: str, years_ago: int = None):
ror = self.get_return()
start_period = None if years_ago else ror.start_period
return self.currency.inflation(kind=kind,
start_period=start_period, end_period=ror.end_period,
years_ago=years_ago)
def __repr__(self):
asset_repr = """\
PortfolioAsset(
symbol: {},
currency: {},
period_min: {},
period_max: {}
)""".format(self.symbol.identifier, self.currency,
self._period_min, self._period_max)
return dedent(asset_repr)
class Portfolio:
def __init__(self,
portfolio_items_factory: 'PortfolioItemsFactory',
assets: List[PortfolioAsset],
weights: List[float],
start_period: pd.Period, end_period: pd.Period,
currency: PortfolioCurrency):
"""
:param start_period: start period of first order diff
:param end_period: end period of first order diff
"""
if (end_period - start_period).n < 2:
raise ValueError('period range should be at least 2 months')
self.weights = weights
self.currency = currency
self._period_min = max(
self.currency.period_min,
*[a._period_min for a in assets],
start_period,
)
self._period_max = min(
self.currency.period_max,
*[a._period_max for a in assets],
end_period,
)
self._assets = [portfolio_items_factory.new_asset(symbol=a.symbol,
start_period=self._period_min,
end_period=self._period_max,
currency=currency.value,
portfolio=self,
weight=w) for a, w in zip(assets, weights)]
assert(len(self._assets) > 0)
@property
def assets(self) -> Dict[str, PortfolioAsset]:
assets_dict = {a.symbol.identifier_str: a for a in self._assets}
return assets_dict
def risk(self, period='year'):
"""
Returns risk of the asset
:param period:
month - returns monthly risk
year - returns risk approximated to yearly value
"""
if period == 'month':
ror = self.get_return()
return ror.std()
elif period == 'year':
ror = self.get_return()
if ror.period_size < 12:
raise Exception('year risk is requested for less than 12 months')
mean = (1. + ror).mean()
risk_monthly = self.risk(period='month')
risk_yearly = ((risk_monthly ** 2 + mean ** 2) ** 12 - mean ** 24).sqrt()
return risk_yearly
else:
raise Exception('unexpected value of `period` {}'.format(period))
@contract(
years_ago='int,>0|None|list[int,>0]',
real='bool',
)
def cagr(self, years_ago=None, real=False):
if years_ago is None:
ror = self.get_return()
years_total = ror.period_size / _MONTHS_PER_YEAR
ror_c = (ror + 1.).prod()
cagr = ror_c ** (1 / years_total) - 1.
if real:
inflation_cumulative = self.inflation(kind='cumulative')
cagr = (cagr + 1.) / (inflation_cumulative + 1.) ** (1 / years_total) - 1.
return cagr
elif isinstance(years_ago, int):
ror = self.get_return()
months_count = years_ago * _MONTHS_PER_YEAR
if ror.period_size < months_count:
return self.cagr(years_ago=None, real=real)
ror_slice = self.get_return()[-months_count:]
ror_slice_c = (ror_slice + 1.).prod()
cagr = ror_slice_c ** (1 / years_ago) - 1.
if real:
inflation_cumulative = self.inflation(kind='cumulative',
years_ago=years_ago)
cagr = (cagr + 1.) / (inflation_cumulative + 1.) ** (1 / years_ago) - 1.
return cagr
else:
raise Exception('unexpected type of `years_ago`: {}'.format(years_ago))
def get_return(self, kind='values', real=False) -> TimeSeries:
if kind not in ['values', 'cumulative', 'ytd']:
raise ValueError('`kind` is not in expected values')
if kind == 'ytd':
ror_assets = np.array([a.get_return(kind=kind, real=real) for a in self._assets])
ror = (ror_assets * self.weights).sum()
return ror
ror_assets = np.array([a.get_return() for a in self._assets])
ror = (ror_assets * self.weights).sum()
if real:
inflation = self.inflation(kind='values')
ror = (ror + 1.) / (inflation + 1.) - 1.
if kind == 'cumulative':
ror = (ror + 1.).cumprod() - 1.
return ror
def inflation(self, kind: str, years_ago: int = None):
ror = self.get_return()
start_period = None if years_ago else ror.start_period
return self.currency.inflation(kind=kind,
start_period=start_period,
end_period=ror.end_period,
years_ago=years_ago)
def __repr__(self):
assets_repr = ', '.join(asset.symbol.identifier.__repr__() for asset in self._assets)
portfolio_repr = """\
Portfolio(
assets: {},
currency: {},
)""".format(assets_repr, self.currency)
return dedent(portfolio_repr)
class PortfolioItemsFactory:
def __init__(self, portfolio_currency_factory: PortfolioCurrencyFactory,
currency_symbols_registry: CurrencySymbolsRegistry):
self.portfolio_currency_factory = portfolio_currency_factory
self.currency_symbols_registry = currency_symbols_registry
def new_asset(self, symbol: FinancialSymbol,
start_period: pd.Period, end_period: pd.Period, currency: Currency,
portfolio: Optional[Portfolio] = None,
weight: Optional[float] = None):
pc = self.portfolio_currency_factory.new(currency=currency)
pa = PortfolioAsset(self.currency_symbols_registry,
symbol=symbol,
start_period=start_period, end_period=end_period,
currency=pc,
portfolio=portfolio,
weight=weight,
portfolio_items_factory=self)
return pa
def new_portfolio(self,
assets_to_weight: Dict[PortfolioAsset, float],
start_period: pd.Period, end_period: pd.Period,
currency: Currency):
pc = self.portfolio_currency_factory.new(currency=currency)
p = Portfolio(portfolio_items_factory=self,
assets=list(assets_to_weight.keys()),
weights=list(assets_to_weight.values()),
start_period=start_period, end_period=end_period,
currency=pc)
return p
```
#### File: cifrum/_sources/all_sources.py
```python
from abc import ABCMeta, abstractmethod
from typing import List
from .._sources.base_classes import FinancialSymbolsSource
from .._sources.inflation_source import InflationSource
from .._sources.micex_stocks_source import MicexStocksSource
from .._sources.moex_indexes_source import MoexIndexesSource
from .._sources.mutru_funds_source import MutualFundsRuSource
from .._sources.okama_source import OkamaSource
from .._sources.us_data_source import UsDataSource
from .._sources.single_financial_symbol_source import CbrCurrenciesSource, CbrTopRatesSource
from .._sources.yahoo_indexes_source import YahooIndexesSource
class SymbolSources(metaclass=ABCMeta):
@property
@abstractmethod
def sources(self) -> List[FinancialSymbolsSource]:
raise NotImplementedError()
class AllSymbolSources(SymbolSources):
def __init__(self,
cbr_currencies_source: CbrCurrenciesSource,
cbr_top_rates_source: CbrTopRatesSource,
inflation_source: InflationSource,
micex_stocks_source: MicexStocksSource,
moex_indexes_source: MoexIndexesSource,
mutual_funds_ru_source: MutualFundsRuSource,
us_data_source: UsDataSource,
okama_source: OkamaSource,
yahoo_indexes_source: YahooIndexesSource):
self.cbr_currencies_source = cbr_currencies_source
self.cbr_top_rates_source = cbr_top_rates_source
self.inflation_source = inflation_source
self.micex_stocks_source = micex_stocks_source
self.moex_indexes_source = moex_indexes_source
self.mutual_funds_ru_source = mutual_funds_ru_source
self.us_data_source = us_data_source
self.okama_source = okama_source
self.yahoo_indexes_source = yahoo_indexes_source
@property
def sources(self):
return [
self.cbr_currencies_source,
self.cbr_top_rates_source,
self.inflation_source,
self.micex_stocks_source,
self.moex_indexes_source,
self.mutual_funds_ru_source,
self.us_data_source,
self.okama_source,
self.yahoo_indexes_source,
]
```
#### File: swagger_client/api/adjusted_values_api.py
```python
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class AdjustedValuesApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def adjusted_close_values(self, registration_number, currency, start_date, end_date, period_frequency, interpolation_type, **kwargs): # noqa: E501
"""Returns adjusted close values of a mutual fund by registrationNumber # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.adjusted_close_values(registration_number, currency, start_date, end_date, period_frequency, interpolation_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str registration_number: (required)
:param str currency: (required)
:param str start_date: (required)
:param str end_date: (required)
:param str period_frequency: (required)
:param str interpolation_type: (required)
:return: ModelsRawValues
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.adjusted_close_values_with_http_info(registration_number, currency, start_date, end_date, period_frequency, interpolation_type, **kwargs) # noqa: E501
else:
(data) = self.adjusted_close_values_with_http_info(registration_number, currency, start_date, end_date, period_frequency, interpolation_type, **kwargs) # noqa: E501
return data
def adjusted_close_values_with_http_info(self, registration_number, currency, start_date, end_date, period_frequency, interpolation_type, **kwargs): # noqa: E501
"""Returns adjusted close values of a mutual fund by registrationNumber # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.adjusted_close_values_with_http_info(registration_number, currency, start_date, end_date, period_frequency, interpolation_type, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str registration_number: (required)
:param str currency: (required)
:param str start_date: (required)
:param str end_date: (required)
:param str period_frequency: (required)
:param str interpolation_type: (required)
:return: ModelsRawValues
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['registration_number', 'currency', 'start_date', 'end_date', 'period_frequency', 'interpolation_type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method adjusted_close_values" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'registration_number' is set
if ('registration_number' not in params or
params['registration_number'] is None):
raise ValueError("Missing the required parameter `registration_number` when calling `adjusted_close_values`") # noqa: E501
# verify the required parameter 'currency' is set
if ('currency' not in params or
params['currency'] is None):
raise ValueError("Missing the required parameter `currency` when calling `adjusted_close_values`") # noqa: E501
# verify the required parameter 'start_date' is set
if ('start_date' not in params or
params['start_date'] is None):
raise ValueError("Missing the required parameter `start_date` when calling `adjusted_close_values`") # noqa: E501
# verify the required parameter 'end_date' is set
if ('end_date' not in params or
params['end_date'] is None):
raise ValueError("Missing the required parameter `end_date` when calling `adjusted_close_values`") # noqa: E501
# verify the required parameter 'period_frequency' is set
if ('period_frequency' not in params or
params['period_frequency'] is None):
raise ValueError("Missing the required parameter `period_frequency` when calling `adjusted_close_values`") # noqa: E501
# verify the required parameter 'interpolation_type' is set
if ('interpolation_type' not in params or
params['interpolation_type'] is None):
raise ValueError("Missing the required parameter `interpolation_type` when calling `adjusted_close_values`") # noqa: E501
collection_formats = {}
path_params = {}
if 'registration_number' in params:
path_params['registrationNumber'] = params['registration_number'] # noqa: E501
query_params = []
if 'currency' in params:
query_params.append(('currency', params['currency'])) # noqa: E501
if 'start_date' in params:
query_params.append(('startDate', params['start_date'])) # noqa: E501
if 'end_date' in params:
query_params.append(('endDate', params['end_date'])) # noqa: E501
if 'period_frequency' in params:
query_params.append(('periodFrequency', params['period_frequency'])) # noqa: E501
if 'interpolation_type' in params:
query_params.append(('interpolationType', params['interpolation_type'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/adjusted-values/mut-ru/{registrationNumber}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ModelsRawValues', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
```
#### File: swagger_client/models/models_mutual_fund_ru_info.py
```python
import pprint
import re # noqa: F401
import six
class ModelsMutualFundRuInfo(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'registration_number': 'str',
'company_name': 'str',
'date_start': 'datetime',
'date_end': 'datetime'
}
attribute_map = {
'name': 'name',
'registration_number': 'registrationNumber',
'company_name': 'companyName',
'date_start': 'dateStart',
'date_end': 'dateEnd'
}
def __init__(self, name=None, registration_number=None, company_name=None, date_start=None, date_end=None): # noqa: E501
"""ModelsMutualFundRuInfo - a model defined in Swagger""" # noqa: E501
self._name = None
self._registration_number = None
self._company_name = None
self._date_start = None
self._date_end = None
self.discriminator = None
self.name = name
self.registration_number = registration_number
self.company_name = company_name
self.date_start = date_start
self.date_end = date_end
@property
def name(self):
"""Gets the name of this ModelsMutualFundRuInfo. # noqa: E501
:return: The name of this ModelsMutualFundRuInfo. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ModelsMutualFundRuInfo.
:param name: The name of this ModelsMutualFundRuInfo. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def registration_number(self):
"""Gets the registration_number of this ModelsMutualFundRuInfo. # noqa: E501
:return: The registration_number of this ModelsMutualFundRuInfo. # noqa: E501
:rtype: str
"""
return self._registration_number
@registration_number.setter
def registration_number(self, registration_number):
"""Sets the registration_number of this ModelsMutualFundRuInfo.
:param registration_number: The registration_number of this ModelsMutualFundRuInfo. # noqa: E501
:type: str
"""
if registration_number is None:
raise ValueError("Invalid value for `registration_number`, must not be `None`") # noqa: E501
self._registration_number = registration_number
@property
def company_name(self):
"""Gets the company_name of this ModelsMutualFundRuInfo. # noqa: E501
:return: The company_name of this ModelsMutualFundRuInfo. # noqa: E501
:rtype: str
"""
return self._company_name
@company_name.setter
def company_name(self, company_name):
"""Sets the company_name of this ModelsMutualFundRuInfo.
:param company_name: The company_name of this ModelsMutualFundRuInfo. # noqa: E501
:type: str
"""
if company_name is None:
raise ValueError("Invalid value for `company_name`, must not be `None`") # noqa: E501
self._company_name = company_name
@property
def date_start(self):
"""Gets the date_start of this ModelsMutualFundRuInfo. # noqa: E501
:return: The date_start of this ModelsMutualFundRuInfo. # noqa: E501
:rtype: datetime
"""
return self._date_start
@date_start.setter
def date_start(self, date_start):
"""Sets the date_start of this ModelsMutualFundRuInfo.
:param date_start: The date_start of this ModelsMutualFundRuInfo. # noqa: E501
:type: datetime
"""
if date_start is None:
raise ValueError("Invalid value for `date_start`, must not be `None`") # noqa: E501
self._date_start = date_start
@property
def date_end(self):
"""Gets the date_end of this ModelsMutualFundRuInfo. # noqa: E501
:return: The date_end of this ModelsMutualFundRuInfo. # noqa: E501
:rtype: datetime
"""
return self._date_end
@date_end.setter
def date_end(self, date_end):
"""Sets the date_end of this ModelsMutualFundRuInfo.
:param date_end: The date_end of this ModelsMutualFundRuInfo. # noqa: E501
:type: datetime
"""
if date_end is None:
raise ValueError("Invalid value for `date_end`, must not be `None`") # noqa: E501
self._date_end = date_end
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ModelsMutualFundRuInfo, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ModelsMutualFundRuInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
#### File: api_swagger_client/test/test_models_mutual_fund_ru_info.py
```python
from __future__ import absolute_import
import unittest
import swagger_client
from models.models_mutual_fund_ru_info import ModelsMutualFundRuInfo # noqa: E501
from swagger_client.rest import ApiException
class TestModelsMutualFundRuInfo(unittest.TestCase):
"""ModelsMutualFundRuInfo unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testModelsMutualFundRuInfo(self):
"""Test ModelsMutualFundRuInfo"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.models_mutual_fund_ru_info.ModelsMutualFundRuInfo() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
```
#### File: yapo/tests/conftest.py
```python
import pytest
from hamcrest.core.base_matcher import BaseMatcher
decimal_places = 4
delta = 10 ** (-decimal_places)
class ListIsSorted(BaseMatcher):
def __init__(self):
pass
def matches(self, item, mismatch_description=None):
if not isinstance(item, list):
return False
return item == sorted(item)
def describe_to(self, description):
description.append_text('list was not sorted')
def sorted_asc():
return ListIsSorted()
def pytest_addoption(parser):
parser.addoption("--runslow", action="store_true", default=False, help="run slow tests")
def pytest_collection_modifyitems(config, items):
if not config.getoption("--runslow", default=False):
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if 'slow' in item.keywords:
item.add_marker(skip_slow)
``` |
{
"source": "31415us/linda-lidar-rangefinder-playground",
"score": 3
} |
#### File: linda/tests/test_QuadraticRegression.py
```python
import unittest
import numpy as np
from linda.QuadraticRegression import quadratic_regression, Gaussian
class QuadraticRegressionTest(unittest.TestCase):
def test_simple_parabola(self):
x_vals = np.array([i * 0.1 for i in range(-100, 100)])
y_vals = x_vals * x_vals
prior = None
true_param = np.array([0.0, 0.0, 1.0])
posterior = quadratic_regression(prior, x_vals, y_vals)
np.testing.assert_allclose(posterior.mean, true_param, rtol=1e-5, atol=1e-5)
def test_general_parabola(self):
a = 3.0
b = 2.0
c = 1.0
true_mean = np.array([c, b, a])
x_vals = np.array([i * 0.1 for i in range(-100, 100)])
y_vals = a * x_vals * x_vals + b * x_vals + c * np.ones(x_vals.size)
prior = None
posterior = quadratic_regression(prior, x_vals, y_vals)
np.testing.assert_allclose(posterior.mean, true_mean, rtol=1e-5, atol=1e-5)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "3143ylc279144/python",
"score": 2
} |
#### File: appium_automation/Basic/driver.py
```python
from selenium import webdriver
# apk参数
def init_driver():
desired_caps = {}
# 手机 系统信息
desired_caps['platformName'] = 'Android' # 设置平台
desired_caps['platformVersion'] = '10.0' # 系统版本
# 设备号
desired_caps['deviceName'] = '61359515231666' # 设备id
# 包名
desired_caps['appPackage'] = '' # 包名
# 启动名
desired_caps['appActivity'] = '' # 启动的activity
desired_caps['automationName'] = 'Uiautomator2'
# 允许输入中文
# desired_caps['unicodeKeyboard'] = True
# desired_caps['resetKeyboard'] = True
# desired_caps['autoGrantPermissions'] = True
desired_caps['noReset'] = False
# 手机驱动对象
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
return driver
def driver_weixin():
desired_caps = {}
# 手机 系统信息
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '10.0'
# 设备号
desired_caps['deviceName'] = '61359515231666'
# 包名
desired_caps['appPackage'] = 'com.tencent.mm'
# 启动名
desired_caps['appActivity'] = '.ui.LauncherUI'
# desired_caps['automationName'] = 'Uiautomator2'
# 允许输入中文
desired_caps['unicodeKeyboard'] = True
desired_caps['resetKeyboard'] = True
desired_caps['noReset'] = True
# desired_caps["newCommandTimeout"] = 30
# desired_caps['fullReset'] = 'false'
# desired_caps['newCommandTimeout'] = 10
# desired_caps['recreateChromeDriverSessions'] = True
desired_caps['chromeOptions'] = {'androidProcess': 'com.tencent.mm:tools'}
# 手机驱动对象
driver = webdriver.Remote("http://1192.168.127.12:4723/wd/hub", desired_caps)
return driver
# def tearDown(self):
# self.driver.quit() #case执行完退出
```
#### File: appium_automation/Basic/get_data.py
```python
import os
import yaml
def getData(funcname, file):
PATH = os.getcwd() + os.sep
with open(PATH + 'Data/' + file + '.yaml', 'r', encoding="utf8") as f:
data = yaml.load(f, Loader=yaml.FullLoader)
# 1 先将我们获取到的所有数据都存放在一个变量当中
tmpdata = data[funcname]
# 2 所以此时我们需要使用循环走进它的内心。
res_arr = list()
for value in tmpdata.values():
tmp_arr = list()
for j in value.values():
tmp_arr.append(j)
res_arr.append(tmp_arr)
return res_arr
```
#### File: appium_automation/Page/AuthorityPage.py
```python
from appium.webdriver.common import mobileby
from Basic.base import Base
class AuthorityPage(Base):
# 欢迎页面,authority:权限框元素及方法,该类继承自BasePage类
by = mobileby.MobileBy()
# 授权框-》允许 按钮
allow_button = (by.ID, 'com.android.permissioncontroller:id/permission_allow_button')
# 授权框-》拒绝 按钮
deny_button = (by.ID, 'com.android.permissioncontroller:id/permission_deny_button')
# 授权框-》文案 按钮
authority_text = (by.ID, 'com.android.permissioncontroller:id/permission_message')
# 点击 授权框-》允许 按钮
def click_authority_allow_button(self):
self.driver.find_element(*self.allow_button).click()
# 点击 授权框-》取消 按钮
def click_authority_deny_button(self):
self.driver.find_element(*self.deny_button).click()
# 验证授权框文案
def check_authority_text(self,text):
self.driver.find_element(*self.authority_text).text()
```
#### File: xmind_to_excel/src/xmindtoxls.py
```python
from xmindparser import xmind_to_dict
import re
import xlwt
class xmind_to_xls():
def xmind_num(self, value):
"""获取xmind标题个数"""
try:
return len(value['topics'])
except KeyError:
return 0
def xmind_title(self,value):
"""获取xmind标题内容"""
return value['title']
def xmind_cat(self, filename):
'''调试函数,打印内容用的'''
self.out = xmind_to_dict(filename)
self.story = self.out[0]['topic']['topics']
self.num = len(self.story)
print(self.out)
print(self.out[0]['topic']['title'])
return self.story, self.num
def write_excel(self, xmind_file, servicename='', editionname='', performer=''):
'''生成excel文件函数'''
self.f = xlwt.Workbook()
self.sheet1 = self.f.add_sheet('sheet1',cell_overwrite_ok=True)
self.row0 = ["storyid", '需求名称', '功能模块', '测试用例名称', '前置条件', '执行步骤', '期望结果', '服务名称', '版本', '维护人']
#生成第一行
for i in range(0, len(self.row0)):
self.sheet1.write(0, i, self.row0[i])
self.out = xmind_to_dict(xmind_file)
self.xls_name = self.out[0]['topic']['title']
self.story = self.out[0]['topic']['topics']
self.storynum = len(self.story)
j = 1 # 用例计算器
z = 0 # 用例结果数计数器
for i in range(0, self.storynum):
self.storyname = self.story[i]['title']
print(self.storyname)
self.regex_str = ".*[\[【](.+?)[\]】].*"
self.storyid_reg = re.match(self.regex_str, self.storyname)
if self.storyid_reg:
# print(self.storyid_reg) #取出正则解析后的结果
self.storyid = self.storyid_reg.group(1)#正则取出用例编号
print(self.storyid_reg.group(1))
self.testmodel_num = len(self.story[i]['topics'])
for s in range(0,self.testmodel_num):
self.modle_name = self.xmind_title(self.story[i]['topics'][s])
self.testcase_num = self.xmind_num(self.story[i]['topics'][s])
for k in range(0,self.testcase_num): #测试用例数
self.testcase = self.story[i]['topics'][s]['topics'][k]
self.testcase_name = self.xmind_title(self.testcase)
self.testcase_stepnum = self.xmind_num(self.testcase) #每个用例的步骤数量
self.testcase_condition = self.xmind_title(self.testcase['topics'][0]) # 用例预置条件
self.sheet1.write(s+k + i + z + j, 0, self.storyid)
self.sheet1.write(s+k + i + z + j, 1, self.storyname)
self.sheet1.write(s+k + i + z + j, 2, self.modle_name)
self.sheet1.write(s+k + i + z + j, 3, self.testcase_name)
self.sheet1.write(s+k + i + z + j, 4, self.testcase_condition)
self.sheet1.write(s+k + i + z + j, 7, servicename)
self.sheet1.write(s+k + i + z + j, 8, editionname)
self.sheet1.write(s+k + i + z + j, 9, performer)
for x in range(1,self.testcase_stepnum): #测试用例步骤数
self.testcase_step = self.testcase['topics'][x]
self.teststep_title = self.xmind_title(self.testcase_step) #用例步骤名称
self.teststep_num = self.xmind_num(self.testcase_step) #用例步骤个数
if self.teststep_num != 0:
for y in range(0, self.teststep_num):
self.test_results = self.testcase_step['topics'][y]
self.test_result = self.xmind_title(self.test_results)#用例结果
self.sheet1.write(s+k + i + z + j+y, 5, self.teststep_title)
self.sheet1.write(s+k + i + z + j+y, 6, self.test_result)
z = z+y+1
else:
self.test_result = '/'
self.sheet1.write(s+k + i + z + j, 5, self.teststep_title)
self.sheet1.write(s+k + i + z + j, 6, self.test_result)
# z = z
j=j+k
self.f.save(self.xls_name+'.xls') #xls名称取xmind主题名称
if __name__ == '__main__':
xmind_file = "Y:/Documents/软件/软件/xmind_to_excel/xmind模板.xmind" # xmind文件
servicename = 'aa' #服务名称
editionname = 'bb' #版本
performer = 'cc' #执行人员
xmind_to_xls().write_excel(xmind_file, servicename, editionname, performer)
xmind_to_xls().xmind_cat(xmind_file)
```
#### File: python/xmind_to_excel/tkintertoxls.py
```python
import tkinter as tk
from tkinter.filedialog import askopenfilename
from src.xmindtoxls import xmind_to_xls
from tkinter.messagebox import showinfo
import re
# 定义MainUI类表示应用/窗口,继承Frame类
class MainUI(tk.Frame):
# Application构造函数,master为窗口的父控件
def __init__(self, master=None):
# 初始化Application的Frame部分
tk.Frame.__init__(self, master)
# 显示窗口,并使用grid布局
self.grid()
self.path = tk.StringVar()
# 创建控件
self.createWidgets()
def selectPath(self):
'''选择要转换成excel的xmind地址'''
self.path_ = askopenfilename()
self.path.set(self.path_)
# 创建控件
def createWidgets(self):
'''生成gui界面'''
# 创建一个标签,输出要显示的内容
self.firstLabel = tk.Label(self, text="目标路径")
# 设定使用grid布局
self.firstLabel.grid(row = 0, column = 0)
self.firstEntry = tk.Entry(self,textvariable = self.path)
self.firstEntry.grid(row=0, column=1)
# 创建一个按钮,用来触发answer方法
self.clickButton = tk.Button(self, text="路径选择", command=self.selectPath)
# 设定使用grid布局
self.clickButton.grid(row = 0, column = 2)
# 创建一个标签,输入模块
self.secLabel = tk.Label(self, text="模块")
# 设定使用grid布局
self.secLabel.grid(row=1, column=0)
self.module = tk.StringVar()
self.secEntry = tk.Entry(self,textvariable = self.module)
self.secEntry.grid(row=1, column=1)
# 创建一个标签,输入版本号
self.trLabel = tk.Label(self, text="版本号")
# 设定使用grid布局
self.trLabel.grid(row=2, column=0)
self.buildnum = tk.StringVar()
self.trEntry = tk.Entry(self,textvariable = self.buildnum)
self.trEntry.grid(row=2, column=1)
# 创建一个标签,输入执行人
self.fourLabel = tk.Label(self, text="执行人")
# 设定使用grid布局
self.fourLabel.grid(row=3, column=0)
self.owner = tk.StringVar()
self.fourEntry = tk.Entry(self,textvariable = self.owner)
self.fourEntry.grid(row=3, column=1)
# 创建一个提交按钮,用来触发提交方法,获取值
self.clickButton = tk.Button(self, text="提交",command=self.getvalue)
# 设定使用grid布局
self.clickButton.grid(row=4, column=1)
def getvalue(self):
'''获取输入的值,并执行转换excel函数'''
global way,module,buildnum,owner
way = self.path.get()
module = self.module.get()
buildnum = self.buildnum.get()
owner = self.owner.get()
print(way,module,buildnum,owner)
self.regvalue = '.*\.xmind$'
self.xmind_reg = re.match(self.regvalue,way )
if self.xmind_reg:
# xmind转换成xls
self.xmind_to_xls = xmind_to_xls()
self.xmind_to_xls.write_excel(way,module,buildnum,owner)
else:
showinfo(title='提示',message='请选择正确的xmind文件,谢谢!')
# 创建一个MainUI对象
app = MainUI()
# 设置窗口标题
app.master.title('「xmind转xls」')
# 设置窗体大小
app.master.geometry('400x200')
# 主循环开始
app.mainloop()
``` |
{
"source": "314cascio/WebScraping",
"score": 3
} |
#### File: 314cascio/WebScraping/app.py
```python
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scrape_mars
# 2. Create an app, being sure to pass __name__
app = Flask(__name__)
# Use flask_pymongo to set up mongo connection
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
# 3. Define index
@app.route('/')
def index():
# Return the template
mars = mongo.db.mars.find_one()
return render_template('index.html', mars=mars)
# 3. Define scrape route
@app.route('/scrape')
def scrape_data():
mars = mongo.db.mars
mars_data = scrape_mars.scrape_all()
mars.update({}, mars_data, upsert=True)
return "Scraping Successful"
# 4.
if __name__ == "__main__":
app.run()
``` |
{
"source": "314esther/motivation_streamlit",
"score": 4
} |
#### File: 314esther/motivation_streamlit/motivation.py
```python
import streamlit as st
import math
import numpy as np
import pandas as pd
from bokeh.plotting import figure
from bokeh.models import Legend
from bokeh.layouts import column
st.set_page_config(layout='wide')
st.title("Income vs. Motivation")
col1, col2 = st.columns((2,1))
st.markdown("""
<style>
div[data-testid="stHorizontalBlock"] > div:nth-of-type(2) {
background-color: #d9d9d9;
border-radius: 20px;
}
div[data-testid="stBlock"] {
padding: 20px;
}
</style>
""", unsafe_allow_html=True)
def calc_scarcity(income):
#based on annual income figures
if income <= 20000:
return 1
elif income > 20000 and income <= 75000:
return (15/11)-(income/55000)
elif income > 75000:
return 0
def calc_extrinsic_motivation(income, incentive):
#incentive = calc_incentive(income)
raw_motivation = calc_scarcity(income)*(incentive/income)
if raw_motivation >= 1:
return 1
else:
return raw_motivation
def calc_happiness(income, personality):
#income + personality
if income <= 20000:
return 0+(personality*0.5)
elif income > 20000 and income <= 75000:
return (income/55000 - (4/11))*0.5+(personality*0.5)
elif income > 75000:
return 0.5+(personality*0.5)
incentive = col2.slider("Incentive", 5000,20000,10000)
stocastic = col2.checkbox("Make Personality a Stocastic variable")
motivation_data = []
extrinsic_data = []
intrinsic_data = []
for income in range(10000,90000,5000):
if stocastic:
personality = np.random.normal(0.5, 0.125, 1)[0]
else:
personality = 0.5
intrinsic_motivation = calc_happiness(income, personality)
extrinsic_data.append(calc_extrinsic_motivation(income, incentive))
intrinsic_data.append(intrinsic_motivation)
motivation_data.append((calc_extrinsic_motivation(income, incentive)+intrinsic_motivation))
p = figure(x_axis_label="income",y_axis_label="motivation", height=350) #width=700, height=350
motivation = [motivation_data, extrinsic_data, intrinsic_data]
xs = [list(range(20000,80000,5000)), list(range(20000,80000,5000)), list(range(20000,80000,5000))]
mot = p.line(list(range(10000,90000,5000)), motivation_data, color=(255,0,0))
ext = p.line(list(range(10000,90000,5000)), extrinsic_data, color=(0,255,0))
intr = p.line(list(range(10000,90000,5000)), intrinsic_data, color=(0,0,255))
my_legend = Legend(items=[("Total Motivation" , [mot]), ("Extrinsic Motivation", [ext]), ("Intrinsic Motivation", [intr])], orientation="horizontal")
p.add_layout(my_legend, 'below')
column([p], sizing_mode="stretch_width")
col1.bokeh_chart(p)
``` |
{
"source": "314eter/subliminal",
"score": 2
} |
#### File: subliminal/providers/legendastv.py
```python
import io
import json
import logging
import os
import re
from babelfish import Language, language_converters
from datetime import datetime, timedelta
from dogpile.cache.api import NO_VALUE
from guessit import guessit
import pytz
import rarfile
from rarfile import RarFile, is_rarfile
from rebulk.loose import ensure_list
from requests import Session
from zipfile import ZipFile, is_zipfile
from . import ParserBeautifulSoup, Provider
from ..cache import SHOW_EXPIRATION_TIME, region
from ..exceptions import AuthenticationError, ConfigurationError, ProviderError, ServiceUnavailable
from ..matches import guess_matches
from ..subtitle import SUBTITLE_EXTENSIONS, Subtitle, fix_line_ending
from ..utils import sanitize
from ..video import Episode, Movie
logger = logging.getLogger(__name__)
language_converters.register('legendastv = subliminal.converters.legendastv:LegendasTVConverter')
# Configure :mod:`rarfile` to use the same path separator as :mod:`zipfile`
rarfile.PATH_SEP = '/'
#: Conversion map for types
type_map = {'M': 'movie', 'S': 'episode', 'C': 'episode'}
#: BR title season parsing regex
season_re = re.compile(r' - (?P<season>\d+)(\xaa|a|st|nd|rd|th) (temporada|season)', re.IGNORECASE)
#: Downloads parsing regex
downloads_re = re.compile(r'(?P<downloads>\d+) downloads')
#: Rating parsing regex
rating_re = re.compile(r'nota (?P<rating>\d+)')
#: Timestamp parsing regex
timestamp_re = re.compile(r'(?P<day>\d+)/(?P<month>\d+)/(?P<year>\d+) - (?P<hour>\d+):(?P<minute>\d+)')
#: Title with year/country regex
title_re = re.compile(r'^(?P<series>.*?)(?: \((?:(?P<year>\d{4})|(?P<country>[A-Z]{2}))\))?$')
#: Cache key for releases
releases_key = __name__ + ':releases|{archive_id}|{archive_name}'
class LegendasTVArchive(object):
"""LegendasTV Archive.
:param str id: identifier.
:param str name: name.
:param bool pack: contains subtitles for multiple episodes.
:param bool pack: featured.
:param str link: link.
:param int downloads: download count.
:param int rating: rating (0-10).
:param timestamp: timestamp.
:type timestamp: datetime.datetime
"""
def __init__(self, id, name, pack, featured, link, downloads=0, rating=0, timestamp=None):
#: Identifier
self.id = id
#: Name
self.name = name
#: Pack
self.pack = pack
#: Featured
self.featured = featured
#: Link
self.link = link
#: Download count
self.downloads = downloads
#: Rating (0-10)
self.rating = rating
#: Timestamp
self.timestamp = timestamp
#: Compressed content as :class:`rarfile.RarFile` or :class:`zipfile.ZipFile`
self.content = None
def __repr__(self):
return '<%s [%s] %r>' % (self.__class__.__name__, self.id, self.name)
class LegendasTVSubtitle(Subtitle):
"""LegendasTV Subtitle."""
provider_name = 'legendastv'
def __init__(self, language, type, title, year, imdb_id, season, archive, name):
super(LegendasTVSubtitle, self).__init__(language, page_link=archive.link)
self.type = type
self.title = title
self.year = year
self.imdb_id = imdb_id
self.season = season
self.archive = archive
self.name = name
@property
def id(self):
return '%s-%s' % (self.archive.id, self.name.lower())
@property
def info(self):
return self.name
def get_matches(self, video, hearing_impaired=False):
matches = guess_matches(video, {
'title': self.title,
'year': self.year
})
# episode
if isinstance(video, Episode) and self.type == 'episode':
# imdb_id
if video.series_imdb_id and self.imdb_id == video.series_imdb_id:
matches.add('series_imdb_id')
# movie
elif isinstance(video, Movie) and self.type == 'movie':
# imdb_id
if video.imdb_id and self.imdb_id == video.imdb_id:
matches.add('imdb_id')
# name
matches |= guess_matches(video, guessit(self.name, {'type': self.type}))
return matches
class LegendasTVProvider(Provider):
"""LegendasTV Provider.
:param str username: username.
:param str password: password.
"""
languages = {Language.fromlegendastv(l) for l in language_converters['legendastv'].codes}
server_url = 'http://legendas.tv/'
subtitle_class = LegendasTVSubtitle
def __init__(self, username=None, password=<PASSWORD>):
# Provider needs UNRAR installed. If not available raise ConfigurationError
try:
rarfile.custom_check([rarfile.UNRAR_TOOL], True)
except rarfile.RarExecError:
raise ConfigurationError('UNRAR tool not available')
if any((username, password)) and not all((username, password)):
raise ConfigurationError('Username and password must be specified')
self.username = username
self.password = password
self.logged_in = False
self.session = None
def initialize(self):
self.session = Session()
self.session.headers['User-Agent'] = self.user_agent
# login
if self.username and self.password:
logger.info('Logging in')
data = {'_method': 'POST', 'data[User][username]': self.username, 'data[User][password]': self.password}
r = self.session.post(self.server_url + 'login', data, allow_redirects=False, timeout=10)
raise_for_status(r)
soup = ParserBeautifulSoup(r.content, ['html.parser'])
if soup.find('div', {'class': 'alert-error'}, string=re.compile(u'Usuário ou senha inválidos')):
raise AuthenticationError(self.username)
logger.debug('Logged in')
self.logged_in = True
def terminate(self):
# logout
if self.logged_in:
logger.info('Logging out')
r = self.session.get(self.server_url + 'users/logout', allow_redirects=False, timeout=10)
raise_for_status(r)
logger.debug('Logged out')
self.logged_in = False
self.session.close()
@staticmethod
def title_matches(title, title_id, sanitized_title, season, year):
"""Check if is a valid title."""
sanitized_result = sanitize(title['title'])
if sanitized_result != sanitized_title:
logger.debug("Mismatched title, discarding title %d (%s)",
title_id, sanitized_result)
return False
# episode type
if season is not None:
# discard mismatches on type
if title['type'] != 'episode':
logger.debug("Mismatched 'episode' type, discarding title %d (%s)", title_id, sanitized_result)
return False
# discard mismatches on season
if 'season' not in title or title['season'] != season:
logger.debug('Mismatched season %s, discarding title %d (%s)',
title.get('season'), title_id, sanitized_result)
return False
# movie type
else:
# discard mismatches on type
if title['type'] != 'movie':
logger.debug("Mismatched 'movie' type, discarding title %d (%s)", title_id, sanitized_result)
return False
# discard mismatches on year
if year is not None and 'year' in title and title['year'] != year:
logger.debug("Mismatched movie year, discarding title %d (%s)", title_id, sanitized_result)
return False
return True
@staticmethod
def episode_matches(name, season, episodes):
if season is None or not episodes:
return True
guess = guessit(name, {'type': 'episode'})
for prop, value in (('season', season), ('episode', episodes)):
if prop not in guess:
continue
wanted = set(ensure_list(value))
actual = set(ensure_list(guess[prop]))
if not wanted.intersection(actual):
logger.debug('Mismatched %s (wanted: %s, actual %s), discarding %s', prop, wanted, actual, name)
return False
return True
@region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME, should_cache_fn=lambda value: value)
def search_titles(self, title, season, title_year):
"""Search for titles matching the `title`.
For episodes, each season has it own title
:param str title: the title to search for.
:param int season: season of the title
:param int title_year: year of the title
:return: found titles.
:rtype: dict
"""
titles = {}
sanitized_titles = [sanitize(title)]
ignore_characters = {'\'', '.'}
if any(c in title for c in ignore_characters):
sanitized_titles.append(sanitize(title, ignore_characters=ignore_characters))
for sanitized_title in sanitized_titles:
# make the query
if season:
logger.info('Searching episode title %r for season %r', sanitized_title, season)
else:
logger.info('Searching movie title %r', sanitized_title)
r = self.session.get(self.server_url + 'legenda/sugestao/{}'.format(sanitized_title), timeout=10)
raise_for_status(r)
results = json.loads(r.text)
# loop over results
for result in results:
source = result['_source']
# extract id
title_id = int(source['id_filme'])
# extract type
title = {'type': type_map[source['tipo']]}
# extract title, year and country
name, year, country = title_re.match(source['dsc_nome']).groups()
title['title'] = name
# extract imdb_id
if source['id_imdb'] != '0':
if not source['id_imdb'].startswith('tt'):
title['imdb_id'] = 'tt' + source['id_imdb'].zfill(7)
else:
title['imdb_id'] = source['id_imdb']
# extract season
if title['type'] == 'episode':
if source['temporada'] and source['temporada'].isdigit():
title['season'] = int(source['temporada'])
else:
match = season_re.search(source['dsc_nome_br'])
if match:
title['season'] = int(match.group('season'))
else:
logger.debug('No season detected for title %d (%s)', title_id, name)
# extract year
if year:
title['year'] = int(year)
elif source['dsc_data_lancamento'] and source['dsc_data_lancamento'].isdigit():
# year is based on season air date hence the adjustment
title['year'] = int(source['dsc_data_lancamento']) - title.get('season', 1) + 1
# add title only if is valid
# Check against title without ignored chars
if self.title_matches(title, title_id, sanitized_titles[0], season, title_year):
titles[title_id] = title
logger.debug('Found %d titles', len(titles))
return titles
@region.cache_on_arguments(expiration_time=timedelta(minutes=15).total_seconds())
def get_archives(self, title_id, language_code, type, season, episodes):
"""Get the archive list from a given `title_id`, `language_code`, `type`, `season` and `episode`.
:param int title_id: title id.
:param int language_code: language code.
:param str type: episode or movie
:param int season: season
:param list episodes: episodes
:return: the archives.
:rtype: list of :class:`LegendasTVArchive`
"""
archives = []
page = 0
while True:
# get the archive page
url = self.server_url + 'legenda/busca/-/{language}/-/{page}/{title}'.format(
language=language_code, page=page, title=title_id)
r = self.session.get(url)
raise_for_status(r)
# parse the results
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
for archive_soup in soup.select('div.list_element > article > div > div.f_left'):
# create archive
archive = LegendasTVArchive(archive_soup.a['href'].split('/')[2],
archive_soup.a.text,
'pack' in archive_soup.parent['class'],
'destaque' in archive_soup.parent['class'],
self.server_url + archive_soup.a['href'][1:])
# clean name of path separators and pack flags
clean_name = archive.name.replace('/', '-')
if archive.pack and clean_name.startswith('(p)'):
clean_name = clean_name[3:]
if type == 'episode' and not archive.pack and not self.episode_matches(clean_name, season, episodes):
continue
# extract text containing downloads, rating and timestamp
data_text = archive_soup.find('p', class_='data').text
# match downloads
archive.downloads = int(downloads_re.search(data_text).group('downloads'))
# match rating
match = rating_re.search(data_text)
if match:
archive.rating = int(match.group('rating'))
# match timestamp and validate it
time_data = {k: int(v) for k, v in timestamp_re.search(data_text).groupdict().items()}
archive.timestamp = pytz.timezone('America/Sao_Paulo').localize(datetime(**time_data))
if archive.timestamp > datetime.utcnow().replace(tzinfo=pytz.utc):
raise ProviderError('Archive timestamp is in the future')
# add archive
logger.info('Found archive for title %d and language %d at page %s: %s',
title_id, language_code, page, archive)
archives.append(archive)
# stop on last page
if soup.find('a', attrs={'class': 'load_more'}, string='carregar mais') is None:
break
# increment page count
page += 1
logger.debug('Found %d archives', len(archives))
return archives
def download_archive(self, archive):
"""Download an archive's :attr:`~LegendasTVArchive.content`.
:param archive: the archive to download :attr:`~LegendasTVArchive.content` of.
:type archive: :class:`LegendasTVArchive`
"""
logger.info('Downloading archive %s', archive.id)
r = self.session.get(self.server_url + 'downloadarquivo/{}'.format(archive.id))
raise_for_status(r)
# open the archive
archive_stream = io.BytesIO(r.content)
if is_rarfile(archive_stream):
logger.debug('Identified rar archive')
archive.content = RarFile(archive_stream)
elif is_zipfile(archive_stream):
logger.debug('Identified zip archive')
archive.content = ZipFile(archive_stream)
else:
raise ValueError('Not a valid archive')
def query(self, language, title, season=None, episodes=None, year=None):
# search for titles
titles = self.search_titles(title, season, year)
subtitles = []
# iterate over titles
for title_id, t in titles.items():
logger.info('Getting archives for title %d and language %d', title_id, language.legendastv)
archives = self.get_archives(title_id, language.legendastv, t['type'], season, episodes or [])
if not archives:
logger.info('No archives found for title %d and language %d', title_id, language.legendastv)
# iterate over title's archives
for a in archives:
# compute an expiration time based on the archive timestamp
expiration_time = (datetime.utcnow().replace(tzinfo=pytz.utc) - a.timestamp).total_seconds()
# attempt to get the releases from the cache
cache_key = releases_key.format(archive_id=a.id, archive_name=a.name)
releases = region.get(cache_key, expiration_time=expiration_time)
# the releases are not in cache or cache is expired
if releases == NO_VALUE:
logger.info('Releases not found in cache')
# download archive
self.download_archive(a)
# extract the releases
releases = []
for name in a.content.namelist():
# discard the legendastv file
if name.startswith('Legendas.tv'):
continue
# discard hidden files
if os.path.split(name)[-1].startswith('.'):
continue
# discard non-subtitle files
if not name.lower().endswith(SUBTITLE_EXTENSIONS):
continue
# discard wrong episodes
if t['type'] == 'episode' and not self.episode_matches(name, season, episodes):
continue
releases.append(name)
# cache the releases
region.set(cache_key, releases)
# iterate over releases
for r in releases:
subtitle = self.subtitle_class(language, t['type'], t['title'], t.get('year'), t.get('imdb_id'),
t.get('season'), a, r)
logger.debug('Found subtitle %r', subtitle)
subtitles.append(subtitle)
return subtitles
def list_subtitles(self, video, languages):
season = None
episodes = []
if isinstance(video, Episode):
titles = [video.series] + video.alternative_series
season = video.season
episodes = video.episodes
else:
titles = [video.title] + video.alternative_titles
for title in titles:
subtitles = [s for l in languages for s in
self.query(l, title, season=season, episodes=episodes, year=video.year)]
if subtitles:
return subtitles
return []
def download_subtitle(self, subtitle):
# download archive in case we previously hit the releases cache and didn't download it
if subtitle.archive.content is None:
self.download_archive(subtitle.archive)
# extract subtitle's content
subtitle.content = fix_line_ending(subtitle.archive.content.read(subtitle.name))
subtitle.archive.content = None
def raise_for_status(r):
# When site is under maintaince and http status code 200.
if 'Em breve estaremos de volta' in r.text:
raise ServiceUnavailable
else:
r.raise_for_status()
```
#### File: subliminal/tests/conftest.py
```python
import subprocess
from io import BytesIO
import os
from zipfile import ZipFile
import pytest
import requests
from babelfish import Country
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
from subliminal import Episode, Movie
from subliminal.cache import region
@pytest.fixture(autouse=True, scope='session')
def configure_region():
region.configure('dogpile.cache.null')
region.configure = Mock()
@pytest.fixture
def movies():
return {'man_of_steel':
Movie(os.path.join('Man of Steel (2013)', 'man.of.steel.2013.720p.bluray.x264-felony.mkv'), 'Man of Steel',
source='Blu-ray', release_group='felony', resolution='720p', video_codec='H.264', audio_codec='DTS',
imdb_id='tt0770828', size=7033732714, year=2013,
hashes={'napiprojekt': '6303e7ee6a835e9fcede9fb2fb00cb36',
'opensubtitles': '5b8f8f4e41ccb21e',
'shooter': '314f454ab464775498ae6f1f5ad813a9;fdaa8b702d8936feba2122e93ba5c44f;'
'0a6935e3436aa7db5597ef67a2c494e3;4d269733f36ddd49f71e92732a462fe5',
'thesubdb': 'ad32876133355929d814457537e12dc2'}),
'enders_game':
Movie('enders.game.2013.720p.bluray.x264-sparks.mkv', 'Ender\'s Game',
source='Blu-ray', release_group='sparks', resolution='720p', video_codec='H.264', year=2013),
'café_society':
Movie(u'Café Society.1080p.avc1.RARBG.mp4', u'Café Society', year=2016),
'interstellar':
Movie('Interstellar.2014.2014.1080p.BluRay.x264.YIFY.rar', 'Interstellar',
source='Blu-ray', release_group='YIFY', resolution='1080p', video_codec='H.264', year=2014),
'jack_reacher_never_go_back':
Movie(os.path.join('<NAME>- Never Go Back (2016)',
'Jack.Reacher.Never.Go.Back.2016.1080p.WEBDL.AC3.x264-FGT.mkv'),
'<NAME>: Never Go Back',
source='Web', release_group='FGT', resolution='1080p', video_codec='H.264',
audio_codec='Dolby Digital', imdb_id='tt3393786', year=2016)
}
@pytest.fixture
def episodes():
return {'bbt_s07e05':
Episode(os.path.join('The Big Bang Theory', 'Season 07',
'The.Big.Bang.Theory.S07E05.720p.HDTV.X264-DIMENSION.mkv'),
'The Big Bang Theory', 7, 5, title='The Workplace Proximity', year=2007, tvdb_id=4668379,
series_tvdb_id=80379, series_imdb_id='tt0898266', source='HDTV', release_group='DIMENSION',
resolution='720p', video_codec='H.264', audio_codec='Dolby Digital',
imdb_id='tt3229392', size=501910737,
hashes={'napiprojekt': '6303e7ee6a835e9fcede9fb2fb00cb36',
'opensubtitles': '6878b3ef7c1bd19e',
'shooter': 'c13e0e5243c56d280064d344676fff94;cd4184d1c0c623735f6db90841ce15fc;'
'3faefd72f92b63f2504269b4f484a377;8c68d1ef873afb8ba0cc9f97cbac41c1',
'thesubdb': '9dbbfb7ba81c9a6237237dae8589fccc'}),
'got_s03e10':
Episode(os.path.join('Game of Thrones', 'Season 03',
'Game.of.Thrones.S03E10.Mhysa.720p.WEB-DL.DD5.1.H.264-NTb.mkv'),
'Game of Thrones', 3, 10, title='Mhysa', tvdb_id=4517466, series_tvdb_id=121361,
series_imdb_id='tt0944947', source='Web', release_group='NTb', resolution='720p',
video_codec='H.264', audio_codec='Dolby Digital', imdb_id='tt2178796', size=2142810931,
hashes={'napiprojekt': '6303e7ee6a835e9fcede9fb2fb00cb36',
'opensubtitles': 'b850baa096976c22',
'shooter': 'b02d992c04ad74b31c252bd5a097a036;ef1b32f873b2acf8f166fc266bdf011a;'
'82ce34a3bcee0c66ed3b26d900d31cca;78113770551f3efd1e2d4ec45898c59c',
'thesubdb': 'b1f899c77f4c960b84b8dbf840d4e42d'}),
'dallas_s01e03':
Episode('Dallas.S01E03.mkv', 'Dallas', 1, 3, title='Spy in the House', year=1978, tvdb_id=228224,
imdb_id='tt0553425', series_tvdb_id=77092, series_imdb_id='tt0077000'),
'dallas_2012_s01e03':
Episode('Dallas.2012.S01E03.mkv', 'Dallas', 1, 3, title='The Price You Pay', year=2012,
original_series=False, tvdb_id=4199511, series_tvdb_id=242521, series_imdb_id='tt1723760',
imdb_id='tt2205526'),
'marvels_agents_of_shield_s02e06':
Episode('Marvels.Agents.of.S.H.I.E.L.D.S02E06.720p.HDTV.x264-KILLERS.mkv',
'Marvel\'s Agents of S.H.I.E.L.D.', 2, 6, year=2013, source='HDTV', release_group='KILLERS',
resolution='720p', video_codec='H.264'),
'csi_cyber_s02e03':
Episode('CSI.Cyber.S02E03.hdtv-lol.mp4', 'CSI: Cyber', 2, 3, source='HDTV', release_group='lol'),
'the_x_files_s10e02':
Episode('The.X-Files.S10E02.HDTV.x264-KILLERS.mp4', 'The X-Files', 10, 2, source='HDTV',
release_group='KILLERS', video_codec='H.264'),
'colony_s01e09':
Episode('Colony.S01E09.720p.HDTV.x264-KILLERS.mkv', 'Colony', 1, 9, title='Zero Day', year=2016,
tvdb_id=5463229, series_tvdb_id=284210, series_imdb_id='tt4209256', source='HDTV',
release_group='KILLERS', resolution='720p', video_codec='H.264', imdb_id='tt4926022'),
'the_jinx_e05':
Episode('The.Jinx-The.Life.and.Deaths.of.Robert.Durst.E05.BDRip.x264-ROVERS.mkv',
'The Jinx: The Life and Deaths of <NAME>', 1, 5, year=2015, original_series=True,
source='Blu-ray', release_group='ROVERS', video_codec='H.264'),
'the_100_s03e09':
Episode('The.100.S03E09.720p.HDTV.x264-AVS.mkv', 'The 100', 3, 9, title='Stealing Fire', year=2014,
tvdb_id=5544536, series_tvdb_id=268592, series_imdb_id='tt2661044', source='HDTV',
release_group='AVS', resolution='720p', video_codec='H.264', imdb_id='tt4799896'),
'the fall':
Episode('the_fall.3x01.720p_hdtv_x264-fov.mkv', 'The Fall', 3, 1, title='The Fall', year=2013,
tvdb_id=5749493, series_tvdb_id=258107, series_imdb_id='tt2294189', source='HDTV',
release_group='fov', resolution='720p', video_codec='H.264', imdb_id='tt4516230'),
'csi_s15e18':
Episode('CSI.S15E18.720p.HDTV.X264.DIMENSION.mkv', 'CSI: Crime Scene Investigation', 15, 18,
title='The End Game', year=2000, tvdb_id=5104359, series_tvdb_id=72546, series_imdb_id='tt0247082',
source='HDTV', release_group='DIMENSION', resolution='720p', video_codec='H.264',
imdb_id='tt4145952'),
'turn_s04e03':
Episode('Turn.S04E03.720p.HDTV.x264-AVS.mkv', "TURN: Washington's Spies", 4, 3,
title='Blood for Blood', year=2014, tvdb_id=6124360, series_tvdb_id=272135,
series_imdb_id='tt2543328',
source='HDTV', release_group='AVS', resolution='720p', video_codec='H.264',
imdb_id='tt6137686', alternative_series=['Turn']),
'turn_s03e01':
Episode('Turn.S03E01.720p.HDTV.x264-AVS.mkv', "TURN: Washington's Spies", 3, 1,
title='Valediction', year=2014, tvdb_id=5471384, series_tvdb_id=272135,
series_imdb_id='tt2543328',
source='HDTV', release_group='AVS', resolution='720p', video_codec='H.264',
imdb_id='tt4909774', alternative_series=['Turn']),
'marvels_jessica_jones_s01e13':
Episode('Marvels.Jessica.Jones.S01E13.720p.WEBRip.x264-2HD', "Mar<NAME>", 1, 13,
title='AKA Smile', year=2015, tvdb_id=5311273, series_tvdb_id=284190,
series_imdb_id='tt2357547',
source='Web', release_group='2HD', resolution='720p', video_codec='H.264',
imdb_id='tt4162096', alternative_series=['<NAME>']),
'fear_walking_dead_s03e10':
Episode('Fear.the.Walking.Dead.S03E10.1080p.WEB-DL.DD5.1.H264-RARBG', 'Fear the Walking Dead', 3, 10,
resolution='1080p', source='Web', video_codec='H.264', release_group='RARBG'),
'the_end_of_the_fucking_world':
Episode('the.end.of.the.fucking.world.s01e04.720p.web.x264-skgtv.mkv', 'The End of the Fucking World', 1, 4,
resolution='720p', source='Web', video_codec='H.264', release_group='skgtv',
alternative_series=['The end of the f***ing world']),
'Marvels.Agents.of.S.H.I.E.L.D.S05E01-E02':
Episode('Marvels.Agents.of.S.H.I.E.L.D.S05E01-E02.720p.HDTV.x264-AVS', 'Marvels.Agents.of.S.H.I.E.L.D', 5,
1, resolution='720p', source='HDTV', video_codec='H.264', release_group='AVS'),
'alex_inc_s01e04':
Episode('Alex.Inc.S01E04.HDTV.x264-SVA.mkv', 'Alex, Inc.', 1, 4, source='HDTV', video_codec='H.264',
release_group='SVA', year=2018, title='The Nanny', series_imdb_id='tt6466948', tvdb_id=6627151,
series_tvdb_id=328635),
'shameless_us_s08e01':
Episode('Shameless.US.s08e01.web.h264-convoy', 'Shameless', 8, 1, source='Web', video_codec='H.264',
country=Country('US'), original_series=False, release_group='convoy', year=2010,
alternative_series=[], title='We Become What We... Frank!',
series_imdb_id='tt1586680', series_tvdb_id=161511, imdb_id='tt6347410', tvdb_id=6227949),
'house_of_cards_us_s06e01':
Episode('house.of.cards.us.s06e01.720p.web-dl.x264', 'House of Cards', 6, 1, source='Web',
video_codec='H.264', country=Country('US'), year=2013, original_series=False,
alternative_series=['House of Cards (2013)'], title='Chapter 66', series_imdb_id='tt1856010',
series_tvdb_id=262980, imdb_id='tt7538918', tvdb_id=6553109),
'walking_dead_s08e07':
Episode('The Walking Dead - 08x07 - Time for After.AMZN.WEB-DL-CasStudio.mkv', 'The Walking Dead',
8, 7, source='Web', streaming_service='Amazon Prime', release_group='CasStudio')
}
@pytest.fixture(scope='session')
def mkv():
data_path = os.path.join('tests', 'data', 'mkv')
# download matroska test suite
if not os.path.exists(data_path) or len(os.listdir(data_path)) != 8:
r = requests.get('http://downloads.sourceforge.net/project/matroska/test_files/matroska_test_w1_1.zip')
with ZipFile(BytesIO(r.content), 'r') as f:
f.extractall(data_path, [m for m in f.namelist() if os.path.splitext(m)[1] == '.mkv'])
# populate a dict with mkv files
files = {}
for path in os.listdir(data_path):
name, _ = os.path.splitext(path)
files[name] = os.path.join(data_path, path)
return files
@pytest.fixture(scope='session')
def rar(mkv):
data_path = os.path.join('tests', 'data', 'rar')
if not os.path.exists(data_path):
os.makedirs(data_path)
downloaded_files = {
'pwd-protected': 'https://github.com/markokr/rarfile/blob/master/test/files/rar5-psw.rar?raw=true',
'simple': 'https://github.com/markokr/rarfile/blob/master/test/files/rar5-quick-open.rar?raw=true'
}
generated_files = {
'video': [mkv['test1']],
'videos': [mkv['test3'], mkv['test4'], mkv['test5']],
}
files = {}
for filename, download_url in downloaded_files.items():
files[filename] = os.path.join(data_path, filename) + '.rar'
if not os.path.exists(files[filename]):
r = requests.get(download_url)
with open(files[filename], 'wb') as f:
f.write(r.content)
for filename, videos in generated_files.items():
files[filename] = os.path.join(data_path, filename) + '.rar'
if not os.path.exists(files[filename]):
subprocess.call(['rar', 'a', files[filename]] + videos)
return files
```
#### File: subliminal/tests/test_subtitle.py
```python
import os
import six
from babelfish import Language
from subliminal.subtitle import Subtitle, fix_line_ending, get_subtitle_path
def test_subtitle_text():
subtitle = Subtitle(Language('eng'))
subtitle.content = b'Some ascii text'
assert subtitle.text == 'Some ascii text'
def test_subtitle_text_no_content():
subtitle = Subtitle(Language('eng'))
assert subtitle.text is None
def test_subtitle_is_valid_no_content():
subtitle = Subtitle(Language('fra'))
assert subtitle.is_valid() is False
def test_subtitle_is_valid_valid(monkeypatch):
subtitle = Subtitle(Language('fra'))
text = (u'1\n'
u'00:00:20,000 --> 00:00:24,400\n'
u'En réponse à l\'augmentation de la criminalité\n'
u'dans certains quartiers,\n')
monkeypatch.setattr(Subtitle, 'text', text)
assert subtitle.is_valid() is True
def test_subtitle_is_valid_invalid(monkeypatch):
subtitle = Subtitle(Language('fra'))
text = (u'1\n'
u'00:00:20,000 --> 00:00:24,400\n'
u'En réponse à l\'augmentation de la criminalité\n'
u'dans certains quartiers,\n\n')
text += u'This line shouldn\'t be here'
monkeypatch.setattr(Subtitle, 'text', text)
assert subtitle.is_valid() is False
def test_subtitle_is_valid_valid_begin(monkeypatch):
subtitle = Subtitle(Language('fra'))
text = (u'1\n'
u'00:00:20,000 --> 00:00:24,400\n'
u'En réponse à l\'augmentation de la criminalité\n'
u'dans certains quartiers,\n\n')*20
text += u'This line shouldn\'t be here'
monkeypatch.setattr(Subtitle, 'text', text)
assert subtitle.is_valid() is True
def test_get_subtitle_path(movies):
video = movies['man_of_steel']
assert get_subtitle_path(video.name, extension='.sub') == os.path.splitext(video.name)[0] + '.sub'
def test_get_subtitle_path_language(movies):
video = movies['man_of_steel']
assert get_subtitle_path(video.name, Language('por', 'BR')) == os.path.splitext(video.name)[0] + '.pt-BR.srt'
def test_get_subtitle_path_language_undefined(movies):
video = movies['man_of_steel']
assert get_subtitle_path(video.name, Language('und')) == os.path.splitext(video.name)[0] + '.srt'
def test_fix_line_ending():
content = b'Text\r\nwith\r\nweird\nline ending\r\ncharacters'
assert fix_line_ending(content) == b'Text\nwith\nweird\nline ending\ncharacters'
# https://github.com/pannal/Sub-Zero.bundle/issues/646 replaced all Chinese character “不” with “上”
def test_fix_line_ending_chinese_characters():
character = bytes('不') if six.PY2 else bytes('不', 'utf16')
content = b''.join([character, b'\r\n', character, b'\n', character])
expected = b''.join([character, b'\n', character, b'\n', character])
assert fix_line_ending(content) == expected
def test_subtitle_valid_encoding():
subtitle = Subtitle(Language('deu'), False, None, 'windows-1252')
assert subtitle.encoding == 'cp1252'
def test_subtitle_empty_encoding():
subtitle = Subtitle(Language('deu'), False, None, None)
assert subtitle.encoding is None
def test_subtitle_invalid_encoding():
subtitle = Subtitle(Language('deu'), False, None, 'rubbish')
assert subtitle.encoding is None
def test_subtitle_guess_encoding_utf8():
subtitle = Subtitle(Language('zho'), False, None, None)
subtitle.content = b'Something here'
assert subtitle.guess_encoding() == 'utf-8'
assert isinstance(subtitle.text, six.text_type)
# regression for #921
def test_subtitle_text_guess_encoding_none():
content = b'\x00d\x00\x80\x00\x00\xff\xff\xff\xff\xff\xff,\x00\x00\x00\x00d\x00d\x00\x00\x02s\x84\x8f\xa9'
subtitle = Subtitle(Language('zho'), False, None, None)
subtitle.content = content
assert subtitle.guess_encoding() is None
assert not subtitle.is_valid()
assert not isinstance(subtitle.text, six.text_type)
``` |
{
"source": "314pies/ScreenTranslator",
"score": 3
} |
#### File: share/updates/hunspell.py
```python
import sys
import os
import subprocess
import re
def parse_language_names():
root = os.path.abspath(os.path.basename(__file__) + '/../../..')
lines = []
with open(root + '/src/languagecodes.cpp', 'r') as d:
lines = d.readlines()
result = {}
for line in lines:
if line.startswith('//'):
continue
all = re.findall(r'"(.*?)"', line)
if len(all) != 6:
continue
result[all[2]] = all[5]
return result
if len(sys.argv) < 2:
print("Usage:", sys.argv[0], "<dict_dir> [<download_url>]")
exit(1)
dict_dir = sys.argv[1]
download_url = "https://cgit.freedesktop.org/libreoffice/dictionaries/plain"
if len(sys.argv) > 2:
download_url = sys.argv[2]
mirror_url = "https://translator.gres.biz/resources/dictionaries"
language_names = parse_language_names()
preferred = ['sr.aff', 'sv_FI.aff',
'en_US.aff', 'de_DE_frami.aff', 'nb_NO.aff']
files = {}
it = os.scandir(dict_dir)
for d in it:
if not d.is_dir():
continue
lang = d.name
if '_' in lang:
lang = lang[0:lang.index('_')]
affs = []
fit = os.scandir(os.path.join(dict_dir, d.name))
for f in fit:
if not f.is_file or not f.name.endswith('.aff'):
continue
affs.append(f.name)
aff = ''
if len(affs) == 0:
continue
if len(affs) == 1:
aff = affs[0]
else:
for p in preferred:
if p in affs:
aff = p
break
if len(aff) == 0:
print('no aff for', lang, affs)
continue
aff = os.path.join(d.name, aff)
dic = aff[:aff.rindex('.')] + '.dic'
if not os.path.exists(os.path.join(dict_dir, dic)):
print('no dic exists', dic)
files[lang] = [aff, dic]
print(',"hunspell": {')
comma = ''
unknown_names = []
for lang in sorted(files.keys()):
file_names = files[lang]
if not lang in language_names:
unknown_names.append(lang)
continue
lang_name = language_names[lang]
print(' {}"{}":{{"files":['.format(comma, lang_name))
comma = ', '
lang_comma = ''
for file_name in file_names:
git_cmd = ['git', 'log', '-1', '--pretty=format:%cI', file_name]
date = subprocess.run(git_cmd, cwd=dict_dir, universal_newlines=True,
stdout=subprocess.PIPE, check=True).stdout
size = os.path.getsize(os.path.join(dict_dir, file_name))
installed = lang + file_name[file_name.index('/'):]
mirror = ',"' + mirror_url + '/' + file_name + \
'.zip"' if len(mirror_url) > 0 else ''
print(' {}{{"url":["{}/{}"{}], "path":"$hunspell$/{}", "date":"{}", "size":{}}}'.format(
lang_comma, download_url, file_name, mirror, installed, date, size))
lang_comma = ','
print(' ]}')
print('}')
print('unknown names', unknown_names)
``` |
{
"source": "315386775/gluon-cv",
"score": 2
} |
#### File: gluoncv/model_zoo/shufflenet.py
```python
from mxnet.context import cpu
from mxnet.gluon import nn
from mxnet.gluon.nn import BatchNorm
from mxnet.gluon.block import HybridBlock
__all__ = [
'ShuffleNetV1',
'shufflenet_v1',
'get_shufflenet_v1',
'ShuffleNetV2',
'shufflenet_v2',
'get_shufflenet_v2']
def _conv2d(channel, kernel=1, padding=0, stride=1, num_group=1, use_act=True, use_bias=True, norm_layer=BatchNorm, norm_kwargs=None):
cell = nn.HybridSequential(prefix='')
cell.add(nn.Conv2D(channel, kernel_size=kernel, strides=stride, padding=padding, groups=num_group, use_bias=use_bias))
cell.add(norm_layer(epsilon=1e-5, momentum=0.9, **({} if norm_kwargs is None else norm_kwargs)))
if use_act:
cell.add(nn.Activation('relu'))
return cell
class shuffleUnit(HybridBlock):
def __init__(self, in_channels, out_channels, combine_type, groups=3, grouped_conv=True,
norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
super(shuffleUnit, self).__init__(**kwargs)
if combine_type == 'add':
self.DWConv_stride = 1
elif combine_type == 'concat':
self.DWConv_stride = 2
out_channels -= in_channels
self.first_groups = groups if grouped_conv else 1
self.bottleneck_channels = out_channels // 4
self.grouped_conv = grouped_conv
self.output_channel = out_channels
self.groups = groups
self.combine_type = combine_type
with self.name_scope():
self.conv_beforshuffle = nn.HybridSequential()
self.conv_beforshuffle.add(_conv2d(channel=self.bottleneck_channels, kernel=1, stride=1,
num_group=self.first_groups))
self.conv_aftershuffle = nn.HybridSequential()
self.conv_aftershuffle.add(_conv2d(channel=self.bottleneck_channels, kernel=3, padding=1,
stride=self.DWConv_stride, num_group=self.bottleneck_channels, use_act=False))
self.conv_aftershuffle.add(_conv2d(channel=self.output_channel, kernel=1, stride=1, num_group=groups,
use_act=False))
def combine(self, F, branch1, branch2, combine):
if combine == 'add':
data = branch1 + branch2
data = F.Activation(data, act_type='relu')
elif combine == 'concat':
data = F.concat(branch1, branch2, dim=1)
data = F.Activation(data, act_type='relu')
return data
def channel_shuffle(self, F, data, groups):
data = F.reshape(data, shape=(0, -4, groups, -1, -2))
data = F.swapaxes(data, 1, 2)
data = F.reshape(data, shape=(0, -3, -2))
return data
def hybrid_forward(self, F, x):
res = x
x = self.conv_beforshuffle(x)
if self.grouped_conv:
x = self.channel_shuffle(F, x, groups=self.groups)
x = self.conv_aftershuffle(x)
if self.combine_type == 'concat':
res = F.Pooling(data=res, kernel=(3, 3), pool_type='avg', stride=(2, 2), pad=(1, 1))
x = self.combine(F, res, x, combine=self.combine_type)
return x
class ShuffleNetV1(HybridBlock):
def __init__(self, groups=3, classes=1000, norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
super(ShuffleNetV1, self).__init__(**kwargs)
with self.name_scope():
self.features = nn.HybridSequential(prefix='')
self.features.add(nn.Conv2D(24, kernel_size=3, strides=2, padding=1))
self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
self.features.add(self.make_stage(2))
self.features.add(self.make_stage(3))
self.features.add(self.make_stage(4))
self.features.add(nn.GlobalAvgPool2D())
self.output = nn.Dense(classes)
def make_stage(self, stage, groups=3):
stage_repeats = [3, 7, 3]
grouped_conv = stage > 2
if groups == 1:
out_channels = [-1, 24, 144, 288, 567]
elif groups == 2:
out_channels = [-1, 24, 200, 400, 800]
elif groups == 3:
out_channels = [-1, 24, 240, 480, 960]
elif groups == 4:
out_channels = [-1, 24, 272, 544, 1088]
elif groups == 8:
out_channels = [-1, 24, 384, 768, 1536]
body = nn.HybridSequential()
body.add(shuffleUnit(out_channels[stage - 1], out_channels[stage], 'concat', groups, grouped_conv))
for i in range(stage_repeats[stage - 2]):
body.add(shuffleUnit(out_channels[stage], out_channels[stage], 'add', groups, True))
return body
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
class shuffleUnitV2(HybridBlock):
def __init__(self, in_channels, out_channels, split, norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
super(shuffleUnitV2, self).__init__(**kwargs)
self.in_channels = in_channels
self.equal_channels = out_channels // 2
self.split = split
if split == True:
self.DWConv_stride = 1
else:
self.DWConv_stride = 2
with self.name_scope():
self.branch1_conv = nn.HybridSequential()
self.branch1_conv.add(_conv2d(channel=self.in_channels, kernel=3, padding=1, stride=self.DWConv_stride,
num_group=self.in_channels, use_act=False, use_bias=False))
self.branch1_conv.add(_conv2d(channel=self.equal_channels, kernel=1, stride=1,
use_act=True, use_bias=False))
with self.name_scope():
self.branch2_conv = nn.HybridSequential()
self.branch2_conv.add(_conv2d(channel=self.equal_channels, kernel=1, stride=1, use_act=True, use_bias=False))
self.branch2_conv.add(_conv2d(channel=self.equal_channels, kernel=3, padding=1, stride=self.DWConv_stride,
num_group=self.equal_channels, use_act=False, use_bias=False))
self.branch2_conv.add(_conv2d(channel=self.equal_channels, kernel=1, stride=1,
use_act=True, use_bias=False))
def channel_shuffle(self, F, data, groups):
data = F.reshape(data, shape=(0, -4, groups, -1, -2))
data = F.swapaxes(data, 1, 2)
data = F.reshape(data, shape=(0, -3, -2))
return data
def hybrid_forward(self, F, x):
if self.split == True:
branch1 = F.slice_axis(x, axis=1, begin=0, end=self.in_channels // 2)
branch2 = F.slice_axis(x, axis=1, begin=self.in_channels // 2, end=self.in_channels)
else:
branch1 = x
branch2 = x
branch1 = self.branch1_conv(branch1)
branch2 = self.branch2_conv(branch2)
x = F.concat(branch1, branch2, dim=1)
x = self.channel_shuffle(F, data=x, groups=2)
return x
class ShuffleNetV2(HybridBlock):
def __init__(self, classes=1000, norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
super(ShuffleNetV2, self).__init__(**kwargs)
with self.name_scope():
self.features = nn.HybridSequential(prefix='')
self.features.add(_conv2d(channel=24, kernel=3, stride=2, padding=1, use_act=True, use_bias=False))
self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
self.features.add(self.make_stage(2))
self.features.add(self.make_stage(3))
self.features.add(self.make_stage(4))
self.features.add(_conv2d(channel=1024, kernel=1, stride=1, use_act=True, use_bias=False))
self.features.add(nn.GlobalAvgPool2D())
self.output = nn.Dense(classes)
def make_stage(self, stage, multiplier=1):
stage_repeats = [3, 7, 3]
if multiplier == 0.5:
out_channels = [-1, 24, 48, 96, 192]
elif multiplier == 1:
out_channels = [-1, 24, 116, 232, 464]
elif multiplier == 1.5:
out_channels = [-1, 24, 176, 352, 704]
elif multiplier == 2:
out_channels = [-1, 24, 244, 488, 976]
body = nn.HybridSequential()
body.add(shuffleUnitV2(out_channels[stage - 1], out_channels[stage], split=False))
for i in range(stage_repeats[stage - 2]):
body.add(shuffleUnitV2(out_channels[stage], out_channels[stage], split=True))
return body
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_shufflenet_v1(pretrained=False, root='~/.mxnet/models', ctx=cpu(), norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
net = ShuffleNetV1(norm_layer=norm_layer, norm_kwargs=norm_kwargs, **kwargs)
from ..data import ImageNet1kAttr
attrib = ImageNet1kAttr()
net.synset = attrib.synset
net.classes = attrib.classes
net.classes_long = attrib.classes_long
return net
def shufflenet_v1(**kwargs):
return get_shufflenet_v1(**kwargs)
def get_shufflenet_v2(pretrained=False, root='~/.mxnet/models', ctx=cpu(), norm_layer=BatchNorm, norm_kwargs=None, **kwargs):
net = ShuffleNetV2(norm_layer=norm_layer, norm_kwargs=norm_kwargs, **kwargs)
from ..data import ImageNet1kAttr
attrib = ImageNet1kAttr()
net.synset = attrib.synset
net.classes = attrib.classes
net.classes_long = attrib.classes_long
return net
def shufflenet_v2(**kwargs):
return get_shufflenet_v2(**kwargs)
``` |
{
"source": "316k/sms-server",
"score": 3
} |
#### File: 316k/sms-server/sms-server.py
```python
import android
import time
from datetime import datetime
from urllib import request
from parsehtml import strip_tags
droid = android.Android()
methods = []
class services():
"""Available services are defined here"""
def ts(arg):
"""Returns the current timestamp"""
return str(int(time.time()))
def ping(arg):
"""Tells the time at which the message was received"""
return "Message received at " + str(datetime.now().hour) + 'h' + str(datetime.now().minute) + 'm' + str(datetime.now().second) + '.' + str(datetime.now().microsecond) + 's'
def wifi(arg):
"""Tells if the Wifi is enabled or not"""
return "Enabled" if droid.checkWifiState().result else "Disabled"
def battery(arg):
"""Returns the phone's battery level"""
droid.batteryStartMonitoring()
time.sleep(1)
droid.batteryStopMonitoring()
return str(droid.batteryGetLevel().result) + " %"
def _wiki(page):
"""Returns the first 500 characters of an english wikipedia page (not currently working)"""
page = request.urlopen("https://en.m.wikipedia.org/wiki/" + page)
lines = page.readlines()
text = ''.join([i.decode("utf-8").strip() for i in lines])
content = strip_tags(text).strip()[:300]
print(content)
return content
def reverse(s):
"""Reverses the input"""
return s[::-1]
def hello(name):
"""Says hello to the given name"""
return "Hello to you, " + name
def echo(words):
"""Echoes a line of text"""
return words
def help(arg):
"""Displays help about this script"""
if arg in methods:
return eval("services." + arg + ".__doc__")
return "*SmsBot help* Available services : " + str(methods)
# Already parsed messages
parsed_messages = []
# Available methods
methods = [method for method in dir(services) if not method.startswith('_')]
while True:
messages = droid.smsGetMessages(True).result
for sms in messages:
if sms["_id"] in parsed_messages:
# Ignore previously parsed messages
continue
parsed_messages.append(sms["_id"])
txt = sms["body"].split(' ')
command = txt[0][1:]
if txt[0][0] == '!' and command in methods:
args = ' '.join(txt[1:])
out = eval("services." + command + "(" + repr(args) + ")")
if out:
droid.smsSend(sms["address"], out)
droid.vibrate()
time.sleep(1)
``` |
{
"source": "317070/nntools",
"score": 3
} |
#### File: nntools/layers/corrmm.py
```python
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.cuda.basic_ops import gpu_contiguous
from theano.sandbox.cuda.blas import GpuCorrMM
from .. import init
from .. import nonlinearities
from . import base
# base class for all layers that rely on GpuCorrMM directly
class MMLayer(base.Layer):
pass
class Conv2DMMLayer(MMLayer):
def __init__(self, input_layer, num_filters, filter_size, strides=(1, 1), border_mode=None, untie_biases=False,
W=init.Uniform(), b=init.Constant(0.), nonlinearity=nonlinearities.rectify, pad=None,
flip_filters=False):
super(Conv2DMMLayer, self).__init__(input_layer)
if nonlinearity is None:
self.nonlinearity = nonlinearities.identity
else:
self.nonlinearity = nonlinearity
self.num_filters = num_filters
self.filter_size = filter_size
self.strides = strides
self.untie_biases = untie_biases
self.flip_filters = flip_filters
if border_mode is not None and pad is not None:
raise RuntimeError("You cannot specify both 'border_mode' and 'pad'. To avoid ambiguity, please specify only one of them.")
elif border_mode is None and pad is None:
# no option specified, default to valid mode
self.pad = (0, 0)
elif border_mode is not None:
if border_mode == 'valid':
self.pad = (0, 0)
elif border_mode == 'full':
self.pad = (self.filter_size[0] - 1, self.filter_size[1] -1)
elif border_mode == 'same':
# only works for odd filter size, but the even filter size case is probably not worth supporting.
self.pad = ((self.filter_size[0] - 1) // 2, (self.filter_size[1] - 1) // 2)
else:
raise RuntimeError("Unsupported border_mode for Conv2DMMLayer: %s" % border_mode)
else:
self.pad = pad
self.W = self.create_param(W, self.get_W_shape())
if b is None:
self.b = None
elif self.untie_biases:
output_shape = self.get_output_shape()
self.b = self.create_param(b, (num_filters, output_shape[2], output_shape[3]))
else:
self.b = self.create_param(b, (num_filters,))
self.corr_mm_op = GpuCorrMM(subsample=self.strides, pad=self.pad)
def get_W_shape(self):
num_input_channels = self.input_layer.get_output_shape()[1]
return (self.num_filters, num_input_channels, self.filter_size[0], self.filter_size[1])
def get_params(self):
return [self.W] + self.get_bias_params()
def get_bias_params(self):
return [self.b] if self.b is not None else []
def get_output_shape_for(self, input_shape):
batch_size = input_shape[0]
input_width, input_height = input_shape[2:4]
output_width = (input_width + 2*self.pad[0] - self.filter_size[0]) // self.strides[0] + 1
output_height = (input_height + 2*self.pad[1] - self.filter_size[1]) // self.strides[1] + 1
return (batch_size, self.num_filters, output_width, output_height)
def get_output_for(self, input, *args, **kwargs):
filters = self.W
if self.flip_filters:
filters = filters[:, :, ::-1, ::-1] # flip width, height
contiguous_filters = gpu_contiguous(filters)
contiguous_input = gpu_contiguous(input)
conved = self.corr_mm_op(contiguous_input, contiguous_filters)
if self.b is None:
activation = conved
elif self.untie_biases:
activation = conved + self.b.dimshuffle('x', 0, 1, 2)
else:
activation = conved + self.b.dimshuffle('x', 0, 'x', 'x')
return self.nonlinearity(activation)
```
#### File: nntools/layers/cuda_convnet.py
```python
import numpy as np
import theano
import theano.tensor as T
from .. import init
from .. import nonlinearities
from . import base
from theano.sandbox.cuda.basic_ops import gpu_contiguous
# TODO: make sure to document the limitations and 'best practices' (i.e. minibatch size % 128 == 0)
# TODO: see if the 'dimshuffle' logic can be put in the base class instead.
# base class for all layers that use ops from pylearn2.sandbox.cuda_convnet
class CCLayer(base.Layer):
pass
class Conv2DCCLayer(CCLayer):
def __init__(self, input_layer, num_filters, filter_size, strides=(1, 1), border_mode=None, untie_biases=False,
W=init.Uniform(), b=init.Constant(0.), nonlinearity=nonlinearities.rectify, pad=None,
dimshuffle=True, flip_filters=False, partial_sum=1):
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
super(Conv2DCCLayer, self).__init__(input_layer)
if nonlinearity is None:
self.nonlinearity = nonlinearities.identity
else:
self.nonlinearity = nonlinearity
if filter_size[0] != filter_size[1]:
raise RuntimeError("Conv2DCCLayer only supports square filters, but filter_size=(%d, %d)" % filter_size)
if strides[0] != strides[1]:
raise RuntimeError("Conv2DCCLayer only supports square strides, but strides=(%d, %d)" % strides)
if num_filters % 16 != 0:
raise RuntimeError("Conv2DCCLayer requires num_filters to be a multiple of 16, but num_filters is %d" % num_filters)
self.num_filters = num_filters
self.filter_size = filter_size[0]
self.stride = strides[0]
self.untie_biases = untie_biases
self.dimshuffle = dimshuffle
self.flip_filters = flip_filters
self.partial_sum = partial_sum
if border_mode is not None and pad is not None:
raise RuntimeError("You cannot specify both 'border_mode' and 'pad'. To avoid ambiguity, please specify only one of them.")
elif border_mode is None and pad is None:
# no option specified, default to valid mode
self.pad = 0
elif border_mode is not None:
if border_mode == 'valid':
self.pad = 0
elif border_mode == 'full':
self.pad = self.filter_size - 1
elif border_mode == 'same':
# only works for odd filter size, but the even filter size case is probably not worth supporting.
self.pad = (self.filter_size - 1) // 2
else:
raise RuntimeError("Unsupported border_mode for Conv2DCCLayer: %s" % border_mode)
else:
self.pad = pad
self.W = self.create_param(W, self.get_W_shape())
if b is None:
self.b = None
elif self.untie_biases:
output_shape = self.get_output_shape()
if self.dimshuffle:
self.b = self.create_param(b, (num_filters, output_shape[2], output_shape[3]))
else:
self.b = self.create_param(b, (num_filters, output_shape[1], output_shape[2]))
else:
self.b = self.create_param(b, (num_filters,))
self.filter_acts_op = FilterActs(stride=self.stride, partial_sum=self.partial_sum, pad=self.pad)
def get_W_shape(self):
if self.dimshuffle:
num_input_channels = self.input_layer.get_output_shape()[1]
return (self.num_filters, num_input_channels, self.filter_size, self.filter_size)
else:
num_input_channels = self.input_layer.get_output_shape()[0]
return (num_input_channels, self.filter_size, self.filter_size, self.num_filters)
def get_params(self):
return [self.W] + self.get_bias_params()
def get_bias_params(self):
return [self.b] if self.b is not None else []
def get_output_shape_for(self, input_shape):
if self.dimshuffle:
batch_size = input_shape[0]
input_width, input_height = input_shape[2:4]
else:
batch_size = input_shape[3]
input_width, input_height = input_shape[1:3]
output_width = (input_width + 2*self.pad - self.filter_size) // self.stride + 1
output_height = (input_height + 2*self.pad - self.filter_size) // self.stride + 1
if self.dimshuffle:
return (batch_size, self.num_filters, output_width, output_height)
else:
return (self.num_filters, output_width, output_height, batch_size)
def get_output_for(self, input, *args, **kwargs):
if self.dimshuffle:
filters = self.W.dimshuffle(1, 2, 3, 0) # bc01 to c01b
input = input.dimshuffle(1, 2, 3, 0) # bc01 to c01b
else:
filters = self.W
if self.flip_filters:
filters = filters[:, ::-1, ::-1, :] # flip width, height
contiguous_filters = gpu_contiguous(filters)
contiguous_input = gpu_contiguous(input)
conved = self.filter_acts_op(contiguous_input, contiguous_filters)
if self.b is not None:
if self.untie_biases:
biases = self.b.dimshuffle(0, 1, 2, 'x') # c01 to c01b
else:
biases = self.b.dimshuffle(0, 'x', 'x', 'x') # c to c01b
conved += biases
conved = self.nonlinearity(conved)
if self.dimshuffle:
return conved.dimshuffle(3, 0, 1, 2) # c01b to bc01
else:
return conved
class MaxPool2DCCLayer(CCLayer):
def __init__(self, input_layer, ds, ignore_border=False, strides=None, dimshuffle=True):
from pylearn2.sandbox.cuda_convnet.pool import MaxPool
super(MaxPool2DCCLayer, self).__init__(input_layer)
if ds[0] != ds[1]:
raise RuntimeError("MaxPool2DCCLayer only supports square pooling regions, but ds=(%d, %d)" % ds)
if strides is not None and strides[0] != strides[1]:
raise RuntimeError("MaxPool2DCCLayer only supports using the same stride in both directions, but strides=(%d, %d)" % strides)
# ignore_border argument is for compatibility with MaxPool2DLayer.
# it is not supported. Borders are never ignored.
if ignore_border != False:
raise RuntimeError("MaxPool2DCCLayer does not support ignore_border.")
self.ds = ds[0]
if strides is None:
self.stride = self.ds
else:
self.stride = strides[0]
self.dimshuffle = dimshuffle
self.pool_op = MaxPool(ds=self.ds, stride=self.stride)
def get_output_shape_for(self, input_shape):
if self.dimshuffle:
batch_size = input_shape[0]
num_input_channels = input_shape[1]
input_width, input_height = input_shape[2:4]
else:
batch_size = input_shape[3]
num_input_channels = input_shape[0]
input_width, input_height = input_shape[1:3]
output_width = int(np.ceil(float(input_width - self.ds + self.stride) / self.stride))
output_height = int(np.ceil(float(input_height - self.ds + self.stride) / self.stride))
if self.dimshuffle:
return (batch_size, num_input_channels, output_width, output_height)
else:
return (num_input_channels, output_width, output_height, batch_size)
def get_output_for(self, input, *args, **kwargs):
if self.dimshuffle:
input = input.dimshuffle(1, 2, 3, 0) # bc01 to c01b
contiguous_input = gpu_contiguous(input)
pooled = self.pool_op(contiguous_input)
if self.dimshuffle:
return pooled.dimshuffle(3, 0, 1, 2) # c01b to bc01
else:
return pooled
# TODO: crossmapnorm
# from pylearn2.sandbox.cuda_convnet.response_norm import CrossMapNorm
## Helper classes for switching between bc01 and c01b input formats
class ShuffleBC01ToC01BLayer(base.Layer):
"""
This layer dimshuffles 4D input for interoperability between c01b and bc01 ops.
bc01 (theano) -> c01b (cuda-convnet)
"""
def get_output_shape_for(self, input_shape):
return (input_shape[1], input_shape[2], input_shape[3], input_shape[0])
def get_output_for(self, input, *args, **kwargs):
return input.dimshuffle(1, 2, 3, 0)
bc01_to_c01b = ShuffleBC01ToC01BLayer # shortcut
class ShuffleC01BToBC01Layer(base.Layer):
"""
This layer dimshuffles 4D input for interoperability between c01b and bc01 ops.
c01b (cuda-convnet) -> bc01 (theano)
"""
def get_output_shape_for(self, input_shape):
return (input_shape[3], input_shape[0], input_shape[1], input_shape[2])
def get_output_for(self, input, *args, **kwargs):
return input.dimshuffle(3, 0, 1, 2)
c01b_to_bc01 = ShuffleC01BToBC01Layer # shortcut
## c01b versions of other Layer classes
class NINLayer_c01b(base.Layer):
"""
This does the same as nntools.layers.NINLayer, but operates with c01b
axis arrangement instead of bc01. This reduces the number of shuffles
and reshapes required and might be faster as a result.
"""
def __init__(self, input_layer, num_units, untie_biases=False,
W=init.Uniform(), b=init.Constant(0.), nonlinearity=nonlinearities.rectify):
super(NINLayer_c01b, self).__init__(input_layer)
if nonlinearity is None:
self.nonlinearity = nonlinearities.identity
else:
self.nonlinearity = nonlinearity
self.num_units = num_units
self.untie_biases = untie_biases
output_shape = self.input_layer.get_output_shape()
num_input_channels = output_shape[0]
self.W = self.create_param(W, (num_units, num_input_channels))
if b is None:
self.b = None
elif self.untie_biases:
output_shape = self.get_output_shape()
self.b = self.create_param(b, (num_units,) + output_shape[1:-1])
else:
self.b = self.create_param(b, (num_units,))
def get_params(self):
return [self.W] + self.get_bias_params()
def get_bias_params(self):
return [self.b] if self.b is not None else []
def get_output_shape_for(self, input_shape):
return (self.num_units,) + input_shape[1:]
def get_output_for(self, input, *args, **kwargs):
out = T.tensordot(self.W, input, axes=[[1], [0]]) # fc * c01b... = f01b...
if self.b is None:
activation = out
else:
if self.untie_biases:
bias_axes = range(input.ndim - 1) + ['x']
else:
bias_axes = [0] + (['x'] * (input.ndim - 1))
b_shuffled = self.b.dimshuffle(bias_axes)
activation = out + b_shuffled
return self.nonlinearity(activation)
``` |
{
"source": "317070/Recipes",
"score": 2
} |
#### File: Recipes/modelzoo/vgg_cnn_s.py
```python
from lasagne.layers import InputLayer, DenseLayer, DropoutLayer
from lasagne.layers import NonlinearityLayer
from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer
from lasagne.layers import MaxPool2DLayer as PoolLayer
from lasagne.layers import LocalResponseNormalization2DLayer as NormLayer
from lasagne.nonlinearities import softmax
def build_model():
net = {}
net['input'] = InputLayer((None, 3, 224, 224))
net['conv1'] = ConvLayer(net['input'],
num_filters=96,
filter_size=7,
stride=2)
# caffe has alpha = alpha * pool_size
net['norm1'] = NormLayer(net['conv1'], alpha=0.0001)
net['pool1'] = PoolLayer(net['norm1'],
pool_size=3,
stride=3,
ignore_border=False)
net['conv2'] = ConvLayer(net['pool1'], num_filters=256, filter_size=5)
net['pool2'] = PoolLayer(net['conv2'],
pool_size=2,
stride=2,
ignore_border=False)
net['conv3'] = ConvLayer(net['pool2'],
num_filters=512,
filter_size=3,
pad=1)
net['conv4'] = ConvLayer(net['conv3'],
num_filters=512,
filter_size=3,
pad=1)
net['conv5'] = ConvLayer(net['conv4'],
num_filters=512,
filter_size=3,
pad=1)
net['pool5'] = PoolLayer(net['conv5'],
pool_size=3,
stride=3,
ignore_border=False)
net['fc6'] = DenseLayer(net['pool5'], num_units=4096)
net['drop6'] = DropoutLayer(net['fc6'], p=0.5)
net['fc7'] = DenseLayer(net['drop6'], num_units=4096)
net['drop7'] = DropoutLayer(net['fc7'], p=0.5)
net['fc8'] = DenseLayer(net['drop7'], num_units=1000, nonlinearity=None)
net['prob'] = NonlinearityLayer(net['fc8'], softmax)
return net
``` |
{
"source": "317070/Twitch-plays-LSD-neural-net",
"score": 3
} |
#### File: Twitch-plays-LSD-neural-net/models/default_image.py
```python
from functools import partial
import theano
import theano.tensor as T
import numpy as np
import lasagne as nn
import scipy.misc
from glob import glob
from lasagne.layers import dnn
import utils
batch_size = 1
learning_rate = 5.0
momentum = theano.shared(np.float32(0.9))
steps_per_zoom = 1
network_power = 1
prior_strength = 1000
zoomspeed = 1.05
width = 1024 #1024 #600#1280
height = 576 #576 #336#720 #multiple of 2
estimated_input_fps=15./steps_per_zoom
n_classes = 1000
total_steps = 100
#Determines also the scale of the entire thing!
image = scipy.misc.imread(glob("hd1.*")[0])
"""
batch_size = 1
learning_rate = 2.0
momentum = theano.shared(np.float32(0.9))
steps_per_zoom = 30
network_power = 1
prior_strength = 10
zoomspeed = 1.05
width = 960
height = 540 #multiple of 2
estimated_input_fps=80./steps_per_zoom
n_classes = 1000
"""
pretrained_params = np.load("data/vgg16.npy")
# image = scipy.misc.imread("image.png")
print image.dtype
mean_img = np.transpose(np.load("data/mean.npy").astype("float32"), axes=(2,0,1)).mean() #.mean() for any size
# image -= mean_img
image = np.transpose(image, axes=(2,0,1))
conv3 = partial(dnn.Conv2DDNNLayer,
strides=(1, 1),
border_mode="same",
filter_size=(3,3),
nonlinearity=nn.nonlinearities.rectify)
class custom_flatten_dense(nn.layers.DenseLayer):
def get_output_for(self, input, **kwargs):
if input.ndim > 2:
# if the input has more than two dimensions, flatten it into a
# batch of feature vectors.
input = input.dimshuffle(0,3,1,2)
input = input.flatten(2)
activation = T.dot(input, self.W)
if self.b is not None:
activation = activation + self.b.dimshuffle('x', 0)
return self.nonlinearity(activation)
dense = partial(nn.layers.DenseLayer,
nonlinearity=nn.nonlinearities.rectify)
max_pool = partial(dnn.MaxPool2DDNNLayer,
ds=(2,2),
strides=(2,2))
def build_model(batch_size=batch_size):
l_in = nn.layers.InputLayer(shape=(batch_size,)+image.shape)
l = l_in
l = conv3(l, num_filters=64)
l = conv3(l, num_filters=64)
l = max_pool(l)
l = conv3(l, num_filters=128)
l = conv3(l, num_filters=128)
l = max_pool(l)
l = conv3(l, num_filters=256)
l = conv3(l, num_filters=256)
l = conv3(l, num_filters=256)
l = max_pool(l)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = max_pool(l)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = max_pool(l)
l = dnn.Conv2DDNNLayer(l,
num_filters=4096,
strides=(1, 1),
border_mode="valid",
filter_size=(7,7))
l = dnn.Conv2DDNNLayer(l,
num_filters=4096,
strides=(1, 1),
border_mode="same",
filter_size=(1,1))
l = dnn.Conv2DDNNLayer(l,
num_filters=n_classes,
strides=(1,1),
border_mode="same",
filter_size=(1,1),
nonlinearity=None)
l_to_strengthen = l
l_out = l
return utils.struct(
input=l_in,
out=l_out,
to_strengthen=l_to_strengthen)
def build_updates(loss, all_params, learning_rate, beta1=0.9, beta2=0.999,
epsilon=1e-8):
all_grads = theano.grad(loss, all_params)
updates = []
resets = []
t = theano.shared(1) # timestep, for bias correction
for param_i, grad_i in zip(all_params, all_grads):
mparam_i = theano.shared(np.zeros(param_i.get_value().shape, dtype=theano.config.floatX)) # 1st moment
vparam_i = theano.shared(np.zeros(param_i.get_value().shape, dtype=theano.config.floatX)) # 2nd moment
m = beta1 * grad_i + (1 - beta1) * mparam_i # new value for 1st moment estimate
v = beta2 * T.sqr(grad_i) + (1 - beta2) * vparam_i # new value for 2nd moment estimate
m_unbiased = m / (1 - (1 - beta1) ** t.astype(theano.config.floatX))
v_unbiased = v / (1 - (1 - beta2) ** t.astype(theano.config.floatX))
w = param_i - learning_rate * m_unbiased / (T.sqrt(v_unbiased) + epsilon) # new parameter values
updates.append((mparam_i, m))
updates.append((vparam_i, v))
updates.append((param_i, w))
resets.append([mparam_i, np.zeros(param_i.get_value().shape, dtype=theano.config.floatX)])
resets.append([vparam_i, np.zeros(param_i.get_value().shape, dtype=theano.config.floatX)])
resets.append([t, 1])
updates.append((t, t + 1))
return updates, resets
```
#### File: Twitch-plays-LSD-neural-net/models/default.py
```python
from functools import partial
import theano
import theano.tensor as T
import numpy as np
import lasagne as nn
import scipy.misc
from glob import glob
from lasagne.layers import dnn
import utils
""" The twitter login keys """
#""" #toggle this comment to use test or live version
#test-version
TWITCH_USERNAME = ""
TWITCH_OAUTH = ""
TWITCH_STREAM_KEY = ""
"""
#live version
TWITCH_USERNAME = ""
TWITCH_OAUTH = ""
TWITCH_STREAM_KEY = ""
#"""
"""The number of streams created simultaneously (1 usually)"""
batch_size = 1
"""Learning rate"""
learning_rate = 5.0
"""Momentum (not used?)"""
momentum = theano.shared(np.float32(0.9))
"""Number of gradient steps per zoom step"""
steps_per_zoom = 10
"""exponential power of the network (not used?)"""
network_power = 1
"""Strength of the image prior (very important!)"""
prior_strength = 10
"""Zoom speed in percentage per second (very important!)"""
zoomspeed = 1.05
"""Width and height of the stream, NOT of the image optimized"""
width = 1024
height = 576 #multiple of 2!
"""Estimated number of optimized frames generated, important in beginning of stream only"""
estimated_input_fps=15./steps_per_zoom
"""Number of classes. Change this when using other networks."""
n_classes = 1000
"""The image used to start with, also determines the size of the image optimized!"""
image = scipy.misc.imread(glob("image7.*")[0])
"""The network used"""
pretrained_params = np.load("data/vgg16.npy")
print image.dtype
mean_img = np.transpose(np.load("data/mean.npy").astype("float32"), axes=(2,0,1)).mean() #.mean() for any size
# image -= mean_img
image = np.transpose(image, axes=(2,0,1))
conv3 = partial(dnn.Conv2DDNNLayer,
strides=(1, 1),
border_mode="same",
filter_size=(3,3),
nonlinearity=nn.nonlinearities.rectify)
dense = partial(nn.layers.DenseLayer,
nonlinearity=nn.nonlinearities.rectify)
max_pool = partial(dnn.MaxPool2DDNNLayer,
ds=(2,2),
strides=(2,2))
def build_model(batch_size=batch_size):
l_in = nn.layers.InputLayer(shape=(batch_size,)+image.shape)
l = l_in
l = conv3(l, num_filters=64)
l = conv3(l, num_filters=64)
l = max_pool(l)
l = conv3(l, num_filters=128)
l = conv3(l, num_filters=128)
l = max_pool(l)
l = conv3(l, num_filters=256)
l = conv3(l, num_filters=256)
l = conv3(l, num_filters=256)
l = max_pool(l)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = max_pool(l)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = conv3(l, num_filters=512)
l = max_pool(l)
l = dnn.Conv2DDNNLayer(l,
num_filters=4096,
strides=(1, 1),
border_mode="valid",
filter_size=(7,7))
l = dnn.Conv2DDNNLayer(l,
num_filters=4096,
strides=(1, 1),
border_mode="same",
filter_size=(1,1))
l = dnn.Conv2DDNNLayer(l,
num_filters=n_classes,
strides=(1,1),
border_mode="same",
filter_size=(1,1),
nonlinearity=None)
l_to_strengthen = l
l_out = l
return utils.struct(
input=l_in,
out=l_out,
to_strengthen=l_to_strengthen)
"""The update step. Here using Adam's method."""
def build_updates(loss, all_params, learning_rate, beta1=0.9, beta2=0.999,
epsilon=1e-8):
all_grads = theano.grad(loss, all_params)
updates = []
resets = []
t = theano.shared(1) # timestep, for bias correction
for param_i, grad_i in zip(all_params, all_grads):
mparam_i = theano.shared(np.zeros(param_i.get_value().shape, dtype=theano.config.floatX)) # 1st moment
vparam_i = theano.shared(np.zeros(param_i.get_value().shape, dtype=theano.config.floatX)) # 2nd moment
m = beta1 * grad_i + (1 - beta1) * mparam_i # new value for 1st moment estimate
v = beta2 * T.sqr(grad_i) + (1 - beta2) * vparam_i # new value for 2nd moment estimate
m_unbiased = m / (1 - (1 - beta1) ** t.astype(theano.config.floatX))
v_unbiased = v / (1 - (1 - beta2) ** t.astype(theano.config.floatX))
w = param_i - learning_rate * m_unbiased / (T.sqrt(v_unbiased) + epsilon) # new parameter values
updates.append((mparam_i, m))
updates.append((vparam_i, v))
updates.append((param_i, w))
resets.append([mparam_i, np.zeros(param_i.get_value().shape, dtype=theano.config.floatX)])
resets.append([vparam_i, np.zeros(param_i.get_value().shape, dtype=theano.config.floatX)])
resets.append([t, 1])
updates.append((t, t + 1))
return updates, resets
```
#### File: 317070/Twitch-plays-LSD-neural-net/read_the_chat.py
```python
import numpy as np
import re
from collections import deque
from twitch import TwitchChatStream
import random
import time
import exrex
import copy
EXREX_REGEX_ONE = ("(@__username__: (Wow|Amazing|Fascinating|Incredible|Marvelous|Wonderful|AAAAAah|OMG)\. __WORD__, that's (deep|wild|trippy|dope|weird|spacy), (man|dude|brother|bro|buddy|my man|mate|homie|brah|dawg)\. (Thanks|Kudos|Props|Respect) for (writing stuff|sending ideas) to steer my trip\.)|"
"(@__username__: __WORD__, __WORD__, __WORD__ (EVERYWHERE|ALL AROUND|ALL OVER)\. (WaaaaAAAah|Wooooooooooow))|"
#"(Wow, very @__username__, such __word__, much (amazing|bazinga|space|woop)\.)" #disliked by native english people
)
EXREX_REGEX_TWO = ("(@__username__: One __word0__ with __word1__ coming (up next|your way)!)|"
"(@__username__: Yeah, let's (try|create|dream of|watch) __word0__ with a (topping|layer) of __word1__!)"
)
EXREX_REGEX_MORE = ("(@__username__: __words__, I'll mash them all up for ya\.)")
class ChatReader(TwitchChatStream):
def __init__(self, *args, **kwargs):
super(ChatReader, self).__init__(*args, **kwargs)
#make a data structure to easily parse chat messages
self.classes = np.load("data/classes.npy")
ignore_list = ['the', 'of', 't', 'and', "the", "be", "to", "of", "and", "a", "in", "that", "have", "i", "it",
"for", "not", "on", "with", "he", "as", "you", "do", "at", "this", "but", "his", "by",
"from", "they", "we", "say", "her", "she", "or", "an", "will", "my", "one", "all", "would",
"there", "their", "what", "so", "up", "out", "if", "about", "who", "get", "which",
"go", "me", "when", "make", "can", "like", "time", "no", "just", "him", "know", "take",
"people", "into", "year", "your", "good", "some", "could", "them", "see", "other",
"than", "then", "now", "look", "only", "come", "its", "over", "think", "also",
"back", "after", "use", "two", "how", "our", "work", "first", "well", "way", "even", "new",
"want", "because", "any", "these", "give", "day", "most", "us"]
# First, check if the complete string matches
d = {d[0]:i for i,d in enumerate(self.classes)}
self.full_dictionary = {}
for str,i in d.iteritems():
for word in str.split(','):
word = word.lower().strip()
if word in ignore_list:
continue
if word in self.full_dictionary:
self.full_dictionary[word].append(i)
else:
self.full_dictionary[word] = [i]
# r'\bAND\b | \bOR\b | \bNOT\b'
self.regexes = []
regex_string = " | ".join([r"^%s$"%word.replace(" ",r"\ ") for word in self.full_dictionary.keys()])
self.regexes.append((self.full_dictionary, re.compile(regex_string, flags=re.I | re.X)))
regex_string2 = " | ".join([r"\b%s\b"%word.replace(" ",r"\ ") for word in self.full_dictionary.keys()])
self.dictionary = copy.deepcopy(self.full_dictionary)
# Second, check if complete string matches a word
for str,i in d.iteritems():
for word in re.findall(r"[\w']+", str):
word = word.lower()
if word in ignore_list:
continue
if word in self.dictionary:
self.dictionary[word].append(i)
else:
self.dictionary[word] = [i]
regex_string = " | ".join([r"^%s$"%word.replace(" ",r"\ ") for word in self.dictionary.keys()])
self.regexes.append((self.dictionary, re.compile(regex_string, flags=re.I | re.X)))
# This was deemed too sensitive by a lot of people
"""
# third, check if complete thing is in string
self.regexes.append((self.full_dictionary, re.compile(regex_string2, flags=re.I | re.X)))
# fourth, check if words are found in the string
regex_string = " | ".join([r"\b%s\b"%word.replace(" ",r"\ ") for word in self.dictionary.keys()])
self.regexes.append((self.dictionary, re.compile(regex_string, flags=re.I | re.X)))
"""
self.currentwords = deque(maxlen=1)
self.current_features = [random.randint(0,999)]
self.last_read_time = 0
self.hold_subject_seconds = 60
self.display_string = ""
self.message_queue = deque(maxlen=100)
self.max_features = 2
@staticmethod
def get_cheesy_chat_message(username, words):
if len(words)==1:
return exrex.getone(EXREX_REGEX_ONE).replace("__username__", username)\
.replace("__USERNAME__", username.capitalize())\
.replace("__word__",words[0])\
.replace("__WORD__",words[0].capitalize())
elif len(words)==2:
return exrex.getone(EXREX_REGEX_TWO).replace("__username__", username)\
.replace("__USERNAME__", username.capitalize())\
.replace("__word0__",words[0])\
.replace("__WORD0__",words[0].capitalize())\
.replace("__word1__",words[1])\
.replace("__WORD1__",words[1].capitalize())
else:
wordstring = " & ".join(words)
return exrex.getone(EXREX_REGEX_MORE).replace("__username__", username)\
.replace("__USERNAME__", username.capitalize())\
.replace("__words__",wordstring)\
.replace("__WORDS__",wordstring.capitalize())
def process_the_chat(self):
display_string = self.display_string
features = self.current_features
messages = self.twitch_recieve_messages() #you always need to check for ping messages
self.message_queue.extend(messages)
# [{'username': '317070', 'message': u'test again', 'channel': '#317070'}]
if time.time() - self.last_read_time < self.hold_subject_seconds:
return features, display_string
try:
messages = list(self.message_queue)
random.shuffle(messages)
self.message_queue.clear()
#spaghetti code warning ahead
found = False
for message in messages:
queries = filter(None, [w.strip() for w in message['message'].split('+')])
total_features = []
total_correct_terms = []
for query in queries:
for dictionary, regex in self.regexes:
hits = regex.findall(query)
if hits:
print hits
correct_terms = []
features = []
words_used = []
for h in set(hits):
word = h.lower()
if any(current_feature in dictionary[word] for current_feature in self.current_features):
continue
feature = random.choice(dictionary[word])
features.append(feature)
correct_term = ""
#print self.classes[feature][0].lower()
for term in self.classes[feature][0].lower().split(','):
if word in term:
correct_term = term.strip()
break
correct_terms.append(correct_term)
words_used.append(word)
if len(features)==0:
continue
#We want at most (max_features) features
#print features, correct_terms
features, correct_terms, words_used = zip(*random.sample(zip(features, correct_terms, words_used), min(len(features), self.max_features)))
if len(words_used)>1:
if message['message'].index(words_used[1]) < message['message'].index(words_used[0]):
features = features.reverse()
correct_terms = correct_terms.reverse()
words_used = words_used.reverse()
#print regex.pattern
total_features.extend(features)
total_correct_terms.extend(correct_terms)
break
if len(total_features)==0:
continue
total_features = total_features[:2]
total_correct_terms = total_correct_terms[:2]
username = message['username']
if len(total_features)==1:
display_string = "@"+username+": "+total_correct_terms[0]
else:
display_string = " & ".join(total_correct_terms)
chat_message = ChatReader.get_cheesy_chat_message(username, total_correct_terms)
self.send_chat_message(chat_message)
self.last_read_time = time.time()
found = True
break
if not found:
return self.current_features, self.display_string
self.current_features = total_features
self.display_string = display_string
print [self.classes[feature][0] for feature in total_features]
return total_features, display_string
except:
# let the chat users not crash the entire program
self.message_queue.clear()
import traceback
import sys
print "current things:", self.display_string
print "messages", list(self.message_queue)
print(traceback.format_exc())
return features, display_string #return default and continue with work
```
#### File: 317070/Twitch-plays-LSD-neural-net/train.py
```python
import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
from time import strftime, localtime
import time
from subprocess import Popen
import sys
import os
import importlib
import warnings
import string
from glob import glob
import cPickle
import platform
import scipy.misc
from zoomingstream import ZoomingStream
from twitch import TwitchOutputStream, TwitchOutputStreamRepeater
from read_the_chat import ChatReader
import utils
# warnings.filterwarnings('ignore', '.*topo.*')
if len(sys.argv) < 2:
print "Usage: %s <config_path>"%os.path.basename(__file__)
cfg_path = "default"
else:
cfg_path = sys.argv[1]
cfg_name = cfg_path.split("/")[-1]
print "Model:", cfg_name
cfg = importlib.import_module("models.%s" % cfg_name)
expid = "%s-%s-%s" % (cfg_name, platform.node(), strftime("%Y%m%d-%H%M%S", localtime()))
print "expid:", expid
################################################################################
# BUILD & COMPILE
################################################################################
print "Building"
model = cfg.build_model()
pretrained_params = cfg.pretrained_params
nn.layers.set_all_param_values(model.out, pretrained_params)
all_layers = nn.layers.get_all_layers(model.out)
num_params = nn.layers.count_params(model.out)
print " number of parameters: %d" % num_params
print " layer output shapes:"
for layer in all_layers:
name = string.ljust(layer.__class__.__name__, 32)
print " %s %s" % (name, layer.get_output_shape(),)
x = nn.utils.shared_empty(dim=len(model.input.get_output_shape()))
x.set_value(cfg.image.astype("float32").reshape((1,)+cfg.image.shape))
interesting_features = theano.shared(np.array(range(cfg.n_classes), dtype='int32'))
interesting_features.set_value(np.array(range(cfg.n_classes), dtype='int32'))
all_params = [x,]
def l_from_network(inp, pool=1):
input_shape = x.get_value().shape
inp = inp[:,:,:input_shape[2]//pool*pool,:input_shape[3]//pool*pool]
inp = inp.reshape((inp.shape[0],
inp.shape[1],
inp.shape[2]/pool,
pool,
inp.shape[3]/pool,
pool))
inp = inp.mean(axis=(3,5))
network_output = model.to_strengthen.get_output(inp-cfg.mean_img)
output_shape = model.to_strengthen.get_output_shape()
return (-( network_output[0,interesting_features[0],output_shape[2]/2:,:]).mean() #first feature on lower half
-( network_output[0,interesting_features[1],:output_shape[2]/2,:]).mean() #second feature on upper half
+ ( network_output[0,:,:,:]).mean() #other classes should be low!
)
def l_with_meanpool_student(inp, pool=1):
w = np.load("student_prior_filters.npy").astype("float32")
w = np.transpose(w, axes=(3,2,0,1))
input_shape = x.get_value().shape
#downsample inp
inp = inp[:,:,:input_shape[2]//pool*pool,:input_shape[3]//pool*pool]
inp = inp.reshape((inp.shape[0],
inp.shape[1],
inp.shape[2]/pool,
pool,
inp.shape[3]/pool,
pool))
inp = inp.mean(axis=(3,5))
z = T.nnet.conv2d(inp - 128.0, theano.shared(w), subsample=(1,1),
border_mode="valid")
mu = theano.shared(np.load("student_prior_mean.npy").astype("float32"))
v = 0.665248
l = (z-mu.dimshuffle("x",0,"x","x"))**2
l = T.log(1. + l / v)
return l.mean()
def l_with_meanpool_gaussian(inp, pool=1):
w = np.load("prior_filters.npy").astype("float32")
w = np.transpose(w, axes=(3,2,0,1))
input_shape = x.get_value().shape
#downsample inp
inp = inp[:,:,:input_shape[2]//pool*pool,:input_shape[3]//pool*pool]
inp = inp.reshape((inp.shape[0],
inp.shape[1],
inp.shape[2]/pool,
pool,
inp.shape[3]/pool,
pool))
inp = inp.mean(axis=(3,5))
z = T.nnet.conv2d(inp - 128.0, theano.shared(w), subsample=(1,1),
border_mode="valid")
mu = theano.shared(np.load("prior_mean.npy").astype("float32"))
l = T.sqr(z-mu.dimshuffle("x",0,"x","x"))
l = T.sqr( (z-mu.dimshuffle("x",0,"x","x"))[:, :-1] )
return l.mean()
pool_sizes = [1,4,8,16,32]
l = np.float32(cfg.prior_strength) * sum([l_with_meanpool_student(x,pool=p) for p in pool_sizes]) / len(pool_sizes)
pool_sizes = [1]
n = sum([l_from_network(x,pool=p) for p in pool_sizes]) / len(pool_sizes)
train_loss = (n + l)
learning_rate = theano.shared(utils.cast_floatX(cfg.learning_rate))
if hasattr(cfg, 'build_updates'):
updates, resets = cfg.build_updates(train_loss, all_params, learning_rate)
else:
updates = nn.updates.sgd( train_loss, all_params,
learning_rate, )
resets = []
givens = {
# target_var: T.sqr(y),
model.input.input_var: x-cfg.mean_img
}
print "Compiling"
idx = T.lscalar('idx')
iter_train = theano.function([idx], [train_loss,l], givens=givens, updates=updates, on_unused_input='ignore')
compute_output = theano.function([idx], model.to_strengthen.get_output(deterministic=True), givens=givens, on_unused_input='ignore')
################################################################################
# TRAIN
################################################################################
n_updates = 0
print "image shape:", x.get_value().shape
files = glob("result/*.png")
for f in files: os.remove(f)
def normalize(img, new_min=0, new_max=255):
""" normalize numpy array """
old_min = img.min()
return 1.*(img-old_min)*(new_max-new_min)/(img.max()-old_min)+new_min
e = 0
chat_reader = ChatReader()
with ZoomingStream(zoomspeed=cfg.zoomspeed,
width=cfg.width,
height=cfg.height,
estimated_input_fps=cfg.estimated_input_fps,
fps=25) as stream:
while True:
if not e % cfg.steps_per_zoom:
features, string = chat_reader.process_the_chat()
print "features activated: "
print features
newframe = stream.send_frame( np.transpose(x.get_value()[0]/255.0,(1,2,0)), text=string )
if newframe is None:
#there is a problem. Commit harakiri
time.sleep(5)
chat_reader.send_chat_message("Faq, there is something wrong with the AI. It might be gaining consciousness. I'm rebooting just to make sure it's dead. Stream might be down for a minute or so! Don't forget to refresh, and tell the others. BRB")
sys.exit(1)
features = (list(features)*2)[:2]
interesting_features.set_value(np.array(features, dtype='int32'))
#interesting_features_one_hot.set_value( np.eye(cfg.n_classes, dtype='float32')[features].T )
x.set_value(np.transpose(255*newframe, (2,0,1)).astype("float32").reshape((1,)+cfg.image.shape))
for reset in resets:
reset[0].set_value(reset[1])
#img = np.round(np.clip(img,0.1,254.9))
#scipy.misc.imsave('result/result%s.png'%(str(e).zfill(4),), img.astype("uint8"))
loss, l = iter_train(0)
x_val = x.get_value()
x.set_value(np.clip(x_val, 0.0, 255.0))
print e, loss, ((x.get_value()-cfg.image)**2).mean(), l
e+=1
```
#### File: 317070/Twitch-plays-LSD-neural-net/utils.py
```python
import theano
import os
import errno
import numpy as np
import cPickle
floatX = theano.config.floatX
cast_floatX = np.float32 if floatX=="float32" else np.float64
def save_pkl(obj, path, protocol=cPickle.HIGHEST_PROTOCOL):
with file(path, 'wb') as f:
cPickle.dump(obj, f, protocol=protocol)
def load_pkl(path):
with file(path, 'rb') as f:
obj = cPickle.load(f)
return obj
def resample_list(list_, size):
orig_size = len(list_)
ofs = orig_size//size//2
delta = orig_size/float(size)
return [ list_[ofs + int(i * delta)] for i in range(size) ]
def resample_arr(arr, size):
orig_size = arr.shape[0]
ofs = orig_size//size//2
delta = orig_size/float(size)
idxs = [ofs + int(i * delta) for i in range(size)]
return arr[idxs]
def asarrayX(value):
return theano._asarray(value, dtype=theano.config.floatX)
def one_hot(vec, m=None):
if m is None: m = int(np.max(vec)) + 1
return np.eye(m)[vec]
def make_sure_path_exists(path):
"""Try to create the directory, but if it already exist we ignore the error"""
try: os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST: raise
def shared_mmp(data=None, file_name="shm", shape=(0,), dtype=floatX):
""" Shared memory, only works for linux """
if not data is None: shape = data.shape
path = "/dev/shm/lio/"
make_sure_path_exists(path)
mmp = np.memmap(path+file_name+".mmp", dtype=dtype, mode='w+', shape=shape)
if not data is None: mmp[:] = data
return mmp
def open_shared_mmp(filename, shape=None, dtype=floatX):
path = "/dev/shm/lio/"
return np.memmap(path+filename+".mmp", dtype=dtype, mode='r', shape=shape)
def normalize_zmuv(x, axis=0, epsilon=1e-9):
""" Zero Mean Unit Variance Normalization"""
mean = x.mean(axis=axis)
std = np.sqrt(x.var(axis=axis) + epsilon)
return (x - mean[np.newaxis,:]) / std[np.newaxis,:]
class struct:
def __init__(self, **entries):
self.__dict__.update(entries)
def __repr__(self):
return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for
(k, v) in self.__dict__.iteritems()))
def keys(self):
return self.__dict__.keys()
``` |
{
"source": "3191110276/APIC-EM-Spark-Bot",
"score": 2
} |
#### File: APIC-EM-Spark-Bot/intents/F_get_available_ports.py
```python
from connectors import apicem
from collections import Counter
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import os
import re
def main(parameters):
'''Returns a list of 'free' ports - using ports that are currently not up in APIC-EM'''
location_req = parameters['location']
role_req = parameters['type']
output_structure = {}
all_locations = apicem.get_location()
location_available = False
if len(all_locations) > 0:
for location in all_locations:
if location_req == location['locationName']:
location_available = True
else:
if location_req != 'Any' and location_req != 'any':
answer = {
'text': 'No locations have been defined! Please either define a location in the controller, or use \"Any\" for the location.',
'markdown': 'No locations have been defined! Please either define a location in the controller, or use \"Any\" for the location.',
'file': None
}
return answer
if location_available == False:
if location_req != 'Any' and location_req != 'any':
answer = {
'text': 'No locations with this name has been defined! Please either define a location in the controller, or use \"Any\" for the location.',
'markdown': 'No locations with this name has been defined! Please either define a location in the controller, or use \"Any\" for the location.',
'file': None
}
return answer
nw_devices = apicem.get_network_device()
for device in nw_devices:
output_structure[device['id']] = {
'series': device['series'],
'lineCardCount': device['lineCardCount'],
'interfaceCount': device['interfaceCount'],
'family': device['family'],
'hostname': device['hostname'],
'roleSource': device['roleSource'],
'platformId': device['platformId'],
'role': device['role'],
'location': device['location'],
'type': device['type'],
'lineCardId': device['lineCardId'],
'locationName': device['locationName'],
'available_ports': 0,
'port_list': [],
'port_type_count': None
}
#Add ports to the devices
ports = apicem.get_interface()
for port in ports:
if port['status'] != 'up' and port['interfaceType'] == 'Physical':
output_structure[port['deviceId']]['available_ports'] += 1
output_structure[port['deviceId']]['port_list'].append(port['portName'])
#Remove devices that do not have available ports
for k in output_structure.keys():
if output_structure[k]['available_ports'] == 0:
del output_structure[k]
#Remove devices that do not fit the role requirement
for k in output_structure.keys():
if output_structure[k]['role'] != role_req:
del output_structure[k]
#Remove devices that do not fit the location requirement
if location_req != 'Any' and location_req != 'any':
for k in output_structure.keys():
if output_structure[k]['locationName'] != location_req:
del output_structure[k]
for device in output_structure:
portslist = []
for port in output_structure[device]['port_list']:
m = re.search("\d", port)
if m:
portslist.append(port[:m.start()])
output_structure[device]['port_type_count'] = Counter(portslist)
main_dir = os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
font_path = os.path.join(main_dir, 'resources', 'CiscoSansTTExtraLight.ttf')
switch_pic_path = os.path.join(main_dir, 'resources', 'switch_ports.png')
router_pic_path = os.path.join(main_dir, 'resources', 'router_ports.png')
small_font = ImageFont.truetype(font_path, 45)
normal_font = ImageFont.truetype(font_path, 50)
large_font = ImageFont.truetype(font_path, 115)
increment = 0
for device in output_structure:
if output_structure[device]['role'] == 'BORDER ROUTER':
img = Image.open(router_pic_path)
else:
img = Image.open(switch_pic_path)
draw = ImageDraw.Draw(img)
ports_overview = []
for ptype in output_structure[device]['port_type_count']:
ports_overview.append(ptype + ': ' + str(output_structure[device]['port_type_count'][ptype]))
device_name = output_structure[device]['series']
if device_name[-8:] == 'Switches':
device_name = device_name[:-8]
draw.multiline_text((265, 40), str(output_structure[device]['available_ports']), (0, 0, 0), font=large_font, align="left")
draw.multiline_text((38, 200), device_name, (0, 0, 0), font=normal_font, align="left")
for i in range(len(ports_overview)):
draw.multiline_text((680, 25+50*i), ports_overview[i], (0, 0, 0), font=small_font, align="left")
img.save('port_counts_part' + str(increment) + '.png')
increment += 1
if output_structure == {}:
text = 'I could not find a device that fits your requirements.'
answer = {
'text': text,
'markdown': text,
'file': None
}
else:
#Image stitching
img_height = 297 * increment
background_image = Image.new('RGB', (1289, img_height))
for i in range(increment):
background_image.paste(Image.open('port_counts_part' + str(i) + '.png'), (0, 297*i))
background_image.save('port_counts.png')
send_image = open('port_counts.png', 'rb')
text = 'Please check manually on the device to verify that nothing is plugged into the ports'
answer = {
'text': text,
'markdown': text,
'file': send_image
}
return answer
``` |
{
"source": "320011/core",
"score": 2
} |
#### File: accounts/templatetags/css.py
```python
from django import template
register = template.Library()
@register.filter(name='addclass')
def addclass(field, c=None):
return field.as_widget(attrs={"class": c})
@register.filter(name='addbootstrapstyle')
def addbootstrapstyle(field, placeholder=None):
if placeholder is not None:
return field.as_widget(attrs={"class": "form-control textarea-sm", "placeholder": placeholder})
else:
return field.as_widget(attrs={"class": "form-control textarea-sm"})
@register.filter(name='addph')
def addph(field, ph=None):
return field.as_widget(attrs={"placeholder": ph})
```
#### File: management/commands/seed_data.py
```python
from django.core.management.base import BaseCommand
from django.db.models.base import ObjectDoesNotExist
import sys
sys.path.append("....")
from case_study.models import Tag, TagRelationship, CaseStudy, MedicalHistory, Medication, Attempt, Comment, Other, Question
from accounts.models import User
class Command(BaseCommand):
help = 'Create or delete seed data'
def add_arguments(self, parser):
parser.add_argument('action', type=str, help='Create or delete seed data')
parser.add_argument('-a', '--action', type=str, help='Create or delete seed data', )
def handle(self, *args, **kwargs):
# Get action argument
action = kwargs['action']
if action == "create":
print("----- Create seed data start -----")
if action == "delete":
print("----- Delete seed data start -----")
if action == "delete":
try:
user = User.objects.all().delete()
print("Users have been deleted")
except:
print("Users have already been delete")
try:
question = Question.objects.all().delete()
print("Questions have been deleted")
except:
print("Questions have already been deleted")
try:
case = CaseStudy.objects.all().delete()
print("Case Studies have been deleted")
except:
print("Case Studies have already been deleted")
try:
tag_relationship = TagRelationship.objects.all().delete()
print("Tag relationships have been deleted")
except:
print("Tag relationships have already been deleted")
try:
question = Tag.objects.all().delete()
print("Tags have been deleted")
except:
print("Tags have been already deleted")
try:
medical_history = MedicalHistory.objects.all().delete()
print("Medical history have been deleted")
except:
print("Medical history have already been deleted")
try:
medication = Medication.objects.all().delete()
print("Medication have been deleted")
except:
print("Medication have already been deleted")
try:
other = Other.objects.all().delete()
print("Other have been deleted")
except:
print("Other have already been deleted")
try:
comment = Comment.objects.all().delete()
print("Comment have been deleted")
except:
print("Comment have already been deleted")
try:
attempt = Attempt.objects.all().delete()
print("Attempt have been deleted")
except:
print("Attempt have already been deleted")
test_users = [
{
"id" : 1 ,
"firstName" : "David" ,
"lastName" : "D",
"email" : "<EMAIL>",
"password" : "<PASSWORD>!",
"is_staff" : False,
"commencement_year" : 2018,
},
{
"id" : 2 ,
"firstName" : "Jess" ,
"lastName" : "J",
"email" : "<EMAIL>",
"password" : "<PASSWORD>!",
"is_staff" : False,
"commencement_year" : 2019,
},
{
"id" : 3,
"firstName" : "Bobby" ,
"lastName" : "B",
"email" : "<EMAIL>",
"password" : "<PASSWORD>!",
"is_staff" : False,
"commencement_year" : 2018,
},
{
"id" : 4,
"firstName" : "Sam" ,
"lastName" : "S",
"email" : "<EMAIL>",
"password" : "<PASSWORD>!",
"is_staff" : False,
"commencement_year" : 2018,
}
,
{
"id" : 5,
"firstName" : "Sally" ,
"lastName" : "S",
"email" : "<EMAIL>",
"password" : "<PASSWORD>!",
"is_staff" : False,
"commencement_year" : 2018,
},
{
"id" : 6,
"firstName" : "AdminTest" ,
"lastName" : "AdminTest",
"email" : "<EMAIL>",
"password" : "<PASSWORD>!",
"is_staff" : True,
"commencement_year" : 2018,
}
]
if action == "create":
for test_user in test_users:
try:
user = User.objects.get(email=test_user["email"])
print(test_user["email"] + " User has already been created")
except ObjectDoesNotExist:
user = User.objects.create(email=test_user["email"])
user.first_name = test_user["firstName"]
user.last_name = test_user["lastName"]
user.is_active = True
user.university = "UWA"
user.degree_commencement_year = test_user["commencement_year"]
user.set_password(test_user["password"])
user.is_staff = test_user["is_staff"]
user.save()
print("Created user " + test_user["email"] )
# test questions
test_questions = [
{
"id":1,
"body": "What should you tell the patient?"
},
{
"id":2,
"body": "What dosage should you provide?"
},
{
"id":3,
"body": "Who should you refer this patient to?"
},
]
if action == "create":
for test_question in test_questions:
try:
question = Question.objects.get(body=test_question["body"])
print("Question: '" + test_question["body"] + "' has already been created")
except ObjectDoesNotExist:
question = Question.objects.create(body=test_question["body"])
print("Question: '" + test_question["body"] + "' has been created")
# test tags
test_tags = [
{
"id":1,
"name": "Fever"
},
{
"id":2,
"name": "Cold"
},
{
"id":3,
"name": "Rash"
},
]
if action == "create":
for test_tag in test_tags:
try:
tag = Tag.objects.get(name=test_tag["name"])
print("Tag: '" + test_tag["name"] + "' has already been created")
except ObjectDoesNotExist:
question = Tag.objects.create(name=test_tag["name"])
print("Tag: '" + test_tag["name"] + "' has been created")
# test cases
test_cases = [
{
"id":1,
"created_by": "<EMAIL>",
"case_state": "P",
"height" : 180,
"weight" : 60,
"scr" : 1.5,
"age_type" : "Y",
"age" : 540,
"sex" : "M",
"description" : "with a bleeding knee and cuts to arms.",
"question" : "What dosage should you provide?",
"answer_a" : "Nothing",
"answer_b" : "I can't help you",
"answer_c" : "Go to a hospital",
"answer_d" : "Here's some medication for your cuts",
"answer" : "D",
"feedback" : "Be helpful",
"date_created" : "2019-09-01T08:20:30",
},
{
"id":2,
"created_by": "<EMAIL>",
"case_state": "P",
"height" : 160,
"weight" : 60,
"scr" : 2.0,
"age_type" : "Y",
"age" : 360,
"sex" : "F",
"description" : "complaining pains to her belly.",
"question" : "What should you tell the patient?",
"answer_a" : "Nothing",
"answer_b" : "I can't help you",
"answer_c" : "Go to a hospital",
"answer_d" : "Here's some medication for your tummy",
"answer" : "D",
"feedback" : "Be helpful",
"date_created" : "2019-08-30T15:20:30",
},
{
"id":3,
"created_by": "<EMAIL>",
"case_state": "P",
"height" : 190,
"weight" : 50,
"scr" : 2.0,
"age_type" : "Y",
"age" : 204,
"sex" : "F",
"description" : "complaining feeling faint.",
"question" : "Who should you refer this patient to?",
"answer_a" : "Nothing",
"answer_b" : "I can't help you",
"answer_c" : "Go to a hospital",
"answer_d" : "Here's some medication for your faintness",
"answer" : "D",
"feedback" : "Patient is underweight",
"date_created" : "2019-08-25T10:20:30",
},
]
if action == "create":
for test_case in test_cases:
try:
case = CaseStudy.objects.get(description=test_case["description"])
print(test_case["description"] + " case has already been created")
except ObjectDoesNotExist:
case = CaseStudy.objects.create(description=test_case["description"])
case.created_by = User.objects.get(email=test_case["created_by"])
case.case_state = test_case["case_state"]
case.height = test_case["height"]
case.weight = test_case["weight"]
case.scr = test_case["scr"]
case.age = test_case["age"]
case.sex = test_case["sex"]
case.description = test_case["description"]
question = Question.objects.get(body=test_case["question"])
case.question = question
case.answer_a = test_case["answer_a"]
case.answer_b = test_case["answer_b"]
case.answer_c = test_case["answer_c"]
case.answer_d = test_case["answer_d"]
case.answer = test_case["answer"]
case.feedback = test_case["feedback"]
case.date_created = test_case["date_created"]
case.save()
print("Created case " + test_case["description"] )
# test test_tag_relationships
test_tag_relationships = [
{
"id":1,
"tag": "Fever",
"case": "complaining feeling faint."
},
{
"id":2,
"tag": "Cold",
"case": "complaining feeling faint."
},
{
"id":3,
"tag": "Rash",
"case": "with a bleeding knee and cuts to arms."
},
{
"id":4,
"tag": "Cold",
"case": "complaining pains to her belly."
},
]
if action == "create":
for test_tag_relationship in test_tag_relationships:
try:
tag = Tag.objects.get(name=test_tag_relationship["tag"])
case = CaseStudy.objects.get(description=test_tag_relationship["case"])
tag_relationship = TagRelationship.objects.get(tag=tag, case_study=case)
print("Tag relationship: " + test_tag_relationship["tag"] + " with '" + test_tag_relationship["case"] + "' has already been created")
except ObjectDoesNotExist:
tag = Tag.objects.get(name=test_tag_relationship["tag"])
case = CaseStudy.objects.get(description=test_tag_relationship["case"])
tag_relationship = TagRelationship.objects.create(tag=tag, case_study=case)
print("Tag relationship: " + test_tag_relationship["tag"] + " with '" + test_tag_relationship["case"] + "' has been created")
# test medical history
test_medical_historys = [
{
"body" : "Family history of Huntington's diesease",
"case" : "complaining feeling faint."
},
{
"body" : "Asthma",
"case" : "with a bleeding knee and cuts to arms."
},
{
"body" : "Dislocated Shoulder",
"case" : "with a bleeding knee and cuts to arms."
},
{
"body" : "Asthma",
"case" : "complaining pains to her belly."
},
]
if action == "create":
for test_medical_history in test_medical_historys:
try:
case = CaseStudy.objects.get(description=test_medical_history["case"])
medical_history = MedicalHistory.objects.get(body=test_medical_history["body"], case_study=case)
print("Medical history: " + test_medical_history["body"] + " with '" + test_medical_history["case"] + "' has already been created")
except ObjectDoesNotExist:
case = CaseStudy.objects.get(description=test_medical_history["case"])
medical_history = MedicalHistory.objects.create(body=test_medical_history["body"], case_study=case)
print("Medical history: " + test_medical_history["body"] + " with '" + test_medical_history["case"] + "' has been created")
# test medications
test_medications = [
{
"name" : "<NAME>",
"case" : "complaining feeling faint."
},
{
"name" : "<NAME>",
"case" : "with a bleeding knee and cuts to arms."
},
{
"name" : "<NAME>",
"case" : "complaining pains to her belly."
},
{
"name" : "<NAME>",
"case" : "complaining pains to her belly."
},
]
if action == "create":
for test_medication in test_medications:
try:
case = CaseStudy.objects.get(description=test_medication["case"])
medication = Medication.objects.get(name=test_medication["name"], case_study=case)
print("Medication: " + test_medication["name"] + " with '" + test_medication["case"] + "' has already been created")
except ObjectDoesNotExist:
case = CaseStudy.objects.get(description=test_medication["case"])
medication = Medication.objects.create(name=test_medication["name"], case_study=case)
print("Medication: " + test_medication["name"] + " with '" + test_medication["case"] + "' has been created")
# test other
test_others = [
{
"other_body" : "Currently seeing a specialist",
"case" : "complaining feeling faint."
},
]
if action == "create":
for test_other in test_others:
try:
case = CaseStudy.objects.get(description=test_other["case"])
other = Other.objects.get(other_body=test_other["other_body"], case_study=case)
print("Other: " + test_other["other_body"] + " with '" + test_other["case"] + "' has already been created")
except ObjectDoesNotExist:
case = CaseStudy.objects.get(description=test_other["case"])
other = Other.objects.create(other_body=test_other["other_body"], case_study=case)
print("Other: " + test_other["other_body"] + " with '" + test_other["case"] + "' has been created")
# test comments
test_comments = [
{
"comment" : "Why is this answer D?",
"case" : "complaining feeling faint.",
"user" : "<EMAIL>",
"comment_date": "2019-09-01T13:20:30",
},
{
"comment" : "Because the patient has a history of huntington's diesease",
"case" : "complaining feeling faint.",
"user" : "<EMAIL>",
"comment_date": "2019-09-01T13:20:45",
},
]
if action == "create":
for test_comment in test_comments:
try:
case = CaseStudy.objects.get(description=test_comment["case"])
user = User.objects.get(email=test_comment["user"])
comment = Comment.objects.get(comment=test_comment["comment"], case_study=case, user=user, comment_date=test_comment["comment_date"])
print("Comment by: " + test_comment["user"] + "has already been created")
except ObjectDoesNotExist:
case = CaseStudy.objects.get(description=test_comment["case"])
user = User.objects.get(email=test_comment["user"])
comment = Comment.objects.create(comment=test_comment["comment"], case_study=case, user=user, comment_date=test_comment["comment_date"])
print("Comment by: " + test_comment["user"] + "has been created")
# test attempts
test_attempts = [
{
"id":1,
"user_answer": "A",
"case": "complaining feeling faint.",
"user": "<EMAIL>",
"attempt_date": "2019-09-01T09:20:30",
},
{
"id":2,
"user_answer": "B",
"case": "complaining feeling faint.",
"user": "<EMAIL>",
"attempt_date": "2019-09-02T09:20:30",
},
{
"id":3,
"user_answer": "C",
"case": "complaining feeling faint.",
"user": "<EMAIL>",
"attempt_date": "2019-09-03T10:20:30",
},
{
"id":4,
"user_answer": "D",
"case": "complaining feeling faint.",
"user": "<EMAIL>",
"attempt_date": "2019-09-04T11:20:30",
},
{
"id":5,
"user_answer": "D",
"case": "complaining feeling faint.",
"user": "<EMAIL>",
"attempt_date": "2019-09-02T12:20:30",
},
{
"id":6,
"user_answer": "D",
"case": "complaining feeling faint.",
"user": "<EMAIL>",
"attempt_date": "2019-09-03T16:20:30",
},
{
"id":7,
"user_answer": "C",
"case": "complaining feeling faint.",
"user": "<EMAIL>",
"attempt_date": "2019-09-11T20:20:30",
},
{
"id":8,
"user_answer": "D",
"case": "complaining feeling faint.",
"user": "<EMAIL>",
"attempt_date": "2019-09-15T21:20:30",
},
{
"id":9,
"user_answer": "A",
"case": "with a bleeding knee and cuts to arms.",
"user": "<EMAIL>",
"attempt_date": "2019-09-09T14:20:30",
},
{
"id":10,
"user_answer": "B",
"case": "with a bleeding knee and cuts to arms.",
"user": "<EMAIL>",
"attempt_date": "2019-09-17T15:20:30",
},
{
"id":11,
"user_answer": "C",
"case": "with a bleeding knee and cuts to arms.",
"user": "<EMAIL>",
"attempt_date": "2019-09-20T14:20:30",
},
{
"id":12,
"user_answer": "D",
"case": "with a bleeding knee and cuts to arms.",
"user": "<EMAIL>",
"attempt_date": "2019-09-15T13:20:30",
},
{
"id":13,
"user_answer": "C",
"case": "with a bleeding knee and cuts to arms.",
"user": "<EMAIL>",
"attempt_date": "2019-09-07T20:20:30",
},
{
"id":14,
"user_answer": "D",
"case": "with a bleeding knee and cuts to arms.",
"user": "<EMAIL>",
"attempt_date": "2019-09-08T10:20:30",
},
{
"id":15,
"user_answer": "A",
"case": "with a bleeding knee and cuts to arms.",
"user": "<EMAIL>",
"attempt_date": "2019-09-10T11:20:30",
},
{
"id":16,
"user_answer": "D",
"case": "with a bleeding knee and cuts to arms.",
"user": "<EMAIL>",
"attempt_date": "2019-09-11T12:20:30",
},
{
"id":17,
"user_answer": "D",
"case": "complaining pains to her belly.",
"user": "<EMAIL>",
"attempt_date": "2019-09-14T14:20:30",
},
{
"id":18,
"user_answer": "D",
"case": "complaining pains to her belly.",
"user": "<EMAIL>",
"attempt_date": "2019-09-12T15:20:30",
},
{
"id":19,
"user_answer": "D",
"case": "complaining pains to her belly.",
"user": "<EMAIL>",
"attempt_date": "2019-09-03T09:20:30",
},
{
"id":20,
"user_answer": "A",
"case": "complaining pains to her belly.",
"user": "<EMAIL>",
"attempt_date": "2019-09-06T13:20:30",
},
]
if action == "create":
for test_attempt in test_attempts:
try:
user = User.objects.get(email=test_attempt["user"])
case = CaseStudy.objects.get(description=test_attempt["case"])
attempt = Attempt.objects.get(user_answer=test_attempt["user_answer"],case_study=case,user=user,attempt_date=test_attempt["attempt_date"])
print("Attempt by: " + test_attempt["user"] + " for case '" + test_attempt["case"] + "' has already been created")
except ObjectDoesNotExist:
user = User.objects.get(email=test_attempt["user"])
case = CaseStudy.objects.get(description=test_attempt["case"])
attempt = Attempt.objects.create(user_answer=test_attempt["user_answer"],case_study=case,user=user,attempt_date=test_attempt["attempt_date"])
print("Attempt by: " + test_attempt["user"] + " for case '" + test_attempt["case"] + "' has been created")
if action == "create":
print("----- Create seed data complete -----")
if action == "delete":
print("----- Delete seed data complete -----")
```
#### File: case_admin/templatetags/dict.py
```python
from django import template
register = template.Library()
@register.filter(name='get_item')
def get_item(d, k):
return d.get(k)
```
#### File: core/case_admin/tests.py
```python
from django.test import TestCase
from case_study.models import *
from accounts.models import *
from case_admin.views.common import *
from case_admin.views import tag
# Create your tests here.
class AdminTestCase(TestCase):
def setUp(self):
Tag.objects.create(pk=1, name="test_tag")
CaseStudy.objects.create(pk=1, case_state=CaseStudy.STATE_REVIEW)
User.objects.create(is_active=False)
def test_populate_data(self):
data = populate_data(tag.schema_tag, Tag.objects.all())
self.assertEqual(len(data["entities"]), 1)
good_data = {
'endpoint': '/caseadmin/tags/',
'entities': [
[
{
'entity': 1,
'key': 'name',
'title': 'Tag',
'value': 'test_tag',
'widget': {'maxlength': 60, 'template': 'w-text.html'},
'write': True
}
],
]
}
self.assertEqual(data, good_data)
def test_patch_model(self):
class TR:
pass
req = TR()
setattr(req, "body", "{\"name\": \"new_tag\"}")
patch_model(req, Tag, tag.schema_tag, 1)
t = Tag.objects.all().first()
self.assertEqual(t.name, "new_tag")
def test_delete_model_hard(self):
class TR:
pass
req = TR()
setattr(req, "body", "{\"hard\": true}")
self.assertEqual(CaseStudy.objects.all().count(), 1)
delete_model(req, CaseStudy, 1)
self.assertEqual(CaseStudy.objects.all().count(), 0)
def test_get_badge_counts(self):
bc = get_badge_counts()
good_bc = {
"total": 2,
"users": 1,
"cases": 1,
"questions": 0,
"comments": 0,
"tags": 0
}
self.assertEqual(bc, good_bc)
```
#### File: core/case_study/tests.py
```python
from django.test import TestCase
from .models import *
from accounts.models import User
# Unit tests for CaseStudy model
class CaseStudyTestCase(TestCase):
def setUp(self):
User.objects.create(email="<EMAIL>")
CaseStudy.objects.create(age=24, answer_a="a", answer_b="b", answer_c="c", answer_d="d", answer=CaseStudy.ANSWER_D, height=175, weight=70, scr=3)
CaseStudy.objects.create(age=24, sex=CaseStudy.FEMALE, age_type=CaseStudy.MONTHS, answer_a="a", answer_b="b",
answer_c="c", answer_d="d", answer=CaseStudy.ANSWER_C, height=175, weight=70)
Attempt.objects.create(case_study_id=1, user_answer=CaseStudy.ANSWER_A, user_id=1)
Attempt.objects.create(case_study_id=1, user_answer=CaseStudy.ANSWER_D, user_id=1)
Attempt.objects.create(case_study_id=2, user_answer=CaseStudy.ANSWER_C, user_id=1)
def test_age_string(self):
first_case = CaseStudy.objects.get(pk=1)
second_case = CaseStudy.objects.get(pk=2)
self.assertEquals(first_case.get_age_string(), "2-yo")
self.assertEquals(second_case.get_age_string(), "24-mo")
def test_age_in_words(self):
first_case = CaseStudy.objects.get(pk=1)
second_case = CaseStudy.objects.get(pk=2)
self.assertEquals(first_case.get_age_in_words(), "two")
self.assertEquals(second_case.get_age_in_words(), "twenty-four")
def test_sex(self):
first_case = CaseStudy.objects.get(pk=1)
second_case = CaseStudy.objects.get(pk=2)
self.assertEquals(first_case.get_sex(), "male")
self.assertEquals(second_case.get_sex(), "female")
def test_optionals(self):
first_case = CaseStudy.objects.get(pk=1)
second_case = CaseStudy.objects.get(pk=2)
self.assertEquals(first_case.get_optionals(), "[175cm/70.0kg/3.0μmol/L SCr]")
self.assertEquals(second_case.get_optionals(), "[175cm/70.0kg]")
def test_answer_from_character(self):
first_case = CaseStudy.objects.get(pk=1)
second_case = CaseStudy.objects.get(pk=2)
self.assertEquals(first_case.get_answer_from_character("A"), "a")
self.assertEquals(second_case.get_answer_from_character("B"), "b")
def test_average_score(self):
first_case = CaseStudy.objects.get(pk=1)
second_case = CaseStudy.objects.get(pk=2)
self.assertEquals(first_case.get_average_score(), 50.0)
self.assertEquals(second_case.get_average_score(), 100.0)
# Unit tests for Playlist model
class PlaylistTestCase(TestCase):
def setUp(self):
User.objects.create(email="<EMAIL>")
Playlist.objects.create(current_position=0, case_list="3,5,7,1", owner_id=1)
Playlist.objects.create(current_position=3, case_list="8,5,7,4", owner_id=1)
def test_current_case(self):
first_playlist = Playlist.objects.get(pk=1)
second_playlist = Playlist.objects.get(pk=2)
self.assertEquals(first_playlist.current_case(), 3)
self.assertEquals(second_playlist.current_case(), 4)
def test_next_case(self):
first_playlist = Playlist.objects.get(pk=1)
second_playlist = Playlist.objects.get(pk=2)
self.assertEquals(first_playlist.next_case(), 5)
self.assertEquals(second_playlist.next_case(), None)
def test_previous_case(self):
first_playlist = Playlist.objects.get(pk=1)
second_playlist = Playlist.objects.get(pk=2)
self.assertEquals(first_playlist.previous_case(), None)
self.assertEquals(second_playlist.previous_case(), 7)
```
#### File: case_study/views/create_case.py
```python
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core import serializers
from django.http import HttpResponseRedirect, JsonResponse, HttpResponseNotFound
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.utils import timezone
from datetime import datetime, timedelta
from ..forms import CaseStudyForm, CaseStudyTagForm, MedicalHistoryForm, MedicationForm, OtherForm # , CaseTagForm
from ..models import Tag, TagRelationship, CaseStudy, MedicalHistory, Medication, Attempt, Comment, Other, Question
from django.db.models import Q
@login_required
def start_new_case(request):
draft_case_count = CaseStudy.objects.filter(created_by=request.user, case_state=CaseStudy.STATE_DRAFT).count()
c = {
"unsubmitted_count": draft_case_count
}
if draft_case_count == 0 or request.POST.get("create_new_case", False) == "true":
case = CaseStudy.objects.create(created_by=request.user)
return HttpResponseRedirect(
reverse("cases:create-new-case", kwargs={"case_study_id": case.id}))
else:
return render(request, "create_case_landing.html", c)
@login_required
def unsubmitted_cases(request):
draft_cases = CaseStudy.objects.filter(created_by=request.user, case_state=CaseStudy.STATE_DRAFT).order_by("-date_created")
c = {
"unsubmitted_cases": draft_cases
}
return render(request, "draft_cases.html", c)
@login_required
def delete_unsubmitted_case(request):
case_id = request.GET.get('id', None)
case_study = get_object_or_404(CaseStudy, pk=case_id)
case_study.delete()
success = True if not case_study.id else False
data = {
'success': success
}
return JsonResponse(data)
@login_required
def create_new_case(request, case_study_id):
# returns object (case_study), and boolean specifying whether an object was created
case_study = get_object_or_404(CaseStudy, pk=case_study_id, case_state=CaseStudy.STATE_DRAFT, created_by=request.user)
# case has been submitted or pending review so it cannot be accessed again
if case_study.case_state != CaseStudy.STATE_DRAFT:
return HttpResponseNotFound()
# return HttpResponseRedirect(reverse('cases:view-case', args=[case_study.id]))
relevant_tags = TagRelationship.objects.filter(case_study=case_study) # return Tags for that case_study
all_tags = Tag.objects.all()
medical_histories = MedicalHistory.objects.filter(case_study=case_study)
medications = Medication.objects.filter(case_study=case_study)
others = Other.objects.filter(case_study=case_study)
# Check if the choice was in years format, if yes, integer division by 12.
if case_study.age:
if case_study.age_type == 'Y':
case_study.age = case_study.age // 12
if request.method == "POST":
# Fixes mutable error
request.POST = request.POST.copy()
# obtain forms with fields populated from POST request
case_study_form = CaseStudyForm(request.POST, instance=case_study)
# -- Medical history --
medical_histories = list(MedicalHistory.objects.filter(case_study=case_study).values_list("body", flat=True))
medical_history_list = request.POST.getlist("medical-history-list")
# Create new ones
for medical_history in medical_history_list:
if medical_history not in medical_histories:
MedicalHistory.objects.create(body=medical_history, case_study=case_study)
medical_histories = list(MedicalHistory.objects.filter(case_study=case_study).values_list("body", flat=True))
# Delete ones that are removed
for medical_history in medical_histories:
if medical_history not in medical_history_list:
MedicalHistory.objects.filter(body=medical_history, case_study=case_study).delete()
# Obtain updated list of medical histories
medical_histories = MedicalHistory.objects.filter(case_study=case_study)
# -- Medication --
medications = list(Medication.objects.filter(case_study=case_study).values_list("name", flat=True))
medication_list = request.POST.getlist("medication-list")
# Create new ones
for medication in medication_list:
if medication not in medications:
Medication.objects.create(name=medication, case_study=case_study)
medications = list(Medication.objects.filter(case_study=case_study).values_list("name", flat=True))
# Delete ones that are removed
for medication in medications:
if medication not in medication_list:
Medication.objects.filter(name=medication, case_study=case_study).delete()
# Obtain updated list of medical histories
medications = Medication.objects.filter(case_study=case_study)
# -- Other --
others = list(Other.objects.filter(case_study=case_study).values_list("other_body", flat=True))
other_list = request.POST.getlist("other-list")
# Create new ones
for other in other_list:
if other not in others:
Other.objects.create(other_body=other, case_study=case_study)
others = list(Other.objects.filter(case_study=case_study).values_list("other_body", flat=True))
# Delete ones that are removed
for other in others:
if other not in other_list:
Other.objects.filter(other_body=other, case_study=case_study).delete()
# Obtain updated list of medical histories
others = Other.objects.filter(case_study=case_study)
# -- Tag --
relevant_tags = TagRelationship.objects.filter(case_study=case_study)
tag_list = request.POST.getlist("tag-list")
# Create new ones
for tag in tag_list:
tag_object = get_object_or_404(Tag, pk=tag)
if not TagRelationship.objects.filter(tag=tag_object, case_study=case_study).exists():
TagRelationship.objects.create(tag=tag_object, case_study=case_study)
relevant_tag_ids = TagRelationship.objects.filter(case_study=case_study).values_list("tag",flat=True)
relevant_tags = []
for relevant_tag in relevant_tag_ids:
relevant_tags.append(relevant_tag)
if request.POST["submission_type"] == "save":
# Checking for the type on submission, if years, store the value as months
if request.POST['age_type'] == 'Y' and request.POST['age'] != '':
request.POST['age'] = int(request.POST['age']) * 12
if case_study_form.is_valid():
case_study_form.save()
# When page is re rendered, the value from the database is taken, so if years, render the correct value
if request.POST['age_type'] == 'Y' and request.POST['age'] != '':
request.POST['age'] = int(request.POST['age']) // 12
case_study_form = CaseStudyForm(request.POST, instance=case_study)
messages.success(request, 'Case Study saved!')
return render(request, "create_new_case.html",
{
"case_study_form": case_study_form,
"relevant_tags": relevant_tags,
"all_tags": all_tags,
"medical_histories": medical_histories,
"medications": medications,
"others": others,
"case_study": case_study,
})
elif request.POST["submission_type"] == "submit":
if request.POST['age_type'] == 'Y':
age_raw = request.POST['age']
if age_raw:
request.POST['age'] = int(age_raw) * 12
else:
request.POST['age'] = None
if case_study_form.is_valid():
case_study_form = case_study_form.save(commit=False)
case_study_form.case_state = CaseStudy.STATE_REVIEW
case_study_form.save()
messages.success(request, 'Case submitted for review. '
'An admin will review your case before it is made public.')
return HttpResponseRedirect(reverse('default'))
else:
if request.POST['age_type'] == 'Y':
age_raw = request.POST['age']
if age_raw:
request.POST['age'] = int(age_raw) // 12
else:
request.POST['age'] = None
case_study_form = CaseStudyForm(request.POST, instance=case_study)
return render(request, "create_new_case.html",
{
"case_study_form": case_study_form,
"relevant_tags": relevant_tags,
"all_tags": all_tags,
"medical_histories": medical_histories,
"medications": medications,
"others": others,
"case_study":case_study,
})
else:
return render(request, "create_new_case.html",
{
"case_study_form": case_study_form,
"relevant_tags": relevant_tags,
"all_tags": all_tags,
"medical_histories": medical_histories,
"medications": medications,
"others": others,
"case_study":case_study,
})
else:
case_study_form = CaseStudyForm(instance=case_study)
return render(request, "create_new_case.html",
{
"case_study_form": case_study_form,
"relevant_tags": relevant_tags,
"all_tags": all_tags,
"medical_histories": medical_histories,
"medications": medications,
"others": others,
"case_study":case_study,
})
``` |
{
"source": "320873791/yaohaoqi",
"score": 3
} |
#### File: 320873791/yaohaoqi/data.py
```python
from openpyxl import Workbook
from openpyxl import load_workbook
import random as rd
import sys
from wx import MessageBox
__all__ = ["Data"]
class Data(object):
# 学生名单用.xlsx格式存的
def __init__(self):
self.openFileFlg = False
# self.openFile("./学生名单.xlsx")
def openFile(self, dirname_):
try:
self.dirname = dirname_
self.wb = load_workbook(self.dirname)
self.ws = self.wb.active
except Exception:
MessageBox("打开文件失败")
else:
self.openFileFlg = True
try:
self.nameTuple = self.getNamesByRow_1()
# print(self.nameTuple)
if self.nameTuple == 0:
raise Exception("")
except Exception:
MessageBox("\"姓名\"一栏格式不正确")
else:
print(self.getNamesByRow_1())
self.stuNum = len(self.nameTuple)
self.vstList = [0 for i in range(0, self.stuNum + 1)]
self.vstNum = 0
self.shuffledListGen = self.createGen()
def getNamesByRow_1(self):
it = iter(self.ws["1"])
nameCell = self.ws["A1"]
cnt = 0
for cell in it:
if (cell.value in ("名字", "姓名")):
cnt += 1
if cnt == 0:
nameCell = cell
if cnt == 0:
MessageBox("文件中没有姓名一栏,请检查文件")
return 0
elif cnt > 1:
MessageBox("您是想整爷是吧?给爷爪巴")
return 0
else:
col = self.ws[chr(nameCell.column + 65 - 1)]
length = len(col)
return col[1:length + 1]
# 正常模式
def getRandName_Normal(self):
num = rd.randint(1, self.stuNum)
nameString = self.nameTuple[num - 1].value
return nameString
# 无重复模式
def createGen(self):
shuffledList = list(self.nameTuple).copy()
rd.shuffle(shuffledList)
for i in shuffledList:
yield i
def getRandName_NoRepetition(self):
try:
nameString = next(self.shuffledListGen).value
except StopIteration:
self.shuffledListGen = self.createGen()
nameString = next(self.shuffledListGen).value
return nameString
``` |
{
"source": "3210jr/tictactoe",
"score": 3
} |
#### File: 3210jr/tictactoe/model.py
```python
import pickle
import random
with open('network.pickle', 'rb') as handle:
model = pickle.load(handle)
def flatten_board(board):
""" Turns the 3x3 board into a 1x9 board """
return [cell for row in board for cell in row]
def pre_process(board, turn="x"):
""" Takes the board with x and o and turns in into vectors with numbers. 1 is AI, -1 is human, and 0 is empty """
result = []
opposite = "o" if turn == "x" else "x"
result = [1 if x == turn else x for x in board]
result = [-1 if x == opposite else x for x in result]
return result
def next_play(board):
board = flatten_board(board)
next_move = None
tries = 0
while next_move == None and tries < 30:
prediction = model.predict([pre_process(board, "o")])
print("before: ", board[prediction[0]])
if board[prediction[0]] == 0:
next_move = prediction[0]
tries = tries + 1
if next_move == None:
next_move = random.choice([i for i, x in enumerate(board) if x == 0])
# print("play: ", prediction[0], board[prediction[0]])
return next_move
``` |
{
"source": "3215/Scrapy_jpxgmn_spider",
"score": 3
} |
#### File: jpxgmn_spider/spiders/jpxgmn.py
```python
import scrapy
from urllib import parse
from ..items import JpxgmnSpiderItem
class JpxgmnSpider(scrapy.Spider):
name = 'jpxgmn'
allowed_domains = ['www.jpxgmn.net']
start_urls = ['http://www.jpxgmn.net/']
resource_url = 'https://p.jpxgmn.net/'
def parse(self, response):
# 通过主站获取各个系列的 url
menu_item = response.xpath("//ul[@class='sub-menu']//*[@class='menu-item']/a/@href").extract()[:1]
for item in menu_item:
item_url = parse.urljoin(response.url, item)
yield scrapy.Request(url=item_url, callback=self.parse_1) # 传给 parse_1 回调函数进行进一步处理
def parse_1(self, response):
# 通过系列页面获取各个图组的 url
related_box = response.xpath("//*[@class='related_box']/a/@href").extract()[:3] # [:5]调节爬取页面数量
for box in related_box:
box_url = parse.urljoin(response.url, box)
yield scrapy.Request(url=box_url, callback=self.find_img_url) # 传给 find_img_url 回调函数进行解析
# next_page = response.xpath("//a[contains(text(), '下一页')]/@href").extract_first('')
# yield scrapy.Request(url=parse.urljoin(response.url, next_page), callback=self.parse_1)
# 通过该系列页面获得下一页的 url ,并传给回调函数进行以上处理
def find_img_url(self, response):
image_info = JpxgmnSpiderItem() # 实例化 item 对象
image_info["image_paths"] = response.xpath("//*[@class='article-title']/text()").extract_first('')
img_urls = response.xpath("//img[@onload='size(this)']/@src").extract()
image_urls = [] # 待下载的图片url列表(必须为列表)
for img_url in img_urls:
img_url = '/U' + img_url[2:]
img_url = parse.urljoin(self.resource_url, img_url)
image_urls.append(img_url)
image_info["image_urls"] = image_urls
yield image_info # 将赋值好了的 item 传出去
next_url = response.xpath("//a[contains(text(), '下一页')]/@href").extract_first('')
yield scrapy.Request(url=parse.urljoin(response.url, next_url), callback=self.find_img_url)
# 通过该图片页面获得下一页的 url ,并传给回调函数进行以上处理
``` |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.