metadata (dict) | text (stringlengths 60 to 3.49M) |
---|---|
{
"source": "JoeyHendricks/QuickPotato",
"score": 2
} |
#### File: QuickPotato/configuration/management.py
```python
from QuickPotato.utilities.defaults import default_quick_potato_configuration
from os.path import isfile, dirname, realpath
import yaml
import sys
class Configuration(object):
FILE_NAME = "options.yaml"
PATH = dirname(realpath(__file__)) + "\\" if "\\" in dirname(realpath(__file__)) else \
dirname(realpath(__file__)) + "/"
def __init__(self):
if isfile(self.PATH + self.FILE_NAME) is False:
self.dump_configuration_to_yaml_file(default_quick_potato_configuration)
self.contents = yaml.safe_load(open(self.PATH + self.FILE_NAME))
def dump_configuration_to_yaml_file(self, contents):
with open(self.PATH + self.FILE_NAME, 'w') as file:
yaml.dump(contents, file)
@property
def enable_intrusive_profiling(self):
return self.contents["enable_intrusive_profiling"]
@enable_intrusive_profiling.setter
def enable_intrusive_profiling(self, value):
self.contents["enable_intrusive_profiling"] = value
self.dump_configuration_to_yaml_file(self.contents)
@property
def enable_policy_to_filter_out_invalid_test_ids(self):
return self.contents["enable_the_selection_of_untested_or_failed_test_ids"]
@enable_policy_to_filter_out_invalid_test_ids.setter
def enable_policy_to_filter_out_invalid_test_ids(self, value):
self.contents["enable_the_selection_of_untested_or_failed_test_ids"] = value
self.dump_configuration_to_yaml_file(self.contents)
@property
def connection_url(self):
"""Specify which database vendor you want to use.
For SQLite: "sqlite:///C:\\temp\\"
For MySQL: "mysql+pymysql://user:password@localhost"
"""
return self.contents["connection_url"]
@connection_url.setter
def connection_url(self, value):
self.contents["connection_url"] = value
self.dump_configuration_to_yaml_file(self.contents)
@property
def enable_database_echo(self):
return self.contents["enable_database_echo"]
@enable_database_echo.setter
def enable_database_echo(self, value):
self.contents["enable_database_echo"] = value
self.dump_configuration_to_yaml_file(self.contents)
@property
def enable_asynchronous_payload_delivery(self):
return self.contents["enable_asynchronous_payload_delivery"]
@enable_asynchronous_payload_delivery.setter
def enable_asynchronous_payload_delivery(self, value):
if sys.version_info[0:3] > (3, 8, 2) and value is True:
self.contents["enable_asynchronous_payload_delivery"] = True
self.dump_configuration_to_yaml_file(self.contents)
else:
self.contents["enable_asynchronous_payload_delivery"] = False
self.dump_configuration_to_yaml_file(self.contents)
@property
def enable_auto_clean_up_old_test_results(self):
return self.contents["enable_auto_clean_up_old_test_results"]
@enable_auto_clean_up_old_test_results.setter
def enable_auto_clean_up_old_test_results(self, value):
self.contents["enable_auto_clean_up_old_test_results"] = value
self.dump_configuration_to_yaml_file(self.contents)
@property
def max_number_saved_test_results(self):
return self.contents["maximum_number_saved_test_results"]
@max_number_saved_test_results.setter
def max_number_saved_test_results(self, value):
self.contents["maximum_number_saved_test_results"] = value
self.dump_configuration_to_yaml_file(self.contents)
options = Configuration()
```
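A minimal usage sketch of the module-level `options` object above, assuming QuickPotato is installed and importable; each property assignment is written back to options.yaml by the corresponding setter, and the values shown are illustrative.
```python
from QuickPotato.configuration.management import options

# Point QuickPotato at a database and enable intrusive profiling.
# Every assignment below rewrites options.yaml via the property setters.
options.connection_url = "sqlite:///C:\\temp\\"   # example value taken from the docstring above
options.enable_intrusive_profiling = True
options.max_number_saved_test_results = 10

print(options.connection_url)
print(options.enable_intrusive_profiling)
```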
#### File: QuickPotato/harness/results.py
```python
from QuickPotato.database.queries import Crud
class BoundariesTestEvidence(Crud):
def __init__(self):
super(BoundariesTestEvidence, self).__init__()
self.test_id = None
self.test_case_name = None
self.database_name = None
self.epoch_timestamp = None
self.human_timestamp = None
self.verification_name = None
self.status = None
self.value = None
self.boundary = None
def save(self):
"""
Will insert the test results into the database.
Returns
-------
Will return True on success
"""
payload = {
"test_id": self.test_id,
"test_case_name": self.test_case_name,
"epoch_timestamp": self.epoch_timestamp,
"human_timestamp": self.human_timestamp,
"verification_name": self.verification_name,
"status": self.status,
"value": self.value,
"boundary": self.boundary
}
return self.insert_boundaries_test_evidence(
database_name=self.database_name,
payload=payload
)
class RegressionTestEvidence(Crud):
def __init__(self):
super(RegressionTestEvidence, self).__init__()
self.test_id = None
self.test_case_name = None
self.database_name = None
self.epoch_timestamp = None
self.human_timestamp = None
self.verification_name = None
self.status = None
self.value = None
self.critical_value = None
def save_test_evidence(self):
"""
Will insert the test results into the database.
Returns
-------
Will return True on success
"""
payload = {
"test_id": self.test_id,
"test_case_name": self.test_case_name,
"epoch_timestamp": self.epoch_timestamp,
"human_timestamp": self.human_timestamp,
"verification_name": self.verification_name,
"status": self.status,
"value": self.value,
"critical_value": self.critical_value
}
return self.insert_regression_test_evidence(
database_name=self.database_name,
payload=payload
)
class TestReport(Crud):
def __init__(self):
super(TestReport, self).__init__()
self.test_id = None
self.test_case_name = None
self.database_name = None
self.epoch_timestamp = None
self.human_timestamp = None
self.status = None
self.boundaries_breached = None
self.regression_found = None
def save(self):
"""
Will insert the test results into the database.
Returns
-------
Will return True on success
"""
payload = {
"test_id": self.test_id,
"test_case_name": self.test_case_name,
"epoch_timestamp": self.epoch_timestamp,
"human_timestamp": self.human_timestamp,
"status": self.status,
"boundaries_breached": self.boundaries_breached,
"regression_found": self.regression_found
}
if self.check_if_test_id_exists_in_test_report(self.database_name, self.test_id):
# Update existing test results
return self.update_results_in_test_report(
database_name=self.database_name,
test_id=self.test_id,
payload=payload
)
else:
# Insert new test results
return self.insert_results_into_test_report(
database_name=self.database_name,
payload=payload
)
```
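A short sketch of filling in and saving one of these evidence objects, assuming a configured QuickPotato database behind the `Crud` base class; all field values are illustrative.
```python
from datetime import datetime
from QuickPotato.harness.results import BoundariesTestEvidence

evidence = BoundariesTestEvidence()
evidence.test_id = "example-test-id"            # hypothetical identifiers
evidence.test_case_name = "example_test_case"
evidence.database_name = "example_test_case"
evidence.epoch_timestamp = datetime.now().timestamp()
evidence.human_timestamp = datetime.now()
evidence.verification_name = "average_response_time"
evidence.status = True
evidence.value = 0.42
evidence.boundary = 1.0

# Inserts the payload through Crud.insert_boundaries_test_evidence
# and returns True on success.
evidence.save()
```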
#### File: QuickPotato/utilities/decorators.py
```python
from QuickPotato.harness.results import BoundariesTestEvidence
from functools import wraps
from datetime import datetime
def save_boundary_evidence(fnc):
"""
Parameters
----------
fnc
Returns
-------
"""
@wraps(fnc)
def encapsulated_function(*args, **kwargs):
"""
Parameters
----------
args
kwargs
Returns
-------
"""
evidence = BoundariesTestEvidence()
evidence.test_id = kwargs["test_id"]
evidence.database_name = kwargs["database_name"]
evidence.test_case_name = kwargs["test_case_name"]
evidence.epoch_timestamp = datetime.now().timestamp()
evidence.human_timestamp = datetime.now()
evidence.verification_name = kwargs["validation_name"]
evidence.value = float(kwargs["value"])
evidence.boundary = float(kwargs["boundary"])
# Scrub unused meta data
del kwargs["test_id"]
del kwargs["test_case_name"]
del kwargs["validation_name"]
del kwargs["database_name"]
evidence.status = fnc(*args, **kwargs)
evidence.save()
return evidence.status
return encapsulated_function
``` |
{
"source": "joeyhome/FPS-kit",
"score": 2
} |
#### File: plugins/developerconsole/developerconsole.py
```python
from pandac.PandaModules import TextNode
from pandac.PandaModules import TextProperties
from pandac.PandaModules import TextPropertiesManager
from direct.showbase.DirectObject import DirectObject
from direct.gui.DirectGui import DirectFrame
from direct.gui.DirectGui import DirectEntry
from direct.gui.OnscreenText import OnscreenText
import sys, traceback
import __main__
from code import InteractiveInterpreter
import panda3d
TEXT_MARGIN = (0.03, -0.06)
class PseudoFile:
def __init__(self, write):
self.write = write
def readline(self): pass
def writelines(self, l): map(self.write, l)
def flush(self): pass
def isatty(self): return 1
class DeveloperConsole(InteractiveInterpreter, DirectObject):
"""The name says it all."""
def __init__(self,manager,xml):
sys.stdout = PseudoFile(self.writeOut)
sys.stderr = PseudoFile(self.writeErr)
tpErr = TextProperties()
tpErr.setTextColor(1, 0.5, 0.5, 1)
TextPropertiesManager.getGlobalPtr().setProperties("err", tpErr)
self.manager = manager
font = loader.loadFont("cmss12")
self.frame = DirectFrame(parent = base.a2dTopCenter,
text_align = TextNode.ALeft,
text_pos = (-base.getAspectRatio() + TEXT_MARGIN[0], TEXT_MARGIN[1]),
text_scale = 0.05,
text_fg = (1, 1, 1, 1),
frameSize = (-2.0, 2.0, -0.5, 0.0),
frameColor = (0, 0, 0, 0.5),
text = '',
text_font = font)
self.entry = DirectEntry(parent = base.a2dTopLeft,
command = self.command,
scale = 0.05,
width = 1000.0,
pos = (-0.02, 0, -0.48),
relief = None,
text_pos = (1.5, 0, 0),
text_fg = (1, 1, 0.5, 1),
rolloverSound = None,
clickSound = None,
text_font = font)
self.otext = OnscreenText(parent = self.entry,
scale = 1,
align = TextNode.ALeft,
pos = (1, 0, 0),
fg = (1, 1, 0.5, 1),
text = ':',
font = font)
self.lines = [''] * 9
self.commands = [] # All previously sent commands
self.cscroll = None # Index of currently navigated command, None if current
self.command = '' # Currently entered command
self.block = '' # Temporarily stores a block of commands
self.hide()
self.initialized = False
def prevCommand(self):
if self.hidden: return
if len(self.commands) == 0: return
if self.cscroll == None:
self.cscroll = len(self.commands)
self.command = self.entry.get()
elif self.cscroll <= 0:
return
else:
self.commands[self.cscroll] = self.entry.get()
self.cscroll -= 1
self.entry.set(self.commands[self.cscroll])
self.entry.setCursorPosition(len(self.commands[self.cscroll]))
def nextCommand(self):
if self.hidden: return
if len(self.commands) == 0: return
if self.cscroll == None: return
self.commands[self.cscroll] = self.entry.get()
self.cscroll += 1
if self.cscroll >= len(self.commands):
self.cscroll = None
self.entry.set(self.command)
self.entry.setCursorPosition(len(self.command))
else:
self.entry.set(self.commands[self.cscroll])
self.entry.setCursorPosition(len(self.commands[self.cscroll]))
def writeOut(self, line, copy = True):
if copy: sys.__stdout__.write(line)
lines = line.split('\n')
firstline = lines.pop(0)
self.lines[-1] += firstline
self.lines += lines
self.frame['text'] = '\n'.join(self.lines[-9:])
def writeErr(self, line, copy = True):
if copy: sys.__stderr__.write(line)
line = '\1err\1%s\2' % line
lines = line.split('\n')
firstline = lines.pop(0)
self.lines[-1] += firstline
self.lines += lines
self.frame['text'] = '\n'.join(self.lines[-9:])
def command(self, text):
if not self.hidden:
self.cscroll = None
self.command = ''
self.entry.set('')
self.entry['focus'] = True
self.writeOut(self.otext['text'] + ' ' + text + '\n', False)
if text != '' and (len(self.commands) == 0 or self.commands[-1] != text):
self.commands.append(text)
# Insert plugins into the local namespace
locals = __main__.__dict__
locals['manager'] = self.manager
for plugin in self.manager.named.keys():
locals[plugin] = self.manager.named[plugin]
locals['panda3d'] = panda3d
# Run it and print the output.
if not self.initialized:
InteractiveInterpreter.__init__(self, locals = locals)
self.initialized = True
try:
if self.runsource(self.block + '\n' + text) and text != '':
self.otext['text'] = '.'
self.block += '\n' + text
else:
self.otext['text'] = ':'
self.block = ''
except Exception: # Not just "except", it will also catch SystemExit
# Whoops! Print out a traceback.
self.writeErr(traceback.format_exc())
def toggle(self):
if self.hidden:
self.show()
else:
self.hide()
def show(self):
self.accept('arrow_up', self.prevCommand)
self.accept('arrow_up-repeat', self.prevCommand)
self.accept('arrow_down', self.nextCommand)
self.accept('arrow_down-repeat', self.nextCommand)
self.hidden = False
self.entry['focus'] = True
self.frame.show()
self.entry.show()
self.otext.show()
def hide(self):
self.ignoreAll()
self.hidden = True
self.entry['focus'] = False
self.frame.hide()
self.entry.hide()
self.otext.hide()
def destroy(self):
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
self.ignoreAll()
self.frame.destroy()
self.entry.destroy()
self.otext.destroy()
```
#### File: plugins/level/level.py
```python
import posixpath
import time
from pandac.PandaModules import *
import direct.directbase.DirectStart
from bin.shared import odeSpaceHier
class Level:
"""This loads a level - that is it loads a collection fo egg files and sticks
them at the origin. These files will typically be very large. 4 files,
all optional, are typically given - the rendered file, the collision file,
the detail file (Visible instances of high res geometry.) and the entity file.
(Lots of empties used by the programmer.)"""
def __init__(self,manager,xml):
self.reload(manager,xml)
def reload(self,manager,xml):
# Load from the xml the details needed to do the actual loading...
# Get the path to load levels from...
basePath = manager.get('paths').getConfig().find('levels').get('path')
# Get the details for the renderable geometry...
rendElem = xml.find('render')
if rendElem!=None:
self.rendPath = posixpath.join(basePath,rendElem.get('filename'))
self.rendAmb = xml.find('ambient')!=None
else:
self.rendPath = None
# Get the details for the collision geometry...
colElem = xml.find('collide')
if colElem!=None:
self.colPath = posixpath.join(basePath,colElem.get('filename'))
self.colSurface = colElem.get('surface','default')
else:
self.colPath = None
# Get the details for the instancing information - the things...
thingElem = xml.find('things')
if thingElem!=None:
self.thingPath = posixpath.join(basePath,thingElem.get('filename'))
else:
self.thingPath = None
# We need access to the physics manager to do physics...
physics = xml.find('physics')
if physics!=None:
odeName = physics.get('plugin','ode')
else:
odeName = 'ode'
self.ode = manager.get(odeName)
def postInit(self):
for i in self.postReload():
yield i
def postReload(self):
# The renderable geometry...
self.rend = None
if self.rendPath!=None:
def rendCallback(model):
self.rend = model
loader.loadModel(self.rendPath, callback=rendCallback)
while self.rend==None:
time.sleep(0.05)
yield
# Let's hide it from the shadowcam for now.
self.rend.hide(BitMask32.bit(3))
if self.rendAmb:
self.ambLight = AmbientLight('Ambient Light')
self.ambLight.setColor(VBase4(1.0,1.0,1.0,1.0))
self.ambLightNode = self.rend.attachNewNode(self.ambLight)
self.rend.setLight(self.ambLightNode)
yield
# The collision geometry...
self.colEgg = None
if self.colPath!=None:
def colCallback(model):
self.colEgg = model
loader.loadModel(self.colPath, callback=colCallback)
while self.colEgg==None:
time.sleep(0.05)
yield
surfaceType = self.ode.getSurface(self.colSurface)
for r in odeSpaceHier.eggToOde(self.colEgg,surfaceType):
yield
self.col = r
if (self.col==None):
print 'WARNING: Collision geometry contained nothing to collide against.'
else:
self.ode.getSpace().add(self.col)
# The thing egg...
self.things = None
if self.thingPath!=None:
def thingCallback(model):
self.things = model
loader.loadModel(self.thingPath, callback=thingCallback)
while self.things==None:
time.sleep(0.05)
yield
def start(self):
if self.rend: self.rend.reparentTo(render)
def stop(self):
if self.rend: self.rend.detachNode()
def getThings(self):
return self.things
def getByIsA(self,name):
"""Given a name this returns a list of all objects in the things structure
that have the tag IsA with the given name as the data. Will return an
empty list if none available."""
if self.things == None: return []
col = self.things.findAllMatches('**/=IsA='+name)
ret = []
for i in xrange(col.size()):
ret.append(col[i])
return ret
def toggleVisible(self):
if self.rend.isHidden():
self.rend.show()
else:
self.rend.hide()
```
#### File: plugins/loading/loading.py
```python
from panda3d.core import *
from direct.actor import Actor
class Loading:
"""Does a loading screen - renders some stuff whilst a transition is happenning."""
def __init__(self,manager,xml):
self.node = Actor.Actor('data/misc/loading')
self.node.reparentTo(base.render)
self.node.setShaderAuto()
self.node.hide()
self.light = PointLight('plight')
self.light.setColor(VBase4(1.0, 1.0, 1.0, 1.0))
self.lightNode = self.node.attachNewNode(self.light)
self.lightNode.setPos(0.0, 0.0, 1.5)
self.node.setLight(self.lightNode)
self.task = None
#self.stop()
def reload(self,manager,xml):
pass
def start(self):
self.node.hide()
self.node.stop()
if self.task!=None:
taskMgr.remove(self.task)
self.task = None
def stop(self):
self.node.show()
self.node.loop('slide')
self.task = taskMgr.add(self.camPos, 'LoadingCamera')
def camPos(self,task):
base.camera.setPos(0.0,0.0,20.0)
base.camera.lookAt(0.0,0.0,0.0)
return task.cont
``` |
{
"source": "joeyhsiung/Natural-Language-to-Graph-Query-KBQA-",
"score": 3
} |
#### File: Natural-Language-to-Graph-Query-KBQA-/multiHop_QA/T5.py
```python
import os
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
import os
# Importing the T5 modules from huggingface/transformers
from transformers import T5Tokenizer, T5ForConditionalGeneration
class YourDataSetClass(Dataset):
"""
Creating a custom dataset for reading the dataset and
loading it into the dataloader to pass it to the
neural network for finetuning the model
"""
def __init__(
self, dataframe, tokenizer, source_len, target_len, source_text, target_text
):
"""
Initializes a Dataset class
Args:
dataframe (pandas.DataFrame): Input dataframe
tokenizer (transformers.tokenizer): Transformers tokenizer
source_len (int): Max length of source text
target_len (int): Max length of target text
source_text (str): column name of source text
target_text (str): column name of target text
"""
self.tokenizer = tokenizer
self.data = dataframe
self.source_len = source_len
self.summ_len = target_len
self.target_text = self.data[target_text]
self.source_text = self.data[source_text]
def __len__(self):
"""returns the length of dataframe"""
return len(self.target_text)
def __getitem__(self, index):
"""return the input ids, attention masks and target ids"""
source_text = str(self.source_text[index])
target_text = str(self.target_text[index])
# normalize whitespace so both texts are clean, single-spaced strings
source_text = " ".join(source_text.split())
target_text = " ".join(target_text.split())
source = self.tokenizer.batch_encode_plus(
[source_text],
max_length=self.source_len,
pad_to_max_length=True,
truncation=True,
padding="max_length",
return_tensors="pt",
)
target = self.tokenizer.batch_encode_plus(
[target_text],
max_length=self.summ_len,
pad_to_max_length=True,
truncation=True,
padding="max_length",
return_tensors="pt",
)
source_ids = source["input_ids"].squeeze()
source_mask = source["attention_mask"].squeeze()
target_ids = target["input_ids"].squeeze()
target_mask = target["attention_mask"].squeeze()
return {
"source_ids": source_ids.to(dtype=torch.long),
"source_mask": source_mask.to(dtype=torch.long),
"target_ids": target_ids.to(dtype=torch.long),
"target_ids_y": target_ids.to(dtype=torch.long),
}
def train(epoch, tokenizer, model, device, loader, optimizer):
"""
Function to be called for training with the parameters passed from main function
"""
model.train()
for _, data in enumerate(loader, 0):
y = data["target_ids"].to(device, dtype=torch.long)
y_ids = y[:, :-1].contiguous()
lm_labels = y[:, 1:].clone().detach()
lm_labels[y[:, 1:] == tokenizer.pad_token_id] = -100
ids = data["source_ids"].to(device, dtype=torch.long)
mask = data["source_mask"].to(device, dtype=torch.long)
outputs = model(
input_ids=ids,
attention_mask=mask,
decoder_input_ids=y_ids,
labels=lm_labels,
)
loss = outputs[0]
if _ % 10 == 0:
training_logger.add_row(str(epoch), str(_), str(loss))
console.print(training_logger)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def validate(epoch, tokenizer, model, device, loader):
"""
Function to evaluate model for predictions
"""
model.eval()
predictions = []
actuals = []
with torch.no_grad():
for _, data in enumerate(loader, 0):
y = data['target_ids'].to(device, dtype = torch.long)
ids = data['source_ids'].to(device, dtype = torch.long)
mask = data['source_mask'].to(device, dtype = torch.long)
generated_ids = model.generate(
input_ids = ids,
attention_mask = mask,
max_length=150,
num_beams=2,
repetition_penalty=2.5,
length_penalty=1.0,
early_stopping=True
)
preds = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for g in generated_ids]
target = [tokenizer.decode(t, skip_special_tokens=True, clean_up_tokenization_spaces=True)for t in y]
if _%10==0:
console.print(f'Completed {_}')
predictions.extend(preds)
actuals.extend(target)
return predictions, actuals
def T5Trainer(
dataframe, source_text, target_text, model_params, output_dir="./outputs/"
):
"""
T5 trainer
"""
# Set random seeds and deterministic pytorch for reproducibility
torch.manual_seed(model_params["SEED"]) # pytorch random seed
np.random.seed(model_params["SEED"]) # numpy random seed
torch.backends.cudnn.deterministic = True
# logging
console.log(f"""[Model]: Loading {model_params["MODEL"]}...\n""")
# tokenizer for encoding the text
tokenizer = T5Tokenizer.from_pretrained(model_params["MODEL"])
# Defining the model. We use the t5-base checkpoint, whose conditional-generation head produces the target text.
# The model is then moved to the selected device (GPU/CPU) to make use of the hardware.
model = T5ForConditionalGeneration.from_pretrained(model_params["MODEL"])
model = model.to(device)
# logging
console.log(f"[Data]: Reading data...\n")
# Importing the raw dataset
dataframe = dataframe[[source_text, target_text]]
display_df(dataframe.head(2))
# Creation of Dataset and Dataloader
# Defining the train size. So 80% of the data will be used for training and the rest for validation.
train_size = 0.8
train_dataset = dataframe.sample(frac=train_size, random_state=model_params["SEED"])
val_dataset = dataframe.drop(train_dataset.index).reset_index(drop=True)
train_dataset = train_dataset.reset_index(drop=True)
console.print(f"FULL Dataset: {dataframe.shape}")
console.print(f"TRAIN Dataset: {train_dataset.shape}")
console.print(f"TEST Dataset: {val_dataset.shape}\n")
# Creating the Training and Validation dataset for further creation of Dataloader
training_set = YourDataSetClass(
train_dataset,
tokenizer,
model_params["MAX_SOURCE_TEXT_LENGTH"],
model_params["MAX_TARGET_TEXT_LENGTH"],
source_text,
target_text,
)
val_set = YourDataSetClass(
val_dataset,
tokenizer,
model_params["MAX_SOURCE_TEXT_LENGTH"],
model_params["MAX_TARGET_TEXT_LENGTH"],
source_text,
target_text,
)
# Defining the parameters for creation of dataloaders
train_params = {
"batch_size": model_params["TRAIN_BATCH_SIZE"],
"shuffle": True,
"num_workers": 0,
}
val_params = {
"batch_size": model_params["VALID_BATCH_SIZE"],
"shuffle": False,
"num_workers": 0,
}
# Creation of DataLoaders for training and validation. These are used below during the training and validation stages of the model.
training_loader = DataLoader(training_set, **train_params)
val_loader = DataLoader(val_set, **val_params)
# Defining the optimizer that will be used to tune the weights of the network in the training session.
optimizer = torch.optim.Adam(
params=model.parameters(), lr=model_params["LEARNING_RATE"]
)
# Training loop
console.log(f"[Initiating Fine Tuning]...\n")
for epoch in range(model_params["TRAIN_EPOCHS"]):
train(epoch, tokenizer, model, device, training_loader, optimizer)
console.log(f"[Saving Model]...\n")
# Saving the model after training
path = os.path.join(output_dir, "model_files")
model.save_pretrained(path)
tokenizer.save_pretrained(path)
# evaluating test dataset
console.log(f"[Initiating Validation]...\n")
for epoch in range(model_params["VAL_EPOCHS"]):
predictions, actuals = validate(epoch, tokenizer, model, device, val_loader)
final_df = pd.DataFrame({"Generated Text": predictions, "Actual Text": actuals})
final_df.to_csv(os.path.join(output_dir, "predictions.csv"))
console.save_text(os.path.join(output_dir, "logs.txt"))
console.log(f"[Validation Completed.]\n")
console.print(
f"""[Model] Model saved @ {os.path.join(output_dir, "model_files")}\n"""
)
console.print(
f"""[Validation] Generation on Validation data saved @ {os.path.join(output_dir,'predictions.csv')}\n"""
)
console.print(f"""[Logs] Logs saved @ {os.path.join(output_dir,'logs.txt')}\n""")
# let's define model parameters specific to T5
model_params = {
"MODEL": "t5-base", # model_type: t5-base/t5-large
"TRAIN_BATCH_SIZE": 8, # training batch size
"VALID_BATCH_SIZE": 8, # validation batch size
"TRAIN_EPOCHS": 3, # number of training epochs
"VAL_EPOCHS": 1, # number of validation epochs
"LEARNING_RATE": 1e-4, # learning rate
"MAX_SOURCE_TEXT_LENGTH": 512, # max length of source text
"MAX_TARGET_TEXT_LENGTH": 50, # max length of target text
"SEED": 42, # set seed for reproducibility
}
```
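A hedged driver sketch for `T5Trainer`; it assumes a CSV with the named source/target columns and that the module-level `console`, `display_df`, `training_logger`, and `device` objects referenced above (but not defined in this file) have been created elsewhere, for example with the `rich` library and `torch.device`.
```python
import pandas as pd

# Hypothetical input file and column names.
df = pd.read_csv("data/question_query_pairs.csv")

T5Trainer(
    dataframe=df,
    source_text="question",     # column holding the natural-language question
    target_text="query",        # column holding the target graph query
    model_params=model_params,
    output_dir="./outputs/",
)
```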
#### File: Natural-Language-to-Graph-Query-KBQA-/oneHop_QA/data2spacy.py
```python
import pandas as pd
import spacy
import re
from tqdm import tqdm
from spacy.tokens import DocBin
from multiHop_QA.configures import Config_path, Config_output_path
c = Config_path()
c_output = Config_output_path()
def data2jason(input_data, q_col='question', ent_col='entity',
label_name='SimpleQuestions'): # input data should be dataframe
data = []
for _, item in input_data.iterrows():
q = item[q_col]
pattern = item[ent_col]
s, e = 0, 0
try:
for match in re.finditer(pattern, q):
s = match.start()
e = match.end()
entity = (s, e, label_name)
temp = (item[q_col], {'entities': [entity]})
data.append(temp)
except:
pass
return data
def jason2spacy(data, output_name="dev.spacy"):
nlp = spacy.blank("en") # load a new spacy model
db = DocBin() # create a DocBin object
total = 0
for text, annot in tqdm(data): # data in previous format
doc = nlp.make_doc(text) # create doc object from text
ents = []
for start, end, label in annot["entities"]: # add character indexes
span = doc.char_span(start, end, label=label)
if span:
total += 1
ents.append(span)
doc.ents = ents # label the text with the ents
db.add(doc)
db.to_disk(c_output.ner_data_path + output_name) # save the docbin object
# import json
# with open('train_jason.json', 'w', encoding='utf-8') as f:
# json.dump(train_jason, f, ensure_ascii=False, indent=2)
#
# with open('test_jason.json', 'w', encoding='utf-8') as f:
# json.dump(test_jason, f, ensure_ascii=False, indent=2)
# read train data
train_combination = pd.read_csv(c.train_combination_path)
train_combination = train_combination[train_combination.entity != '*']
# read test data
test_combination = pd.read_csv(c.test_combination_path)
test_combination = test_combination[test_combination.entity != '*']
# read validation(dev) data
dev_wiki = pd.read_csv(c.wiki_dev_path)
dev_wiki = dev_wiki[dev_wiki.entity != '*']
# extract train_entity column to list
train_entities = train_combination['entity'].unique().tolist()
# extract test_entity column to list
test_entities = test_combination['entity'].unique().tolist()
# extract dev_entity column to list
dev_entities = dev_wiki['entity'].unique().tolist()
train_jason = data2jason(train_combination)
test_jason = data2jason(test_combination)
dev_jason = data2jason(dev_wiki)
jason2spacy(train_jason, output_name="train.spacy")
jason2spacy(test_jason, output_name="test.spacy")
jason2spacy(dev_jason, output_name="dev.spacy")
```
#### File: Natural-Language-to-Graph-Query-KBQA-/QAs_generator/build_neo4jKG.py
```python
import re
import spacy
from multiHop_QA.configures import Config_path
import pandas as pd
from tqdm import tqdm
from py2neo import Graph,Node
from neo4j import GraphDatabase
c = Config_path()
nlp = spacy.load("en_core_web_sm")
##################################
## Create Neo4j Knowledge Graph ##
##################################
# push triple csv data to neo4j graph database
# initialize local database
graph = Graph("http://localhost:7474",user='neo4j',password='<PASSWORD>')
# read triple files
wiki_triples = pd.read_csv(c.triple)
# clean text
num_cols = wiki_triples.shape[1]
for col in range(num_cols):
# tokens = nlp(list(wiki_triples.iloc[:,col]))
col_list = list(wiki_triples.iloc[:,col])
col_list_lemma = []
for i in col_list:
temp_doc = nlp(str(i))
token_lemma = str()
for token in temp_doc.sents:
token_lemma += token.lemma_
col_list_lemma.append(token_lemma)
temp_re = [re.sub("[^a-zA-Z\d]", " ", str(i).lower()) for i in col_list_lemma]
wiki_triples.iloc[:, col] = temp_re
# print(temp_re)
# replace space to underline
wiki_triples['relation'] = wiki_triples['relation'].str.replace(' ','_',regex=True)
wiki_triples['label'] = wiki_triples['label'].str.replace(' ','_',regex=True)
# create source nodes
temp = []
for i,row in tqdm(wiki_triples.iterrows()):
start_node = Node(row['label'],name=row['source'])
end_node = Node(row['label'],name=row['end'])
if row['source'] not in temp:
graph.create(start_node)
temp.append(row['source'])
# if row['end'] not in temp:
# graph.create(end_node)
# temp.append(row['end'])
# graph.create(Relationship(start_node, row['relation'], end_node))
# create relations with end nodes
uri = "neo4j://localhost:7687"
driver = GraphDatabase.driver(uri, auth=("neo4j", "123"))
def create(tx, label, rel, start, end):
tx.run("MATCH (a:"+label+") WHERE a.name = $start "
"CREATE (a)-[:"+rel+"]->(:"+label+" {name: $end})",
start=start, end=end)
with driver.session() as session:
for i,row in tqdm(wiki_triples.iterrows()):
session.write_transaction(create,row['label'],row['relation'],row['source'],row['end'])
print('pushing data to neo4j is done')
driver.close()
``` |
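A small follow-up sketch for sanity-checking the load, written as a separate script because the driver above is closed at the end; it assumes the same local Neo4j instance and credentials.
```python
from neo4j import GraphDatabase

driver = GraphDatabase.driver("neo4j://localhost:7687", auth=("neo4j", "123"))
with driver.session() as session:
    # Count everything that was pushed into the graph.
    record = session.run("MATCH (n) RETURN count(n) AS nodes").single()
    print("nodes loaded:", record["nodes"])
driver.close()
```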
{
"source": "joeyism/fakipedia",
"score": 3
} |
#### File: fakipedia/lib/objects.py
```python
from app import db
class GeneratedPage(db.Model):
__tablename__ = 'generated_pages'
id = db.Column(db.Integer, primary_key=True)
url = db.Column(db.String(256), nullable=False)
title = db.Column(db.String(256), nullable=False)
body = db.Column(db.Text, nullable=False)
length = db.Column(db.Integer, nullable=False)
memory = db.Column(db.Integer, nullable=False)
def __init__(self, url, title, body, length, memory):
self.url = url
self.title = title
self.body = body
self.length = length
self.memory = memory
def save(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_page_by_url(cls, url):
return db.session.query(GeneratedPage).filter_by(url=url).first()
@classmethod
def get_page_by_query(cls, url, length, memory):
return db.session.query(GeneratedPage).filter_by(url=url, length=length, memory=memory).first()
@classmethod
def get_random_page(cls):
return db.session.query(GeneratedPage).first()
```
#### File: fakipedia/lib/wikitext_to_html.py
```python
from mediawiki_parser import preprocessor, raw, text, html
import py3compat
import lxml.html
allowed_tags = ['p', 'span', 'b', 'i']
allowed_autoclose_tags = ['br', 'hr']
allowed_parameters = ['class', 'style', 'name', 'id', 'scope']
interwiki = {'en': 'http://en.wikipedia.org/wiki/',
'fr': 'http://fr.wikipedia.org/wiki/'}
namespaces = {'Template': 10,
u'Catégorie': 14,
'Category': 14,
'File': 6,
'Image': 6}
parser = html.make_parser(allowed_tags, allowed_autoclose_tags, allowed_parameters, interwiki, namespaces)
preprocessor_parser = preprocessor.make_parser({})
siteSubElem = lxml.html.fromstring('<div class="siteSub">From Fakipedia, the fake Wikipedia</div><div class="contentSub"/>')
def preprocess(source):
source = source.replace("\n ", "\n") \
.replace(" \n", "\n") \
.replace("= ", "=") \
.replace(" =", "=") \
.replace("@ ", " ") \
.replace(" @", " ") \
.strip()
source_split = source.split("\n")
# fixing title
source_split[0] = source_split[0].replace("==", "=")
for i, sentence in enumerate(source_split):
if "=" not in sentence:
continue
name = "".join(sentence.split("="))
begin = sentence.split(name)[0]
sentence = begin + name + begin
source_split[i] = sentence
source = "\n".join(source_split)
return source
def process(source):
source = source.strip()
if not source.endswith("\n"):
source += "\n"
preprocessed = preprocessor_parser.parseTest(source).value
return py3compat.text_type(parser.parseTest(preprocessed).leaves())
def postprocess(source):
main_elem = lxml.html.fromstring(source)
header = main_elem.find(".//h1")
if header is not None:
main_elem.insert(main_elem.index(header) + 1, siteSubElem)
return lxml.html.tostring(main_elem).decode("utf8")
def run(source):
source = preprocess(source)
source = process(source)
source = postprocess(source)
return source
```
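A small end-to-end sketch of the pipeline above, assuming `mediawiki_parser` and its dependencies are installed; the wikitext sample is made up.
```python
sample_wikitext = """= Example Page =

Some '''bold''' introduction text about the example subject.

= = History = =
A short history paragraph.
"""

# preprocess() repairs the headings, process() parses the wikitext,
# postprocess() injects the "From Fakipedia" site subtitle after the <h1>.
html_output = run(sample_wikitext)
print(html_output)
```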
#### File: fakipedia/models/text_generator.py
```python
import torch
import numpy as np
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from tqdm import tqdm
from lib import objects
from lib import wikitext_to_html
from lib import constants as c
device = 'cpu'
if torch.cuda.is_available():
device = 'cuda'
tokenizer = GPT2Tokenizer.from_pretrained(c.GPT2_NAME)
model = GPT2LMHeadModel.from_pretrained(c.GPT2_NAME)
model = model.to(device)
EOD_ID = tokenizer.encode("<|endoftext|>")[0]
EQUAL_ID = tokenizer.encode(" = ")[0]
NEW_LINE_ID = 198
HEADER_ID = 796
test_article = """ = Toronto Raptors =
Toronto Raptors are the best team in the world
= = History = =
Founded in 1996, they had to endure <NAME> before winning the 2018-2019 NBA Championship
"""
def generate_text(input_str, text_len=c.MAX_TEXT_LENGTH, end_of_text_id=EOD_ID, top_random=5, test=False, memory=c.DEFAULT_MODEL_MEMORY):
if test:
return test_article
cur_ids = torch.tensor(tokenizer.encode(input_str)).unsqueeze(0).long().to(device)
model.eval()
with torch.no_grad():
for i in tqdm(range(text_len)):
outputs = model(cur_ids[:, -1*memory:], labels=cur_ids[:, -1*memory:])
loss, logits = outputs[:2]
softmax_logits = torch.softmax(logits[0,-1], dim=0)
next_token_id = choose_from_top(softmax_logits.to('cpu').numpy(), n=top_random)
if next_token_id == end_of_text_id:
break
elif next_token_id == NEW_LINE_ID and cur_ids[0][-1] == HEADER_ID and cur_ids[0][-2] != HEADER_ID:
break
elif next_token_id == NEW_LINE_ID and cur_ids[0][-1] == NEW_LINE_ID:
break
cur_ids = torch.cat([cur_ids, torch.ones((1,1)).long().to(device) * next_token_id], dim=1)
output_list = list(cur_ids.squeeze().to('cpu').numpy())
output_text = tokenizer.decode(output_list)
return output_text
def choose_from_top(probs, n=5):
ind = np.argpartition(probs, -n)[-n:]
top_prob = probs[ind]
top_prob = top_prob / np.sum(top_prob) # Normalize
if EQUAL_ID in ind and np.where(ind == EQUAL_ID)[0][0] == np.argmax(top_prob): # return =
return EQUAL_ID
choice = np.random.choice(n, 1, p = top_prob)
token_id = ind[choice][0]
return int(token_id)
def clean_starting_text(title):
title = " ".join([word.capitalize() for word in title.replace("_", " ").split()])
return title
def create_starting_text(title):
return f""" = {title} =
"""
def generate_page(title, text_len, memory, cutoff=True):
page = objects.GeneratedPage.get_page_by_query(title, text_len, memory)
if page is None:
cleaned_title = clean_starting_text(title)
starting_text = create_starting_text(cleaned_title)
source = generate_text(starting_text, test=c.ENV.lower()=='test', text_len=text_len, memory=memory)
source = wikitext_to_html.run(source)
if cutoff:
source = ". ".join(source.split(". ")[:-1] + [""])
page = objects.GeneratedPage(title, cleaned_title, source, text_len, memory)
page.save()
return page
``` |
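A usage sketch for `generate_page`, assuming the Flask app, database and GPT-2 weights referenced above are available; the title and parameters are illustrative.
```python
# Hypothetical call, e.g. from a request handler.
page = generate_page("toronto_raptors",
                     text_len=c.MAX_TEXT_LENGTH,
                     memory=c.DEFAULT_MODEL_MEMORY)
print(page.title)        # cleaned title, e.g. "Toronto Raptors"
print(page.body[:200])   # first part of the generated HTML body
```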
{
"source": "joeyism/jsonl-to-conll",
"score": 3
} |
#### File: jsonl-to-conll/jsonl_to_conll/cli.py
```python
from jsonl_to_conll import convert, io
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument("input_filename", help="Input JSONL filename", type=str)
parser.add_argument("output_filename", help="Output CONLL filename", type=str)
args = parser.parse_args()
data = io.read_jsonl(args.input_filename)
data = convert.flatten_all(data)
io.json_to_text(data, args.output_filename)
if __name__ == "__main__":
main()
``` |
{
"source": "joeyism/py-custom-google-search",
"score": 3
} |
#### File: py-custom-google-search/custom_google_search/api.py
```python
from typing import Dict
import requests
BASE_URL = "https://www.googleapis.com/customsearch/v1"
def search(query, cx=None, key=None) -> Dict:
if cx is None:
raise Exception("CX Required. Please see https://joeyism.medium.com/custom-google-search-api-fbbafe4711eb on how to obtain the API Key")
if key is None:
raise Exception("API Key required. Please see https://joeyism.medium.com/custom-google-search-api-fbbafe4711eb on how to obtain the API Key")
req = requests.get(BASE_URL, params={"q": query, "key": key, "cx": cx})
return req.json()
``` |
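A minimal usage sketch, assuming a valid Custom Search Engine ID and API key; the values shown are placeholders.
```python
from custom_google_search.api import search

results = search("kobe bryant highlights", cx="YOUR_CX_ID", key="YOUR_API_KEY")

# The Custom Search JSON API returns matches under "items".
for item in results.get("items", []):
    print(item.get("title"), "-", item.get("link"))
```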
{
"source": "joeyism/py-image-comparer",
"score": 3
} |
#### File: py-image-comparer/image_comparer/cli.py
```python
from pathlib import Path
import argparse
import cv2
from .compare import is_similar
def cli():
parser = argparse.ArgumentParser(description='Compares two images')
parser.add_argument('image1', metavar="Image1-Path", type=Path, help='first image in comparison')
parser.add_argument('image2', metavar="Image2-Path", type=Path, help='second image in comparison')
parser.add_argument('--threshold', type=float, default=0.5, help='threshold limit (default: 0.5)')
args = parser.parse_args()
main(args.image1, args.image2)
def main(image1_path: Path, image2_path: Path):
image1 = cv2.imread(image1_path.as_posix())
image2 = cv2.imread(image2_path.as_posix())
if is_similar(image1, image2):
print(f"{image1_path.name} and {image2_path.name} are similar")
else:
print(f"{image1_path.name} and {image2_path.name} are not similar")
if __name__ == "__main__":
cli()
```
#### File: py-image-comparer/tests/test_compare.py
```python
from unittest import TestCase
import cv2
from PIL import Image
import image_comparer
class TestCompare(TestCase):
def setUp(self):
self.image = Image.open("tests/images/kobe.jpg")
self.image2 = cv2.imread("tests/images/kobe2.jpg")
def test_compare_success(self):
assert not image_comparer.is_similar(self.image, self.image2)
assert image_comparer.is_similar(self.image, self.image)
``` |
{
"source": "joeyism/py-oauth2_facebook_login",
"score": 3
} |
#### File: py-oauth2_facebook_login/oauth2_facebook_login/functions.py
```python
from selenium import webdriver
from requests_oauthlib import OAuth2Session
from requests_oauthlib.compliance_fixes import facebook_compliance_fix
def get_access_token(email, password, client_id = "", client_secret = "", scope = [], driver = None):
"""
Logs in to Facebook and gets an access token for OAuth2
Parameters
----------
email: string
login email for OAuth2 account
password: string
login password for OAuth2 account
client_id: string
App client id, taken from a specific app from https://developers.facebook.com/apps/
client_secret: string
App client secret, taken from a specific app from https://developers.facebook.com/apps/
scope: list
A list of OAuth2 scope desired for this app. For example,
scope = [
"pages_show_list",
"manage_pages",
"pages_manage_instant_articles",
"pages_manage_cta",
"ads_management",
"business_management"
]
driver: selenium.webdriver (optional)
Selenium webdriver used for login. Chrome is the default, but its driver executable must be available on PATH
Output
------
requests_oauthlib.OAuth2Session
"""
authorization_base_url = 'https://www.facebook.com/dialog/oauth'
token_url = 'https://graph.facebook.com/oauth/access_token'
redirect_uri = 'https://localhost/' # Should match Site URL
if driver == None:
driver = webdriver.Chrome()
facebook = OAuth2Session(client_id, redirect_uri=redirect_uri, scope=scope)
facebook = facebook_compliance_fix(facebook)
authorization_url, state = facebook.authorization_url(authorization_base_url)
driver.get(authorization_url)
driver.find_element_by_id("email").send_keys(email)
driver.find_element_by_id("pass").send_keys(password)
driver.find_element_by_id("loginbutton").click()
redirect_response = driver.current_url
facebook.fetch_token(token_url, client_secret=client_secret,
authorization_response=redirect_response)
driver.close()
return facebook
``` |
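A usage sketch following the docstring above; the credentials, app id/secret and scope are placeholders, and a chromedriver on PATH is assumed for the default Chrome webdriver.
```python
from oauth2_facebook_login.functions import get_access_token

facebook = get_access_token(
    email="user@example.com",              # placeholder credentials
    password="not-a-real-password",
    client_id="YOUR_APP_ID",
    client_secret="YOUR_APP_SECRET",
    scope=["pages_show_list", "manage_pages"],
)

# The returned OAuth2Session can call the Graph API directly.
response = facebook.get("https://graph.facebook.com/me?fields=id,name")
print(response.json())
```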
{
"source": "joeyism/s3-as-a-datastore",
"score": 2
} |
#### File: s3-as-a-datastore/s3aads/copy.py
```python
import os
class Copy(object):
def __init__(self, table, key):
self.source_table = table
self.source_key = key
def to(self, dest_table, dest_key, **kwargs) -> None:
copy_source = {
'Bucket': self.source_table.database.name,
'Key': os.path.join(self.source_table.name, self.source_key)
}
self.source_table.database.bucket.meta.client.copy_object(
CopySource=copy_source,
Bucket=dest_table.database.bucket.name,
Key=os.path.join(dest_table.name, dest_key),
**kwargs
)
```
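A sketch of how this helper might be driven, assuming the package exposes `Database` (as the sibling module below does for `Table`) and that both buckets exist; all names are placeholders.
```python
from s3aads import Database  # assumed to be exported alongside Table

source_table = Database("my-source-bucket").get_table("events")
dest_table = Database("my-archive-bucket").get_table("events")

# Copies "events/2021-01-01.json" from the source bucket to the destination bucket.
Copy(source_table, "2021-01-01.json").to(dest_table, "2021-01-01.json")
```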
#### File: s3-as-a-datastore/s3aads/database.py
```python
from s3aads import Table
from s3aads.resources import s3_resource, s3_client
class Database(object):
def __init__(self, name):
self.name = name
self.bucket = s3_resource.Bucket(self.name)
@property
def tables(self) -> list:
result = s3_client.list_objects(Bucket=self.name, Delimiter="/")
prefixes = result.get('CommonPrefixes')
if prefixes is None:
return []
return [prefix['Prefix'][:-1] for prefix in prefixes]
def create(self):
if self.name in Database.list_databases():
return
return s3_client.create_bucket(Bucket=self.name)
def get_table(self, table_name) -> Table:
if table_name in self.tables:
return Table(table_name, database=self)
def drop_table(self, table_name):
self.bucket.objects.filter(Prefix=f"{table_name}/").delete()
@classmethod
def list_databases(cls) -> list:
response = s3_client.list_buckets()
return [bucket['Name'] for bucket in response['Buckets']]
``` |
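A short sketch of the `Database` API just defined, assuming boto3 can find AWS credentials; the bucket and table names are placeholders.
```python
from s3aads import Database  # assumed package export, mirroring the Table import above

db = Database("my-example-bucket")
print(Database.list_databases())   # every bucket visible to the credentials
print(db.tables)                   # top-level "table" prefixes inside the bucket
events = db.get_table("events")    # returns a Table if the prefix exists
db.drop_table("stale-events")      # deletes every object under "stale-events/"
```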
{
"source": "JoeyJiao/afl-utils",
"score": 2
} |
#### File: afl-utils/tests/test_afl_collect.py
```python
from afl_utils import afl_collect
from afl_utils.SampleIndex import SampleIndex
import os
import shutil
import subprocess
import unittest
class AflCollectTestCase(unittest.TestCase):
def setUp(self):
# Use to set up test environment prior to test case
# invocation
os.makedirs('testdata/sync/fuzz000/crashes', exist_ok=True)
os.makedirs('testdata/sync/fuzz001/crashes', exist_ok=True)
os.makedirs('testdata/output', exist_ok=True)
self.init_queue_dir('testdata/sync/fuzz000/queue')
self.init_queue_dir('testdata/sync/fuzz001/queue')
self.clean_remove('testdata/read_only')
self.clean_remove('testdata/dbfile.db')
self.clean_remove('testdata/gdbscript')
subprocess.call(['make', '-C', 'testdata/crash_process'])
if not os.path.exists('testdata/read_only_file'):
shutil.copy('testdata/collection/dummy_sample0', 'testdata/read_only_file')
os.chmod('testdata/read_only_file', 0o0444)
def tearDown(self):
# Use for clean up after tests have run
self.clean_remove_dir('testdata/sync/fuzz000/crashes')
self.clean_remove_dir('testdata/sync/fuzz001/crashes')
self.clean_remove_dir('testdata/sync/fuzz000/queue')
self.clean_remove_dir('testdata/sync/fuzz001/queue')
self.clean_remove_dir('testdata/output')
self.clean_remove_dir('testdata/test_collection_dir')
self.clean_remove('testdata/read_only')
self.clean_remove('testdata/dbfile.db')
self.clean_remove('testdata/gdbscript')
self.clean_remove_dir('testdata/crash_process/bin')
os.chmod('testdata/read_only_file', 0o744)
self.clean_remove('testdata/read_only_file')
self.clean_remove('testdata/gdb_script')
self.clean_remove('testdata/gdb_script.0')
def init_crash_dir(self, fuzzer_dir):
self.init_queue_dir(fuzzer_dir)
def init_queue_dir(self, fuzzer_dir):
self.clean_remove_dir(fuzzer_dir)
shutil.copytree('testdata/queue', fuzzer_dir)
def clean_remove(self, file):
if os.path.exists(file):
os.remove(file)
def clean_remove_dir(self, dir):
if os.path.exists(dir):
shutil.rmtree(dir)
def test_show_info(self):
self.assertIsNone(afl_collect.show_info())
def test_get_fuzzer_instances(self):
fuzzer_inst = [
('fuzz000', ['crashes']),
('fuzz001', ['crashes'])
]
self.assertListEqual(fuzzer_inst, sorted(afl_collect.get_fuzzer_instances('testdata/sync')))
fuzzer_inst = [
(os.path.abspath('testdata/sync/fuzz000'), ['crashes'])
]
self.assertListEqual(fuzzer_inst, sorted(afl_collect.get_fuzzer_instances(('testdata/sync/fuzz000'))))
fuzzer_inst = [
('fuzz000', ['queue']),
('fuzz001', ['queue'])
]
self.assertListEqual(fuzzer_inst, sorted(afl_collect.get_fuzzer_instances('testdata/sync',
crash_dirs=False)))
fuzzer_inst = [
(os.path.abspath('testdata/sync/fuzz000'), ['queue'])
]
self.assertListEqual(fuzzer_inst, sorted(afl_collect.get_fuzzer_instances(('testdata/sync/fuzz000'),
crash_dirs=False)))
def test_get_crash_directories(self):
fuzzer_inst = [
('fuzz000', ['crashes']),
('fuzz001', ['crashes'])
]
sync_dir = os.path.abspath('testdata/sync')
self.assertListEqual(fuzzer_inst, sorted(afl_collect.get_crash_directories(sync_dir, fuzzer_inst)))
def test_get_queue_directories(self):
fuzzer_inst = [
('fuzz000', ['queue']),
('fuzz001', ['queue'])
]
sync_dir = os.path.abspath('testdata/sync')
self.assertListEqual(fuzzer_inst, sorted(afl_collect.get_queue_directories(sync_dir, fuzzer_inst)))
def test_get_samples_from_dir(self):
sample_dir = 'testdata/queue'
expected_result = (5, [
'sample0',
'sample1',
'sample2',
'sample3',
'sample4'
])
result = afl_collect.get_samples_from_dir(sample_dir)
self.assertEqual(expected_result[0], result[0])
self.assertListEqual(expected_result[1], sorted(result[1]))
expected_result = (5, [
os.path.join(sample_dir, 'sample0'),
os.path.join(sample_dir, 'sample1'),
os.path.join(sample_dir, 'sample2'),
os.path.join(sample_dir, 'sample3'),
os.path.join(sample_dir, 'sample4'),
])
result = afl_collect.get_samples_from_dir(sample_dir, abs_path=True)
self.assertEqual(expected_result[0], result[0])
self.assertListEqual(expected_result[1], sorted(result[1]))
def test_collect_samples(self):
sync_dir = 'testdata/sync'
fuzzer_inst = [
('fuzz000', ['queue']),
('fuzz001', ['queue'])
]
expected_result = (10, [
('fuzz000', [
('queue', [
'sample0',
'sample1',
'sample2',
'sample3',
'sample4'
]
)]),
('fuzz001', [
('queue', [
'sample0',
'sample1',
'sample2',
'sample3',
'sample4'
]
)])
])
result = afl_collect.collect_samples(sync_dir, fuzzer_inst)
self.assertEqual(expected_result[0], result[0])
self.assertListEqual(expected_result[1], sorted(result[1]))
def test_build_sample_index(self):
sync_dir = 'testdata/sync'
out_dir = 'testdata/out'
fuzzer_inst = [
('fuzz000', ['queue']),
('fuzz001', ['queue'])
]
expected_index = [
{'input': os.path.abspath('testdata/sync/fuzz000/queue/sample0'), 'fuzzer': 'fuzz000',
'output': 'fuzz000:sample0'},
{'input': os.path.abspath('testdata/sync/fuzz000/queue/sample1'), 'fuzzer': 'fuzz000',
'output': 'fuzz000:sample1'},
{'input': os.path.abspath('testdata/sync/fuzz000/queue/sample2'), 'fuzzer': 'fuzz000',
'output': 'fuzz000:sample2'},
{'input': os.path.abspath('testdata/sync/fuzz000/queue/sample3'), 'fuzzer': 'fuzz000',
'output': 'fuzz000:sample3'},
{'input': os.path.abspath('testdata/sync/fuzz000/queue/sample4'), 'fuzzer': 'fuzz000',
'output': 'fuzz000:sample4'},
{'input': os.path.abspath('testdata/sync/fuzz001/queue/sample0'), 'fuzzer': 'fuzz001',
'output': 'fuzz001:sample0'},
{'input': os.path.abspath('testdata/sync/fuzz001/queue/sample1'), 'fuzzer': 'fuzz001',
'output': 'fuzz001:sample1'},
{'input': os.path.abspath('testdata/sync/fuzz001/queue/sample2'), 'fuzzer': 'fuzz001',
'output': 'fuzz001:sample2'},
{'input': os.path.abspath('testdata/sync/fuzz001/queue/sample3'), 'fuzzer': 'fuzz001',
'output': 'fuzz001:sample3'},
{'input': os.path.abspath('testdata/sync/fuzz001/queue/sample4'), 'fuzzer': 'fuzz001',
'output': 'fuzz001:sample4'},
]
result = afl_collect.build_sample_index(sync_dir, out_dir, fuzzer_inst)
self.assertListEqual(expected_index, result.index)
def test_copy_samples(self):
out_dir = 'testdata/output'
index = [
{'input': os.path.abspath('testdata/sync/fuzz001/queue/sample2'), 'fuzzer': 'fuzz001',
'output': 'fuzz001:sample2'},
]
si = SampleIndex(out_dir, index)
files_expected = [
os.path.join(os.path.abspath(out_dir), index[0]['output'])
]
self.assertListEqual(files_expected, afl_collect.copy_samples(si))
ls_outdir = os.listdir(out_dir)
self.assertListEqual([index[0]['output']], sorted(ls_outdir))
def test_generate_sample_list(self):
list_name = 'testdata/read_only'
files_collected = [
'dummy0',
'dummy1',
'dummy2'
]
self.assertFalse(os.path.exists('testdata/read_only'))
self.assertIsNone(afl_collect.generate_sample_list(list_name, files_collected))
self.assertTrue(os.path.exists('testdata/read_only'))
self.assertIsNone(afl_collect.generate_sample_list('/invalid', files_collected))
def test_stdin_mode(self):
self.assertTrue(afl_collect.stdin_mode('bla blubb stdin'))
self.assertFalse(afl_collect.stdin_mode('bla blubb @@'))
def test_generate_gdb_exploitable_script(self):
script_filename = 'testdata/read_only_file'
index = [
{'input': os.path.abspath('testdata/sync/fuzz001/queue/sample2'), 'fuzzer': 'fuzz001',
'output': 'fuzz001:sample2'},
]
si = SampleIndex('testdata/output', index)
self.assertIsNone(afl_collect.generate_gdb_exploitable_script(script_filename, si, 'bin/echo'))
script_filename = 'testdata/gdb_script'
self.assertIsNone(afl_collect.generate_gdb_exploitable_script(script_filename, si, '/bin/echo',
intermediate=True))
self.assertTrue(os.path.exists('testdata/gdb_script.0'))
self.assertIsNone(afl_collect.generate_gdb_exploitable_script(script_filename, si, '/bin/echo'))
self.assertTrue(os.path.exists('testdata/gdb_script'))
afl_collect.gdb_exploitable_path = 'test'
self.assertIsNone(afl_collect.generate_gdb_exploitable_script(script_filename, si, '/bin/echo'))
self.assertTrue(os.path.exists('testdata/gdb_script'))
afl_collect.gdb_exploitable_path = None
self.assertIsNone(afl_collect.generate_gdb_exploitable_script(script_filename, si, '/bin/echo @@'))
self.assertTrue(os.path.exists('testdata/gdb_script'))
def test_execute_gdb_script(self):
pass
def test_main(self):
argv = ['afl-collect', '-h']
with self.assertRaises(SystemExit):
self.assertIsNone(afl_collect.main(argv))
argv = ['afl-collect', 'testdata/invalid_sync_dir', 'testdata/test_collection_dir', '--', 'testdata/invalid']
self.assertIsNone(afl_collect.main(argv))
argv = ['afl-collect', 'testdata/sync', 'testdata/test_collection_dir', '--', 'testdata/invalid']
self.assertIsNone(afl_collect.main(argv))
argv = ['afl-collect', 'testdata/sync', 'testdata/test_collection_dir', '--', '/bin/echo']
self.assertIsNone(afl_collect.main(argv))
self.assertFalse(os.path.exists('testdata/dbfile.db'))
argv = ['afl-collect', '-d', 'testdata/dbfile.db', 'testdata/sync', 'testdata/test_collection_dir', '--', '/bin/echo']
self.assertIsNone(afl_collect.main(argv))
self.assertTrue(os.path.exists('testdata/dbfile.db'))
self.init_crash_dir('testdata/sync/fuzz000/crashes')
self.init_crash_dir('testdata/sync/fuzz001/crashes')
argv = ['afl-collect', 'testdata/sync', 'testdata/test_collection_dir', '--', '/bin/echo']
self.assertIsNone(afl_collect.main(argv))
argv = ['afl-collect', '-r', 'testdata/sync', 'testdata/test_collection_dir', '--', '/bin/echo']
self.assertIsNone(afl_collect.main(argv))
argv = ['afl-collect', '-d', 'testdata/dbfile.db', '-e', 'gdbscript', '-r', '-rr', 'testdata/sync',
'testdata/test_collection_dir', '--', 'testdata/crash_process/bin/crash']
self.assertIsNone(afl_collect.main(argv))
argv = ['afl-collect', '-g', 'gdbscript', '-f', 'testdata/read_only', 'testdata/sync',
'testdata/test_collection_dir', '--', '/bin/echo']
self.assertIsNone(afl_collect.main(argv))
```
#### File: afl-utils/tests/test_afl_vcrash.py
```python
from afl_utils import afl_vcrash
import os
import unittest
class AflVCrashTestCase(unittest.TestCase):
def setUp(self):
# Use to set up test environment prior to test case
# invocation
pass
def tearDown(self):
# Use for clean up after tests have run
if os.path.exists('/tmp/afl_multicore.PGID.unittest_sess_01'):
os.remove('/tmp/afl_multicore.PGID.unittest_sess_01')
if os.path.exists('testdata/invalid'):
os.remove('testdata/invalid')
if os.path.exists('testdata/test_coll/invalid'):
os.remove('testdata/test_coll/invalid')
if os.path.exists('testdata/test_coll'):
os.rmdir('testdata/test_coll')
if os.path.exists('testdata/vcrash_filelist'):
os.remove('testdata/vcrash_filelist')
def test_show_info(self):
self.assertIsNone(afl_vcrash.show_info())
def test_verify_samples(self):
# test for invalid crash detection
num_threads = 1
samples = ['testdata/sync/fuzz000/fuzzer_stats'] # invalid (non-crashing) sample
target_cmd = 'ls'
timeout_secs = 3
self.assertEqual((['testdata/sync/fuzz000/fuzzer_stats'], []),
afl_vcrash.verify_samples(num_threads, samples, target_cmd, timeout_secs))
# test for timeout detection
num_threads = 1
samples = ['testdata/sync/fuzz000/fuzzer_stats'] # invalid (non-crashing) sample
target_cmd = 'python testdata/dummy_process/dummyproc.py'
timeout_secs = 1
self.assertEqual(([], ['testdata/sync/fuzz000/fuzzer_stats']),
afl_vcrash.verify_samples(num_threads, samples, target_cmd, timeout_secs))
def test_remove_samples(self):
# fail
samples = ['testdata/invalid']
with self.assertRaises(FileNotFoundError):
afl_vcrash.remove_samples(samples, False)
# success
open('testdata/invalid', 'a').close()
self.assertEqual(1, afl_vcrash.remove_samples(samples, False))
def test_build_target_cmd(self):
# fail
target_cmdline = ['/some/path/to/invalid/target/binary', '--some-opt', '--some-other-opt']
with self.assertRaises(SystemExit) as se:
afl_vcrash.build_target_cmd(target_cmdline)
self.assertEqual(2, se.exception.code)
target_cmdline = ['testdata/dummy_process/dummyproc.py', '-h', '-l']
self.assertIn('testdata/dummy_process/dummyproc.py -h -l', afl_vcrash.build_target_cmd(target_cmdline))
def test_main(self):
# invalid invocation
with self.assertRaises(SystemExit) as se:
afl_vcrash.main(['afl-vcrash', '--some-invalid-opt'])
self.assertEqual(2, se.exception.code)
# invalid collection dir
with self.assertRaises(SystemExit) as se:
afl_vcrash.main(['afl-vcrash', 'testdata/test_coll', '--', '/usr/bin/ls'])
self.assertEqual(1, se.exception.code)
# prepare sample collection dir
os.mkdir('testdata/test_coll')
open('testdata/test_coll/invalid', 'a').close()
self.assertIsNone(afl_vcrash.main(['afl-vcrash', '-f', 'testdata/vcrash_filelist', 'testdata/test_coll',
'--', '/bin/ls']))
self.assertIs(True, os.path.exists('testdata/vcrash_filelist'))
self.assertIs(True, os.path.exists('testdata/test_coll/invalid'))
self.assertIsNone(afl_vcrash.main(['afl-vcrash', '-r', '-f', 'testdata/vcrash_filelist', 'testdata/test_coll',
'--', '/bin/ls']))
self.assertIs(True, os.path.exists('testdata/vcrash_filelist'))
self.assertIs(False, os.path.exists('testdata/test_coll/invalid'))
``` |
{
"source": "JoeyJiao/peach",
"score": 3
} |
#### File: IronPython-2.7.3/Lib/pickle.py
```python
__version__ = "$Revision$" # Code version
from types import *
from copy_reg import dispatch_table
from copy_reg import _extension_registry, _inverted_registry, _extension_cache
import marshal
import sys
import struct
import re
__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
"Unpickler", "dump", "dumps", "load", "loads"]
# These are purely informational; no code uses these.
format_version = "2.0" # File format version we write
compatible_formats = ["1.0", # Original protocol 0
"1.1", # Protocol 0 with INST added
"1.2", # Original protocol 1
"1.3", # Protocol 1 with BINFLOAT added
"2.0", # Protocol 2
] # Old format versions we can read
# Keep in synch with cPickle. This is the highest protocol number we
# know how to read.
HIGHEST_PROTOCOL = 2
# Why use struct.pack() for pickling but marshal.loads() for
# unpickling? struct.pack() is 40% faster than marshal.dumps(), but
# marshal.loads() is twice as fast as struct.unpack()!
mloads = marshal.loads
class PickleError(Exception):
"""A common base class for the other pickling exceptions."""
pass
class PicklingError(PickleError):
"""This exception is raised when an unpicklable object is passed to the
dump() method.
"""
pass
class UnpicklingError(PickleError):
"""This exception is raised when there is a problem unpickling an object,
such as a security violation.
Note that other exceptions may also be raised during unpickling, including
(but not necessarily limited to) AttributeError, EOFError, ImportError,
and IndexError.
"""
pass
# An instance of _Stop is raised by Unpickler.load_stop() in response to
# the STOP opcode, passing the object that is the result of unpickling.
class _Stop(Exception):
def __init__(self, value):
self.value = value
# Jython has PyStringMap; it's a dict subclass with string keys
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
# UnicodeType may or may not be exported (normally imported from types)
try:
UnicodeType
except NameError:
UnicodeType = None
# Pickle opcodes. See pickletools.py for extensive docs. The listing
# here is in kind-of alphabetical order of 1-character pickle code.
# pickletools groups them by purpose.
MARK = '(' # push special markobject on stack
STOP = '.' # every pickle ends with STOP
POP = '0' # discard topmost stack item
POP_MARK = '1' # discard stack top through topmost markobject
DUP = '2' # duplicate top stack item
FLOAT = 'F' # push float object; decimal string argument
INT = 'I' # push integer or bool; decimal string argument
BININT = 'J' # push four-byte signed int
BININT1 = 'K' # push 1-byte unsigned int
LONG = 'L' # push long; decimal string argument
BININT2 = 'M' # push 2-byte unsigned int
NONE = 'N' # push None
PERSID = 'P' # push persistent object; id is taken from string arg
BINPERSID = 'Q' # " " " ; " " " " stack
REDUCE = 'R' # apply callable to argtuple, both on stack
STRING = 'S' # push string; NL-terminated string argument
BINSTRING = 'T' # push string; counted binary string argument
SHORT_BINSTRING = 'U' # " " ; " " " " < 256 bytes
UNICODE = 'V' # push Unicode string; raw-unicode-escaped'd argument
BINUNICODE = 'X' # " " " ; counted UTF-8 string argument
APPEND = 'a' # append stack top to list below it
BUILD = 'b' # call __setstate__ or __dict__.update()
GLOBAL = 'c' # push self.find_class(modname, name); 2 string args
DICT = 'd' # build a dict from stack items
EMPTY_DICT = '}' # push empty dict
APPENDS = 'e' # extend list on stack by topmost stack slice
GET = 'g' # push item from memo on stack; index is string arg
BINGET = 'h' # " " " " " " ; " " 1-byte arg
INST = 'i' # build & push class instance
LONG_BINGET = 'j' # push item from memo on stack; index is 4-byte arg
LIST = 'l' # build list from topmost stack items
EMPTY_LIST = ']' # push empty list
OBJ = 'o' # build & push class instance
PUT = 'p' # store stack top in memo; index is string arg
BINPUT = 'q' # " " " " " ; " " 1-byte arg
LONG_BINPUT = 'r' # " " " " " ; " " 4-byte arg
SETITEM = 's' # add key+value pair to dict
TUPLE = 't' # build tuple from topmost stack items
EMPTY_TUPLE = ')' # push empty tuple
SETITEMS = 'u' # modify dict by adding topmost key+value pairs
BINFLOAT = 'G' # push float; arg is 8-byte float encoding
TRUE = 'I01\n' # not an opcode; see INT docs in pickletools.py
FALSE = 'I00\n' # not an opcode; see INT docs in pickletools.py
# Protocol 2
PROTO = '\x80' # identify pickle protocol
NEWOBJ = '\x81' # build object by applying cls.__new__ to argtuple
EXT1 = '\x82' # push object from extension registry; 1-byte index
EXT2 = '\x83' # ditto, but 2-byte index
EXT4 = '\x84' # ditto, but 4-byte index
TUPLE1 = '\x85' # build 1-tuple from stack top
TUPLE2 = '\x86' # build 2-tuple from two topmost stack items
TUPLE3 = '\x87' # build 3-tuple from three topmost stack items
NEWTRUE = '\x88' # push True
NEWFALSE = '\x89' # push False
LONG1 = '\x8a' # push long from < 256 bytes
LONG4 = '\x8b' # push really big long
_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
__all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$",x)])
del x
# Pickling machinery
class Pickler:
def __init__(self, file, protocol=None):
"""This takes a file-like object for writing a pickle data stream.
The optional protocol argument tells the pickler to use the
given protocol; supported protocols are 0, 1, 2. The default
protocol is 0, to be backwards compatible. (Protocol 0 is the
only protocol that can be written to a file opened in text
mode and read back successfully. When using a protocol higher
than 0, make sure the file is opened in binary mode, both when
pickling and unpickling.)
Protocol 1 is more efficient than protocol 0; protocol 2 is
more efficient than protocol 1.
Specifying a negative protocol version selects the highest
protocol version supported. The higher the protocol used, the
more recent the version of Python needed to read the pickle
produced.
The file parameter must have a write() method that accepts a single
string argument. It can thus be an open file object, a StringIO
object, or any other custom object that meets this interface.
"""
if protocol is None:
protocol = 0
if protocol < 0:
protocol = HIGHEST_PROTOCOL
elif not 0 <= protocol <= HIGHEST_PROTOCOL:
raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL)
self.write = file.write
self.memo = {}
self.proto = int(protocol)
self.bin = protocol >= 1
self.fast = 0
def clear_memo(self):
"""Clears the pickler's "memo".
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects are
pickled by reference and not by value. This method is useful when
re-using picklers.
"""
self.memo.clear()
def dump(self, obj):
"""Write a pickled representation of obj to the open file."""
if self.proto >= 2:
self.write(PROTO + chr(self.proto))
self.save(obj)
self.write(STOP)
def memoize(self, obj):
"""Store an object in the memo."""
# The Pickler memo is a dictionary mapping object ids to 2-tuples
# that contain the Unpickler memo key and the object being memoized.
# The memo key is written to the pickle and will become
# the key in the Unpickler's memo. The object is stored in the
# Pickler memo so that transient objects are kept alive during
# pickling.
# The use of the Unpickler memo length as the memo key is just a
# convention. The only requirement is that the memo values be unique.
# But there appears no advantage to any other scheme, and this
# scheme allows the Unpickler memo to be implemented as a plain (but
# growable) array, indexed by memo key.
if self.fast:
return
assert id(obj) not in self.memo
memo_len = len(self.memo)
self.write(self.put(memo_len))
self.memo[id(obj)] = memo_len, obj
# Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
def put(self, i, pack=struct.pack):
if self.bin:
if i < 256:
return BINPUT + chr(i)
else:
return LONG_BINPUT + pack("<i", i)
return PUT + repr(i) + '\n'
# Return a GET (BINGET, LONG_BINGET) opcode string, with argument i.
def get(self, i, pack=struct.pack):
if self.bin:
if i < 256:
return BINGET + chr(i)
else:
return LONG_BINGET + pack("<i", i)
return GET + repr(i) + '\n'
def save(self, obj):
# Check for persistent id (defined by a subclass)
pid = self.persistent_id(obj)
if pid:
self.save_pers(pid)
return
# Check the memo
x = self.memo.get(id(obj))
if x:
self.write(self.get(x[0]))
return
# Check the type dispatch table
t = type(obj)
f = self.dispatch.get(t)
if f:
f(self, obj) # Call unbound method with explicit self
return
# Check for a class with a custom metaclass; treat as regular class
try:
issc = issubclass(t, TypeType)
except TypeError: # t is not a class (old Boost; see SF #502085)
issc = 0
if issc:
self.save_global(obj)
return
# Check copy_reg.dispatch_table
reduce = dispatch_table.get(t)
if reduce:
rv = reduce(obj)
else:
# Check for a __reduce_ex__ method, fall back to __reduce__
reduce = getattr(obj, "__reduce_ex__", None)
if reduce:
rv = reduce(self.proto)
else:
reduce = getattr(obj, "__reduce__", None)
if reduce:
rv = reduce()
else:
raise PicklingError("Can't pickle %r object: %r" %
(t.__name__, obj))
# Check for string returned by reduce(), meaning "save as global"
if type(rv) is StringType:
self.save_global(obj, rv)
return
# Assert that reduce() returned a tuple
if type(rv) is not TupleType:
raise PicklingError("%s must return string or tuple" % reduce)
# Assert that it returned an appropriately sized tuple
l = len(rv)
if not (2 <= l <= 5):
raise PicklingError("Tuple returned by %s must have "
"two to five elements" % reduce)
# Save the reduce() output and finally memoize the object
self.save_reduce(obj=obj, *rv)
def persistent_id(self, obj):
# This exists so a subclass can override it
return None
def save_pers(self, pid):
# Save a persistent id reference
if self.bin:
self.save(pid)
self.write(BINPERSID)
else:
self.write(PERSID + str(pid) + '\n')
def save_reduce(self, func, args, state=None,
listitems=None, dictitems=None, obj=None):
# This API is called by some subclasses
# Assert that args is a tuple or None
if not isinstance(args, TupleType):
raise PicklingError("args from reduce() should be a tuple")
# Assert that func is callable
if not hasattr(func, '__call__'):
raise PicklingError("func from reduce should be callable")
save = self.save
write = self.write
# Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
# A __reduce__ implementation can direct protocol 2 to
# use the more efficient NEWOBJ opcode, while still
# allowing protocol 0 and 1 to work normally. For this to
# work, the function returned by __reduce__ should be
# called __newobj__, and its first argument should be a
# new-style class. The implementation for __newobj__
# should be as follows, although pickle has no way to
# verify this:
#
# def __newobj__(cls, *args):
# return cls.__new__(cls, *args)
#
# Protocols 0 and 1 will pickle a reference to __newobj__,
# while protocol 2 (and above) will pickle a reference to
# cls, the remaining args tuple, and the NEWOBJ code,
# which calls cls.__new__(cls, *args) at unpickling time
# (see load_newobj below). If __reduce__ returns a
# three-tuple, the state from the third tuple item will be
# pickled regardless of the protocol, calling __setstate__
# at unpickling time (see load_build below).
#
# Note that no standard __newobj__ implementation exists;
# you have to provide your own. This is to enforce
# compatibility with Python 2.2 (pickles written using
# protocol 0 or 1 in Python 2.3 should be unpicklable by
# Python 2.2).
cls = args[0]
if not hasattr(cls, "__new__"):
raise PicklingError(
"args[0] from __newobj__ args has no __new__")
if obj is not None and cls is not obj.__class__:
raise PicklingError(
"args[0] from __newobj__ args has the wrong class")
args = args[1:]
save(cls)
save(args)
write(NEWOBJ)
else:
save(func)
save(args)
write(REDUCE)
if obj is not None:
self.memoize(obj)
# More new special cases (that work with older protocols as
# well): when __reduce__ returns a tuple with 4 or 5 items,
# the 4th and 5th item should be iterators that provide list
# items and dict items (as (key, value) tuples), or None.
if listitems is not None:
self._batch_appends(listitems)
if dictitems is not None:
self._batch_setitems(dictitems)
if state is not None:
save(state)
write(BUILD)
# Methods below this point are dispatched through the dispatch table
dispatch = {}
def save_none(self, obj):
self.write(NONE)
dispatch[NoneType] = save_none
def save_bool(self, obj):
if self.proto >= 2:
self.write(obj and NEWTRUE or NEWFALSE)
else:
self.write(obj and TRUE or FALSE)
dispatch[bool] = save_bool
def save_int(self, obj, pack=struct.pack):
if self.bin:
# If the int is small enough to fit in a signed 4-byte 2's-comp
# format, we can store it more efficiently than the general
# case.
# First one- and two-byte unsigned ints:
if obj >= 0:
if obj <= 0xff:
self.write(BININT1 + chr(obj))
return
if obj <= 0xffff:
self.write("%c%c%c" % (BININT2, obj&0xff, obj>>8))
return
# Next check for 4-byte signed ints:
high_bits = obj >> 31 # note that Python shift sign-extends
if high_bits == 0 or high_bits == -1:
# All high bits are copies of bit 2**31, so the value
# fits in a 4-byte signed int.
self.write(BININT + pack("<i", obj))
return
# Text pickle, or int too big to fit in signed 4-byte format.
self.write(INT + repr(obj) + '\n')
dispatch[IntType] = save_int
def save_long(self, obj, pack=struct.pack):
if self.proto >= 2:
bytes = encode_long(obj)
n = len(bytes)
if n < 256:
self.write(LONG1 + chr(n) + bytes)
else:
self.write(LONG4 + pack("<i", n) + bytes)
return
self.write(LONG + repr(obj) + '\n')
dispatch[LongType] = save_long
def save_float(self, obj, pack=struct.pack):
if self.bin:
self.write(BINFLOAT + pack('>d', obj))
else:
self.write(FLOAT + repr(obj) + '\n')
dispatch[FloatType] = save_float
def save_string(self, obj, pack=struct.pack):
if self.bin:
n = len(obj)
if n < 256:
self.write(SHORT_BINSTRING + chr(n) + obj)
else:
self.write(BINSTRING + pack("<i", n) + obj)
else:
self.write(STRING + repr(obj) + '\n')
self.memoize(obj)
dispatch[StringType] = save_string
def save_unicode(self, obj, pack=struct.pack):
if self.bin:
encoding = obj.encode('utf-8')
n = len(encoding)
self.write(BINUNICODE + pack("<i", n) + encoding)
else:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\n", "\\u000a")
self.write(UNICODE + obj.encode('raw-unicode-escape') + '\n')
self.memoize(obj)
dispatch[UnicodeType] = save_unicode
if StringType is UnicodeType:
# This is true for Jython
def save_string(self, obj, pack=struct.pack):
unicode = obj.isunicode()
if self.bin:
if unicode:
obj = obj.encode("utf-8")
l = len(obj)
if l < 256 and not unicode:
self.write(SHORT_BINSTRING + chr(l) + obj)
else:
s = pack("<i", l)
if unicode:
self.write(BINUNICODE + s + obj)
else:
self.write(BINSTRING + s + obj)
else:
if unicode:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\n", "\\u000a")
obj = obj.encode('raw-unicode-escape')
self.write(UNICODE + obj + '\n')
else:
self.write(STRING + repr(obj) + '\n')
self.memoize(obj)
dispatch[StringType] = save_string
def save_tuple(self, obj):
write = self.write
proto = self.proto
n = len(obj)
if n == 0:
if proto:
write(EMPTY_TUPLE)
else:
write(MARK + TUPLE)
return
save = self.save
memo = self.memo
if n <= 3 and proto >= 2:
for element in obj:
save(element)
# Subtle. Same as in the big comment below.
if id(obj) in memo:
get = self.get(memo[id(obj)][0])
write(POP * n + get)
else:
write(_tuplesize2code[n])
self.memoize(obj)
return
# proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
# has more than 3 elements.
write(MARK)
for element in obj:
save(element)
if id(obj) in memo:
# Subtle. The tuple was not in memo when we entered save_tuple(), so
# the process of saving the tuple's elements must have saved
# the tuple itself: the tuple is recursive. The proper action
# now is to throw away everything we put on the stack, and
# simply GET the tuple (it's already constructed). This check
# could have been done in the "for element" loop instead, but
# recursive tuples are a rare thing.
get = self.get(memo[id(obj)][0])
if proto:
write(POP_MARK + get)
else: # proto 0 -- POP_MARK not available
write(POP * (n+1) + get)
return
# No recursion.
self.write(TUPLE)
self.memoize(obj)
dispatch[TupleType] = save_tuple
# save_empty_tuple() isn't used by anything in Python 2.3. However, I
# found a Pickler subclass in Zope3 that calls it, so it's not harmless
# to remove it.
def save_empty_tuple(self, obj):
self.write(EMPTY_TUPLE)
def save_list(self, obj):
write = self.write
if self.bin:
write(EMPTY_LIST)
else: # proto 0 -- can't use EMPTY_LIST
write(MARK + LIST)
self.memoize(obj)
self._batch_appends(iter(obj))
dispatch[ListType] = save_list
# Keep in synch with cPickle's BATCHSIZE. Nothing will break if it gets
# out of synch, though.
_BATCHSIZE = 1000
def _batch_appends(self, items):
# Helper to batch up APPENDS sequences
save = self.save
write = self.write
if not self.bin:
for x in items:
save(x)
write(APPEND)
return
r = xrange(self._BATCHSIZE)
while items is not None:
tmp = []
for i in r:
try:
x = items.next()
tmp.append(x)
except StopIteration:
items = None
break
n = len(tmp)
if n > 1:
write(MARK)
for x in tmp:
save(x)
write(APPENDS)
elif n:
save(tmp[0])
write(APPEND)
# else tmp is empty, and we're done
def save_dict(self, obj):
write = self.write
if self.bin:
write(EMPTY_DICT)
else: # proto 0 -- can't use EMPTY_DICT
write(MARK + DICT)
self.memoize(obj)
self._batch_setitems(obj.iteritems())
dispatch[DictionaryType] = save_dict
if not PyStringMap is None:
dispatch[PyStringMap] = save_dict
def _batch_setitems(self, items):
# Helper to batch up SETITEMS sequences; proto >= 1 only
save = self.save
write = self.write
if not self.bin:
for k, v in items:
save(k)
save(v)
write(SETITEM)
return
r = xrange(self._BATCHSIZE)
while items is not None:
tmp = []
for i in r:
try:
tmp.append(items.next())
except StopIteration:
items = None
break
n = len(tmp)
if n > 1:
write(MARK)
for k, v in tmp:
save(k)
save(v)
write(SETITEMS)
elif n:
k, v = tmp[0]
save(k)
save(v)
write(SETITEM)
# else tmp is empty, and we're done
def save_inst(self, obj):
cls = obj.__class__
memo = self.memo
write = self.write
save = self.save
if hasattr(obj, '__getinitargs__'):
args = obj.__getinitargs__()
len(args) # XXX Assert it's a sequence
_keep_alive(args, memo)
else:
args = ()
write(MARK)
if self.bin:
save(cls)
for arg in args:
save(arg)
write(OBJ)
else:
for arg in args:
save(arg)
write(INST + cls.__module__ + '\n' + cls.__name__ + '\n')
self.memoize(obj)
try:
getstate = obj.__getstate__
except AttributeError:
stuff = obj.__dict__
else:
stuff = getstate()
_keep_alive(stuff, memo)
save(stuff)
write(BUILD)
dispatch[InstanceType] = save_inst
def save_global(self, obj, name=None, pack=struct.pack):
write = self.write
memo = self.memo
if name is None:
name = obj.__name__
module = getattr(obj, "__module__", None)
if module is None:
module = whichmodule(obj, name)
try:
__import__(module)
mod = sys.modules[module]
klass = getattr(mod, name)
except (ImportError, KeyError, AttributeError):
raise PicklingError(
"Can't pickle %r: it's not found as %s.%s" %
(obj, module, name))
else:
if klass is not obj:
raise PicklingError(
"Can't pickle %r: it's not the same object as %s.%s" %
(obj, module, name))
if self.proto >= 2:
code = _extension_registry.get((module, name))
if code:
assert code > 0
if code <= 0xff:
write(EXT1 + chr(code))
elif code <= 0xffff:
write("%c%c%c" % (EXT2, code&0xff, code>>8))
else:
write(EXT4 + pack("<i", code))
return
write(GLOBAL + module + '\n' + name + '\n')
self.memoize(obj)
dispatch[ClassType] = save_global
dispatch[FunctionType] = save_global
dispatch[BuiltinFunctionType] = save_global
dispatch[TypeType] = save_global
# Pickling helpers
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
# A cache for whichmodule(), mapping a function object to the name of
# the module in which the function was found.
classmap = {} # called classmap for backwards compatibility
def whichmodule(func, funcname):
"""Figure out the module in which a function occurs.
Search sys.modules for the module.
Cache in classmap.
Return a module name.
If the function cannot be found, return "__main__".
"""
# Python functions should always get an __module__ from their globals.
mod = getattr(func, "__module__", None)
if mod is not None:
return mod
if func in classmap:
return classmap[func]
for name, module in sys.modules.items():
if module is None:
continue # skip dummy package entries
if name != '__main__' and getattr(module, funcname, None) is func:
break
else:
name = '__main__'
classmap[func] = name
return name
# Unpickling machinery
class Unpickler:
def __init__(self, file):
"""This takes a file-like object for reading a pickle data stream.
The protocol version of the pickle is detected automatically, so no
proto argument is needed.
The file-like object must have two methods, a read() method that
takes an integer argument, and a readline() method that requires no
arguments. Both methods should return a string. Thus file-like
object can be a file object opened for reading, a StringIO object,
or any other custom object that meets this interface.
"""
self.readline = file.readline
self.read = file.read
self.memo = {}
def load(self):
"""Read a pickled object representation from the open file.
Return the reconstituted object hierarchy specified in the file.
"""
self.mark = object() # any new unique object
self.stack = []
self.append = self.stack.append
read = self.read
dispatch = self.dispatch
try:
while 1:
key = read(1)
dispatch[key](self)
except _Stop, stopinst:
return stopinst.value
# Return largest index k such that self.stack[k] is self.mark.
# If the stack doesn't contain a mark, eventually raises IndexError.
# This could be sped by maintaining another stack, of indices at which
# the mark appears. For that matter, the latter stack would suffice,
# and we wouldn't need to push mark objects on self.stack at all.
# Doing so is probably a good thing, though, since if the pickle is
# corrupt (or hostile) we may get a clue from finding self.mark embedded
# in unpickled objects.
def marker(self):
stack = self.stack
mark = self.mark
k = len(stack)-1
while stack[k] is not mark: k = k-1
return k
dispatch = {}
def load_eof(self):
raise EOFError
dispatch[''] = load_eof
def load_proto(self):
proto = ord(self.read(1))
if not 0 <= proto <= 2:
raise ValueError, "unsupported pickle protocol: %d" % proto
dispatch[PROTO] = load_proto
def load_persid(self):
pid = self.readline()[:-1]
self.append(self.persistent_load(pid))
dispatch[PERSID] = load_persid
def load_binpersid(self):
pid = self.stack.pop()
self.append(self.persistent_load(pid))
dispatch[BINPERSID] = load_binpersid
def load_none(self):
self.append(None)
dispatch[NONE] = load_none
def load_false(self):
self.append(False)
dispatch[NEWFALSE] = load_false
def load_true(self):
self.append(True)
dispatch[NEWTRUE] = load_true
def load_int(self):
data = self.readline()
if data == FALSE[1:]:
val = False
elif data == TRUE[1:]:
val = True
else:
try:
val = int(data)
except ValueError:
val = long(data)
self.append(val)
dispatch[INT] = load_int
def load_binint(self):
self.append(mloads('i' + self.read(4)))
dispatch[BININT] = load_binint
def load_binint1(self):
self.append(ord(self.read(1)))
dispatch[BININT1] = load_binint1
def load_binint2(self):
self.append(mloads('i' + self.read(2) + '\000\000'))
dispatch[BININT2] = load_binint2
def load_long(self):
self.append(long(self.readline()[:-1], 0))
dispatch[LONG] = load_long
def load_long1(self):
n = ord(self.read(1))
bytes = self.read(n)
self.append(decode_long(bytes))
dispatch[LONG1] = load_long1
def load_long4(self):
n = mloads('i' + self.read(4))
bytes = self.read(n)
self.append(decode_long(bytes))
dispatch[LONG4] = load_long4
def load_float(self):
self.append(float(self.readline()[:-1]))
dispatch[FLOAT] = load_float
def load_binfloat(self, unpack=struct.unpack):
self.append(unpack('>d', self.read(8))[0])
dispatch[BINFLOAT] = load_binfloat
def load_string(self):
rep = self.readline()[:-1]
for q in "\"'": # double or single quote
if rep.startswith(q):
if not rep.endswith(q):
raise ValueError, "insecure string pickle"
rep = rep[len(q):-len(q)]
break
else:
raise ValueError, "insecure string pickle"
self.append(rep.decode("string-escape"))
dispatch[STRING] = load_string
def load_binstring(self):
len = mloads('i' + self.read(4))
self.append(self.read(len))
dispatch[BINSTRING] = load_binstring
def load_unicode(self):
self.append(unicode(self.readline()[:-1],'raw-unicode-escape'))
dispatch[UNICODE] = load_unicode
def load_binunicode(self):
len = mloads('i' + self.read(4))
self.append(unicode(self.read(len),'utf-8'))
dispatch[BINUNICODE] = load_binunicode
def load_short_binstring(self):
len = ord(self.read(1))
self.append(self.read(len))
dispatch[SHORT_BINSTRING] = load_short_binstring
def load_tuple(self):
k = self.marker()
self.stack[k:] = [tuple(self.stack[k+1:])]
dispatch[TUPLE] = load_tuple
def load_empty_tuple(self):
self.stack.append(())
dispatch[EMPTY_TUPLE] = load_empty_tuple
def load_tuple1(self):
self.stack[-1] = (self.stack[-1],)
dispatch[TUPLE1] = load_tuple1
def load_tuple2(self):
self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
dispatch[TUPLE2] = load_tuple2
def load_tuple3(self):
self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
dispatch[TUPLE3] = load_tuple3
def load_empty_list(self):
self.stack.append([])
dispatch[EMPTY_LIST] = load_empty_list
def load_empty_dictionary(self):
self.stack.append({})
dispatch[EMPTY_DICT] = load_empty_dictionary
def load_list(self):
k = self.marker()
self.stack[k:] = [self.stack[k+1:]]
dispatch[LIST] = load_list
def load_dict(self):
k = self.marker()
d = {}
items = self.stack[k+1:]
for i in range(0, len(items), 2):
key = items[i]
value = items[i+1]
d[key] = value
self.stack[k:] = [d]
dispatch[DICT] = load_dict
# INST and OBJ differ only in how they get a class object. It's not
# only sensible to do the rest in a common routine, the two routines
# previously diverged and grew different bugs.
# klass is the class to instantiate, and k points to the topmost mark
# object, following which are the arguments for klass.__init__.
def _instantiate(self, klass, k):
args = tuple(self.stack[k+1:])
del self.stack[k:]
instantiated = 0
if (not args and
type(klass) is ClassType and
not hasattr(klass, "__getinitargs__")):
try:
value = _EmptyClass()
value.__class__ = klass
instantiated = 1
except RuntimeError:
# In restricted execution, assignment to inst.__class__ is
# prohibited
pass
if not instantiated:
try:
value = klass(*args)
except TypeError, err:
raise TypeError, "in constructor for %s: %s" % (
klass.__name__, str(err)), sys.exc_info()[2]
self.append(value)
def load_inst(self):
module = self.readline()[:-1]
name = self.readline()[:-1]
klass = self.find_class(module, name)
self._instantiate(klass, self.marker())
dispatch[INST] = load_inst
def load_obj(self):
# Stack is ... markobject classobject arg1 arg2 ...
k = self.marker()
klass = self.stack.pop(k+1)
self._instantiate(klass, k)
dispatch[OBJ] = load_obj
def load_newobj(self):
args = self.stack.pop()
cls = self.stack[-1]
obj = cls.__new__(cls, *args)
self.stack[-1] = obj
dispatch[NEWOBJ] = load_newobj
def load_global(self):
module = self.readline()[:-1]
name = self.readline()[:-1]
klass = self.find_class(module, name)
self.append(klass)
dispatch[GLOBAL] = load_global
def load_ext1(self):
code = ord(self.read(1))
self.get_extension(code)
dispatch[EXT1] = load_ext1
def load_ext2(self):
code = mloads('i' + self.read(2) + '\000\000')
self.get_extension(code)
dispatch[EXT2] = load_ext2
def load_ext4(self):
code = mloads('i' + self.read(4))
self.get_extension(code)
dispatch[EXT4] = load_ext4
def get_extension(self, code):
nil = []
obj = _extension_cache.get(code, nil)
if obj is not nil:
self.append(obj)
return
key = _inverted_registry.get(code)
if not key:
raise ValueError("unregistered extension code %d" % code)
obj = self.find_class(*key)
_extension_cache[code] = obj
self.append(obj)
def find_class(self, module, name):
# Subclasses may override this
__import__(module)
mod = sys.modules[module]
klass = getattr(mod, name)
return klass
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
value = func(*args)
stack[-1] = value
dispatch[REDUCE] = load_reduce
def load_pop(self):
del self.stack[-1]
dispatch[POP] = load_pop
def load_pop_mark(self):
k = self.marker()
del self.stack[k:]
dispatch[POP_MARK] = load_pop_mark
def load_dup(self):
self.append(self.stack[-1])
dispatch[DUP] = load_dup
def load_get(self):
self.append(self.memo[self.readline()[:-1]])
dispatch[GET] = load_get
def load_binget(self):
i = ord(self.read(1))
self.append(self.memo[repr(i)])
dispatch[BINGET] = load_binget
def load_long_binget(self):
i = mloads('i' + self.read(4))
self.append(self.memo[repr(i)])
dispatch[LONG_BINGET] = load_long_binget
def load_put(self):
self.memo[self.readline()[:-1]] = self.stack[-1]
dispatch[PUT] = load_put
def load_binput(self):
i = ord(self.read(1))
self.memo[repr(i)] = self.stack[-1]
dispatch[BINPUT] = load_binput
def load_long_binput(self):
i = mloads('i' + self.read(4))
self.memo[repr(i)] = self.stack[-1]
dispatch[LONG_BINPUT] = load_long_binput
def load_append(self):
stack = self.stack
value = stack.pop()
list = stack[-1]
list.append(value)
dispatch[APPEND] = load_append
def load_appends(self):
stack = self.stack
mark = self.marker()
list = stack[mark - 1]
list.extend(stack[mark + 1:])
del stack[mark:]
dispatch[APPENDS] = load_appends
def load_setitem(self):
stack = self.stack
value = stack.pop()
key = stack.pop()
dict = stack[-1]
dict[key] = value
dispatch[SETITEM] = load_setitem
def load_setitems(self):
stack = self.stack
mark = self.marker()
dict = stack[mark - 1]
for i in range(mark + 1, len(stack), 2):
dict[stack[i]] = stack[i + 1]
del stack[mark:]
dispatch[SETITEMS] = load_setitems
def load_build(self):
stack = self.stack
state = stack.pop()
inst = stack[-1]
setstate = getattr(inst, "__setstate__", None)
if setstate:
setstate(state)
return
slotstate = None
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if state:
try:
d = inst.__dict__
try:
for k, v in state.iteritems():
d[intern(k)] = v
# keys in state don't have to be strings
# don't blow up, but don't go out of our way
except TypeError:
d.update(state)
except RuntimeError:
# XXX In restricted execution, the instance's __dict__
# is not accessible. Use the old way of unpickling
# the instance variables. This is a semantic
# difference when unpickling in restricted
# vs. unrestricted modes.
# Note, however, that cPickle has never tried to do the
# .update() business, and always uses
# PyObject_SetItem(inst.__dict__, key, value) in a
# loop over state.items().
for k, v in state.items():
setattr(inst, k, v)
if slotstate:
for k, v in slotstate.items():
setattr(inst, k, v)
dispatch[BUILD] = load_build
def load_mark(self):
self.append(self.mark)
dispatch[MARK] = load_mark
def load_stop(self):
value = self.stack.pop()
raise _Stop(value)
dispatch[STOP] = load_stop
# Helper class for load_inst/load_obj
class _EmptyClass:
pass
# Encode/decode longs in linear time.
import binascii as _binascii
def encode_long(x):
r"""Encode a long to a two's complement little-endian binary string.
Note that 0L is a special case, returning an empty string, to save a
byte in the LONG1 pickling context.
# bug 24549
#>>> encode_long(0L)
#''
#>>> encode_long(255L)
#'\xff\x00'
#>>> encode_long(32767L)
#'\xff\x7f'
#>>> encode_long(-256L)
#'\x00\xff'
#>>> encode_long(-32768L)
#'\x00\x80'
#>>> encode_long(-128L)
#'\x80'
#>>> encode_long(127L)
#'\x7f'
#>>>
"""
if x == 0:
return ''
if x > 0:
ashex = hex(x)
assert ashex.startswith("0x")
njunkchars = 2 + ashex.endswith('L')
nibbles = len(ashex) - njunkchars
if nibbles & 1:
# need an even # of nibbles for unhexlify
ashex = "0x0" + ashex[2:]
elif int(ashex[2], 16) >= 8:
# "looks negative", so need a byte of sign bits
ashex = "0x00" + ashex[2:]
else:
# Build the 256's-complement: (1L << nbytes) + x. The trick is
# to find the number of bytes in linear time (although that should
# really be a constant-time task).
ashex = hex(-x)
assert ashex.startswith("0x")
njunkchars = 2 + ashex.endswith('L')
nibbles = len(ashex) - njunkchars
if nibbles & 1:
# Extend to a full byte.
nibbles += 1
nbits = nibbles * 4
x += 1L << nbits
assert x > 0
ashex = hex(x)
njunkchars = 2 + ashex.endswith('L')
newnibbles = len(ashex) - njunkchars
if newnibbles < nibbles:
ashex = "0x" + "0" * (nibbles - newnibbles) + ashex[2:]
if int(ashex[2], 16) < 8:
# "looks positive", so need a byte of sign bits
ashex = "0xff" + ashex[2:]
if ashex.endswith('L'):
ashex = ashex[2:-1]
else:
ashex = ashex[2:]
assert len(ashex) & 1 == 0, (x, ashex)
binary = _binascii.unhexlify(ashex)
return binary[::-1]
def decode_long(data):
r"""Decode a long from a two's complement little-endian binary string.
>>> decode_long('')
0L
>>> decode_long("\xff\x00")
255L
>>> decode_long("\xff\x7f")
32767L
>>> decode_long("\x00\xff")
-256L
>>> decode_long("\x00\x80")
-32768L
>>> decode_long("\x80")
-128L
>>> decode_long("\x7f")
127L
"""
nbytes = len(data)
if nbytes == 0:
return 0L
ashex = _binascii.hexlify(data[::-1])
n = long(ashex, 16) # quadratic time before Python 2.3; linear now
if data[-1] >= '\x80':
n -= 1L << (nbytes * 8)
return n
# Shorthands
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def dump(obj, file, protocol=None):
Pickler(file, protocol).dump(obj)
def dumps(obj, protocol=None):
file = StringIO()
Pickler(file, protocol).dump(obj)
return file.getvalue()
def load(file):
return Unpickler(file).load()
def loads(str):
file = StringIO(str)
return Unpickler(file).load()
# Doctest
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
_test()
```
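A minimal usage sketch for the module above (my own illustration, not part of the IronPython sources; Python 2 syntax, assuming the file is importable as `pickle`):

```python
import pickle
from StringIO import StringIO

data = {"name": "spam", "values": [1, 2, 3.0], "flag": True}

text_pickle = pickle.dumps(data)                             # protocol 0: printable opcodes
binary_pickle = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)  # protocol 2: PROTO/NEWOBJ/BIN* opcodes

assert pickle.loads(text_pickle) == data
assert pickle.loads(binary_pickle) == data

# The streaming interface accepts any object exposing write()/read()/readline(),
# e.g. a file opened in binary mode or a StringIO buffer.
buf = StringIO()
pickle.Pickler(buf, 2).dump(data)
buf.seek(0)
assert pickle.Unpickler(buf).load() == data
```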
#### File: Tools/Scripts/pyc.py
```python
import sys
import clr
clr.AddReferenceByPartialName("IronPython")
from System.Collections.Generic import List
import IronPython.Hosting as Hosting
from IronPython.Runtime.Operations import PythonOps
import System
from System.Reflection import Emit, Assembly
from System.Reflection.Emit import OpCodes, AssemblyBuilderAccess
from System.Reflection import AssemblyName, TypeAttributes, MethodAttributes, ResourceAttributes, CallingConventions
def GenerateExe(config):
"""generates the stub .EXE file for starting the app"""
aName = AssemblyName(System.IO.FileInfo(config.output).Name)
ab = PythonOps.DefineDynamicAssembly(aName, AssemblyBuilderAccess.RunAndSave)
mb = ab.DefineDynamicModule(config.output, aName.Name + ".exe")
tb = mb.DefineType("PythonMain", TypeAttributes.Public)
assemblyResolveMethod = None
if config.standalone:
print "Generating stand alone executable"
config.embed = True
for a in System.AppDomain.CurrentDomain.GetAssemblies():
n = AssemblyName(a.FullName)
if not a.IsDynamic and not a.EntryPoint and (n.Name.StartsWith("IronPython") or n.Name in ['Microsoft.Dynamic', 'Microsoft.Scripting']):
print "\tEmbedding %s %s" % (n.Name, str(n.Version))
f = System.IO.FileStream(a.Location, System.IO.FileMode.Open, System.IO.FileAccess.Read)
mb.DefineManifestResource("Dll." + n.Name, f, ResourceAttributes.Public)
# we currently do no error checking on what is passed in to the assemblyresolve event handler
assemblyResolveMethod = tb.DefineMethod("AssemblyResolve", MethodAttributes.Public | MethodAttributes.Static, clr.GetClrType(Assembly), (clr.GetClrType(System.Object), clr.GetClrType(System.ResolveEventArgs)))
gen = assemblyResolveMethod.GetILGenerator()
s = gen.DeclareLocal(clr.GetClrType(System.IO.Stream)) # resource stream
gen.Emit(OpCodes.Ldnull)
gen.Emit(OpCodes.Stloc, s)
d = gen.DeclareLocal(clr.GetClrType(System.Array[System.Byte])) # data buffer
gen.EmitCall(OpCodes.Call, clr.GetClrType(Assembly).GetMethod("GetEntryAssembly"), ())
gen.Emit(OpCodes.Ldstr, "Dll.")
gen.Emit(OpCodes.Ldarg_1) # The event args
gen.EmitCall(OpCodes.Callvirt, clr.GetClrType(System.ResolveEventArgs).GetMethod("get_Name"), ())
gen.Emit(OpCodes.Newobj, clr.GetClrType(AssemblyName).GetConstructor((str, )))
gen.EmitCall(OpCodes.Call, clr.GetClrType(AssemblyName).GetMethod("get_Name"), ())
gen.EmitCall(OpCodes.Call, clr.GetClrType(str).GetMethod("Concat", (str, str)), ())
gen.EmitCall(OpCodes.Callvirt, clr.GetClrType(Assembly).GetMethod("GetManifestResourceStream", (str, )), ())
gen.Emit(OpCodes.Stloc, s)
gen.Emit(OpCodes.Ldloc, s)
gen.EmitCall(OpCodes.Callvirt, clr.GetClrType(System.IO.Stream).GetMethod("get_Length"), ())
gen.Emit(OpCodes.Newarr, clr.GetClrType(System.Byte))
gen.Emit(OpCodes.Stloc, d)
gen.Emit(OpCodes.Ldloc, s)
gen.Emit(OpCodes.Ldloc, d)
gen.Emit(OpCodes.Ldc_I4_0)
gen.Emit(OpCodes.Ldloc, s)
gen.EmitCall(OpCodes.Callvirt, clr.GetClrType(System.IO.Stream).GetMethod("get_Length"), ())
gen.Emit(OpCodes.Conv_I4)
gen.EmitCall(OpCodes.Callvirt, clr.GetClrType(System.IO.Stream).GetMethod("Read", (clr.GetClrType(System.Array[System.Byte]), int, int)), ())
gen.Emit(OpCodes.Pop)
gen.Emit(OpCodes.Ldloc, d)
gen.EmitCall(OpCodes.Call, clr.GetClrType(Assembly).GetMethod("Load", (clr.GetClrType(System.Array[System.Byte]), )), ())
gen.Emit(OpCodes.Ret)
# generate a static constructor to assign the AssemblyResolve handler (otherwise it tries to use IronPython before it adds the handler)
# the other way of handling this would be to move the call to InitializeModule into a separate method.
staticConstructor = tb.DefineConstructor(MethodAttributes.Public | MethodAttributes.Static, CallingConventions.Standard, System.Type.EmptyTypes)
gen = staticConstructor.GetILGenerator()
gen.EmitCall(OpCodes.Call, clr.GetClrType(System.AppDomain).GetMethod("get_CurrentDomain"), ())
gen.Emit(OpCodes.Ldnull)
gen.Emit(OpCodes.Ldftn, assemblyResolveMethod)
gen.Emit(OpCodes.Newobj, clr.GetClrType(System.ResolveEventHandler).GetConstructor((clr.GetClrType(System.Object), clr.GetClrType(System.IntPtr))))
gen.EmitCall(OpCodes.Callvirt, clr.GetClrType(System.AppDomain).GetMethod("add_AssemblyResolve"), ())
gen.Emit(OpCodes.Ret)
mainMethod = tb.DefineMethod("Main", MethodAttributes.Public | MethodAttributes.Static, int, ())
if config.target == System.Reflection.Emit.PEFileKinds.WindowApplication and config.mta:
mainMethod.SetCustomAttribute(clr.GetClrType(System.MTAThreadAttribute).GetConstructor(()), System.Array[System.Byte](()))
elif config.target == System.Reflection.Emit.PEFileKinds.WindowApplication:
mainMethod.SetCustomAttribute(clr.GetClrType(System.STAThreadAttribute).GetConstructor(()), System.Array[System.Byte](()))
gen = mainMethod.GetILGenerator()
# get the ScriptCode assembly...
if config.embed:
# put the generated DLL into the resources for the stub exe
w = mb.DefineResource("IPDll.resources", "Embedded IronPython Generated DLL")
w.AddResource("IPDll." + config.output, System.IO.File.ReadAllBytes(config.output + ".dll"))
System.IO.File.Delete(config.output + ".dll")
# generate code to load the resource
gen.Emit(OpCodes.Ldstr, "IPDll")
gen.EmitCall(OpCodes.Call, clr.GetClrType(Assembly).GetMethod("GetEntryAssembly"), ())
gen.Emit(OpCodes.Newobj, clr.GetClrType(System.Resources.ResourceManager).GetConstructor((str, clr.GetClrType(Assembly))))
gen.Emit(OpCodes.Ldstr, "IPDll." + config.output)
gen.EmitCall(OpCodes.Call, clr.GetClrType(System.Resources.ResourceManager).GetMethod("GetObject", (str, )), ())
gen.EmitCall(OpCodes.Call, clr.GetClrType(System.Reflection.Assembly).GetMethod("Load", (clr.GetClrType(System.Array[System.Byte]), )), ())
else:
# variables for saving the original working directory and the script's return code
wdSave = gen.DeclareLocal(str)
# save current working directory
gen.EmitCall(OpCodes.Call, clr.GetClrType(System.Environment).GetMethod("get_CurrentDirectory"), ())
gen.Emit(OpCodes.Stloc, wdSave)
gen.EmitCall(OpCodes.Call, clr.GetClrType(Assembly).GetMethod("GetEntryAssembly"), ())
gen.EmitCall(OpCodes.Callvirt, clr.GetClrType(Assembly).GetMethod("get_Location"), ())
gen.Emit(OpCodes.Newobj, clr.GetClrType(System.IO.FileInfo).GetConstructor( (str, ) ))
gen.EmitCall(OpCodes.Call, clr.GetClrType(System.IO.FileInfo).GetMethod("get_Directory"), ())
gen.EmitCall(OpCodes.Call, clr.GetClrType(System.IO.DirectoryInfo).GetMethod("get_FullName"), ())
gen.EmitCall(OpCodes.Call, clr.GetClrType(System.Environment).GetMethod("set_CurrentDirectory"), ())
gen.Emit(OpCodes.Ldstr, config.output + ".dll")
gen.EmitCall(OpCodes.Call, clr.GetClrType(System.IO.Path).GetMethod("GetFullPath", (clr.GetClrType(str), )), ())
# result of GetFullPath stays on the stack during the restore of the
# original working directory
# restore original working directory
gen.Emit(OpCodes.Ldloc, wdSave)
gen.EmitCall(OpCodes.Call, clr.GetClrType(System.Environment).GetMethod("set_CurrentDirectory"), ())
# for the LoadFile() call, the full path of the assembly is still on the stack
# as the result from the call to GetFullPath()
gen.EmitCall(OpCodes.Call, clr.GetClrType(System.Reflection.Assembly).GetMethod("LoadFile", (clr.GetClrType(str), )), ())
# emit module name
gen.Emit(OpCodes.Ldstr, "__main__") # main module name
gen.Emit(OpCodes.Ldnull) # no references
gen.Emit(OpCodes.Ldc_I4_0) # don't ignore environment variables for engine startup
# call InitializeModule
# (this will also run the script)
gen.EmitCall(OpCodes.Call, clr.GetClrType(PythonOps).GetMethod("InitializeModuleEx"), ())
gen.Emit(OpCodes.Ret)
tb.CreateType()
ab.SetEntryPoint(mainMethod, config.target)
ab.Save(aName.Name + ".exe", config.platform, config.machine)
class Config(object):
def __init__(self):
self.output = None
self.main = None
self.main_name = None
self.target = System.Reflection.Emit.PEFileKinds.Dll
self.embed = False
self.standalone = False
self.mta = False
self.platform = System.Reflection.PortableExecutableKinds.ILOnly
self.machine = System.Reflection.ImageFileMachine.I386
self.files = []
def ParseArgs(self, args, respFiles=[]):
for arg in args:
arg = arg.strip()
if arg.startswith("#"):
continue
if arg.startswith("/main:"):
self.main_name = self.main = arg[6:]
# only override the target kind if it's currently a DLL
if self.target == System.Reflection.Emit.PEFileKinds.Dll:
self.target = System.Reflection.Emit.PEFileKinds.ConsoleApplication
elif arg.startswith("/out:"):
self.output = arg[5:]
elif arg.startswith("/target:"):
tgt = arg[8:]
if tgt == "exe": self.target = System.Reflection.Emit.PEFileKinds.ConsoleApplication
elif tgt == "winexe": self.target = System.Reflection.Emit.PEFileKinds.WindowApplication
else: self.target = System.Reflection.Emit.PEFileKinds.Dll
elif arg.startswith("/platform:"):
pform = arg[10:]
if pform == "x86":
self.platform = System.Reflection.PortableExecutableKinds.ILOnly | System.Reflection.PortableExecutableKinds.Required32Bit
self.machine = System.Reflection.ImageFileMachine.I386
elif pform == "x64":
self.platform = System.Reflection.PortableExecutableKinds.ILOnly | System.Reflection.PortableExecutableKinds.PE32Plus
self.machine = System.Reflection.ImageFileMachine.AMD64
else:
self.platform = System.Reflection.PortableExecutableKinds.ILOnly
self.machine = System.Reflection.ImageFileMachine.I386
elif arg.startswith("/embed"):
self.embed = True
elif arg.startswith("/standalone"):
self.standalone = True
elif arg.startswith("/mta"):
self.mta = True
elif arg in ["/?", "-?", "/h", "-h"]:
print __doc__
sys.exit(0)
else:
if arg.startswith("@"):
respFile = System.IO.Path.GetFullPath(arg[1:])
if not respFile in respFiles:
respFiles.append(respFile)
with open(respFile, 'r') as f:
self.ParseArgs(f.readlines(), respFiles)
else:
print "WARNING: Already parsed response file '%s'\n" % arg[1:]
else:
self.files.append(arg)
def Validate(self):
if not self.files and not self.main_name:
print "No files or main defined"
return False
if self.target != System.Reflection.Emit.PEFileKinds.Dll and self.main_name == None:
print "EXEs require /main:# to be specified"
return False
if not self.output and self.main_name:
self.output = System.IO.Path.GetFileNameWithoutExtension(self.main_name)
elif not self.output and self.files:
self.output = System.IO.Path.GetFileNameWithoutExtension(self.files[0])
return True
def __repr__(self):
res = "Input Files:\n"
for file in self.files:
res += "\t%s\n" % file
res += "Output:\n\t%s\n" % self.output
res += "Target:\n\t%s\n" % self.target
res += "Platform:\n\t%s\n" % self.platform
res += "Machine:\n\t%s\n" % self.machine
if self.target == System.Reflection.Emit.PEFileKinds.WindowApplication:
res += "Threading:\n"
if self.mta:
res += "\tMTA\n"
else:
res += "\tSTA\n"
return res
def Main(args):
files = []
config = Config()
config.ParseArgs(args)
if not config.Validate():
print __doc__
sys.exit(0)
print config
print "Compiling..."
clr.CompileModules(config.output + ".dll", mainModule = config.main_name, *config.files)
if config.target != System.Reflection.Emit.PEFileKinds.Dll:
GenerateExe(config)
print "Saved to %s" % (config.output, )
if __name__ == "__main__":
Main(sys.argv[1:])
```
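A hedged invocation sketch for the compiler script above (the script name `Program.py` is only an illustration): pyc.py is meant to be run under IronPython, and `Main()` takes the same switches that `Config.ParseArgs` handles.

```python
# Typically run as:  ipy.exe pyc.py /main:Program.py /target:exe /standalone
# Programmatic equivalent through the Main() entry point defined above:
Main([
    "/main:Program.py",   # hypothetical script; compiled in as the __main__ module
    "/target:exe",        # produce a console-application stub instead of a DLL
    "/platform:x86",      # 32-bit PE; /platform:x64 selects AMD64
    "/standalone",        # embed the IronPython assemblies into the stub .exe
])
```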
#### File: examples/scenarios_compiler/src2cpp.py
```python
from waflib.Task import Task
class src2cpp(Task):
run_str = '${SRC[0].abspath()} ${SRC[1].abspath()} ${TGT}'
color = 'PINK'
from waflib.TaskGen import extension
@extension('.src')
def process_src(self, node):
tg = self.bld.get_tgen_by_name('comp')
comp = tg.link_task.outputs[0]
tsk = self.create_task('src2cpp', [comp, node], node.change_ext('.cpp'))
self.source.extend(tsk.outputs)
```
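For context, a sketch of the wscript this waf tool pairs with (target and file names are assumptions based on the scenarios_compiler example, not a verbatim copy of the shipped demo): a task generator named `comp` builds the translator, and every `.src` source is then fed through the `src2cpp` task to produce a `.cpp` file that is compiled normally.

```python
# wscript (sketch), assuming src2cpp.py is loaded as a local waf tool
def configure(conf):
    conf.load('compiler_cxx')
    conf.load('src2cpp', tooldir='.')

def build(bld):
    # the translator binary; its name must match get_tgen_by_name('comp') above
    bld.program(source='comp.cpp', target='comp', name='comp')
    # each .src file becomes a src2cpp task; its .cpp output is appended to the sources
    bld.program(source='main.cpp grammar.src', target='app')
```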
#### File: playground/exclusive_link/excl.py
```python
from waflib.Utils import threading
from waflib import Task
lock = threading.Lock()
count = 0
MAX = 1
def make_exclusive(cls):
old_runnable_status = cls.runnable_status
def runnable_status(self):
global count, lock, MAX
ret = Task.ASK_LATER
if count >= MAX:
return ret
ret = old_runnable_status(self)
if ret == Task.RUN_ME:
lock.acquire()
count += 1
lock.release()
return ret
cls.runnable_status = runnable_status
old_run = cls.run
def run(self):
global count, lock
try:
ret = old_run(self)
finally:
lock.acquire()
count -= 1
lock.release()
return ret
cls.run = run
for x in 'cprogram cxxprogram cshlib cxxshlib cstlib cxxstlib fcprogram fcshlib fcstlib'.split():
if x in Task.classes:
make_exclusive(Task.classes[x])
```
#### File: waflib/Tools/gcc.py
```python
import os, sys
from waflib import Configure, Options, Utils
from waflib.Tools import ccroot, ar
from waflib.Configure import conf
@conf
def find_gcc(conf):
"""
Find the program gcc, and if present, try to detect its version number
"""
cc = conf.find_program(['gcc', 'cc'], var='CC')
cc = conf.cmd_to_list(cc)
conf.get_cc_version(cc, gcc=True)
conf.env.CC_NAME = 'gcc'
conf.env.CC = cc
@conf
def gcc_common_flags(conf):
"""
Common flags for gcc on nearly all platforms
"""
v = conf.env
v['CC_SRC_F'] = []
v['CC_TGT_F'] = ['-c', '-o']
# linker
if not v['LINK_CC']: v['LINK_CC'] = v['CC']
v['CCLNK_SRC_F'] = []
v['CCLNK_TGT_F'] = ['-o']
v['CPPPATH_ST'] = '-I%s'
v['DEFINES_ST'] = '-D%s'
v['LIB_ST'] = '-l%s' # template for adding libs
v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
v['STLIB_ST'] = '-l%s'
v['STLIBPATH_ST'] = '-L%s'
v['RPATH_ST'] = '-Wl,-rpath,%s'
v['SONAME_ST'] = '-Wl,-h,%s'
v['SHLIB_MARKER'] = '-Wl,-Bdynamic'
v['STLIB_MARKER'] = '-Wl,-Bstatic'
# program
v['cprogram_PATTERN'] = '%s'
# shared library
v['CFLAGS_cshlib'] = ['-fPIC']
v['LINKFLAGS_cshlib'] = ['-shared']
v['cshlib_PATTERN'] = 'lib%s.so'
# static lib
v['LINKFLAGS_cstlib'] = ['-Wl,-Bstatic']
v['cstlib_PATTERN'] = 'lib%s.a'
# osx stuff
v['LINKFLAGS_MACBUNDLE'] = ['-bundle', '-undefined', 'dynamic_lookup']
v['CFLAGS_MACBUNDLE'] = ['-fPIC']
v['macbundle_PATTERN'] = '%s.bundle'
@conf
def gcc_modifier_win32(conf):
"""Configuration flags for executing gcc on Windows"""
v = conf.env
v['cprogram_PATTERN'] = '%s.exe'
v['cshlib_PATTERN'] = '%s.dll'
v['implib_PATTERN'] = 'lib%s.dll.a'
v['IMPLIB_ST'] = '-Wl,--out-implib,%s'
v['CFLAGS_cshlib'] = []
# Auto-import is enabled by default even without this option,
# but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages
# that the linker emits otherwise.
v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import'])
@conf
def gcc_modifier_cygwin(conf):
"""Configuration flags for executing gcc on Cygwin"""
gcc_modifier_win32(conf)
v = conf.env
v['cshlib_PATTERN'] = 'cyg%s.dll'
v.append_value('LINKFLAGS_cshlib', ['-Wl,--enable-auto-image-base'])
v['CFLAGS_cshlib'] = []
@conf
def gcc_modifier_darwin(conf):
"""Configuration flags for executing gcc on MacOS"""
v = conf.env
v['CFLAGS_cshlib'] = ['-fPIC', '-compatibility_version', '1', '-current_version', '1']
v['LINKFLAGS_cshlib'] = ['-dynamiclib']
v['cshlib_PATTERN'] = 'lib%s.dylib'
v['FRAMEWORKPATH_ST'] = '-F%s'
v['FRAMEWORK_ST'] = ['-framework']
v['ARCH_ST'] = ['-arch']
v['LINKFLAGS_cstlib'] = []
v['SHLIB_MARKER'] = []
v['STLIB_MARKER'] = []
v['SONAME_ST'] = []
@conf
def gcc_modifier_aix(conf):
"""Configuration flags for executing gcc on AIX"""
v = conf.env
v['LINKFLAGS_cprogram'] = ['-Wl,-brtl']
v['LINKFLAGS_cshlib'] = ['-shared','-Wl,-brtl,-bexpfull']
v['SHLIB_MARKER'] = []
@conf
def gcc_modifier_hpux(conf):
v = conf.env
v['SHLIB_MARKER'] = []
v['STLIB_MARKER'] = '-Bstatic'
v['CFLAGS_cshlib'] = ['-fPIC','-DPIC']
v['cshlib_PATTERN'] = 'lib%s.sl'
@conf
def gcc_modifier_platform(conf):
"""Execute platform-specific functions based on *gcc_modifier_+NAME*"""
# * set configurations specific for a platform.
# * the destination platform is detected automatically by looking at the macros the compiler predefines,
# and if it's not recognised, it falls back to sys.platform.
gcc_modifier_func = getattr(conf, 'gcc_modifier_' + conf.env.DEST_OS, None)
if gcc_modifier_func:
gcc_modifier_func()
def configure(conf):
"""
Configuration for gcc
"""
conf.find_gcc()
conf.find_ar()
conf.gcc_common_flags()
conf.gcc_modifier_platform()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
```
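A hedged sketch of how the tool above is reached from a project wscript (flag values are illustrative): loading the C compiler support at configure time ends up calling the `configure()` function defined above, which detects gcc and fills in the flag templates used by the build rules.

```python
# wscript (sketch)
def options(opt):
    opt.load('compiler_c')

def configure(conf):
    conf.load('compiler_c')                      # selects gcc via find_gcc() on most platforms
    conf.env.append_value('CFLAGS', ['-O2', '-Wall'])

def build(bld):
    bld.shlib(source='mylib.c', target='mylib')  # picks up CFLAGS_cshlib and cshlib_PATTERN
    bld.program(source='main.c', target='app', use='mylib')
```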
#### File: peach/BasicBlocksPyew/batch_example.py
```python
import os
import sys
import time
import hashlib
from pyew_core import CPyew
def printData(pyew, path, msg):
buf = pyew.getBuffer()
print "File :", path
print "MD5 :", hashlib.md5(buf).hexdigest()
print "SHA1 :", hashlib.sha1(buf).hexdigest()
print "SHA256:", hashlib.sha256(buf).hexdigest()
print "Found :", msg
def checkMebroot(path):
pyew = CPyew(batch=True)
pyew.codeanalysis = True
try:
pyew.loadFile(path)
except:
print "ERROR loading file %s" % path
return
if pyew.format == "PE":
# Get 6 bytes at offset 0xB8
if pyew.getBytes(0xB8, 6) != "Rich;\x2E":
return
printData(pyew, path, "Mebroot downloader")
print
def checkMnemonics(path):
pyew = CPyew(batch=True)
pyew.codeanalysis = True
try:
pyew.loadFile(path)
except:
print "ERROR loading file %s" % path
return
# Is it a PE file?
if pyew.format == "PE":
# The most common x86 mnemonics
commons = ["PUSH", "MOV", "SUB", "ADD", "LEA", "CALL", "JMP", "JZ", "JNZ", \
"OR", "XOR", "NOT", "POP", "AND", "TEST", "JL", "JG", "JE", \
"JLE", "CMP", "LEAVE", "RET", "NOP", "PUSHF", "POPF", "INC", \
"INT 3", "DEC", "PUSHA", "POPA"]
try:
# Get the first 30 mnemonics
mnems = pyew.GetMnems(pyew.ep, 30)
except:
print "ERROR scanning file %s" % path
return
ret = []
for x in mnems:
if x not in commons and x not in ret:
ret.append(x)
if len(ret) > 0:
printData(pyew, path, "Uncommon mnemonics")
print "Mnemonics:", ",".join(ret)
print
# Seek to the entry point
pyew.seek(pyew.ep)
# Hexdump the first 64 bytes at the entry point
print pyew.hexdump(pyew.buf, length=16, bsize=64)
def entryPointCalls(path):
pyew = CPyew(batch=True)
pyew.codeanalysis = True
try:
pyew.loadFile(path)
except KeyboardInterrupt:
print "Abort"
sys.exit(0)
except:
print "ERROR loading file %s" % path
return
if pyew.format != "PE":
return
calls = []
# Get the disassembly of the first 100 lines
l = pyew.disasm(pyew.ep, processor=pyew.processor, type=pyew.type, lines=100, bsize=1600)
for i in l:
mnem = str(i.mnemonic)
# Is it a direct or indirect jump or call?
if mnem == "CALL" or mnem.startswith("J") or mnem.startswith("LOOP"):
operands = str(i.operands).replace("[", "").replace("]", "")
try:
if pyew.imports.has_key(int(operands, 16)):
x = pyew.imports[int(operands, 16)]
if x not in calls:
calls.append(x)
except:
pass
if len(calls) > 0:
printData(pyew, path, "Library calls at Entry Point")
print "Library Calls:", ",".join(calls)
print
def doChecks(path):
# Example usage to check for the Mebroot downloader
checkMebroot(path)
# Example to extract the first (non common) mnemonics from the entry point
checkMnemonics(path)
# Example to print the API calls at the entry point
entryPointCalls(path)
def main(path):
for root, dirs, files in os.walk(path):
for x in files:
filepath = os.path.join(root, x)
print "Analyzing file %s" % filepath
t = time.time()
doChecks(filepath)
print "Time to analyze %f" % (time.time() - t)
def usage():
print "Usage:", sys.argv[0], "<path>"
if __name__ == "__main__":
if len(sys.argv) == 1:
usage()
else:
main(sys.argv[1])
```
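Besides the command-line entry point, the helpers above can be driven directly; a minimal sketch (the sample path is hypothetical):

```python
# Analyze a single sample instead of walking a directory tree:
doChecks("/tmp/suspicious_sample.exe")

# Or drive CPyew directly, as the helpers above do:
pyew = CPyew(batch=True)
pyew.codeanalysis = True
pyew.loadFile("/tmp/suspicious_sample.exe")
if pyew.format == "PE":
    print "Entry point: 0x%x" % pyew.ep
```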
#### File: BasicBlocksPyew/Elf/__init__.py
```python
import os
import sys
import struct
import traceback
import zlib
from stat import *
from Elf.elf_lookup import *
verbose = False
class Elf:
"""
An Elf object representation which allows manipulation
and parsing of Elf executables. Brought to you by
kenshoto.
"""
def __init__(self, initstr):
"""
Constructacon: initstr can be a filename, or a big hunka Elf lovin
(If you only give it 52 bytes, it'll just parse the header, if you give it
more, it *will* assume it has the whole thing...
"""
self.sections = []
self.pheaders = []
self.secnames = {}
self.symbols = []
self.symbols_by_name = {}
self.symbols_by_addr = {}
self.e_ident = "NOTHINGHEREATALL"
self.e_type = 0
self.e_machine = 0
self.e_version = 0
self.e_entry = 0
self.e_phoff = 0
self.e_shoff = 0
self.e_flags = 0
self.e_ehsize = 0
self.e_phentsize = 0
self.e_phnum = 0
self.e_shentsize = 0
self.e_shnum = 0
self.e_shstrndx = 0
self.fmt = "2HI3LI6H"
self.hdrlen = struct.calcsize(self.fmt) + 16
self.myname = "unknown"
bytes = initstr
pbase = self.hdrlen
sbase = self.hdrlen
if len(initstr) > 0:
if not '\000' in initstr and os.path.exists(initstr):
bytes = file(initstr, "rb").read()
self.myname = initstr
self.initFromBytes(bytes)
# If we only got the 52 bytes, we have
# no symbols to parse etc...
if len(bytes) == self.hdrlen:
return
if self.e_shoff < self.e_phoff:
raise Exception("ERROR: we only support <elf hdr><pgrm hdrs><data><sec hdrs> format now")
# Load up any program headers we find
if self.e_phoff:
pbase = self.e_phoff
plen = self.e_phentsize
for i in range(self.e_phnum):
if self.bits == 32:
pgm = Elf32Pheader(bytes[pbase:pbase+plen],elf=self)
else:
pgm = Elf64Pheader(bytes[pbase:pbase+plen],elf=self)
self.pheaders.append(pgm)
pbase += plen
# Load up all the section headers
if self.e_shoff:
# Load up the sections
sbase = self.e_shoff
# FIXME this assumes static sized section headers
slen = self.e_shentsize
for i in range(self.e_shnum):
if self.bits == 32:
sec = Elf32Section(bytes[sbase:sbase+slen],elf=self)
else:
sec = Elf64Section(bytes[sbase:sbase+slen],elf=self)
self.sections.append(sec)
sbase += slen
# Populate the section names
strsec = self.sections[self.e_shstrndx]
names = bytes[strsec.sh_offset:strsec.sh_offset+strsec.sh_size]
for sec in self.sections:
name = names[sec.sh_name:].split("\x00")[0]
if len(name) > 0:
sec.setName(name)
self.secnames[name] = sec
self.parseSymbols()
self.parseDynamic()
self.parseRelocs()
def getName(self):
return self.myname
def __str__(self):
""" Calls toString() to obtain a string summary of this ELF. Since no additional parameters make sense, default verbosity for the module is used
"""
return self.toString(verbose)
def toString(self, verbose=False):
""" Returns a string summary of this ELF. If (verbose) the summary will include Symbols, Relocs, Dynamics and Dynamic Symbol tables"""
mystr = "ELF HEADER OBJECT:" + self.myname
mystr+= "\n= Intimate Details:"
mystr+= "\n==Magic:\t\t\t\t" + self.e_ident
mystr+= "\n==Type:\t\t\t\t\t" + e_types.get(self.e_type)
mystr+= "\n==Machine Arch:\t\t\t\t" + e_machine_types.get(self.e_machine)
mystr+= "\n==Version:\t\t\t\t%d" % (self.e_version)
mystr+= "\n==Entry:\t\t\t\t0x%.8x" % (self.e_entry)
mystr+= "\n==Program Headers(offset):\t\t%d (0x%x) bytes" % (self.e_phoff, self.e_phoff)
mystr+= "\n==Section Headers(offset):\t\t%d (0x%x) bytes" % (self.e_shoff, self.e_shoff)
mystr+= "\n==Flags:\t\t\t\t" + repr(self.e_flags) + " "
mystr+= "\n==Elf Header Size:\t\t\t" + repr(self.e_ehsize) + " (" + hex(self.e_ehsize) + " bytes)"
mystr+= "\n==Program Header Size:\t\t\t" + repr(self.e_phentsize) + " (" + hex(self.e_phentsize) + " bytes)"
mystr+= "\n==Program Header Count:\t\t\t" + repr(self.e_phnum) + " (" + hex(self.e_phnum)+ ")"
mystr+= "\n==Section Header Size:\t\t\t" + repr(self.e_shentsize) + " (" + hex(self.e_shentsize) + " bytes)"
mystr+= "\n==Section Header Count:\t\t\t" + repr(self.e_shnum) + " (" + hex(self.e_shnum) + ")"
mystr+= "\n==Section Header String Index\t\t" + repr(self.e_shstrndx) + " (" + hex(self.e_shstrndx) + " bytes)"
mystr+= "\n\n= Sections:"
for sec in self.sections:
mystr+= "\n"+repr(sec)
mystr+= "\n\n= Program Headers:"
for ph in self.pheaders:
mystr+= "\n"+repr(ph)
if (verbose):
mystr+= "\n\n= Symbols table:"
for sym in self.symbols:
mystr+= "\n"+repr(sym)
mystr+= "\n\n= Relocation table:"
for reloc in self.relocs:
mystr+= "\n"+repr(reloc)
mystr+= "\n\n= Dynamics table:"
for dyn in self.dynamics:
mystr+= "\n"+repr(dyn)
mystr+= "\n\n= Dynamic Symbols table:"
for dyn in self.dynamic_symbols:
mystr+= "\n"+repr(dyn)
return mystr
def getStrtabString(self, offset, section=".strtab"):
bytes = self.getSection(section).getBytes()
index = bytes.find("\x00", offset)
return bytes[offset:index]
def initFromBytes(self, bytes):
if len(bytes) < self.hdrlen:
raise Exception("Elf format error: Not even a full Elf header (%d bytes)", self.hdrlen)
if bytes[:4] <> "\x7fELF":
raise Exception("Elf format error: Elf magic not found")
self.e_ident = bytes[:16]
(self.e_type,
self.e_machine,
self.e_version,
self.e_entry,
self.e_phoff,
self.e_shoff,
self.e_flags,
self.e_ehsize,
self.e_phentsize,
self.e_phnum,
self.e_shentsize,
self.e_shnum,
self.e_shstrndx) = struct.unpack(self.fmt, bytes[16:self.hdrlen])
if self.e_machine in e_machine_32:
self.bits = 32
elif self.e_machine in e_machine_64:
self.bits = 64
else:
raise Exception("ERROR - Unknown 32/64 bit e_machine: %d. Add to e_machine_XX" % self.e_machine)
self.data = bytes
def buildHeader(self):
"""
Return the byte representation for *just* the elf header
for this elf.
"""
hdr = struct.pack(self.fmt,
self.e_type,
self.e_machine,
self.e_version,
self.e_entry,
self.e_phoff,
self.e_shoff,
self.e_flags,
self.e_ehsize,
self.e_phentsize,
self.e_phnum,
self.e_shentsize,
self.e_shnum,
self.e_shstrndx)
return self.e_ident + hdr
def serialize(self, filename=None):
"""
If filename is specified, serialize this elf object to the specified
file, otherwise return the bytes (read string) for this elf object
"""
# Get the Elf header
buf = self.buildHeader()
# Get the program headers
#FIXME assumes order
for pgm in self.pheaders:
buf += pgm.serialize()
phlen = self.e_phentsize * self.e_phnum
# Append the actual file data
buf += self.data[self.e_ehsize+phlen:self.e_shoff]
# Append the section headers
for sec in self.sections:
buf += sec.serialize()
if filename:
f = file(filename,'wb')
f.write(buf)
f.close()
return
return buf
def lookupSymbolName(self, name):
"""
        Lookup symbol entries in this elf binary by name. The result is
        the matching symbol object (as stored by addSymbol), or None if
        it's not found.
"""
return self.symbols_by_name.get(name, None)
def lookupSymbolAddr(self, address):
"""
        Lookup symbols from this elf binary by address (st_value).
        This returns the matching symbol object, or None if not found.
"""
return self.symbols_by_addr.get(address, None)
def getBytes(self, offset, size, file_offset=True):
"""
Modification to the bytes this returns will NOT
be saved to the file bytes.
"""
return self.data[offset:offset+size]
def insertBytes(self, offset, bytes,section=None,file_offset=True):
"""
Insert the bytes argument at offset in the data.
The default will insert the bytes at that offset
from the beginning of the file (to ease calculations
that are based on header values). The caller may optionally
specify file_offset=False to have the offset be from
the beginning of self.data. If the inserted data falls
directly on a section boundary,
The optional "section" argument specifies which section
you would like to "own" the data (aka. which one gets his
length updated. If none, the bytes will push other data down
essentially into padding between sections...
THIS CODE DOES NOT WORK YET!
"""
ilen = len(bytes)
if section:
if ( offset < section.sh_offset or
offset > (section.sh_offset+section.sh_size)):
raise Exception("ERROR - Specified section in insertBytes has wrong offset/size: offset: %d" % offset)
section.sh_size += ilen
if file_offset:
offset -= self.getDataOffset()
self.data = self.data[:offset] + bytes + self.data[offset:]
#FIXME deal with program headers...
#for pgm in self.pheaders:
for sec in self.sections:
if offset <= (sec.sh_offset-self.getDataOffset()):
sec.sh_offset += ilen
if sec.sh_offset % sec.sh_addralign:
align = sec.sh_addralign - (sec.sh_offset % sec.sh_addralign)
off = sec.sh_offset - self.getDataOffset()
# Insert the pad bytes if this insert messes up alignment
self.data = self.data[:off] + "\x00" * align + self.data[off:]
sec.sh_offset += align
ilen += align
if offset < self.e_shoff:
self.e_shoff += ilen
print "INSERTED: ",ilen," bytes"
def getDataOffset(self):
return self.hdrlen + (self.e_phentsize * self.e_phnum)
def modifyBytes(self, offset, bytes, file_offset=True):
"""
Arguments are the same as insertBytes() except that
this method will OVERWRITE the bytes at that location
(which shouldn't cause any recalculation)
"""
blen = len(bytes)
if file_offset:
offset -= self.getDataOffset()
self.data = self.data[:offset] + bytes + self.data[offset+blen:]
def appendSection(self, section, name=None):
"""
Append the section to the Elf. The optional
name will be put into the shstrtab...
"""
strtab = self.getSection(".shstrtab")
if not strtab and name:
raise Exception("ERROR - .shstrtab not found (and name specified)")
if name:
section.sh_name = strtab.sh_size
self.insertBytes(strtab.sh_offset+strtab.sh_size, name+"\x00", strtab)
self.secnames[name] = section
section.elf = self
self.sections.append(section)
self.e_shnum += 1
print repr(strtab.getBytes())
def getSection(self, secname):
return self.secnames.get(secname,None)
def getSections(self):
"""
Return the array of sections for this Elf
"""
return list(self.sections)
def getPheaders(self):
"""
Return a list of the program headers for this elf
"""
return list(self.pheaders)
def addSymbol(self, symbol):
self.symbols.append(symbol)
self.symbols_by_name[symbol.getName()] = symbol
self.symbols_by_addr[symbol.st_value] = symbol
def getSymbols(self):
return self.symbols
def parseSymbols(self):
"""
Parse out the symbols that this elf binary has for us.
"""
for sec in self.sections:
if sec.sh_type == SHT_SYMTAB:
symtab = sec.getBytes()
while symtab:
if self.bits == 32:
newsym = Elf32Symbol(symtab)
else:
newsym = Elf64Symbol(symtab)
#FIXME this is dorked!
if newsym.st_name:
name = self.getStrtabString(newsym.st_name, ".strtab")
newsym.setName(name)
self.addSymbol(newsym)
symtab = symtab[len(newsym):]
def parseRelocs(self):
"""
Parse all the relocation entries out of any sections with
sh_type == SHT_REL
"""
self.relocs = []
for sec in self.sections:
if sec.sh_type == SHT_REL:
bytes = sec.getBytes()
while bytes:
if self.bits == 32:
reloc = Elf32Reloc(bytes)
else:
reloc = Elf64Reloc(bytes)
index = reloc.getSymTabIndex()
try:
sym = self.dynamic_symbols[index]
reloc.setName(sym.getName())
except:
traceback.print_exc()
self.relocs.append(reloc)
bytes = bytes[len(reloc):]
elif sec.sh_type == SHT_RELA:
bytes = sec.getBytes()
while bytes:
if self.bits == 32:
reloc = Elf32Reloca(bytes)
else:
print "WARNING: 64bits ELF programs aren't supported yet"
return
index = reloc.getSymTabIndex()
try:
sym = self.dynamic_symbols[index]
reloc.setName(sym.getName())
except:
traceback.print_exc()
self.relocs.append(reloc)
bytes = bytes[len(reloc):]
def parseDynamic(self):
self.dynamic_symbols = []
self.dynamics = []
sec = self.getSection(".dynsym")
if not sec:
return
symtab = sec.getBytes()
while symtab:
if self.bits == 32:
newsym = Elf32Symbol(symtab)
else:
newsym = Elf64Symbol(symtab)
if newsym.st_name:
name = self.getStrtabString(newsym.st_name, ".dynstr")
newsym.setName(name)
self.dynamic_symbols.append(newsym)
symtab = symtab[len(newsym):]
dynsec = self.getSection(".dynamic")
dynbytes = dynsec.getBytes()
while dynbytes:
if self.bits == 32:
dyn = Elf32Dynamic(dynbytes)
else:
dyn = Elf64Dynamic(dynbytes)
if dyn.d_tag in Elf32Dynamic.has_string:
name = self.getStrtabString(dyn.d_value, ".dynstr")
dyn.setName(name)
self.dynamics.append(dyn)
if dyn.d_tag == DT_NULL: # Represents the end
break
dynbytes = dynbytes[len(dyn):]
def getDynamics(self):
return self.dynamics
def getDynSyms(self):
return self.dynamic_symbols
def getRelocs(self):
return self.relocs
class Elf32Dynamic:
has_string = [DT_NEEDED,DT_SONAME]
"""
An object to represent an Elf dynamic entry.
(linker/loader directives)
"""
def __init__(self, bytes=None):
self.name = ""
self.d_tag = 0
self.d_value = 0
if bytes:
self.initFromBytes(bytes)
def __repr__(self):
name = self.getName()
if not name:
name = hex(self.d_value)
return "%s %s" % (name,self.getTypeName())
def initFromBytes(self, bytes):
self.d_tag,self.d_value = struct.unpack("2L", bytes[:len(self)])
def getName(self):
return self.name
def setName(self, name):
self.name = name
def getTypeName(self):
return dt_types.get(self.d_tag,"Unknown: %s"%hex(self.d_tag))
def __len__(self):
return struct.calcsize("2L")
class Elf64Dynamic(Elf32Dynamic):
pass
class Elf32Reloc:
"""
Elf relocation entries consist mostly of "fixup" address which
are taken care of by the loader at runtime. Things like
GOT entries, PLT jmp codes etc all have an Elf relocation
entry.
"""
def __init__(self, bytes=None):
self.name = ""
self.r_offset = 0
self.r_info = 0
if bytes:
self.initFromBytes(bytes)
def __repr__(self):
return "%s %s <%s>" % (hex(self.r_offset),self.getName(),self.getTypeName())
def initFromBytes(self,bytes):
(self.r_offset, self.r_info) = struct.unpack("2L",bytes[:len(self)])
def setName(self, name):
self.name = name
def getName(self):
return self.name
def getType(self):
return self.r_info & 0xff
def getSymTabIndex(self):
return self.r_info >> 8
def getTypeName(self):
return r_types_386.get(self.getType(),"")
def __len__(self):
return struct.calcsize("2L")
class Elf32Reloca(Elf32Reloc):
def __init__(self, bytes=None):
self.r_addend = 0
Elf32Reloc.__init__(self, bytes)
def initFromBytes(self, bytes):
(self.r_offset, self.r_info, self.r_addend) = struct.unpack("3L", bytes[:len(self)])
def __len__(self):
return struct.calcsize("3L")
class Elf64Reloc(Elf32Reloc):
pass
class Elf32Symbol:
"""
An object which represents an Elf Symbol. It has the
following attributes (which are created/parsed by init:
st_name
st_value
st_size
st_info
st_other
st_shndx
"""
def __init__(self, bytes=None):
self.name = ""
self.st_name = 0
self.st_value = 0
self.st_size = 0
self.st_info = 0
self.st_other = 0
self.st_shndx = 0
if bytes:
self.initFromBytes(bytes)
def getInfoType(self):
return self.st_info & 0xf
def getInfoBind(self):
return self.st_info >> 4
def __cmp__(self, other):
if self.st_value > other.st_value:
return 1
return -1
def initFromBytes(self,bytes):
(self.st_name,
self.st_value,
self.st_size,
self.st_info,
self.st_other,
self.st_shndx) = struct.unpack("3L2BH",bytes[:len(self)])
def serialize(self):
return struct.pack("3L2BH",
self.st_name,
self.st_value,
self.st_size,
self.st_info,
self.st_other,
self.st_shndx)
def setName(self,name):
self.name = name
def getName(self):
return self.name
def __repr__(self):
return "0x%.8x %d %s" % (self.st_value, self.st_size, self.name)
def __len__(self):
return struct.calcsize("3L2BH")
class Elf64Symbol(Elf32Symbol):
def initFromBytes(self,bytes):
fmt = "IBBHLL"
(self.st_name,
self.st_info,
self.st_other,
self.st_shndx,
self.st_value,
self.st_size,
) = struct.unpack(fmt,bytes[:len(self)])
def serialize(self):
return struct.pack("IBBHLL",
self.st_name,
self.st_value,
self.st_size,
self.st_info,
self.st_other,
self.st_shndx)
def __len__(self):
return struct.calcsize("IBBHLL")
class Elf32Pheader:
"""
An object to represent ELF_Phdr structures and the segments they represent
"""
def __init__(self, bytes=None, elf=None):
self.elf = elf
self.p_type = 0
self.p_offset = 0
self.p_vaddr = 0
self.p_paddr = 0
self.p_filesz = 0
self.p_memsz = 0
self.p_flags = 0
self.p_align = 0
if bytes:
self.initFromBytes(bytes)
def __repr__(self):
return "[%35s] VMA: 0x%.8x offset: %8d memsize: %8d align: %8d (filesz: %8d) flags: %x" % (
self.getTypeName(),
self.p_vaddr,
self.p_offset,
self.p_memsz,
self.p_align,
self.p_filesz,
self.p_flags)
def getTypeName(self):
return ph_types.get(self.p_type, "Unknown")
def initFromBytes(self, bytes):
(
self.p_type,
self.p_offset,
self.p_vaddr,
self.p_paddr,
self.p_filesz,
self.p_memsz,
self.p_flags,
self.p_align,
) = struct.unpack("8L",bytes[:32])
def serialize(self):
hdr = struct.pack("8L",
self.p_type,
self.p_offset,
self.p_vaddr,
self.p_paddr,
self.p_filesz,
self.p_memsz,
self.p_flags,
self.p_align)
return hdr
def __len__(self):
return struct.calcsize("8L")
class Elf64Pheader(Elf32Pheader):
def initFromBytes(self, bytes):
fmt = "2I6L"
(
self.p_type,
self.p_flags,
self.p_offset,
self.p_vaddr,
self.p_paddr,
self.p_filesz,
self.p_memsz,
self.p_align,
) = struct.unpack(fmt,bytes[:len(self)])
def serialize(self):
fmt = "2I6L"
hdr = struct.pack(fmt,
self.p_type,
self.p_flags,
self.p_offset,
self.p_vaddr,
self.p_paddr,
self.p_filesz,
self.p_memsz,
self.p_align)
return hdr
def __len__(self):
return struct.calcsize("2I6L")
class Elf32Section:
"""
An object to represent the elf sections in the Elf binary. Constructor
takes a string representing the contents of the Elf section.
self.sh_name
self.sh_type
self.sh_flags
self.sh_addr
self.sh_offset
self.sh_size
self.sh_link
self.sh_info
self.sh_addralign
self.sh_entsize
"""
def __init__(self, initbytes=None, elf=None):
self.elf = elf
self.name = ""
self.bytes = "" # The actual data section
self.sh_name = 0 # Section name index
self.sh_type = 0
self.sh_flags = 0
self.sh_addr = 0
self.sh_offset = 0
self.sh_size = 0
self.sh_link = 0
self.sh_info = 0
self.sh_addralign = 0
self.sh_entsize = 0
self.extrajunx = "" # Stuff held in extended section headers
if initbytes:
self.initFromBytes(initbytes)
def __repr__(self):
return "Elf Section: [%20s] VMA: 0x%.8x offset: %8d ent/size: %8d/%8d align: %8d" % (
self.name,
self.sh_addr,
self.sh_offset,
self.sh_entsize,
self.sh_size,
self.sh_addralign)
def getPadSize(self, offset):
"""
        Calculate the pad necessary for this section
based on the file offset given as an arg
"""
ret = 0
myalign = self.sh_addralign
if myalign > 1:
mymod = offset % myalign
if mymod:
ret = myalign-mymod
return ret
def initFromBytes(self, bytes):
(
self.sh_name,
self.sh_type,
self.sh_flags,
self.sh_addr,
self.sh_offset,
self.sh_size,
self.sh_link,
self.sh_info,
self.sh_addralign,
self.sh_entsize,
) = struct.unpack("10L", bytes[:40])
if len(bytes) > 40:
self.extrajunx = bytes[40:]
def serialize(self):
hdr = struct.pack("10L",
self.sh_name,
self.sh_type,
self.sh_flags,
self.sh_addr,
self.sh_offset,
self.sh_size,
self.sh_link,
self.sh_info,
self.sh_addralign,
self.sh_entsize)
return hdr + self.extrajunx
def getBytes(self):
"""
Get the bytes described by this section. Changes
to these bytes will NOT be changed in the Elf file data!
"""
if self.elf:
if self.sh_type == SHT_NOBITS:
return "\x00" * self.sh_size
return self.elf.getBytes(self.sh_offset,self.sh_size)
else:
raise Exception("ERROR - Section.getBytes() called when section has no elf!")
def getUncompressed(self):
"""
Get the bytes described by this section. If sh_entsize != sh_size, run uncompress before returning
"""
if self.elf:
if (self.sh_entsize > 0 and self.sh_size != self.sh_entsize):
return zlib.decompress(self.elf.getBytes(self.sh_offset,self.sh_size))
return self.elf.getBytes(self.sh_offset,self.sh_size)
else:
raise Exception("ERROR - Section.getBytes() called when section has no elf!")
def setName(self, name):
"""
The name of a section is not going to be known until
the sections have been parsed (to know which one is the
strtab)
"""
self.name = name
def getName(self):
return self.name
def __len__(self):
return struct.calcsize("10L")
class Elf64Section(Elf32Section):
"""
Elf binary section on 64 bit platforms
"""
def initFromBytes(self, bytes):
fmt = "2I4L2I2L"
(
self.sh_name,
self.sh_type,
self.sh_flags,
self.sh_addr,
self.sh_offset,
self.sh_size,
self.sh_link,
self.sh_info,
self.sh_addralign,
self.sh_entsize,
) = struct.unpack(fmt, bytes[:len(self)])
if len(bytes) > len(self):
self.extrajunx = bytes[len(self):]
def serialize(self):
fmt = "2I4L2I2L"
hdr = struct.pack(fmt,
self.sh_name,
self.sh_type,
self.sh_flags,
self.sh_addr,
self.sh_offset,
self.sh_size,
self.sh_link,
self.sh_info,
self.sh_addralign,
self.sh_entsize)
return hdr + self.extrajunx
def __len__(self):
return struct.calcsize("2I4L2I2L")
def getRelocType(val):
return val & 0xff
def getRelocSymTabIndex(val):
return val >> 8
```
#### File: BasicBlocksPyew/plugins/diagrams.py
```python
import os
import sys
class CNode:
name = None
label = None
def __init__(self, name, label):
self.name = name
self.label = label
class CDotDiagram:
def __init__(self):
self.index = 0
self.identifiers = {}
self.nodes = {}
self.connections = {}
self.antirules = []
self._verbose = False
def addNode(self, node):
if not self.nodes.has_key(node.name):
self.index += 1
self.nodes[node.name] = node.label
self.identifiers[node.name] = self.index
def addConnectedNode(self, node1, node2):
if node1.name == node2.name:
return
if self.connections.has_key(node1.name):
            if node2.name in self.connections[node1.name]:
print "Connection ignored (already exists)"
return
if self.connections.has_key(node2.name):
            if node1.name in self.connections[node2.name]:
print "Connection ignored (already exists)"
return
self.addNode(node1)
self.addNode(node2)
if not self.connections.has_key(node1.name):
self.connections[node1.name] = [node2.name]
else:
self.connections[node1.name].append(node2.name)
def generateDot(self):
buf = 'digraph G {\n graph [overlap=scale]; node [fontname=Courier]; \n'
if self._verbose:
print "Total of %d node(s)" % len(self.nodes)
for node in self.nodes:
buf += ' a%s [shape=box, label = "%s", color="blue"]\n' % (self.identifiers[node], self.nodes[node])
buf += "\n"
if self._verbose:
print "Total of %d connections(s)" % len(self.connections)
i = 0
for conn in self.connections:
i += 1
if self._verbose:
print "Connections for %s are %d" % (str(conn), len(self.connections[conn]))
total = len(self.connections)
print "Done %d out of %d (%f%%)" % (i, total, (i*100.00/total*1.00))
if i*100.00/total*1.00 >= 101:
break
for x in self.connections[conn]:
parent = self.identifiers[x]
child = self.identifiers[conn]
rule = str(parent) + "-" + str(child)
antirule = str(child) + "-" + str(parent)
if antirule not in self.antirules and rule not in self.antirules:
buf += " a%s -> a%s [style = bold, color=red]\n" % (child, parent)
self.antirules.append(rule)
self.antirules.append(antirule)
else:
pass
#print "antirule"
buf += "}"
return buf
```
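A small sketch of driving the classes above; the import path is an assumption, the rest uses only methods defined in this file.

```python
# Build a two-node graph and emit DOT source (Python 2).
from plugins.diagrams import CNode, CDotDiagram   # import path is an assumption

dg = CDotDiagram()
entry = CNode("f_401000", "entry point")
helper = CNode("f_401200", "string decoder")
dg.addConnectedNode(entry, helper)    # registers both nodes plus one edge
print dg.generateDot()                # feed the output to `dot -Tpng`
```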
#### File: BasicBlocksPyew/plugins/url.py
```python
import re
import sys
import urllib
def toUnicode(buf):
ret = ""
for c in buf:
ret += c + "\x00"
return ret
def urlExtract(pyew, doprint=True):
""" Search URLs in the current document """
urlfinders = [
re.compile("((http|ftp|mailto|telnet|ssh)(s){0,1}\:\/\/[\w|\/|\.|\#|\?|\&|\=|\-|\%]+)+", re.IGNORECASE | re.MULTILINE)
]
moffset = pyew.offset
pyew.offset = 0
pyew.seek(0)
buf = pyew.f.read()
ret = []
for x in urlfinders:
ret += doFind(x, buf)
if doprint and len(ret) > 0:
print "ASCII URLs"
print
for url in ret:
print url
buf = buf.replace("\x00", "")
uniret = []
for x in urlfinders:
uniret += doFind(x, buf)
if doprint and len(uniret) > 0:
i = 0
        for url in uniret:
if url not in ret:
if i == 0:
print "UNICODE URLs"
print
i += 1
print url
tmp = {}
for x in ret:
tmp[x] = x
ret = tmp.values()
pyew.seek(moffset)
return ret
def doFind(x, buf):
ret = []
    for l in x.findall(buf):  # flags are already baked into the compiled patterns
for url in l:
if len(url) > 8 and url not in ret:
ret.append(url)
return ret
def checkUrls(pyew, doprint=True):
""" Check URLs of the current file """
oks = []
urls = urlExtract(pyew, doprint=False)
if len(urls) == 0:
print "***No URLs found"
return
for url in urls:
try:
if doprint:
sys.stdout.write("Checking %s ... " % url)
sys.stdout.flush()
r = urllib.urlopen(url)
if doprint:
sys.stdout.write("OK\n")
sys.stdout.flush()
oks.append(url)
except KeyboardInterrupt:
print "Aborted"
break
except:
sys.stdout.write("DOWN\n")
sys.stdout.flush()
return oks
def checkBad(pyew, doprint=True):
""" Check for known bad URLs """
returls = []
url = "http://www.malware.com.br/cgi/submit?action=list_adblock"
try:
l = urllib.urlopen(url).readlines()
except:
print "***Error fetching URL list from www.malware.com.br:", sys.exc_info()[1]
return
urls = urlExtract(pyew, doprint=False)
if len(urls) == 0:
print "***No URLs found"
return
for url in urls:
for badurl in l:
if badurl.startswith("["):
continue
badurl = badurl.strip("\n").strip("\r")
if url.lower().find(badurl) > -1:
if doprint:
print "***Found bad URL: %s" % url
returls.append(url)
break
return returls
functions = {"url":urlExtract, "chkurl":checkUrls, "chkbad":checkBad}
```
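The extraction boils down to the compiled pattern plus the length filter in doFind(); here is the same logic applied to a plain string, without a pyew instance.

```python
# Stand-alone illustration of the URL pattern and the >8 length filter (Python 2).
import re

pattern = re.compile("((http|ftp|mailto|telnet|ssh)(s){0,1}\:\/\/[\w|\/|\.|\#|\?|\&|\=|\-|\%]+)+",
                     re.IGNORECASE | re.MULTILINE)
buf = "config at http://example.com/a?b=1 and ftp://host/pub/file"
for groups in pattern.findall(buf):
    for url in groups:
        if len(url) > 8:        # same filter as doFind()
            print url
```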
#### File: BasicBlocksPyew/plugins/vmdetect.py
```python
import sys
def antivmSearch(pyew):
""" Search for common antivm tricks"""
tricks = {
"Red Pill":"\x0f\x01\x0d\x00\x00\x00\x00\xc3",
"VirtualPc trick":"\x0f\x3f\x07\x0b",
"VMware trick":"VMXh",
"VMCheck.dll":"\x45\xC7\x00\x01",
"VMCheck.dll for VirtualPC":"\x0f\x3f\x07\x0b\xc7\x45\xfc\xff\xff\xff\xff",
"Xen":"XenVMM", # Or XenVMMXenVMM
"Bochs & QEmu CPUID Trick":"\x44\x4d\x41\x63",
"Torpig VMM Trick": "\xE8\xED\xFF\xFF\xFF\x25\x00\x00\x00\xFF\x33\xC9\x3D\x00\x00\x00\x80\x0F\x95\xC1\x8B\xC1\xC3",
"Torpig (UPX) VMM Trick": "\x51\x51\x0F\x01\x27\x00\xC1\xFB\xB5\xD5\x35\x02\xE2\xC3\xD1\x66\x25\x32\xBD\x83\x7F\xB7\x4E\x3D\x06\x80\x0F\x95\xC1\x8B\xC1\xC3"
}
buf = pyew.getBuffer()
for trick in tricks:
pos = buf.find(tricks[trick])
if pos > -1:
print "HINT[0x%x]: Found %s" % (pos, trick)
print
size = len(tricks[trick])
print pyew.disassemble(buf[pos:pos+size], pyew.processor, pyew.type, pyew.lines, pyew.bsize, baseoffset=pos)
functions = {"antivm":antivmSearch}
```
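The detection is a plain substring scan over the whole buffer, so the same signature table can be reused outside pyew; the input path below is a placeholder.

```python
# Scan an arbitrary dump for one of the signatures above (Python 2).
buf = open("sample.bin", "rb").read()      # placeholder input file
redpill = "\x0f\x01\x0d\x00\x00\x00\x00\xc3"
pos = buf.find(redpill)
if pos > -1:
    print "Red Pill sequence at offset 0x%x" % pos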
#### File: peach/BasicBlocksPyew/pyew.py
```python
import os
import sys
import code
import pprint
import sqlite3
import StringIO
from binascii import unhexlify
from hashlib import md5, sha1, sha224, sha256, sha384, sha512, new as hashlib_new
from config import PLUGINS_PATH, DATABASE_PATH
try:
import psyco
psyco.log()
psyco.full()
except ImportError:
pass
try:
import readline
histfile = os.path.join(os.environ["HOME"], ".pyew")
try:
readline.read_history_file(histfile)
except IOError:
pass
import atexit
atexit.register(readline.write_history_file, histfile)
except:
pass
try:
import pefile
hasPefile = True
except ImportError:
hasPefile = False
try:
from Elf import Elf
hasElf = True
except ImportError:
hasElf = False
from pyew_core import CPyew
PROGRAM="PYEW! A Python tool like radare or *iew"
VERSION=0x01020000
HUMAN_VERSION="1.2.0.0"
def showHelp(pyew):
print PROGRAM, "0x%x" % VERSION, "(%s)" % HUMAN_VERSION
print
print "Commands:"
print
print "?/help Show this help"
print "x/dump/hexdump Show hexadecimal dump"
print "s/seek Seek to a new offset"
print "b Return to previous offset"
print "g/G Goto BOF (g) or EOF (G)"
print "+/- Go forward/backward one block (specified by pyew.bsize)"
print "c/d/dis/pd Show disassembly"
print "a Do code analysis"
print "r/repr Show string representation"
print "ls List scripts available or launch one if used with an argument"
print "p Print the buffer"
print "buf Print as a python buffer"
print "byte Print as a C byte array"
print "/x expr Search hexadecimal string"
print "/s expr Search strings"
print "/i expr Search string ignoring case"
print "/r expr Search regular expression"
print "/u expr Search unicode expression"
print "/U expr Search unicode expression ignoring case"
print "edit Reopen the file for reading and writting"
print "wx data Write hexadecimal data to file"
print "wa data Write ASCII data to file"
print "file Load as new file the buffer from the current offset"
print "ret Return to the original file (use after 'file')"
print "interact Open an interactive Python console"
print
print "Cryptographic functions: md5, sha1, sha224, sha256, sha384, sha512"
print
print "Examples:"
print "[0x0]> md5"
print "md5: d37b6d42a04cbc04cb2988ed947a5b0d"
print "[0x0]> md5(pyew.buf[0:7])"
print "581fd4acfc2214aa246f0b47c8ae8a4e"
print "[0x0]> md5(pyew.buf[15:35])"
print "a73b2882dd918070c6e8dfd9081fb600"
print
if pyew.pe:
print "PE specific commands:"
print
print "imports Show the import table"
print "exports Show the export table (if any)"
print
print "Current configuration options:"
print
pyew.showSettings()
print
print "Any other expression will be evaled as a Python expression"
print
def createSchema(db):
try:
sql = """create table samples (id integer not null primary key,
md5, sha1, sha256, filename, type)"""
db.execute(sql)
sql = """create table function_stats (
id integer not null primary key,
sample_id, addr, nodes, edges, cc)"""
db.execute(sql)
sql = """create table antidebugs (
id integer not null primary key,
sample_id, addr, mnemonic
)"""
db.execute(sql)
except:
pass
def saveSample(db, pyew, buf, amd5):
try:
asha1 = sha1(buf).hexdigest()
asha256 = sha256(buf).hexdigest()
name = pyew.filename
format = pyew.format
cur = db.cursor()
sql = """ insert into samples (md5, sha1, sha256, filename, type)
values (?, ?, ?, ?, ?)"""
cur.execute(sql, (amd5, asha1, asha256, name, format))
rid = cur.lastrowid
sql = """ insert into function_stats (sample_id, addr, nodes, edges, cc)
values (?, ?, ?, ?, ?) """
for f in pyew.function_stats:
addr = "0x%08x" % f
nodes, edges, cc = pyew.function_stats[f]
cur.execute(sql, (rid, addr, nodes, edges, cc))
sql = """ insert into antidebugs (sample_id, addr, mnemonic) values (?, ?, ?) """
for antidbg in pyew.antidebug:
addr, mnem = antidbg
addr = "0x%08x" % addr
cur.execute(sql, (rid, addr, mnem))
db.commit()
except:
print sys.exc_info()[1]
pass
def saveAndCompareInDatabase(pyew):
db = sqlite3.connect(DATABASE_PATH)
createSchema(db)
cur = db.cursor()
bcontinue = True
try:
buf = pyew.getBuffer()
amd5 = md5(buf).hexdigest()
name = pyew.filename
sql = """ select * from samples where md5 = ? """
cur.execute(sql, (amd5, ))
for row in cur.fetchall():
if row[4] != name:
print "NOTICE: File was previously analyzed (%s)" % row[4]
print
bcontinue = False
cur.close()
if bcontinue:
saveSample(db, pyew, buf, amd5)
except:
print sys.exc_info()[1]
raise
def setupAutoCompletion(pyew):
# Settings
commands = {"pyew": pyew}
# Plugins
for plugin in pyew.plugins:
commands[plugin.ljust(8)] = 0
# Crypto
cryptos = ["md5", "sha1", "sha224", "sha256", "sha384", "sha512"]
for crypto in cryptos:
commands[crypto] = 0
try:
import rlcompleter
readline.set_completer(rlcompleter.Completer(commands).complete)
readline.parse_and_bind("tab: complete")
except:
pass
def main(filename):
pyew = CPyew()
if os.getenv("PYEW_DEBUG"):
pyew.debug=True
else:
pyew.debug = False
pyew.loadFile(filename, "rb")
if pyew.format in ["PE", "ELF"]:
saveAndCompareInDatabase(pyew)
pyew.offset = 0
print pyew.hexdump(pyew.buf, pyew.hexcolumns)
oldpyew = None
cmd = ""
last_cmd = ""
pyew.previousoffset = []
# Add global object's references for easier usage
pe = pyew.pe
elf = pyew.elf
# Set AutoCompletion
setupAutoCompletion(pyew)
# Check if there is runme.py file
if os.path.exists('runme.py'):
f = open('runme.py', 'r')
commands = f.readlines()
f.close()
while 1:
try:
last_cmd = cmd
if len(pyew.previousoffset) > 0:
if pyew.previousoffset[len(pyew.previousoffset)-1] != pyew.offset:
pyew.previousoffset.append(pyew.offset)
else:
pyew.previousoffset.append(pyew.offset)
va = None
if pyew.virtual:
va = pyew.getVirtualAddressFromOffset(pyew.offset)
if va:
prompt = "[0x%08x:0x%08x]> " % (pyew.offset, va)
else:
prompt = "[0x%08x]> " % pyew.offset
try:
cmd = commands[0].rstrip()
commands.pop(0)
except:
cmd = raw_input(prompt)
if cmd in ["", "b"] and (last_cmd in ["b", "x", "c", "d", "dump", "hexdump", "dis", "pd", "p", "r", "buf"] or last_cmd.isdigit()):
if cmd == "b":
tmp = pyew.previousoffset.pop()
if len(pyew.previousoffset) > 0:
tmp = pyew.previousoffset[len(pyew.previousoffset)-1]
else:
tmp = 0
pyew.offset = tmp
pyew.lastasmoffset = tmp
pyew.seek(tmp)
if last_cmd.isdigit():
last_cmd = "c"
elif cmd == "b" and last_cmd == "b":
if len(pyew.previousoffset) < 2:
continue
tmp = pyew.previousoffset.pop()
tmp = pyew.previousoffset[len(pyew.previousoffset)-1]
pyew.seek(tmp)
continue
elif last_cmd in ["c", "d", "pd"] or last_cmd.isdigit():
pyew.offset = pyew.lastasmoffset
pyew.seek(pyew.offset)
if last_cmd.isdigit():
last_cmd = "c"
else:
pyew.offset = pyew.offset+pyew.bsize
pyew.seek(pyew.offset)
cmd = last_cmd
except EOFError:
break
except KeyboardInterrupt:
break
try:
if cmd.strip(" ") == "":
continue
if cmd.lower() in ["exit", "quit", "q"]:
break
elif cmd.lower() in ["a", "anal"]:
pyew.findFunctions(pyew.processor)
print
elif cmd.lower() in ["x", "dump", "hexdump"]:
print pyew.hexdump(pyew.buf, pyew.hexcolumns, baseoffset=pyew.offset)
elif cmd.split(" ")[0] in ["s", "seek"]:
data = cmd.split(" ")
if len(data) > 1:
if data[1].lower() in ["ep", "entrypoint"]:
if pyew.ep:
pyew.offset = pyew.ep
else:
pyew.names.has_key(data[1].lower())
if data[1].lower()[0] in ["+", "-"]:
pyew.offset += int(data[1])
elif data[1].lower().startswith("0x"):
pyew.offset = int(data[1], 16)
elif data[1] in pyew.names.values():
for x in pyew.names:
if pyew.names[x] == data[1]:
pyew.offset = x
break
else:
pyew.offset = int(data[1])
pyew.seek(pyew.offset)
elif cmd.lower().split(" ")[0] in ["c", "d", "dis", "pd"]:
data = cmd.lower().split(" ")
if len(data) > 1:
if not data[1].startswith("/"):
type = int(data[1])
dis = pyew.disassemble(pyew.buf, pyew.processor, pyew.type, pyew.lines, pyew.bsize, baseoffset=pyew.offset)
print dis
else:
cmd = data[1:]
if len(cmd) > 1:
ret = pyew.dosearch(pyew.f, cmd[0][1:2], cmd[1], cols=60, doprint=False, offset=pyew.offset)
else:
ret = pyew.dosearch(pyew.f, cmd[0][1:2], "", cols=60, doprint=False, offset=pyew.offset)
for x in ret:
dis = pyew.disassemble(x.values()[0], pyew.processor, pyew.type, pyew.lines, pyew.bsize, baseoffset=x.keys()[0])
print dis
else:
dis = pyew.disassemble(pyew.buf, pyew.processor, pyew.type, pyew.lines, pyew.bsize, baseoffset=pyew.offset)
print dis
elif cmd.isdigit() and int(cmd) < len(pyew.calls)+1 and int(cmd) > 0:
pyew.offset = pyew.calls[int(cmd)-1]
pyew.seek(pyew.offset)
dis = pyew.disassemble(pyew.buf, pyew.processor, pyew.type, pyew.lines, pyew.bsize, baseoffset=pyew.offset)
print dis
elif cmd == "buf":
lines = 0
line = ""
for c in pyew.buf:
line += c
if len(line) == pyew.hexcolumns:
print repr(line)
line = ""
if line != "":
print repr(line)
elif cmd == "byte":
lines = 0
line = ""
for c in pyew.buf:
line += "0x%x, " % ord(c)
if len(line) >= pyew.hexcolumns / (1.00/4.00):
print line
line = ""
if line != "":
print "%s" % line
elif cmd.lower().split(" ")[0] in ["r", "repr"]:
print repr(pyew.buf)
elif cmd.lower().split(" ")[0] in ["p"]:
print pyew.buf
elif cmd.lower() in ["settings", "options"]:
pyew.showSettings()
elif cmd.startswith("/"):
ret = pyew.dosearch(pyew.f, cmd[1:2], cmd[3:], cols=60, offset=pyew.offset)
elif cmd.lower() in ["?", "help"]:
showHelp(pyew)
elif cmd.lower() in ["imports"]:
if pyew.format == "PE":
for entry in pyew.pe.DIRECTORY_ENTRY_IMPORT:
print entry.dll
for imp in entry.imports:
print '\t', hex(imp.address), imp.name
elif pyew.format == "ELF":
for x in pyew.elf.relocs:
print x
elif cmd.lower() in ["exports"]:
if pyew.format == "PE":
for exp in pyew.pe.DIRECTORY_ENTRY_EXPORT.symbols:
print hex(pyew.pe.OPTIONAL_HEADER.ImageBase + exp.address), exp.name, exp.ordinal
elif pyew.format == "ELF":
print "Not yet implemented"
elif cmd.lower() in ["sections"]:
if pyew.format == "PE":
for x in pyew.pe.sections:
print x
elif pyew.format == "ELF":
for x in pyew.elf.secnames:
print pyew.elf.secnames[x]
elif cmd.lower() in ["elf", "pe"]:
if cmd.lower() == "elf":
print pyew.elf
else:
print pyew.pe
elif cmd.lower() == "g":
if cmd == "g":
pyew.offset = 0
else:
pyew.offset = pyew.maxsize - pyew.bsize
if pyew.offset < 0:
pyew.offset = pyew.maxsize - 32
pyew.seek(pyew.offset)
elif cmd in ["-", "+"]:
if cmd == "+":
pyew.offset += pyew.bsize
else:
pyew.offset -= pyew.bsize
pyew.seek(pyew.offset)
elif pyew.plugins.has_key(cmd.split(" ")[0]):
plg = cmd.split(" ")
if len(plg) == 1:
pyew.plugins[plg[0]](pyew)
else:
pyew.plugins[plg[0]](pyew, plg[1:])
elif cmd.lower().split(" ")[0] in ["md5", "sha1", "sha224", "sha256", "sha384", "sha512"]:
func = eval(cmd)
print "%s: %s" % (cmd, func(pyew.getBuffer()).hexdigest())
elif cmd.startswith("!"):
os.system(cmd[1:])
elif cmd == "ret" and oldpyew is not None:
pyew = oldpyew
pyew.seek(pyew.offset)
oldpyew = None
elif cmd == "file":
oldpyew = pyew
del pyew
pyew = CPyew()
buf = oldpyew.getBytes(oldpyew.offset, oldpyew.maxsize)
pyew.loadFromBuffer(buf, oldpyew.filename + "[embed]")
elif cmd == "interact":
code.interact(local=locals())
elif cmd == "edit":
pyew.f.close()
pyew.f = open(filename, "r+wb")
pyew.seek(0)
elif cmd.split(" ")[0] in ["ls"]:
data = cmd.split(" ")
if len(data) == 2:
#print "parsing script file:", data[1]
f = open('scripts/' + data[1], 'r')
commands = f.readlines()
f.close()
else:
scripts = os.listdir('scripts/')
print "Scripts available:"
for script in scripts:
print "\t", script
elif cmd.split(" ")[0] in ["wx", "wa"]:
if cmd.split(" ")[0] == "wx":
data = unhexlify(cmd.split(" ")[1])
else:
data = cmd.split(" ")[1]
pyew.f.seek(pyew.offset)
pyew.f.write(data)
pyew.seek(pyew.offset)
else:
if cmd.find("=") > -1 or cmd.startswith("print") or cmd.startswith("import "):
exec(cmd)
else:
x = eval(cmd)
if "hexdigest" in dir(x):
print "%s: %s" % (cmd, x.hexdigest())
else:
pprint.pprint(x)
except:
print "Error:", sys.exc_info()[1]
if pyew.debug:
raise
def mainBatch(directory):
pass
def usage():
print "%s Version 0x%08x (%s)" % (PROGRAM, VERSION, HUMAN_VERSION)
print
print "Usage:", sys.argv[0], "#"
if __name__ == "__main__":
if len(sys.argv) == 1:
usage()
else:
main(sys.argv[1])
```
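The samples database written by saveAndCompareInDatabase() is plain SQLite, so it can also be inspected directly; this sketch assumes it runs from the pyew directory so that config.DATABASE_PATH resolves.

```python
# List previously analyzed samples from pyew's SQLite database (Python 2).
import sqlite3
from config import DATABASE_PATH    # same config module pyew.py imports

db = sqlite3.connect(DATABASE_PATH)
cur = db.cursor()
cur.execute("select md5, type, filename from samples")
for amd5, ftype, name in cur.fetchall():
    print "%s  %-4s %s" % (amd5, ftype, name)
db.close()
```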
#### File: peach/BasicBlocksPyew/safer_pickle.py
```python
import sys
import pickle
import StringIO
class SafeUnpickler(pickle.Unpickler):
PICKLE_SAFE = {
"copy_reg": set(['_reconstructor']),
"__builtin__": set(['object']),
"pyew_core":set(["CPyew", "CDisObj"]),
"anal.x86analyzer":set(["CX86Function", "CX86BasicBlock"]),
"_ctypes":["_unpickle"],
"pydistorm":["_WString"],
"Elf":["Elf", "Elf64Dynamic", "Elf32Dynamic", "Elf64Section", "Elf32Section",
"Elf64Pheader", "Elf32Pheader", "Elf64Symbol", "Elf32Symbol", "Elf64Reloca"],
"pefile":["PE", "Structure", "SectionStructure", "ImportDescData", "ImportData",
"ResourceDirData", "ResourceDirEntryData", "ResourceDataEntryData"],
}
def find_class(self, module, name):
if not module in self.PICKLE_SAFE:
raise pickle.UnpicklingError(
'Attempting to unpickle unsafe module %s' % module
)
__import__(module)
mod = sys.modules[module]
if not name in self.PICKLE_SAFE[module]:
raise pickle.UnpicklingError(
'Attempting to unpickle unsafe class %s of module %s' % (name, module)
)
klass = getattr(mod, name)
return klass
@classmethod
def loads(cls, pickle_string):
return cls(StringIO.StringIO(pickle_string)).load()
```
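A quick sketch of the whitelist in action; the import path is an assumption. Builtin containers unpickle normally, while a pickle that references a module outside PICKLE_SAFE is rejected.

```python
# Demonstrate the module whitelist (Python 2).
import pickle
import datetime
from safer_pickle import SafeUnpickler    # import path is an assumption

print SafeUnpickler.loads(pickle.dumps({"offset": 0}))   # builtins are fine
try:
    SafeUnpickler.loads(pickle.dumps(datetime.datetime.now()))
except pickle.UnpicklingError as e:
    print e    # datetime is not in PICKLE_SAFE, so unpickling is refused
```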
#### File: build/config/linux.py
```python
import os.path
from waflib import Utils
from waflib.TaskGen import feature
host_plat = [ 'linux' ]
archs = [ 'x86', 'x86_64' ]
tools = [
'gcc',
'gxx',
'cs',
'resx',
'misc',
'tools.utils',
'tools.externals',
'tools.test',
'tools.version',
'tools.xcompile',
'tools.mdoc',
]
def prepare(conf):
root = conf.path.abspath()
env = conf.env
j = os.path.join
env['MCS'] = 'dmcs'
env['CC'] = 'gcc'
env['CXX'] = 'g++'
    if os.environ.get('ARCH') != 'aarch64':
env['ARCH'] = ['-m%s' % ('64' in env.SUBARCH and '64' or '32')]
env['ARCH_ST'] = env['ARCH']
pin_root = env['PIN_ROOT'] or j(root, '3rdParty', 'pin')
pin = j(pin_root, 'pin-2.12-54730-gcc.4.4.7-linux')
env['EXTERNALS_x86'] = {
'pin' : {
'INCLUDES' : [
j(pin, 'source', 'include'),
j(pin, 'source', 'include', 'gen'),
j(pin, 'extras', 'components', 'include'),
j(pin, 'extras', 'xed2-ia32', 'include'),
],
'HEADERS' : [],
'STLIBPATH' : [
j(pin, 'ia32', 'lib'),
j(pin, 'ia32', 'lib-ext'),
j(pin, 'extras', 'xed2-ia32', 'lib'),
],
'STLIB' : [ 'dwarf', 'elf', 'pin', 'xed' ],
'DEFINES' : [ 'BIGARRAY_MULTIPLIER=1', 'TARGET_LINUX', 'TARGET_IA32', 'HOST_IA32', 'USING_XED', ],
'CFLAGS' : [],
'CXXFLAGS' : [],
'LINKFLAGS' : [],
},
}
env['EXTERNALS_x86_64'] = {
'pin' : {
'INCLUDES' : [
j(pin, 'source', 'include'),
j(pin, 'source', 'include', 'gen'),
j(pin, 'extras', 'components', 'include'),
j(pin, 'extras', 'xed2-intel64', 'include'),
],
'HEADERS' : [],
'STLIBPATH' : [
j(pin, 'intel64', 'lib'),
j(pin, 'intel64', 'lib-ext'),
j(pin, 'extras', 'xed2-intel64', 'lib'),
],
'STLIB' : [ 'dwarf', 'elf', 'xed' ],
'DEFINES' : [ 'BIGARRAY_MULTIPLIER=1', 'TARGET_LINUX', 'TARGET_IA32E', 'HOST_IA32E', 'USING_XED', ],
'CFLAGS' : [],
'CXXFLAGS' : [],
'LINKFLAGS' : [],
},
}
env['EXTERNALS'] = env['EXTERNALS_%s' % env.SUBARCH]
env.append_value('supported_features', [
'linux',
'c',
'cstlib',
'cshlib',
'cprogram',
'cxx',
'cxxstlib',
'cxxshlib',
'cxxprogram',
'fake_lib',
'cs',
'test',
'debug',
'release',
'emit',
'vnum',
'subst',
'network',
])
def configure(conf):
env = conf.env
env['IS_MONO'] = 'True'
env.append_value('CSFLAGS', [
'/warn:4',
'/define:PEACH,UNIX,MONO',
'/nowarn:1591' # Missing XML comment for publicly visible type
])
env.append_value('CSFLAGS_debug', [
'/define:DEBUG,TRACE,MONO',
])
env.append_value('CSFLAGS_release', [
'/define:TRACE,MONO',
'/optimize+',
])
env['CSPLATFORM'] = 'anycpu'
env['CSDOC'] = True
env.append_value('DEFINES_debug', [
'DEBUG',
])
cppflags = [
'-pipe',
'-Werror',
'-Wno-unused',
]
cppflags_debug = [
'-ggdb',
]
cppflags_release = [
'-O3',
]
env.append_value('CPPFLAGS', cppflags)
env.append_value('CPPFLAGS_debug', cppflags_debug)
env.append_value('CPPFLAGS_release', cppflags_release)
env.append_value('LIB', [ 'dl' ])
env['VARIANTS'] = [ 'debug', 'release' ]
def debug(env):
env.CSDEBUG = 'full'
def release(env):
env.CSDEBUG = 'pdbonly'
```
#### File: build/tools/msbuild.py
```python
from waflib import Utils, Task, Options, Logs, Errors
from waflib.TaskGen import before_method, after_method, feature
from waflib.Tools import ccroot
import os.path
ccroot.USELIB_VARS['msbuild'] = set(['CSFLAGS'])
def configure(conf):
conf.find_program('msbuild')
msbuild_fmt = '''<?xml version="1.0" encoding="utf-8"?>
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003" DefaultTargets="Build" ToolsVersion="4.0">
<PropertyGroup>
{PROPERTIES}
</PropertyGroup>
{SOURCES}
<ItemGroup>
{REFERENCES}
{REF_HINTS}
</ItemGroup>
<Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
</Project>
'''
# Compile, EmbeddedResource, Page, Resource
src_fmt = ''' <ItemGroup>
<{1} Include="{0}">
<Link>{2}</Link>
</{1}>
</ItemGroup>'''
ref_fmt = ''' <Reference Include="{0}">
<HintPath>{1}</HintPath>
</Reference>'''
use_fmt = ''' <Reference Include="{0}"/>'''
cfg_fmt = ''' <{0}>{1}</{0}>'''
def get_source_type(name):
if name.endswith('.cs'):
return 'Compile'
if name.endswith('.xaml'):
return 'Page'
return 'EmbeddedResource'
def get_link_path(self, node):
if node.is_src():
return str(node.path_from(self.path))
else:
return str(node.path_from(self.path.get_bld()))
@feature('msbuild')
@before_method('process_source')
def apply_msbuild(self):
bintype = getattr(self, 'bintype', self.gen.endswith('.dll') and 'library' or 'exe')
asm = self.path.find_or_declare(self.gen)
cfg = {}
cfg['OutputType'] = bintype
cfg['AssemblyName'] = os.path.splitext(self.gen)[0]
cfg['RootNamespace'] = getattr(self, 'namespace', cfg['AssemblyName'])
cfg['TargetFrameworkVersion'] = 'v4.0'
cfg['PlatformTarget'] = getattr(self, 'platform', 'anycpu')
cfg['IntermediateOutputPath'] = 'obj'
cfg['OutputPath'] = self.path.get_bld().abspath()
cfg['UseCommonOutputDirectory'] = 'true'
cfg['WarningLevel'] = '4'
self.gen_task = self.create_task('genproj', [], asm.change_ext('.proj'))
self.cs_task = self.create_task('msbuild', self.gen_task.outputs, asm)
main = self.to_nodes(getattr(self, 'main', []))
source = self.to_nodes(getattr(self, 'source', []))
resource = self.to_nodes(getattr(self, 'resource', []))
icon = self.to_nodes(getattr(self, 'icon', []))
srcs = []
for x in main:
srcs.append( (x.abspath(), 'ApplicationDefinition', get_link_path(self, x)) )
if x in source:
source.remove(x)
for x in source:
srcs.append( (x.abspath(), get_source_type(x.name), get_link_path(self, x)) )
for x in resource:
srcs.append( (x.abspath(), 'Resource', get_link_path(self, x)) )
if icon:
cfg['ApplicationIcon'] = icon[0].abspath()
self.gen_task.env.MSBUILD_FMT = msbuild_fmt
self.gen_task.env.MSBUILD_CFG = cfg
self.gen_task.env.MSBUILD_SRC = srcs
self.gen_task.env.MSBUILD_REF = []
self.gen_task.env.MSBUILD_USE = []
self.cs_task.dep_nodes.extend(main + source + resource + icon)
self.source = []
inst_to = getattr(self, 'install_path', bintype=='exe' and '${BINDIR}' or '${LIBDIR}')
if inst_to:
# note: we are making a copy, so the files added to cs_task.outputs won't be installed automatically
mod = getattr(self, 'chmod', bintype=='exe' and Utils.O755 or Utils.O644)
self.install_task = self.bld.install_files(inst_to, self.cs_task.outputs[:], env=self.env, chmod=mod)
# if this is an exe, look for app.config and install to ${BINDIR}
if 'exe' in bintype:
cfg = self.path.find_or_declare('app.config')
self.bld.install_as('%s/%s.config' % (inst_to, self.gen), cfg, env=self.env, chmod=Utils.O755)
@feature('msbuild')
@after_method('propagate_uselib_vars')
def uselib_msbuild(self):
ccroot.propagate_uselib_vars(self)
flags = self.env.CSFLAGS
defs = ','.join( f[8:] for f in flags if '/define:' in f)
self.gen_task.env.MSBUILD_CFG['Optimize'] = '/optimize+' in flags and 'true' or 'false'
self.gen_task.env.MSBUILD_CFG['DefineConstants'] = defs
@feature('msbuild')
@after_method('apply_msbuild')
def use_msbuild(self):
names = self.to_list(getattr(self, 'use', []))
get = self.bld.get_tgen_by_name
for x in names:
try:
y = get(x)
except Errors.WafError:
self.gen_task.env.append_value('MSBUILD_USE', os.path.splitext(x)[0])
continue
y.post()
tsk = getattr(y, 'cs_task', None) or getattr(y, 'link_task', None)
if not tsk:
self.bld.fatal('cs task has no link task for use %r' % self)
self.cs_task.dep_nodes.extend(tsk.outputs) # dependency
self.cs_task.set_run_after(tsk) # order (redundant, the order is infered from the nodes inputs/outputs)
f = tsk.outputs[0]
self.gen_task.env.MSBUILD_REF.append( (f.abspath(), os.path.splitext(f.name)[0]) )
@feature('msbuild')
@after_method('apply_msbuild', 'use_msbuild')
def debug_msbuild(self):
csdebug = getattr(self, 'csdebug', self.env.CSDEBUG)
if not csdebug:
return
node = self.cs_task.outputs[0]
if self.env.CS_NAME == 'mono':
out = node.parent.find_or_declare(node.name + '.mdb')
else:
out = node.change_ext('.pdb')
self.cs_task.outputs.append(out)
try:
self.install_task.source.append(out)
except AttributeError:
pass
if csdebug == 'pdbonly':
self.gen_task.env.MSBUILD_CFG['DebugSymbols'] = 'true'
self.gen_task.env.MSBUILD_CFG['DebugType'] = 'pdbonly'
elif csdebug == 'full':
self.gen_task.env.MSBUILD_CFG['DebugSymbols'] = 'true'
self.gen_task.env.MSBUILD_CFG['DebugType'] = 'full'
else:
self.gen_task.env.MSBUILD_CFG['DebugSymbols'] = 'false'
self.gen_task.env.MSBUILD_CFG['DebugType'] = 'none'
@feature('msbuild')
@after_method('apply_msbuild', 'use_msbuild')
def doc_msbuild(self):
csdoc = getattr(self, 'csdoc', self.env.CSDOC)
if not csdoc:
return
bintype = getattr(self, 'bintype', self.gen.endswith('.dll') and 'library' or 'exe')
if bintype != 'library':
return
node = self.cs_task.outputs[0]
out = node.change_ext('.xml')
self.cs_task.outputs.append(out)
try:
self.install_task.source.append(out)
except AttributeError:
pass
self.gen_task.env.MSBUILD_CFG['DocumentationFile'] = out.name
class msbuild(Task.Task):
"""
Run msbuild
"""
color = 'YELLOW'
run_str = '${MSBUILD} ${SRC}'
class genproj(Task.Task):
color = 'PINK'
vars = [ 'MSBUILD_FMT', 'MSBUILD_CFG', 'MSBUILD_SRC', 'MSBUILD_REF', 'MSBUILD_USE' ]
def run(self):
cfg = '\n'.join([ cfg_fmt.format(k, v) for k,v in self.env.MSBUILD_CFG.items()])
src = '\n'.join([ src_fmt.format(n, t, l) for n,t,l in self.env.MSBUILD_SRC])
ref = '\n'.join([ ref_fmt.format(n, p) for p,n in self.env.MSBUILD_REF])
use = '\n'.join([ use_fmt.format(i) for i in self.env.MSBUILD_USE])
fmt = {
'PROPERTIES' : cfg,
'SOURCES' : src,
'REF_HINTS' : ref,
'REFERENCES' : use,
}
txt = self.env.MSBUILD_FMT.format(**fmt)
#print txt
self.outputs[0].write(txt)
```
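A hypothetical wscript fragment showing how a target would opt into the 'msbuild' feature above; the attribute names mirror what apply_msbuild/use_msbuild read via getattr, while the assembly name, glob, dependency and define are made up for illustration.

```python
# Hypothetical wscript usage of the msbuild feature (names are illustrative).
def build(bld):
    bld(
        features = 'msbuild',
        name = 'Peach.Example.dll',              # gen defaults to name (see utils.cs_helpers)
        source = bld.path.ant_glob('**/*.cs'),
        use = 'SomeDependency.dll',              # resolved by use_msbuild into MSBUILD_REF/USE
        csflags = ['/define:EXAMPLE'],
    )
```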
#### File: build/tools/utils.py
```python
import os.path, re
from waflib.TaskGen import feature, before_method, after_method
from waflib.Configure import conf
from waflib import Utils, Logs, Task, Context, Errors
@feature('*')
@before_method('process_source')
def default_variant(self):
if not self.env.VARIANT:
return
features = set(Utils.to_list(self.features))
available = set(Utils.to_list(self.env.VARIANTS))
intersect = features & available
if not intersect:
features.add(self.env.VARIANT)
self.features = list(features)
@feature('*')
@after_method('process_source')
def install_extras(self):
try:
inst_to = self.install_path
except AttributeError:
inst_to = hasattr(self, 'link_task') and getattr(self.link_task.__class__, 'inst_to', None)
if not inst_to:
if getattr(self, 'install', []):
Logs.warn('\'%s\' has no install path but is supposed to install: %s' % (self.name, self.install))
return
extras = self.to_nodes(getattr(self, 'install', []))
if extras:
self.bld.install_files(inst_to, extras, env=self.env, cwd=self.path, relative_trick=True, chmod=Utils.O644)
@feature('win', 'linux', 'osx', 'debug', 'release', 'com', 'pin', 'network')
def dummy_platform(self):
# prevent warnings about features with unbound methods
pass
@feature('fake_lib')
@after_method('process_lib')
def install_csshlib(self):
if self.link_task.__class__.__name__ != 'fake_csshlib':
return
# install 3rdParty libs into ${LIBDIR}
self.bld.install_files('${LIBDIR}', self.link_task.outputs, chmod=Utils.O755)
# install any .config files into ${LIBDIR}
for lib in self.link_task.outputs:
config = lib.parent.find_resource(lib.name + '.config')
if config:
self.bld.install_files('${LIBDIR}', config, chmod=Utils.O755)
@feature('cs', 'msbuild')
@before_method('apply_cs', 'apply_msbuild')
def cs_helpers(self):
# set self.gen based off self.name since they are usually the same
if not getattr(self, 'gen', None):
setattr(self, 'gen', self.name)
# ensure all binaries get chmod 755
setattr(self, 'chmod', Utils.O755)
# add optional csflags
csflags = getattr(self, 'csflags', [])
if csflags:
self.env.append_value('CSFLAGS', csflags)
# ensure the appropriate platform is being set on the command line
setattr(self, 'platform', self.env.CSPLATFORM)
# ensure install_path is set
if not getattr(self, 'install_path', None):
setattr(self, 'install_path', '${BINDIR}')
@feature('cs')
@after_method('apply_cs')
def cs_resource(self):
base = getattr(self, 'namespace', os.path.splitext(self.gen)[0])
if getattr(self, 'unsafe', False):
self.env.append_value('CSFLAGS', ['/unsafe+'])
keyfile = self.to_nodes(getattr(self, 'keyfile', []))
self.cs_task.dep_nodes.extend(keyfile)
if keyfile:
self.env.append_value('CSFLAGS', '/keyfile:%s' % (keyfile[0].abspath()))
# add external resources to the dependency list and compilation command line
resources = self.to_nodes(getattr(self, 'resource', []))
self.cs_task.dep_nodes.extend(resources)
for x in resources:
rel_path = x.path_from(self.path)
name = rel_path.replace('\\', '.').replace('/', '.')
final = base + '.' + name
self.env.append_value('CSFLAGS', '/resource:%s,%s' % (x.abspath(), final))
# win32 icon support
icon = getattr(self, 'icon', None)
if icon:
node = self.path.find_or_declare(icon)
self.cs_task.dep_nodes.append(node)
self.env.append_value('CSFLAGS', ['/win32icon:%s' % node.path_from(self.bld.bldnode)])
if 'exe' in self.cs_task.env.CSTYPE:
# if this is an exe, require app.config and install to ${BINDIR}
cfg = self.path.find_or_declare('app.config')
else:
# if this is an assembly, app.config is optional
cfg = self.path.find_resource('app.config')
if cfg:
inst_to = getattr(self, 'install_path', '${BINDIR}')
self.bld.install_as('%s/%s.config' % (inst_to, self.gen), cfg, env=self.env, chmod=Utils.O755)
@conf
def clone_env(self, variant):
env = self.all_envs.get(variant, None)
if env is None:
return None
copy = env.derive()
copy.PREFIX = self.env.PREFIX
copy.BINDIR = self.env.BINDIR
copy.LIBDIR = self.env.LIBDIR
copy.DOCDIR = self.env.DOCDIR
return copy
@conf
def ensure_version(self, tool, ver_exp):
ver_exp = Utils.to_list(ver_exp)
env = self.env
environ = dict(self.environ)
environ.update(PATH = ';'.join(env['PATH']))
cmd = self.cmd_to_list(env[tool])
(out,err) = self.cmd_and_log(cmd + ['/help'], env=environ, output=Context.BOTH)
exe = os.path.split(cmd[0])[1].lower()
ver_re = re.compile('.*ersion (\d+\.\d+\.\d+\.\d+)')
m = ver_re.match(out)
if not m:
m = ver_re.match(err)
if not m:
raise Errors.WafError("Could not verify version of %s" % (exe))
ver = m.group(1)
found = False
for v in ver_exp:
found = ver.startswith(v) or found
if not found:
raise Errors.WafError("Requires %s %s but found version %s" % (exe, ver_exp, ver))
@feature('emit')
@before_method('process_rule')
def apply_emit(self):
self.env.EMIT_SOURCE = self.source
self.source = []
self.meths.remove('process_source')
outputs = [ self.path.find_or_declare(self.target) ]
self.create_task('emit', None, outputs)
class emit(Task.Task):
color = 'PINK'
vars = [ 'EMIT_SOURCE' ]
def run(self):
text = self.env['EMIT_SOURCE']
self.outputs[0].write(text)
``` |
{
"source": "JoeyJiao/python-afl",
"score": 2
} |
#### File: python-afl/tests/test_fuzz.py
```python
from __future__ import print_function
import base64
import contextlib
import distutils.version
import glob
import os
import re
import signal
import subprocess as ipc
import sys
import time
import warnings
try:
# Python >= 3.3
from shlex import quote as shell_quote
except ImportError:
# Python << 3.3
from pipes import quote as shell_quote
from .tools import (
SkipTest,
assert_true,
clean_environ,
require_commands,
tempdir,
)
here = os.path.dirname(__file__)
token = base64.b64encode(os.urandom(8))
if not isinstance(token, str):
token = token.decode('ASCII')
def get_afl_version():
require_commands('afl-fuzz')
child = ipc.Popen(['afl-fuzz'], stdout=ipc.PIPE)
version = child.stdout.readline()
child.stdout.close()
child.wait()
if str is not bytes:
version = version.decode('ASCII')
version = re.sub(r'\x1B\[[^m]+m', '', version)
match = re.match(r'^afl-fuzz\s+([0-9.]+)b?\b', version)
version = match.group(1)
return distutils.version.StrictVersion(version)
def sleep(n):
time.sleep(n)
return n
def check_core_pattern():
with open('/proc/sys/kernel/core_pattern', 'rb') as file:
pattern = file.read()
if str is not bytes:
pattern = pattern.decode('ASCII', 'replace')
pattern = pattern.rstrip('\n')
if pattern.startswith('|'):
raise SkipTest('/proc/sys/kernel/core_pattern = ' + pattern)
def _test_fuzz(workdir, target, dumb=False):
require_commands('py-afl-fuzz', 'afl-fuzz')
input_dir = workdir + '/in'
output_dir = workdir + '/out'
os.mkdir(input_dir)
os.mkdir(output_dir)
with open(input_dir + '/in', 'w') as file:
file.write('0')
crash_dir = output_dir + '/crashes'
queue_dir = output_dir + '/queue'
have_crash = False
have_paths = False
n_paths = 0
with open('/dev/null', 'wb') as devnull:
with open(workdir + '/stdout', 'wb') as stdout:
cmdline = ['py-afl-fuzz', '-i', input_dir, '-o', output_dir, '--', sys.executable, target, token]
if dumb:
cmdline[1:1] = ['-n']
print('$ ' + ' '.join(shell_quote(arg) for arg in cmdline))
afl = ipc.Popen(
cmdline,
stdout=stdout,
stdin=devnull,
preexec_fn=clean_environ,
)
try:
timeout = 10
while timeout > 0:
if afl.poll() is not None:
break
have_crash = len(glob.glob(crash_dir + '/id:*')) >= 1
n_paths = len(glob.glob(queue_dir + '/id:*'))
have_paths = (n_paths == 1) if dumb else (n_paths >= 2)
if have_crash and have_paths:
break
timeout -= sleep(0.1)
if afl.returncode is None:
afl.terminate()
afl.wait()
except:
afl.kill()
raise
with open(workdir + '/stdout', 'rb') as file:
stdout = file.read()
if str is not bytes:
stdout = stdout.decode('ASCII', 'replace')
print(stdout)
if not have_crash and '/proc/sys/kernel/core_pattern' in stdout:
check_core_pattern()
assert_true(have_crash, "target program didn't crash")
assert_true(have_paths, 'target program produced {n} distinct paths'.format(n=n_paths))
@contextlib.contextmanager
def stray_process_cleanup():
# afl-fuzz doesn't always kill the target process:
# https://groups.google.com/d/topic/afl-users/E37s4YDti7o
require_commands('ps')
try:
yield
finally:
ps = ipc.Popen(['ps', 'ax'], stdout=ipc.PIPE)
strays = []
for line in ps.stdout:
if not isinstance(line, str):
line = line.decode('ASCII', 'replace')
if token in line:
strays += [line]
if strays:
warnings.warn('stray process{es} left behind:\n{ps}'.format(
es=('' if len(strays) == 1 else 'es'),
ps=''.join(strays)
), category=RuntimeWarning)
for line in strays:
pid = int(line.split()[0])
os.kill(pid, signal.SIGKILL)
ps.wait()
def test_fuzz(dumb=False):
def t(target):
with stray_process_cleanup():
with tempdir() as workdir:
_test_fuzz(
workdir=workdir,
target=os.path.join(here, target),
dumb=dumb,
)
yield t, 'target.py'
yield t, 'target_persistent.py'
def test_fuzz_dumb():
if get_afl_version() < '1.95':
def skip():
raise SkipTest('afl-fuzz >= 1.95b is required')
else:
skip = False
for t in test_fuzz(dumb=True):
yield skip or t
# vim:ts=4 sts=4 sw=4 et
```
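For reference, the command line assembled inside _test_fuzz() corresponds roughly to the manual invocation below; the directories and token are placeholders.

```python
# Rough stand-alone equivalent of the fuzz run the test drives.
import subprocess
import sys

subprocess.check_call([
    'py-afl-fuzz', '-i', 'in', '-o', 'out', '--',
    sys.executable, 'tests/target.py', 'sometoken',
])
```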
#### File: python-afl/tests/test_import.py
```python
import afl
from .tools import (
assert_equal,
)
exports = [
'init',
'loop',
]
deprecated = [
'start',
]
# pylint: disable=exec-used
def wildcard_import(mod):
ns = {}
exec('from {mod} import *'.format(mod=mod), {}, ns)
return ns
def test_wildcard_import():
ns = wildcard_import('afl')
assert_equal(
sorted(ns.keys()),
sorted(exports)
)
def test_dir():
assert_equal(
sorted(o for o in dir(afl) if not o.startswith('_')),
sorted(exports + deprecated)
)
# vim:ts=4 sts=4 sw=4 et
```
#### File: python-afl/tests/tools.py
```python
import contextlib
import functools
import os
import re
import shutil
import subprocess as ipc
import sys
import tempfile
import traceback
import warnings
import nose.tools
from nose import SkipTest
from nose.tools import (
assert_equal,
assert_not_equal,
assert_true,
)
def assert_fail(msg):
assert_true(False, msg=msg) # pylint: disable=redundant-unittest-assert
def noseimport(vmaj, vmin, name=None):
def wrapper(f):
if f.__module__ == 'unittest.case':
return f
if sys.version_info >= (vmaj, vmin):
return getattr(nose.tools, name or f.__name__)
return f
return wrapper
@noseimport(2, 7)
class assert_raises(object):
def __init__(self, exc_type):
self._exc_type = exc_type
self.exception = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
assert_fail('{0} not raised'.format(self._exc_type.__name__))
if not issubclass(exc_type, self._exc_type):
return False
if isinstance(exc_value, exc_type):
pass
# This branch is not always taken in Python 2.6:
# https://bugs.python.org/issue7853
elif isinstance(exc_value, tuple):
exc_value = exc_type(*exc_value)
else:
exc_value = exc_type(exc_value)
self.exception = exc_value
return True
@noseimport(2, 7, 'assert_raises_regexp')
@noseimport(3, 2)
@contextlib.contextmanager
def assert_raises_regex(exc_type, regex):
with assert_raises(exc_type) as ecm:
yield
assert_regex(str(ecm.exception), regex)
@noseimport(2, 7, 'assert_regexp_matches')
@noseimport(3, 2)
def assert_regex(text, regex):
try:
str_types = basestring
except NameError:
str_types = (str, bytes)
if isinstance(regex, str_types):
regex = re.compile(regex)
if not regex.search(text):
message = "Regex didn't match: {0!r} not found in {1!r}".format(regex.pattern, text)
assert_fail(msg=message)
@noseimport(3, 2)
@contextlib.contextmanager
def assert_warns_regex(exc_type, regex):
with warnings.catch_warnings(record=True) as wlog:
warnings.simplefilter('always', exc_type)
yield
firstw = None
for warning in wlog:
w = warning.message
if not isinstance(w, exc_type):
continue
if firstw is None:
firstw = w
if re.search(regex, str(w)):
return
if firstw is None:
assert_fail(msg='{exc} not triggered'.format(exc=exc_type.__name__))
else:
assert_fail(msg='{exc!r} does not match {re!r}'.format(exc=str(firstw), re=regex))
class IsolatedException(Exception):
pass
def _n_relevant_tb_levels(tb):
n = 0
while tb and '__unittest' not in tb.tb_frame.f_globals:
n += 1
tb = tb.tb_next
return n
def clean_environ():
for key in list(os.environ):
if key.startswith('PYTHON_AFL_'):
del os.environ[key]
os.environ['AFL_SKIP_CPUFREQ'] = '1'
os.environ['AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES'] = '1'
os.environ['AFL_NO_AFFINITY'] = '1'
os.environ['AFL_ALLOW_TMP'] = '1' # AFL >= 2.48b
os.environ['PWD'] = '//' + os.getcwd() # poor man's AFL_ALLOW_TMP for AFL << 2.48b
def require_commands(*cmds):
PATH = os.environ.get('PATH', os.defpath)
PATH = PATH.split(os.pathsep)
for cmd in cmds:
for dir in PATH:
path = os.path.join(dir, cmd)
if os.access(path, os.X_OK):
break
else:
if cmd == 'ps':
cmd = 'ps(1)'
reason = 'procps installed'
elif cmd.startswith('afl-'):
reason = 'AFL installed'
else:
reason = 'PATH set correctly'
raise RuntimeError('{cmd} not found; is {reason}?'.format(cmd=cmd, reason=reason))
def run(cmd, stdin='', xstatus=0):
child = ipc.Popen(
list(cmd),
stdin=ipc.PIPE,
stdout=ipc.PIPE,
stderr=ipc.PIPE,
preexec_fn=clean_environ,
)
(stdout, stderr) = child.communicate(stdin)
if child.returncode != xstatus:
if str is not bytes:
stderr = stderr.decode('ASCII', 'replace')
print(stderr)
raise ipc.CalledProcessError(child.returncode, cmd[0])
return (stdout, stderr)
def fork_isolation(f):
EXIT_EXCEPTION = 101
EXIT_SKIP_TEST = 102
exit = os._exit # pylint: disable=redefined-builtin,protected-access
# sys.exit() can't be used here, because nose catches all exceptions,
# including SystemExit
# pylint:disable=consider-using-sys-exit
@functools.wraps(f)
def wrapper(*args, **kwargs):
readfd, writefd = os.pipe()
pid = os.fork()
if pid == 0:
# child:
os.close(readfd)
try:
f(*args, **kwargs)
except SkipTest as exc:
s = str(exc)
if not isinstance(s, bytes):
s = s.encode('UTF-8')
with os.fdopen(writefd, 'wb') as fp:
fp.write(s)
exit(EXIT_SKIP_TEST)
except Exception: # pylint: disable=broad-except
exctp, exc, tb = sys.exc_info()
s = traceback.format_exception(exctp, exc, tb, _n_relevant_tb_levels(tb))
s = ''.join(s)
if not isinstance(s, bytes):
s = s.encode('UTF-8')
del tb
with os.fdopen(writefd, 'wb') as fp:
fp.write(s)
exit(EXIT_EXCEPTION)
exit(0)
else:
# parent:
os.close(writefd)
with os.fdopen(readfd, 'rb') as fp:
msg = fp.read()
if not isinstance(msg, str):
msg = msg.decode('UTF-8')
msg = msg.rstrip('\n')
pid, status = os.waitpid(pid, 0)
if status == (EXIT_EXCEPTION << 8):
raise IsolatedException('\n\n' + msg)
elif status == (EXIT_SKIP_TEST << 8):
raise SkipTest(msg)
elif status == 0 and msg == '':
pass
else:
raise RuntimeError('unexpected isolated process status {0}'.format(status))
# pylint:enable=consider-using-sys-exit
return wrapper
@contextlib.contextmanager
def tempdir():
d = tempfile.mkdtemp(prefix='python-afl.')
try:
yield d
finally:
shutil.rmtree(d)
__all__ = [
'SkipTest',
'assert_equal',
'assert_not_equal',
'assert_raises',
'assert_raises_regex',
'assert_regex',
'assert_true',
'assert_warns_regex',
'fork_isolation',
'require_commands',
'run',
'tempdir',
]
# vim:ts=4 sts=4 sw=4 et
``` |
{
"source": "Joeyjoejojnr22/hive-mind",
"score": 3
} |
#### File: Joeyjoejojnr22/hive-mind/HIVE MIND.py
```python
import datetime
import threading
class hivemind:
class mind:
class neurone:
def __init__(self,name,resistance=0,accelerate=0.999,brake=0.999,bayeslearningrate=10):
import random
self.learningrate={}
self.bayeslearningrate=bayeslearningrate
self.inputs={}
self.bias={}
self.bayesbias={}
if isinstance(resistance,str):
self.resistance=random.random()
else:
self.resistance=resistance
self.pain=2
self.fired=[]
self.name=name
self.temp={}
self.me=0
self.accelerate=accelerate
self.brake=brake
def forward(self,imp={},bayes={},error=0):
import random
a=0
c=0
for i in bayes:
if i in self.bayesbias:
try:
c+=(self.bayesbias[i]*bayes[i])
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
print(c)
print(self.bayesbias[i])
print(bayes[i])
print(i)
print(bayes)
input("pause in forward")
else:
if error==2:
print(i)
print(self.bayesbias)
input("pause")
self.bayesbias[i]=random.random()
self.learningrate[i]=random.random()
c+=self.bayesbias[i]
c=self.outputactivation(c)
if error==1:
print(self.name)
print(c)
input()
if c > self.resistance or self.name=="output":
a=0
for i in imp:
if i in self.bias:
a+=(self.bias[i]*imp[i])
else:
self.bias[i]=random.random()
a=self.outputactivation(a)
self.fired=imp
self.pain=a
return [self.name,a,c]
else:
return []
def backwards(self,actual,estimate,lisp,error=0):
import random
if self.name in lisp or self.name=='output':
if len(self.fired)>0:
a=0
c=actual-abs(estimate)
d=estimate/actual
e=0
if c > 0:
if self.pain < 0:
if actual >0:
sel=0
else:
sel=1
else:
sel=1
else:
if self.pain < 0:
if actual >0:
sel=1
else:
sel=0
else:
sel=0
for i in self.fired:
if i in self.temp:
if sel==1 and self.temp[i] == 1:
self.learningrate[i]=self.learningrate[i]*self.accelerate
else:
self.learningrate[i]=self.learningrate[i]*self.brake
#self.temp[i]=c
try:
if c>0:
for i in self.fired:
self.bias[i]+=self.learningrate[i]
self.bayesbias[i]+=(self.learningrate[i]/self.bayeslearningrate)
self.temp[i]=sel
else:
for i in self.fired:
self.bias[i]-=self.learningrate[i]
self.bayesbias[i]-=(self.learningrate[i]/self.bayeslearningrate)
self.temp[i]=sel
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
print(self.fired)
print(i)
input("Error in backwards")
temp=self.fired.copy()
self.fired=[]
return temp
#mind needs to take the reply and group all the returns and then feed into next row.
#if mind gets a empty dict back for whole line then it needs to cycle through neurones and top up the bayes dict
def nonresponse(self,estimate):
import random
for i in estimate:
if i !=self.name:
if i in self.bayesbias:
self.bayesbias[i]+=1
else:
self.bayesbias[i]=random.random()+1
self.learningrate[i]=random.random()
def experience(self):
self.accelerate-=0.00000001
self.brake-=0.00000001
if self.brake<0.00000001:
self.brake=0.00000001
if self.accelerate < 1.00000001:
self.accelerate=1.00000001
def reset(self):
self.fired=[]
class Relu:
def outputactivation(self,x):
if x > 0:
return x
else:
return (x*0.1)
class Sigmoid:
def outputactivation(self,x):
import math
return 1 / (1 + math.exp(-x))
class Tanh:
def outputactivation(self,x):
import math
x=math.tanh(x)
return x
class sigmoidneurone(Sigmoid,neurone):
pass
class reluneurone(Relu,neurone):
pass
class tanhneurone(Tanh,neurone):
pass
def __init__(self,width,depth,repeat=0,resistance=0,bayeslearningrate=10,linearegression=0):
self.outputbias={}
self.mind=[]
self.source=[]
self.fired={}
self.repeat=repeat
self.me=0
self.critime={}
self.resistance=resistance
c=0
for i in range(depth):
cortex=[]
for w in range(width):
c+=1
name=str("No:"+str(c)+" row:"+str(i)+" width:"+str(w))
cortex.append(self.reluneurone(name,resistance=resistance,bayeslearningrate=bayeslearningrate))
if linearegression==1:
name='output'
self.output=self.reluneurone(name,resistance=0,bayeslearningrate=bayeslearningrate)
self.mind.append(cortex.copy())
name='output'
self.output=self.reluneurone(name,resistance=0,bayeslearningrate=bayeslearningrate)
def labotomy(self,width=[4,4,4,4,4],typo=['r','r','r','r','r','r'],resistance=[0,0,0,0,0,0],bayeslearningrate=[10,10,10,10,10],linearegression=[0,0,0,0,0]):
count=0
work=4
self.mind=[]
rest=0
bayes=10
c=0
for i in range(len(typo)):
try:
work=width[count]
rest=resistance[count]
bayes=bayeslearningrate[count]
except:
pass
cortex=[]
for w in range(work):
c+=1
name=str("No:"+str(c)+" row:"+str(i)+" width:"+str(w))
if typo[i].lower()=='r':
cortex.append(self.reluneurone(name,resistance=rest,bayeslearningrate=bayes))
if typo[i].lower()=='s':
cortex.append(self.sigmoidneurone(name,resistance=rest,bayeslearningrate=bayes))
if typo[i].lower()=='t':
cortex.append(self.tanhneurone(name,resistance=rest,bayeslearningrate=bayes))
if linearegression[i]==1:
name='output'
self.output=self.reluneurone(name,resistance=0,bayeslearningrate=bayes)
self.mind.append(cortex.copy())
count+=1
name='output'
self.output=self.reluneurone(name,resistance=0,bayeslearningrate=bayes)
def forwardpage(self,inputs,error=0):
output=0
nay={}
bay={}
responsenay={}
responsebay={}
for i in inputs:
if isinstance(i,(int,float)):
nay[i]=i
bay[i]=i
else:
nay[i]=1
bay[i]=1
if error==2:
print(inputs)
for cortex in range(len(self.mind)):
responsenay={}
responsebay={}
for nerve in self.mind[cortex]:
response=nerve.forward(nay,bay)
if len(response) >0:
responsenay[response[0]]=response[1]
responsebay[response[0]]=response[2]
if len(responsenay)==0:
for nerve in self.mind[cortex]:
nerve.nonresponse(bay)
if error==2:
print(responsenay)
print(responsebay)
input("pause error 2 at forward page")
nay=responsenay
bay=responsebay
response=self.output.forward(nay,bay)
if len(response)==0:
self.output.nonresponse(bay)
self.output.nonresponse(bay)
else:
output=response[1]
return output
def slow(self):
for cortex in range(len(self.mind)):
for nerve in self.mind[cortex]:
nerve.experience()
def backapage(self,actual,estimate,error=0):
nex=[]
r=[]
if estimate==None:
estimate=0
nex=self.output.backwards(float(actual),float(estimate),[])
#print(nex)
#input()
for cortex in reversed(self.mind):
for nerve in cortex:
try:
response=nerve.backwards(float(actual),float(estimate),nex)
for re in response:
if not re in r:
r.append(re)
except Exception as ex:
pass
nex=r
#print(nex)
#input("Previous Rows")
self.fired=0
def learnbook(self,reader,element,accuracy=30,epochs=10,error=0,key=0,SECONDREAD=0):
estimate=0
lastcount=1
count=1
rightcount=0
mike=0
check=0
for row in reader:
if row.get(element):
project_list=list(row.values())
project_list.remove(row.get(element))
estimate=self.forwardpage(project_list)
self.backapage(row.get(element),estimate)
step=0
temp=0
while step < epochs:
lastcount=rightcount
consider=[0,0,0,0,0,0,0,0,0,0,0,0,0]
count=1
for row in reader:
if row.get(element):
count+=1
project_list=list(row.values())
if key !=0:
project_list.remove(row.get(key))
project_list.remove(row.get(element))
estimate=self.forwardpage(project_list)
if row.get(element) !=0:
self.backapage(row.get(element),estimate)
if error==1:
print(estimate)
print(row.get(element))
input("pause for error in learnbook")
try:
temp=int(round(abs(estimate-row.get(element))/accuracy,0))
except:
pass
try:
consider[temp]+=1
except Exception as ex:
pass
if error==1:
print(project_list)
print(row.get(element))
print(estimate)
print(lastcount)
input("pause error 1 in learnbook")
cumu=0
rightcount=consider[0]/count
if rightcount <check:
self.slow()
check=rightcount
for i in range(len(consider)):
cumu+=((consider[i]/count)*100)
#print("Within a accuracy " + str(i) + " we had a accuracy of " + str((consider[i]/count)*100) + " with cumulatve of " + str(cumu))
step+=1
#print("New Epoch " + str(step))
if isinstance(SECONDREAD,list):
for row in SECONDREAD:
project_list=list(row.values())
project_list.remove(row.get(element))
if key !=0:
project_list.remove(row.get(key))
estimate=self.forwardpage(project_list)
#if estimate < accuracy:
# estimate=accuracy
if error==2:
print(row)
print(project_list)
input("Error 2 in learnbook")
try:
row["ESTIMATE"]=round(estimate,0)
except:
row["ESTIMATE"]="None response from AI, unrecognised engram - pleaser forecast manually"
return SECONDREAD
def prognosticate(self,reader,key,element):
newreader=[]
for row in reader:
newrow={}
project_list=list(row.values())
project_list.remove(row.get(element))
estimate=self.forwardpage(project_list)
if estimate < 30:
estimate=30
for cortex in reversed(self.mind):
for nerve in cortex:
nerve.reset()
estimate=round(estimate,0)
newrow[key]=row[key][-(len(row[key])-(len(key)+1)):]
newrow[str(element)+" Estimate"]=estimate
newreader.append(newrow.copy())
return newreader
def testday(self,reader,accuracy,element,key=0):
newreader=[]
step=0
count=0
eva=0
eve=0
errors=0
checkframe=[]
fileframe=[]
column=0
row=0
for row in reader:
try:
eve+=row.get(element)
count+=1
except:
print(row)
print(row.get(element))
input("error in testday")
try:
average=eve/count
except:
average=0
eve=0
count=0
var=0
hypo=0
for row in reader:
count+=1
newrow={}
project_list=list(row.values())
project_list.remove(row.get(element))
if key !=0:
project_list.remove(row.get(key))
estimate=self.forwardpage(project_list)
try:
eva=estimate-row.get(element)
except:
errors+=1
if abs(eva) < accuracy:
step+=1
var=abs(row.get(element)-average)
hypo+=(var*var)
eve+=(eva*eva)
for cortex in reversed(self.mind):
for nerve in cortex:
nerve.reset()
try:
return [(step/count),(eve/count),errors,hypo/count,]
except:
return [0,0,errors,0,]
def __init__(self,reader,key,startdate,endate,renamekey,start=1,accuracy=15,csvloci=r'C:\CSVs\\',setcritdelay=14,setalert=0,taskmove=1,setpercntile=0.95,setdependency=1):
self.source=[]
self.innaccurate=[]
self.accuraccy=accuracy
self.key=key
self.uPDATE=0
self.renamekey=renamekey
self.startdate=startdate
import os
directory=csvloci+'Analysis\\'
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
self.csvloci=directory
directory=csvloci+'BrainsInAJar\\'
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
self.geniusloci=directory
directory=csvloci+'Analysis\\'
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
self.analysisloci=directory
directory=csvloci+'HIVE\\'
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError:
print ('Error: Creating directory. ' + directory)
self.hiveloci=directory
self.enddate=endate
self.hive(reader,startdate)
if start!=0:
if start=="test":
self.randomdata()
else:
self.swarm()
#self.workplanner()
def run(self,reader,queenme=0):
if len(self.deps)==0:
try:
self.deps=self.Open(file_Name=self.geniusloci + '\DEPENDENCIES_FILE')
if self.deps==False:
self.deps={}
except:
self.deps={}
try:
self.tickboxes=self.Open(file_Name=self.geniusloci + '\TICKBOX_FILE')
if self.tickboxes==False:
self.tickboxes={}
except:
self.tickboxes={}
try:
self.alerts=self.Open(file_Name=self.geniusloci +'\ALERT_FILE')
if self.alerts==False:
self.alerts={}
except:
self.alerts={}
try:
self.critime=self.Open(file_Name=self.geniusloci +'\CRITIME_FILE')
if self.critime==False:
self.critime={}
except:
self.critime={}
try:
self.hardforward=self.Open(file_Name=self.geniusloci+'\HARD_FILE')
if self.hardforward==False:
self.hardforward={}
except:
self.hardforward={}
self.hive(reader,self.startdate)
x = threading.Thread(target=self.swarm)
x.start()
q = threading.Thread(target=self.reforecast, args=())
q.start()
if queenme==1:
queeme=threading.Thread(target=self.queen, args=())
queeme.start()
def reference(self):
print("Building the Hive")
print("this is the dates i have found")
print(self.dates)
print(len(self.dates))
print("this is the labels i have found")
print(self.kill)
print(len(self.kill))
print("this is the numbers i have found")
print(self.numbers)
print(len(self.numbers))
def hive(self,reader,startdate,error=0):
def inreader(row,reader,key):
count=0
for newrow in reader:
if row[key]==newrow[key]:
return count
count+=1
return False
def addrow(row,startdate):
newrow={}
newrow["end"]=row[self.enddate]
newrow[self.key]=row[self.key]
newrow[startdate]=row[startdate]
datarea={}
for d in self.dates:
temp=self.tryfindcmrdates(newrow[startdate],row[d])
try:
if temp > 0:
dateme[d]=1
except:
pass
datarea[d]=self.tryfindcmrdates(newrow[startdate],row[d])
#print(datarea.copy())
#input()
newrow["Dates"]=datarea.copy()
datarea={}
for n in self.numbers:
try:
if isinstance(float(row[n]),(float,int)):
datarea[n]=float(row[n])
else:
datarea[n]=None
except:
datarea[n]=None
pass
newrow["Numbers"]=datarea.copy()
for k in self.kill:
if k in row:
if isinstance(row[k],str):
if not self.isdate(row[k]):
if not len(row[k])==0:
if error==1:
print(row[self.key])
print(k)
input(row[k])
datarea[k]=str(k)+':' +str(row[k])
newrow["Labels"]=datarea.copy()
if row[self.key] in tempforecastdates:
newrow["Forecast Dates"]=tempforecastdates[row[self.key]]
del tempforecastdates[row[self.key]]
else:
newrow["Forecast Dates"]={}
if row[self.key] in tempforecastnumbers:
newrow["Forecast Numbers"]=tempforecastnumbers[row[self.key]]
del tempforecastnumbers[row[self.key]]
else:
newrow["Forecast Numbers"]={}
newrow["Reforecast Dates"]={}
newrow["Overide Dates"]={}
newrow["Overide Numbers"]={}
return newrow
if len(self.source)==0:
tech=[]
self.dates=[]
self.numbers=[]
self.kill=[]
tempforecastdates={}
tempforecastnumbers={}
for s in self.source:
tempforecastdates[s[self.key]]=s["Forecast Dates"]
tempforecastnumbers[s[self.key]]=s["Forecast Numbers"]
for row in reader:
for cell in row:
if self.isdate(row[cell]) and cell !=self.key and cell !=startdate:
if not cell in self.dates:
self.dates.append(cell)
try:
if isinstance(float(row[cell]),(float,int)):
if cell !=self.key and cell !=startdate:
if not cell in self.numbers:
self.numbers.append(cell)
except:
pass
if isinstance(row[cell],str) and cell !=self.key and cell !=startdate:
if not isinstance(row[cell],(float,int)):
if not cell in self.kill:
self.kill.append(cell)
now=''
now=self.today
for row in reader:
tech.append(addrow(row,self.startdate))
self.source=tech
else:
temp=[]
for row in reader:
temp=inreader(row,self.source,self.key)
if temp==False:
self.source.append(addrow(row,now))
else:
for d in self.dates:
self.source[temp]["Dates"][d]=row[d]
for n in self.numbers:
self.source[temp]["Numbers"][n]=row[n]
for k in self.kill:
self.source[temp]["Labels"][k]=row[k]
def swarm(self,error=0):
print("Forecasting Dates")
for d in self.dates:
tempreader=[]
otherereader=[]
for row in self.source:
if not d in row["Labels"]:
newrow={}
newrow["TARGET"]=row["Dates"][d]
for k in row["Labels"]:
if k !=d:
newrow[k]=row["Labels"][k]
newrow[self.key]=row[self.key]
if newrow["TARGET"]==None:
otherereader.append(newrow.copy())
else:
if newrow["TARGET"] < 0:
newrow["TARGET"]=0
tempreader.append(newrow.copy())
elif error==1:
print(row[self.key])
print(d)
input()
#print(d)
#self.timestamp()
#print(len(tempreader))
#print(len(otherereader))
#try:
r2=[]
#print(d)
STRING=d.replace('/','-')
mymind=self.Open(file_Name=self.geniusloci + '\prognostication' + STRING + '_BRAININAJAR')
if mymind==False:
mymind=self.mind(4,5)
epo=1
else:
epo=1
r2=mymind.learnbook(tempreader,"TARGET",accuracy=self.accuraccy,epochs=epo,key=self.key,SECONDREAD=otherereader)
for row in self.source:
row=self.updaterow(row,r2,self.key,d)
self.Save(mymind,file_Name=self.geniusloci + '\prognostication' + STRING + '_BRAININAJAR')
self.csvwrite(r2,CSV=self.hiveloci + '\prognostication' + STRING + '_OUTPUT.csv',KEY=self.key,NEWKEY=self.renamekey)
csv=[]
#print(self.csvloci+'\Test_Records_' + STRING + '_OUTPUT.csv')
csv=self.csvopen(x=(self.csvloci+'\Test_Records_' + STRING + '_OUTPUT.csv'))
vale=mymind.testday(tempreader,self.accuraccy,"TARGET",key=self.key)
data={}
data["Type"]=d
data["Accuraccy"]=vale[0]
data["Loss Function"]=vale[1]
data["Date"]=self.today()
data["Variance Around Average"]=vale[3]
if vale[3]==0:
data["Hypothesis Test"]="Error in hypothesis test"
else:
data["Hypothesis Test"]=vale[1]/vale[3]
if vale[1]/vale[3] > 1:
self.innaccurate.append(d)
elif d in self.innaccurate:
self.innaccurate.remove(d)
data["Errors"]=vale[2]
csv.append(data)
self.csvwrite(csv,CSV=self.analysisloci +'\Test_Records_' + STRING + '_OUTPUT.csv',KEY="Type",NEWKEY=0)
#except:
# print(d)
# print("We found no instances of this to forecast, press enter too accept")
# input()
tempreader=[]
LOAD=''
concat=''
unload=[]
for row in self.source:
if len(row["end"]) == 0:
try:
unload=min(row["Forecast Dates"])
except:
print(row["Dates"])
print(row["Forecast Dates"])
input()
datarea={}
datarea[self.key]=row[self.key]
datarea["Next Task"]=unload
datarea["Date"]=self.today()
tempreader.append(datarea.copy())
self.csvwrite(tempreader,CSV=self.analysisloci + 'prognostication' + '_Next_Task_' + '_OUTPUT.csv',KEY=self.key,NEWKEY=self.renamekey)
self.uPDATE=0
print("Forecasting Numbers")
for d in self.numbers:
tempreader=[]
otherereader=[]
for row in self.source:
newrow={}
newrow[self.key]=row[self.key]
if len(row["end"])>0:
#print(row["Numbers"])
#print(row["end"])
#input()
newrow["TARGET"]=row["Numbers"][d]
else:
newrow["TARGET"]=None
for k in row["Labels"]:
if k !=d:
newrow[k]=row["Labels"][k]
if newrow["TARGET"]==None:
otherereader.append(newrow.copy())
elif isinstance(newrow["TARGET"],(int,float)):
tempreader.append(newrow.copy())
if len(tempreader) >0:
#try:
r2=[]
#print(d)
STRING=d.replace('/','-')
mymind=self.Open(file_Name=self.geniusloci + '\prognostication' + STRING + '_BRAININAJAR')
if mymind==False:
mymind=self.mind(4,5)
epo=1
else:
epo=1
r2=mymind.learnbook(tempreader,"TARGET",accuracy=self.accuraccy,epochs=epo,key=self.key,SECONDREAD=otherereader)
STRING=d.replace('/','-')
self.csvwrite(r2,CSV=self.hiveloci + '\prognostication' + STRING + '_OUTPUT.csv',KEY=self.key,NEWKEY=self.renamekey)
self.Save(mymind,file_Name=self.geniusloci + '\prognostication' + STRING + '_BRAININAJAR')
#except:
# print(d)
# print("We found no instances of this to forecast, press enter too accept")
# input()
csv=[]
csv=self.csvopen(x=(self.csvloci+'\Test_Records_' + STRING + '_OUTPUT.csv'))
vale=mymind.testday(tempreader,self.accuraccy,"TARGET",key=self.key)
data={}
data["Type"]=d
data["Accuraccy"]=vale[0]
data["Loss Function"]=vale[1]
data["Date"]=self.today()
data["Variance Around Average"]=vale[3]
if vale[3]==0:
data["Hypothesis Test"]="Error in hypothesis test"
else:
data["Hypothesis Test"]=vale[1]/vale[3]
if vale[1]/vale[3] > 1:
self.innaccurate.append(d)
elif d in self.innaccurate:
self.innaccurate.remove(d)
data["Errors"]=vale[2]
csv.append(data)
self.csvwrite(csv,CSV=self.analysisloci + '\Test_Records_' + STRING + '_OUTPUT.csv',KEY="Type",NEWKEY=0)
self.swarmin=0
print("Innaccurate models detected")
print(self.innaccurate)
def Save(self,a,file_Name):
import pickle
fileObject = open(file_Name,'wb')
pickle.dump(a,fileObject)
fileObject.close()
def Open(self,file_Name):
import os.path
if os.path.isfile(file_Name)==True:
import pickle
fileObject = open(file_Name,'rb')
try:
b = pickle.load(fileObject,encoding="utf8")
return b
except:
print(file_Name)
print("got a error in opening pickle RESTARTING FILE")
return False
else:
return False
def updaterow(self,row,r2,key,d,look="Forecast Dates",error=0):
for r in r2:
if row[self.key]==r[self.key]:
if r["ESTIMATE"] !="None response from AI, unrecognised engram - pleaser forecast manually":
row[look][d]=r["ESTIMATE"]
return row
return row
def isdate(self,check):
from datetime import datetime
try:
h=check.split('/')
x=datetime(int(h[2]), int(h[1]), int(h[0]), 0, 0, 0, 0)
return True
except:
return False
def today(self):
from datetime import datetime
check = datetime.now()
return (str(check.day)+'/'+str(check.month)+'/'+str(check.year))
def tryfindcmrdates(self,a,b):
from datetime import datetime
try:
h=a.split('/')
x=datetime(int(h[2]), int(h[1]), int(h[0]), 0, 0, 0, 0)
t=b.split('/')
t=datetime(int(t[2]), int(t[1]), int(t[0]), 0, 0, 0, 0)
dt = t - x
return dt.days
except:
return None
def csvwrite(self,reader,CSV='C:\CSVs\OUTPUT.csv',KEY=0,NEWKEY=0):
import csv
fieldnombre=[]
for row in reader:
for cell in row:
if not cell in fieldnombre:
fieldnombre.append(cell)
if NEWKEY !=0:
try:
fieldnombre.remove(KEY)
except:
pass
fieldnombre.append(NEWKEY)
for row in reader:
row[NEWKEY]=row.get(KEY)
frame=[]
with open(CSV, 'w', newline='') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',',
quotechar='"', quoting=csv.QUOTE_MINIMAL)
spamwriter.writerow(fieldnombre)
for row in reader:
frame=[]
for field in fieldnombre:
frame.append(row.get(field))
spamwriter.writerow(frame.copy())
csvfile.close()
def csvopen(self,x):
import csv
import os.path
if os.path.isfile(x)==False:
return []
with open(x, newline='') as csvfile:
data = csv.DictReader(csvfile)
reader = [item for item in data]
newreader=[]
data=None
count=0
return reader
def randomdata(self):
import random
for row in self.source:
for d in self.dates:
row["Forecast Dates"][d]=random.randint(0,120)
for n in self.numbers:
row["Forecast Numbers"][d]=random.randint(0,120)
def multitest(self,reader,tag):
innaccurate=[]
def makeworksheet(typo,reader,num):
newreader=[]
if num==True:
for row in reader:
if self.key in row:
newrow={}
try:
newrow[self.key]=row[self.key]
except:
print(row)
print(newrow)
input("error in makeworksheet")
if isinstance(row[typo],(int,float)):
newrow["TARGET"]=self.tryfindcmrdates(row[self.startdate],row[typo])
for k in self.kill:
if k in row:
if isinstance(row[k],str):
if not self.isdate(row[k]):
if not len(row[k])==0:
newrow[k]=str(k)+':' +str(row[k])
newreader.append(newrow.copy())
else:
for row in reader:
if self.key in row:
newrow={}
try:
newrow[self.key]=row[self.key]
except:
print(row)
print(newrow)
input("error in makeworksheet")
if self.isdate(row[self.startdate]):
if self.isdate(row[typo]):
newrow["TARGET"]=self.tryfindcmrdates(row[self.startdate],row[typo])
for k in self.kill:
if k in row:
if isinstance(row[k],str):
if not self.isdate(row[k]):
if not len(row[k])==0:
newrow[k]=str(k)+':' +str(row[k])
newreader.append(newrow.copy())
return newreader
for d in self.dates:
tempreader=makeworksheet(d,reader,False)
print("multitest")
print(d)
print(len(tempreader))
if len(tempreader)>0:
STRING=d.replace('/','-')
mymind=self.Open(file_Name=self.geniusloci + '\prognostication' + STRING + '_BRAININAJAR')
csv=[]
csv=self.csvopen(x=(self.csvloci+'\Test_Records_' + STRING + '_OUTPUT.csv'))
try:
vale=mymind.testday(tempreader,self.accuraccy,"TARGET",key=self.key)
except:
print(vale)
input("error")
data={}
data["Type"]=d
data["Accuraccy"]=vale[0]
data["Loss Function"]=vale[1]
data["Date"]=self.today()
data["Tag"]=tag
data["Variance Around Average"]=vale[3]
if vale[3]==0:
data["Hypothesis Test"]="Error in hypothesis test"
else:
data["Hypothesis Test"]=vale[1]/vale[3]
if vale[1]/vale[3] > 1:
innaccurate.append(d)
data["Errors"]=vale[2]
csv.append(data)
self.csvwrite(csv,CSV=self.analysisloci + '\Test_Records_' + STRING + '_OUTPUT.csv',KEY="Type",NEWKEY=0)
for d in self.numbers:
tempreader=makeworksheet(d,reader,True)
print("multitest")
print(d)
print(len(tempreader))
if len(tempreader)>0:
STRING=d.replace('/','-')
mymind=self.Open(file_Name=self.geniusloci + '\prognostication' + STRING + '_BRAININAJAR')
csv=[]
csv=self.csvopen(x=(self.csvloci+'\Test_Records_' + STRING + '_OUTPUT.csv'))
vale=mymind.testday(tempreader,self.accuraccy,"TARGET",key=self.key)
data={}
data["Type"]=d
data["Accuraccy"]=vale[0]
data["Loss Function"]=vale[1]
data["Date"]=self.today()
data["Tag"]=tag
data["Variance Around Average"]=vale[3]
if vale[3]==0:
data["Hypothesis Test"]="Error in hypothesis test"
else:
data["Hypothesis Test"]=vale[1]/vale[3]
if vale[1]/vale[3] > 1:
innaccurate.append(d)
data["Errors"]=vale[2]
csv.append(data)
self.csvwrite(csv,CSV=self.analysisloci + '\Test_Records_' + STRING + '_OUTPUT.csv',KEY="Type",NEWKEY=0)
print("Inaccuracies in Historic Data Found")
print(innaccurate)
def workplanner(self,setcritdelay=0,setalerts=0,taskmove=0,setpercntile=0,setdependency=0):
averageburndown={}
countdates={}
burndown=0
evaluate=[]
csv=[]
csv=self.csvopen(x=(self.hiveloci+'\RESOURCE PLAN.csv'))
if len(csv)==0:
for d in self.dates:
newrow={}
newrow["Type"]=d
csv.append(newrow.copy())
self.csvwrite(csv,CSV=(self.csvloci+'\RESOURCE PLAN.csv'),KEY=0,NEWKEY=0)
newrow={}
dat={}
for c in csv:
dat[c['Task']]={}
for s in c:
if s !=dat[c['Task']]:
dat[c['Task']][s]=c[s]
for row in self.source:
if len(row[self.startdate])>0:
if len(row["end"])==0:
todah=self.tryfindcmrdates(row[self.startdate],self.today())
for d in row["Forecast Dates"]:
if not d in self.innaccurate:
if row["Dates"][d]==None:
if not d in row["Labels"]:
count=1
check=1
reforecast=0
newrow={}
for e in row["Forecast Dates"]:
if not e in self.innaccurate:
if e !=d:
if not e in row["Labels"]:
if row["Forecast Dates"][e]!=None:
if row["Dates"][e]!= None and row["Forecast Dates"][d]>row["Dates"][e]:
count+=1
elif row["Forecast Dates"][d]>row["Forecast Dates"][e]:
count+=1
if row["Dates"][e]==None:
check+=1
burndown=row["Forecast Dates"][d]/count
if burndown < 0 or burndown==row["Forecast Dates"][d]:
burndown=0
reforecast=round(todah+(check*burndown))
newrow[self.renamekey]=row[self.key]
newrow["Reforecast"]=reforecast
newrow["Burndown"]=burndown
newrow["Type"]=d
newrow["Previous Tasks"]=count
newrow["Original Forecast"]=row["Forecast Dates"][d]
newrow["Previous Tasks Remainder"]=check
if todah > row["Forecast Dates"][d]:
if todah > (row["Forecast Dates"][d]*1.5):
newrow["Late Flag"]="Late - long delay"
else:
newrow["Late Flag"]="Late"
elif reforecast < row["Forecast Dates"][d]:
newrow["Late Flag"]="Running Ahead"
elif (row["Forecast Dates"][d]-reforecast)<burndown:
newrow["Late Flag"]="On Schedule"
else:
newrow["Late Flag"]="Behind Schedule"
if d in dat:
for a in dat[d]:
if a !=d:
newrow[a]=dat[d][a]
evaluate.append(newrow.copy())
self.csvwrite(evaluate,CSV=(self.hiveloci+'\prognostication_REFORECAST.csv'),KEY=0,NEWKEY=0)
def scheduletests(self):
csv=[]
import collections
for me in self.dates:
import random
ra=[]
for m in range(20):
ra.append(m)
print(ra)
ra=random.sample(ra,len(ra))
print(ra)
for L in range(1):
for r in ra:
for b in ra:
for d in ra:
for w in ra:
newrow=collections.OrderedDict()
newrow["Type"]=me
newrow["width"]=w+1
newrow["depth"]=d+1
newrow["resistance"]=r/10
newrow["bayeslearningrate"]=b+1
newrow["linearegression"]=L
newrow["epochs"]=1
newrow["n"]=False
yield newrow
for me in self.numbers:
for l in range(1):
for r in ra:
for b in ra:
for d in ra:
for w in ra:
newrow=collections.OrderedDict()
newrow["Type"]=d
newrow["width"]=w+1
newrow["depth"]=d+1
newrow["resistance"]=r/10
newrow["bayeslearningrate"]=b+1
newrow["linearegression"]=l
newrow["epochs"]=1
newrow["n"]=True
yield newrow
def makeworksheet(self,d,reader,num):
if num==True:
tempreader=[]
otherereader=[]
for row in self.source:
newrow={}
newrow[self.key]=row[self.key]
if len(row["end"])>0:
#print(row["Numbers"])
#print(row["end"])
#input()
newrow["TARGET"]=row["Numbers"][d]
else:
newrow["TARGET"]=None
for k in row["Labels"]:
if k !=d:
newrow[k]=row["Labels"][k]
if newrow["TARGET"]!=None:
if newrow["TARGET"] > 0:
otherereader.append(newrow.copy())
else:
tempreader.append(newrow.copy())
else:
tempreader=[]
otherereader=[]
for row in self.source:
if not d in row["Labels"]:
newrow={}
newrow["TARGET"]=row["Dates"][d]
for k in row["Labels"]:
if k !=d:
newrow[k]=row["Labels"][k]
newrow[self.key]=row[self.key]
if newrow["TARGET"]==None:
otherereader.append(newrow.copy())
else:
if newrow["TARGET"] < 0:
newrow["TARGET"]=0
tempreader.append(newrow.copy())
return [tempreader,otherereader]
def queen(self,overide=0):
def chack(reader,find):
for row in reader:
if row["Type"]==find:
return True
return False
def getacc(tye):
STRING=tye.replace('/','-')
try:
CSV=self.csvopen(self.analysisloci + '\Test_Records_' + STRING + '_OUTPUT.csv')
except:
return False
ROW=CSV[(len(CSV)-1)]
vale=[]
vale.append(float(ROW["Loss Function"]))
vale.append(eval(ROW["Accuraccy"]))
vale.append((len(CSV)))
return vale
bestwidth=0
otherereader=[]
tempreader=[]
val1=[]
val2=[]
import random
import collections
comptests=[]
#def __init__(self,width,depth,repeat=0,resistance=0,bayeslearningrate=10,linearegression=0):
#def labotomy(self,width=[4,4,4,4,4],typo=['r','r','r','r','r','r'],resistance=[0,0,0,0,0,0],bayeslearningrate=[10,10,10,10,10]):
csv=self.csvopen(x=(self.csvloci+'\Test_Records_SCHEDULED_TESTS.csv'))
newcsv=[]
ty=''
for row in csv:
if len(row["date"])>0:
work=[]
if not ty ==row["Type"]:
ty =row["Type"]
tempreader=[]
otherereader=[]
work=self.makeworksheet(row["Type"],self.source,row["number"])
tempreader=work[0]
otherereader=work[1]
testmind=self.mind(width=int(row["width"]),depth=int(row["depth"]),resistance=int(row["resistance"]),bayeslearningrate=int(row["bayeslearningrate"]),linearegression=int(row["linearegression"]))
try:
if len(row["labotomy.width"]) > 0:
testmind.labotomy(width=eval(row["labotomy.width"]),resistance=int(row["labotomy.resistance"]),bayeslearningrate=eval(row["labotomy.bayeslearningrate"]),linearegression=eval(row["labotomy.linearegression"]))
except:
pass
testmind.learnbook(tempreader,"TARGET",accuracy=int(row["accuracy"]),epochs=int(row["epochs"]),key=self.key,SECONDREAD=otherereader)
val1=getacc(row["Type"])
val1e=testmind.testday(tempreader,int(row["accuracy"]),"TARGET",key=self.key)
row["percentage"]=val1e[0]
row["loss function"]=val1e[1]
row["date"]=self.today()
if val1e[0] > val1[1] and val1e[1] < val1[0]:
row["acceptance"]=1
STRING=str(row["Type"])
STRING=STRING.replace('/','-')
self.Save(testmind,file_Name=r'C:\CSVs\BrainsInAJar\prognostication' + STRING + '_BRAININAJAR')
row["Test passed type"]="Scheduled Test Passed"
comptests.append(row.copy())
self.csvwrite(comptests,CSV=(self.csvloci+'\Test_Records_COMPLETED_TESTS.csv'),KEY=0,NEWKEY=0)
else:
row["acceptance"]=0
c=0
import time
if len(comptests)==0:
genny=self.scheduletests()
ty=''
for row in genny:
work=[]
if ty !=row["Type"]:
tempreader=[]
otherereader=[]
ty =row["Type"]
work=self.makeworksheet(row["Type"],self.source,row["n"])
tempreader=work[0]
otherereader=work[1]
val1=getacc(row["Type"])
testmind=self.mind(width=int(row["width"]),depth=int(row["depth"]),resistance=int(row["resistance"]),bayeslearningrate=int(row["bayeslearningrate"]),linearegression=int(row["linearegression"]))
testmind.learnbook(tempreader,"TARGET",accuracy=self.accuraccy,epochs=val1[2],key=self.key,SECONDREAD=otherereader)
count=0
val1e=testmind.testday(tempreader,self.accuraccy,"TARGET",key=self.key)
row["percentage original"]=val1e[0]
row["loss function"]=val1e[1]
row["date"]=self.today()
print("%")
print(val1e[0])
print("old")
print(val1[1])
print("loss")
print(val1e[1])
print("old")
print(val1[0])
print("epochs")
print(val1[2])
print(len(tempreader))
print(len(otherereader))
print(str(row["depth"]))
print(str(row["width"]))
print(str(row["resistance"]))
print(str(row["bayeslearningrate"]))
if val1e[0] > val1[1] and val1e[1] < val1[0]:
val1[1]=val1e[0]
val1[0]=val1e[1]
print("upgrade")
row["acceptance"]=1
STRING=str(row["Type"])
STRING=STRING.replace('/','-')
print(STRING)
self.Save(testmind,file_Name=r'C:\CSVs\BrainsInAJar\prognostication' + STRING + '_BRAININAJAR')
row["Test passed type"]="Auto Generated Test Passed"
comptests.append(row.copy())
self.csvwrite(comptests,CSV=(self.csvloci+'\Test_Records_COMPLETED_TESTS.csv'),KEY=0,NEWKEY=0)
csv=self.csvopen(x=(self.csvloci+'\Test_Records_COMPLETED_TESTS.csv'))
for row in csv:
testmind=self.mind(width=int(row["width"]),depth=int(row["depth"]),resistance=int(row["resistance"]),bayeslearningrate=int(row["bayeslearningrate"]),linearegression=int(row["linearegression"]))
if len(row["labotomy.width"]) > 0:
testmind.labotomy(width=eval(row["labotomy.width"]),resistance=int(row["labotomy.resistance"]),bayeslearningrate=eval(row["labotomy.bayeslearningrate"]),linearegression=eval(row["labotomy.linearegression"]))
c=float('inf')
d=0
work=self.makeworksheet(row["Type"],self.source)
tempreader=work[0]
otherereader=work[1]
testmind.learnbook(tempreader,"TARGET",accuracy=int(row["accuraccy"]),epochs=1,key=self.key,SECONDREAD=otherereader)
vale=testmind.testday(tempreader,int(row["accuraccy"]),"TARGET",key=self.key)
count=1
while vale[1] < c and vale[2] > d:
testmind.learnbook(tempreader,"TARGET",accuracy=int(row["accuraccy"]),epochs=1,key=self.key,SECONDREAD=otherereader)
vale=testmind.testday(tempreader,int(row["accuraccy"]),"TARGET",key=self.key)
count+=1
count-=1
newrow=row.copy()
newrow["epochs"]=count
self.Save(testmind,file_Name=self.geniusloci + '\prognostication' + str(row["Type"]) + '_BRAININAJAR')
newrow["Test passed type"]="Evaluation of earlystopping"
csv.append(newrow.copy())
self.csvwrite(csv,CSV=(self.csvloci+'\Test_Records_COMPLETED_TESTS.csv'),KEY=0,NEWKEY=0)
self.queenIN=0
def timestamp(self):
import datetime
now = datetime.datetime.now()
print(now)
def readmaker(x=0,kill=[],educational=[],ConverTOstrings=[]):
import csv
import random
import datetime
now = datetime.datetime.now()
if len(str(now.month))==1:
t='0'+str(now.month)
else:
t=str(now.month)
if len(str(now.day))==1:
y='0'+str(now.day)
else:
y=str(now.day)
if x==0:
x='\\\\wcrwvfilprd01\\shared$\\Telecoms Reporting\\QlikView nPrinting Output\\CMR\\IS_CMR_' + str(now.year) + '-' + t + '-' + y + '.csv'
def infermeaning(reader,column):
text=''
textlist=[]
corpuscount={}
count=0
average=0
import math
for row in reader:
intext=[]
text=row.get(column)
if text !='':
if text:
textlist=text.split()
for t in textlist:
count+=1
if t in corpuscount:
corpuscount[t]+=1
else:
corpuscount[t]=1
for c in corpuscount:
corpuscount[c]=math.log(count/corpuscount[c])
average+=corpuscount[c]
average=average/count
newcorpuscount={}
for c in corpuscount:
if corpuscount[c] > average:
newcorpuscount[c]=corpuscount[c]
for row in reader:
text=row.get(column)
textlist=text.split()
for t in textlist:
if t in newcorpuscount:
row[t]=t
del row[column]
return reader
with open(x, newline='') as csvfile:
data = csv.DictReader(csvfile)
reader = [item for item in data]
newreader=[]
data=None
count=0
for row in reader:
for k in kill:
try:
del row[k]
except:
pass
for con in ConverTOstrings:
row["StrVer:"+str(con)]=con + ':' + str(row[con])
for e in educational:
reader=infermeaning(reader,e)
return reader
def ratiosplit(reader,ratio):
count=0
ratioreader=[]
oldreader=[]
for row in reader:
count+=1
newrow=row.copy()
if count % ratio==0:
ratioreader.append(newrow)
else:
oldreader.append(newrow)
return [oldreader,ratioreader]
#SECTION TO SET UP FOR YOUR OWN DATA - ONE # = CODE LINE, MULTIPLE ##### = NOTES
#####DECLARE TIME
##NOW=datetime.datetime.now()
#print(datetime.datetime.now())
##### ADD NAME OF FIELD TO CONVERTS TO CONVERT SOMETHING TO STRING
#converts=[]
##### EDUCATIONAL WILL SPLIT A COMMENTS FIELD
#edX=[]
##### KILL WILL DROP A FIELD
#kill=[]
##### x is required as a raw string giving the file path of the CSV you want it to use
#x=r''
##### the line below creates a list of ordered dicts from the CSV that represents the source data
#r=readmaker(x=x,kill=kill,educational=edX,ConverTOstrings=converts)
##### splits the data, assuming a ratio of 5 learn to 1 test (change to taste); relies on the data being sorted - hint: sort on the key
#r=ratiosplit(r,5)
#r2=r[1]
#r=r[0]
##### relies on knowing the key for the CSV; the two values below need to be set
#lockpick='KEY FOR WORK NEEDS  before string of key'
#update='KEY FOR WORK DOES NOT NEED NEEDS  before string of key - RENAMES KEY AND REMOVES  FOR FINAL OUTPUT'
##### START AND END
#START='FIELD NAME OF START DATE'
#END='FIELD NAME OF END DATE'
#ACCURACY=NUMBER OF DAYS YOU FIND AN ACCEPTABLE "CORRECT FORECAST"
#csvloci=SUGGESTED: r'C:\CSVs\\' FILE LOCATION TO OUTPUT DATA AND BUILD THE DATABASE; AS LONG AS IT IS POINTED AT THE SAME LOCATION EACH TIME IT WILL REUSE THE SAME AI
##### THE CODE THAT BUILDS MINDS - DON'T CHANGE UNLESS YOU HAVE READ THE FULL CODE
#for i in range(100):
# countalot+=1
# myhive=hivemind(r,lockpick,START,END,update,start=1,accuracy=ACCURACY,csvloci=csvloci)
# myhive.multitest(r2,str("Random Test "+str(datetime.datetime.now())+" (1 in 5 chosen to test) Epoch: " + str(countalot)))
#print((datetime.datetime.now()-NOW))
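##### HEDGED EXAMPLE (added for illustration): a minimal end-to-end sketch of the setup
##### described above. The file path and the field names ('Project ID', 'Start Date',
##### 'End Date', 'Work ID') are placeholders/assumptions, not values from this repository.
#r = readmaker(x=r'C:\CSVs\input.csv', kill=[], educational=[], ConverTOstrings=[])
#learn, holdout = ratiosplit(r, 5)
#myhive = hivemind(learn, 'Project ID', 'Start Date', 'End Date', 'Work ID',
#                  start=1, accuracy=15, csvloci=r'C:\CSVs\\')
#myhive.multitest(holdout, "hold-out test")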
``` |
{
"source": "joeyjojo-zz/django_offline",
"score": 2
} |
#### File: src/django_offline/networkaccessmanager.py
```python
__author__ = 'jond'
import urllib
import urlparse
import time
from PyQt4 import QtNetwork, QtCore
import django.test
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.http import HttpRequest, SimpleCookie
import django.test.client
from django.core.handlers.wsgi import WSGIRequest
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.utils.importlib import import_module
import django_offline.handlers
class NetworkAccessManager(QtNetwork.QNetworkAccessManager):
"""
Our network access manager that is used instead of the default
This allows us to jump in and interrupt the calls to usually external
services
"""
def createRequest(self, operation, request, data):
"""
Deal with the request when it comes in
"""
t = time.time()
print 'request in', t
argd = {}
reply = None
fullheader = ''
if request.url().host() == '127.0.0.1':
# retreive the post data
if data is not None:
dataread = data.readAll()
postargs = str(dataread) # interesting that we don't unicode it here, but this seems to work
contenttypeheader = str(request.header(QtNetwork.QNetworkRequest.ContentTypeHeader).toString()).split(';')[0]
if contenttypeheader == 'multipart/form-data':
argd = postargs
fullheader = str(request.header(QtNetwork.QNetworkRequest.ContentTypeHeader).toString())
elif contenttypeheader == 'application/x-www-form-urlencoded':
argd = postargs
fullheader = str(request.header(QtNetwork.QNetworkRequest.ContentTypeHeader).toString())
else:
argd = urlparse.parse_qs(urllib.unquote_plus(postargs.encode('ascii')).decode('utf-8'),
keep_blank_values=True)
# get a handle on the application
urlstring = unicode(request.url().toString())
handler = StaticFilesHandler(django.test.client.ClientHandler)
handler.load_middleware()
django_request = None
rqconv = ConvertedRequest(self.cookieJar())
# doesn't matter because sqlite is unencrypted anyway!
# currently used because django requires a username and password
rqconv.login(username='default', password='<PASSWORD>')
if operation == QtNetwork.QNetworkAccessManager.PostOperation:
if argd == {}:
# handle empty post data
argd = ''
django_request = rqconv.post(urlstring, argd, content_type=fullheader)
elif operation == QtNetwork.QNetworkAccessManager.GetOperation:
django_request = rqconv.get(urlstring, argd)
response = handler.get_response(django_request)
reply = django_offline.handlers.FakeReply(self, request, operation, response)
if reply is None:
reply = QtNetwork.QNetworkAccessManager.createRequest(self, operation, request, data)
reply.ignoreSslErrors()
to = time.time()
print 'request out', to, 'taken', to-t
return reply
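# A hedged sketch (added for illustration) of how a manager like this is typically
# installed so that requests to 127.0.0.1 are answered by Django in-process. The
# QtWebKit usage below is an assumption about the surrounding application, not code
# taken from this file.
#
#     from PyQt4 import QtWebKit
#     view = QtWebKit.QWebView()
#     view.page().setNetworkAccessManager(NetworkAccessManager())
#     view.load(QtCore.QUrl('http://127.0.0.1/'))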
class ConvertedRequest(object):
"""
Class that takes a QNetworkRequest and converts it to a
request type django understands
"""
def __init__(self, cookiejar):
d={}
for cookie in cookiejar.cookiesForUrl(QtCore.QUrl('http://127.0.0.1')):
d[str(cookie.name())] = str(cookie.value())
self.cookies = SimpleCookie(d)
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See http://www.python.org/dev/peps/pep-3333/#environ-variables
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '), # these need retrieving from the cookiejar
'PATH_INFO': '/',
'REMOTE_ADDR': '127.0.0.1',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': 'testserver',
'SERVER_PORT': '80',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1,0),
'wsgi.url_scheme': 'http',
'wsgi.input': django.test.client.FakePayload(''),
'wsgi.errors': '',#self.errors, # these need retreiving properly
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
}
#environ.update(self.defaults)
environ.update(request)
return environ
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def get(self, path, data={}, **extra):
"Construct a GET request"
parsed = urlparse.urlparse(path)
r = {
'CONTENT_TYPE': 'text/html; charset=utf-8',
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': django.utils.http.urlencode(data, doseq=True) or parsed[4],
'REQUEST_METHOD': 'GET',
}
r.update(extra)
return self.request(**r)
def post(self, path, data='', content_type=None,
**extra):
"Construct a POST request."
if content_type is None:
raise Exception()
post_data = data
parsed = urlparse.urlparse(path)
r = {
'CONTENT_LENGTH': len(post_data),
'CONTENT_TYPE': content_type,
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': parsed[4],
'REQUEST_METHOD': 'POST',
'wsgi.input': django.test.client.FakePayload(post_data),
'_body': post_data
}
r.update(extra)
return self.request(**r)
def _get_path(self, parsed):
# If there are parameters, add them
if parsed[3]:
return urllib.unquote(parsed[2] + ";" + parsed[3])
else:
return urllib.unquote(parsed[2])
def _encode_data(self, data, content_type, ):
if content_type is django.test.client.MULTIPART_CONTENT:
dataencode = django.test.client.encode_multipart(django.test.client.BOUNDARY, data)
return dataencode
else:
# Encode the content so that the byte representation is correct.
match = django.test.client.CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
return django.test.client.smart_str(data, encoding=charset)
def login(self, **credentials):
"""
Sets the Factory to appear as if it has successfully logged into a site.
Returns True if login is possible; False if the provided credentials
are incorrect, or the user is inactive, or if the sessions framework is
not available.
"""
self.session = None
user = authenticate(**credentials)
if user and user.is_active\
and 'django.contrib.sessions' in settings.INSTALLED_APPS:
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
return True
else:
return False
```
#### File: django_offline/src/main.py
```python
import sys
__author__ = 'jond'
def main():
import django_offline
import mysite.settings
sys.exit(django_offline.run(mysite.settings.MAIN_URL))
if __name__ == '__main__':
main()
```
#### File: src/polls/views.py
```python
from polls.models import Poll
from django.http import HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
def index(request):
latest_poll_list = Poll.objects.all().order_by('-pub_date')[:5]
return render_to_response('polls/index.html', {'latest_poll_list': latest_poll_list})
def detail(request, poll_id):
p = get_object_or_404(Poll, pk=poll_id)
return render_to_response('polls/detail.html', {'poll': p})
def results(request, poll_id):
return HttpResponse("You're looking at the results of poll %s." % poll_id)
def vote(request, poll_id):
return HttpResponse("You're voting on poll %s." % poll_id)
``` |
{
"source": "joeyjurjens/django-oscar",
"score": 2
} |
#### File: integration/basket/test_views.py
```python
from django.contrib.messages import get_messages
from django.test import TestCase
from django.urls import reverse
from oscar.apps.basket import views
from oscar.core.loading import get_model
from oscar.test import factories
from oscar.test.factories import (
AttributeOptionFactory, AttributeOptionGroupFactory, OptionFactory)
from oscar.test.testcases import WebTestCase
from tests.fixtures import RequestFactory
from tests.functional.checkout import CheckoutMixin
Option = get_model("catalogue", "Option")
class TestVoucherAddView(TestCase):
def test_get(self):
request = RequestFactory().get('/')
view = views.VoucherAddView.as_view()
response = view(request)
self.assertEqual(response.status_code, 302)
def _get_voucher_message(self, request):
return '\n'.join(str(m.message) for m in get_messages(request))
def test_post_valid(self):
voucher = factories.VoucherFactory()
self.assertTrue(voucher.is_active())
data = {
'code': voucher.code
}
request = RequestFactory().post('/', data=data)
request.basket.save()
view = views.VoucherAddView.as_view()
response = view(request)
self.assertEqual(response.status_code, 302)
voucher = voucher.__class__.objects.get(pk=voucher.pk)
self.assertEqual(voucher.num_basket_additions, 1, msg=self._get_voucher_message(request))
def test_post_valid_from_set(self):
voucherset = factories.VoucherSetFactory()
voucher = voucherset.vouchers.first()
self.assertTrue(voucher.is_active())
data = {
'code': voucher.code
}
request = RequestFactory().post('/', data=data)
request.basket.save()
view = views.VoucherAddView.as_view()
response = view(request)
self.assertEqual(response.status_code, 302)
voucher = voucher.__class__.objects.get(pk=voucher.pk)
self.assertEqual(voucher.num_basket_additions, 1, msg=self._get_voucher_message(request))
self.assertEqual(voucherset.num_basket_additions, 1)
class TestVoucherRemoveView(TestCase):
def test_post_valid(self):
voucher = factories.VoucherFactory(num_basket_additions=5)
data = {
'code': voucher.code
}
request = RequestFactory().post('/', data=data)
request.basket.save()
request.basket.vouchers.add(voucher)
view = views.VoucherRemoveView.as_view()
response = view(request, pk=voucher.pk)
self.assertEqual(response.status_code, 302)
voucher = voucher.__class__.objects.get(pk=voucher.pk)
self.assertEqual(voucher.num_basket_additions, 4)
def test_post_with_missing_voucher(self):
""" If the voucher is missing, verify the view queues a message and redirects. """
pk = '12345'
view = views.VoucherRemoveView.as_view()
request = RequestFactory().post('/')
request.basket.save()
response = view(request, pk=pk)
self.assertEqual(response.status_code, 302)
actual = list(get_messages(request))[-1].message
expected = "No voucher found with id '{}'".format(pk)
self.assertEqual(actual, expected)
class TestBasketSummaryView(TestCase):
def setUp(self):
self.url = reverse('basket:summary')
self.country = factories.CountryFactory()
self.user = factories.UserFactory()
def test_default_shipping_address(self):
user_address = factories.UserAddressFactory(
country=self.country, user=self.user, is_default_for_shipping=True
)
request = RequestFactory().get(self.url, user=self.user)
view = views.BasketView(request=request)
self.assertEqual(view.get_default_shipping_address(), user_address)
def test_default_shipping_address_for_anonymous_user(self):
request = RequestFactory().get(self.url)
view = views.BasketView(request=request)
self.assertIsNone(view.get_default_shipping_address())
class TestVoucherViews(CheckoutMixin, WebTestCase):
csrf_checks = False
def setUp(self):
self.voucher = factories.create_voucher()
super().setUp()
def test_add_voucher(self):
"""
Checks that voucher can be added to basket through appropriate view.
"""
self.add_product_to_basket()
assert self.voucher.basket_set.count() == 0
response = self.post(reverse('basket:vouchers-add'), params={'code': self.voucher.code})
self.assertRedirectsTo(response, 'basket:summary')
assert self.voucher.basket_set.count() == 1
def test_remove_voucher(self):
"""
Checks that voucher can be removed from basket through appropriate view.
"""
self.add_product_to_basket()
self.add_voucher_to_basket(voucher=self.voucher)
assert self.voucher.basket_set.count() == 1
response = self.post(reverse('basket:vouchers-remove', kwargs={'pk': self.voucher.id}))
self.assertRedirectsTo(response, 'basket:summary')
assert self.voucher.basket_set.count() == 0
class TestOptionAddToBasketView(TestCase):
def setUp(self):
super().setUp()
def setup_options(self, required):
self.product = factories.create_product(num_in_stock=1)
group = AttributeOptionGroupFactory(name="minte")
AttributeOptionFactory(option="henk", group=group)
AttributeOptionFactory(option="klaas", group=group)
self.select = OptionFactory(required=required, code=Option.SELECT, type=Option.SELECT, option_group=group)
self.radio = OptionFactory(required=required, code=Option.RADIO, type=Option.RADIO, option_group=group)
self.multi_select = OptionFactory(
required=required, code=Option.MULTI_SELECT, type=Option.MULTI_SELECT, option_group=group)
self.checkbox = OptionFactory(
required=required, code=Option.CHECKBOX, type=Option.CHECKBOX, option_group=group)
self.product.product_class.options.add(self.select)
self.product.product_class.options.add(self.radio)
self.product.product_class.options.add(self.multi_select)
self.product.product_class.options.add(self.checkbox)
def test_option_visible_required(self):
self.setup_options(True)
url = reverse('catalogue:detail', args=(self.product.slug, self.product.pk))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, Option.SELECT)
self.assertContains(response, Option.RADIO)
self.assertContains(response, Option.MULTI_SELECT)
self.assertContains(response, Option.CHECKBOX)
def test_option_visible_not_required(self):
self.setup_options(False)
url = reverse('catalogue:detail', args=(self.product.slug, self.product.pk))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, Option.SELECT)
self.assertContains(response, Option.RADIO)
self.assertContains(response, Option.MULTI_SELECT)
self.assertContains(response, Option.CHECKBOX)
def test_add_to_basket_with_options_required(self):
self.setup_options(True)
url = reverse('basket:add', kwargs={'pk': self.product.pk})
post_params = {
'product_id': self.product.id,
Option.SELECT: 'klaas',
Option.RADIO: 'henk',
Option.MULTI_SELECT: ['henk', 'klaas'],
Option.CHECKBOX: ['henk'],
'action': 'add',
'quantity': 1
}
response = self.client.post(url, post_params, follow=True)
basket = response.context["basket"]
self.assertEqual(basket.all_lines().count(), 1, "One line should have been added to the basket")
line, = basket.all_lines()
self.assertEqual(line.attributes.count(), 4, "Four line attributes should have been added to the basket")
checkbox, multi_select, radio, select = line.attributes.order_by("option__code")
self.assertEqual(checkbox.value, ["henk"], "The lineattribute should be saved as json")
self.assertEqual(
checkbox.option, self.checkbox,
"The lineattribute's option should be the option created by the factory"
)
self.assertListEqual(multi_select.value, ["henk", "klaas"], "The lineattribute should be saved as json")
self.assertEqual(
multi_select.option, self.multi_select,
"The lineattribute's option should be the option created by the factory"
)
self.assertEqual(radio.value, "henk", "The lineattribute should be saved as json")
self.assertEqual(
radio.option, self.radio,
"The lineattribute's option should be the option created by the factory"
)
self.assertEqual(select.value, "klaas", "The lineattribute should be saved as json")
self.assertEqual(
select.option, self.select,
"The lineattribute's option should be the option created by the factory"
)
def test_add_to_basket_with_options_not_required(self):
self.setup_options(False)
url = reverse('basket:add', kwargs={'pk': self.product.pk})
post_params = {
'product_id': self.product.id,
Option.SELECT: 'klaas',
Option.RADIO: 'henk',
Option.MULTI_SELECT: [],
Option.CHECKBOX: ['henk'],
'action': 'add',
'quantity': 1
}
response = self.client.post(url, post_params, follow=True)
basket = response.context["basket"]
self.assertEqual(basket.all_lines().count(), 1, "One line should have been added to the basket")
line, = basket.all_lines()
self.assertEqual(line.attributes.count(), 3, "Three line attributes should have been added to the basket")
checkbox, radio, select = line.attributes.order_by("option__code")
self.assertEqual(checkbox.value, ["henk"], "The lineattribute should be saved as json")
self.assertEqual(
checkbox.option, self.checkbox,
"The lineattribute's option should be the option created by the factory"
)
self.assertEqual(radio.value, "henk", "The lineattribute should be saved as json")
self.assertEqual(
radio.option, self.radio,
"The lineattribute's option should be the option created by the factory"
)
self.assertEqual(select.value, "klaas", "The lineattribute should be saved as json")
self.assertEqual(
select.option, self.select,
"The lineattribute's option should be the option created by the factory"
)
``` |
{
"source": "joeyjy/ayi-django",
"score": 2
} |
#### File: ms/booking/views.py
```python
import os
import datetime
import time
import md5
from pytz import timezone
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import get_current_site
from django.db.models import Q
from django.db.transaction import commit_on_success
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from alipay import Alipay
from accounts.email import create_sms, create_mail_book_confirm, create_mail_book_cancel, internal_book_confirm, internal_book_cancel
from accounts.models import Compound
from accounts.utils import JSONResponse, DefaultDate
from .forms import BookingForm
from .models import Booking
@login_required(login_url='/accounts/signin/')
@csrf_exempt
def pay(request):
if request.session.get('pay_booking_id', False):
item = Booking.objects.get(id=request.session['pay_booking_id'])
fee = item.hour*35
per_hour = 35
if item.book_type == 1:
fee = item.hour*50
per_hour = 50
else:
return HttpResponse('Pay without booking anything? :)')
return render_to_response('booking/payment.html',
RequestContext(request, locals()))
@login_required(login_url='/accounts/signin/')
@csrf_exempt
def clean_needs(request):
if not request.session.get('booking_time'):
return HttpResponseRedirect(reverse('home_index'))
if request.method == 'POST':
post_data = request.POST.copy()
form = BookingForm(post_data)
form.booker = request.user
if form.is_valid():
obj = form.save(commit=False)
obj.booker = request.user
obj.status = 4
obj.hour = post_data.get('hour')
obj.book_type = request.session.get('booking_type', '')
obj.clean_time = request.session.get('booking_time', '')
obj.save()
request.session['pay_booking_id'] = obj.id
return HttpResponseRedirect(reverse('pay'))
else:
print form.errors
print request.user
else:
form =BookingForm()
if request.session.get('lang') == 'cn':
return render_to_response('cn/booking/needs.html',
RequestContext(request, locals()))
return render_to_response('booking/needs.html',
RequestContext(request, locals()))
@login_required(login_url='/accounts/signin/')
@csrf_exempt
def history(request, username):
if request.user.username != username and not request.user.has_perm('booking.change_booking'):
raise PermissionDenied
booking_list = Booking.objects.filter(Q(booker__username=username), Q(status=1) | Q(status=4) | Q(status=5))
booking_pass = Booking.objects.filter(booker__username=username).exclude(Q(status=1) | Q(status=4) | Q(status=5))
if booking_list:
customer = booking_list[0].booker.username
elif booking_pass:
customer = booking_pass[0].booker.username
else:
customer = request.user.username
default_date = DefaultDate()
return render_to_response('booking/history.html',
RequestContext(request, locals()))
@login_required(login_url='/accounts/signin/')
@csrf_exempt
def billing(request, username):
if request.user.username != username and not request.user.has_perm('booking.change_booking'):
raise PermissionDenied
booking_list = Booking.objects.filter(booker__username=username)
if booking_list:
customer = booking_list[0].booker.username
else:
customer = request.user.username
default_date = DefaultDate()
return render_to_response('booking/billing.html',
RequestContext(request, locals()))
@login_required(login_url='/accounts/signin/')
@csrf_exempt
def booking_cancel(request, username, pk):
if request.user.username != username and not request.user.has_perm('booking.change_booking'):
raise PermissionDenied
item = Booking.objects.get(id=pk)
item.status = 3
create_sms(item.booker, item, cancel=True)
create_mail_book_cancel(item.booker, item)
internal_book_cancel(item.booker, item)
item.save()
return HttpResponseRedirect(reverse('booking_history',args=[username]))
def compound_info(request, pk):
compound = Compound.objects.get(id=pk)
result = {'address':compound.street_address,
'name':compound.street_name,
'cross':compound.cross_street,
'area':compound.district,}
return JSONResponse(result)
@login_required(login_url='/accounts/signin/')
@csrf_exempt
def handle_confirm(request):
if request.method == 'POST' and request.is_ajax():
post_data = request.POST.copy()
print post_data
if request.session.get('pay_booking_id', False):
item = Booking.objects.get(id=request.session['pay_booking_id'])
item.status = 1
item.pay_method = 1
item.save()
create_sms(item.booker, item)
create_mail_book_confirm(item.booker, item)
internal_book_confirm(item.booker, item)
return HttpResponse("Y")
raise PermissionDenied
@commit_on_success
def payment(request, id):
item = get_object_or_404(Booking, id=id)
fee = item.hour*35
fee_uni = item.hour*3500
if item.book_type == 1:
fee = item.hour*50
fee_uni = item.hour*5000
url = lambda path: "".join(["http://", get_current_site(request).domain, path])
if request.method == "POST":
data = request.POST.copy()
if data.get('pay_method', False) == '3':
return_url=url(reverse('userena_profile_edit',args=[item.booker.username]))
notify_url="http://www.merryservices.com/unipay_notify_endpoint/%s/" % (item.id)
date = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
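            # Descriptive note (hedged): the gateway appears to expect the full parameter
            # string, shared key included, hashed with MD5 and appended as an uppercase
            # hex digest in the signMsg query parameter, as done below.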
msg = 'pickupUrl=%s&receiveUrl=%s&version=v1.0&signType=0&merchantId=109000914110001&payerName=%s&payerEmail=%s&orderNo=NO%s&orderAmount=%s&orderDatetime=%s&productName=merryservices&payType=0&key=1234567890' % (notify_url, return_url, item.booker.username, item.booker.email, date, str(fee_uni)[:-2], date)
m = md5.new()
m.update(msg)
para = m.hexdigest().upper()
url = 'http://172.16.31.10:443/gateway/index.do?' + msg + '&signMsg=' + para
return render_to_response("booking/unipay.html", {"url": url})
if data.get('pay_method', False) == '4':
alipay = Alipay(pid=settings.ALIPAY_PID, key=settings.ALIPAY_KEY, seller_email=settings.ALIPAY_SELLER_EMAIL)
url = alipay.create_direct_pay_by_user_url(
out_trade_no=item.id,
subject='Merry clean service',
total_fee=fee,
return_url=url(reverse('userena_profile_edit',args=[item.booker.username])),
notify_url=url("/alipay_notify_endpoint/"))
return render_to_response("booking/alipay.html", {"url": url})
return HttpResponseBadRequest()
@csrf_exempt
def alipay_notify(request):
""" Notify callback for alipay """
alipay = Alipay(pid=settings.ALIPAY_PID, key=settings.ALIPAY_KEY, seller_email=settings.ALIPAY_SELLER_EMAIL)
if alipay.verify_notify(**dict(request.POST.items())):
if request.POST.get("trade_status") in ["WAIT_SELLER_SEND_GOODS", "TRADE_SUCCESS"]:
order = Booking.objects.get(id=request.POST.get("out_trade_no"))
order.status = 5
order.pay_method = 2
order.save()
create_sms(order.booker, order)
create_mail_book_confirm(order.booker, order)
internal_book_confirm(order.booker, order)
return HttpResponse('success')
else:
print '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
return HttpResponse('failure')
@csrf_exempt
def unipay_notify(request, id):
""" Notify callback for unipay """
order = get_object_or_404(Booking, id=id)
if request.method == 'POST':
post_data = request.POST.copy()
print post_data
if post_data.get("payResult") == '1':
order.status = 5
order.pay_method = 3
order.save()
create_sms(order.booker, order)
create_mail_book_confirm(order.booker, order)
internal_book_confirm(order.booker, order)
success_url = '/accounts/%s/edit/?orderNo=%s&orderAmount=%s&payResult=1&signMsg=%s' % (order.booker.username, post_data.get('orderNo'), post_data.get('orderAmount'), post_data.get('signMsg'))
return HttpResponseRedirect(success_url)
return HttpResponse('failure')
``` |
{
"source": "joey-kilgore/playground",
"score": 3
} |
#### File: playground/PythonUnitTest/calc.py
```python
def add(a, b):
return a+b
def subtract(a, b):
return a-b
def multiply(a, b):
# This is purposely written wrong to see what a failed
# test looks like
return a*a
def divide(a, b):
return a/b
```
#### File: joey-kilgore/playground/scrapeLinksCopy.py
```python
from bs4 import BeautifulSoup
import requests
import time
import os, sys
from time import gmtime, strftime
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
# Print New Line on Complete
if iteration == total:
print()
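# Hedged usage sketch for printProgressBar (not part of the original script):
#   total = 50
#   for i in range(total + 1):
#       time.sleep(0.05)
#       printProgressBar(i, total, prefix='Progress:', suffix='Complete', length=50)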
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
my_file_path = os.path.join(THIS_FOLDER, 'webScrapeOutput '+strftime("%Y_%m_%d %H_%M_%S", gmtime())+ '.txt')
my_file = open(my_file_path, 'w')
url = 'https://www.ncbi.nlm.nih.gov/pubmed/?term=unmyelinated+model'
r = requests.get(url)
data = r.text
soup = BeautifulSoup(data)
otherPages = []
totalArticles = 0
articles = []
for div in soup.find_all('div', class_='rslt'):
for subDiv in div.find_all('p'):
for link in subDiv.find_all('a'):
#print(link.get('href'))
if 'uid' in link.get('href'):
otherPages.append('https://www.ncbi.nlm.nih.gov' + link.get('href'))
else:
totalArticles+=1
articles.append('https://www.ncbi.nlm.nih.gov' + link.get('href'))
for url in otherPages:
print(url)
gettingPage = True
while gettingPage:
try:
r = requests.get(url)
gettingPage = False
except:
print("Connection refused by the server..")
print("Let me sleep for 5 seconds")
print("ZZzzzz...")
time.sleep(5)
print("Was a nice sleep, now let me continue...")
continue
data = r.text
soup = BeautifulSoup(data)
for div in soup.find_all('div', class_='rslt'):
for subDiv in div.find_all('p'):
for link in subDiv.find_all('a'):
if not 'uid' in link.get('href') and not ('https://www.ncbi.nlm.nih.gov' + link.get('href')) in articles:
totalArticles+=1
articles.append('https://www.ncbi.nlm.nih.gov' + link.get('href'))
progress = 0
for link in articles:
my_file.write(link+'\n')
gettingPage = True
while gettingPage:
try:
r = requests.get(link)
gettingPage = False
except:
print("Connection refused by the server..")
print("Let me sleep for 5 seconds")
print("ZZzzzz...")
time.sleep(5)
print("Was a nice sleep, now let me continue...")
continue
data = r.text
soup = BeautifulSoup(data)
for div in soup.find_all('div', class_='rprt abstract'):
for h1 in div.find_all('h1'):
try:
my_file.write(str(h1.getText())+'\n')
except:
                my_file.write('unknown character/title\n')
my_file.write('\n')
progress+=1
printProgressBar(progress, len(articles), prefix = 'Progress:', suffix = 'Complete', length = 50)
my_file.close()
print('TOTAL ARTICLES: '+str(totalArticles))
``` |
{
"source": "joeykilpatrick/cdk-amazon-chime-resources",
"score": 2
} |
#### File: cdk-amazon-chime-resources/resources/voiceconnectors.py
```python
import boto3
import logging
import os
logger = logging.getLogger()
try:
log_level = os.environ["LogLevel"]
if log_level not in ["INFO", "DEBUG"]:
log_level = "INFO"
except:
log_level = "INFO"
logger.setLevel(log_level)
chime = boto3.client("chime")
ssm = boto3.client("ssm")
def buildVoiceConnector(uid, region, name, encryption):
logger.info("Creating a new Voice Connector")
if encryption == "false":
require_encryption = False
else:
require_encryption = True
logger.info(f"Name: {name}")
logger.info(f"AwsRegion: {region}")
logger.info(f"RequireEncrpytion: {require_encryption}")
try:
voice_connector_id = chime.create_voice_connector(
Name=name, AwsRegion=region, RequireEncryption=require_encryption
)["VoiceConnector"]["VoiceConnectorId"]
except Exception as e:
error = {"error": f"Exception thrown: {e}"}
logger.error(error)
raise RuntimeError(error)
try:
ssm.put_parameter(
Name="/chime/voiceConnector/" + uid,
Description="Voice Connector Ordered",
Overwrite=True,
Value=voice_connector_id,
Type="String",
)
except Exception as e:
error = {"error": f"Exception thrown: {e}"}
logger.error(error)
raise RuntimeError(error)
return voice_connector_id
def buildStreaming(voice_connector_id, notificationTargets, dataRetention):
streaming_notification_targets = []
for notification_target in notificationTargets:
streaming_notification_targets.append({"NotificationTarget": notification_target})
logger.info(f"Streaming Notification Targets: {streaming_notification_targets}")
try:
chime.put_voice_connector_streaming_configuration(
VoiceConnectorId=voice_connector_id,
StreamingConfiguration={
"DataRetentionInHours": int(dataRetention),
"Disabled": False,
"StreamingNotificationTargets": streaming_notification_targets,
},
)
except Exception as e:
error = {"error": f"Exception thrown: {e}"}
logger.error(error)
raise RuntimeError(error)
return True
def buildTermination(voice_connector_id, callingRegions, terminationCidrs):
try:
chime.put_voice_connector_termination(
VoiceConnectorId=voice_connector_id,
Termination={
"CallingRegions": callingRegions,
"CidrAllowedList": terminationCidrs,
"Disabled": False,
},
)
except Exception as e:
error = {"error": f"Exception thrown: {e}"}
logger.error(error)
raise RuntimeError(error)
return True
def buildOrigination(voice_connector_id, origination):
routes = []
for route in origination:
transformed_route = {}
for key in route:
if key == "host" or key == "protocol":
transformed_route[key.capitalize()] = route[key]
else:
transformed_route[key.capitalize()] = int(route[key])
routes.append(transformed_route)
logger.info(f"Routes: {routes}")
try:
chime.put_voice_connector_origination(
VoiceConnectorId=voice_connector_id,
Origination={
"Routes": routes,
"Disabled": False,
},
)
except Exception as e:
error = {"error": f"Exception thrown: {e}"}
logger.error(error)
raise RuntimeError(error)
def createVoiceConnector(
uid,
region=None,
name=None,
encryption=None,
termination=None,
origination=None,
streaming=None,
**kwargs,
):
voice_connector_id = buildVoiceConnector(uid, region, name, encryption)
logger.info(f"Voice Connector Id: {voice_connector_id}")
if streaming:
logger.info(f"Streaming: {streaming}")
buildStreaming(voice_connector_id, streaming["notificationTargets"], streaming["dataRetention"])
if termination:
logger.info(f"Termination CIDRs: {termination}")
buildTermination(voice_connector_id, termination["callingRegions"], termination["terminationCidrs"])
if origination:
logger.info(f"Origination IPs: {origination}")
buildOrigination(voice_connector_id, origination)
return voice_connector_id
def deleteVoiceConnecytor(uid):
logger.info(f"Deleting Voice Connector: {uid}")
try:
voice_connector_to_delete = ssm.get_parameter(Name="/chime/voiceConnector/" + str(uid))["Parameter"]["Value"]
logger.info(f"Voice Connector to Delete: {voice_connector_to_delete}")
except Exception as e:
error = {"error": f"Exception thrown: {e}"}
logger.error(error)
raise RuntimeError(error)
try:
associated_phone_numbers = chime.list_phone_numbers(
FilterName="VoiceConnectorId", FilterValue=voice_connector_to_delete
)
except Exception as e:
error = {"error": f"Exception thrown: {e}"}
logger.error(error)
raise RuntimeError(error)
logger.info(f"Associated Phone Numbers: {associated_phone_numbers}")
if associated_phone_numbers["PhoneNumbers"]:
phone_numbers_to_disassociate = []
for phone_number in associated_phone_numbers["PhoneNumbers"]:
phone_numbers_to_disassociate.append(phone_number["PhoneNumberId"])
logger.info(f"Phone Numbers to Disassociate: {phone_numbers_to_disassociate}")
try:
chime.disassociate_phone_numbers_from_voice_connector(
VoiceConnectorId=voice_connector_to_delete, E164PhoneNumbers=phone_numbers_to_disassociate
)
except Exception as e:
error = {"error": f"Exception thrown: {e}"}
logger.error(error)
raise RuntimeError(error)
try:
chime.delete_voice_connector(VoiceConnectorId=voice_connector_to_delete)
except Exception as e:
error = {"error": f"Exception thrown: {e}"}
logger.error(error)
raise RuntimeError(error)
return None
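# Hedged usage sketch (illustrative only; the uid, region, name, and CIDR values below
# are assumptions, not taken from the original handler):
#   vc_id = createVoiceConnector(
#       "example-uid",
#       region="us-east-1",
#       name="example-connector",
#       encryption="false",
#       termination={"callingRegions": ["US"], "terminationCidrs": ["198.51.100.0/27"]},
#   )
#   deleteVoiceConnecytor("example-uid")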
``` |
{
"source": "joeykrug/augur",
"score": 3
} |
#### File: augur-core/tests/test_cashSender_usage.py
```python
import os
def test_cashSender_usage():
for root, dirs, files in os.walk('source/contracts'):
for file in files:
if (file == "CashSender.sol"):
continue
if (not file.endswith("sol")):
continue
with open(os.path.join(root, file), "r") as auto:
data = auto.read()
expectedOccurences = 0
if (file == "Market.sol"):
expectedOccurences = 1
notFound = data.count("cash.transfer") == expectedOccurences
assert notFound, "Contract %s has an unexpected cash.transfer in it" % file
```
#### File: tests/trading/test_affiliates.py
```python
from eth_tester.exceptions import TransactionFailed
from pytest import raises, fixture, mark
from utils import fix, AssertLog, EtherDelta, TokenDelta, BuyWithCash, nullAddress, longTo32Bytes
from constants import YES, NO
from old_eth_utils import ecsign, sha3, normalize_key, int_to_32bytearray, bytearray_to_bytestr, zpad
def test_fingerprint(kitchenSinkFixture, universe, cash, market):
affiliates = kitchenSinkFixture.contracts['Affiliates']
affiliateValidator = kitchenSinkFixture.contracts['AffiliateValidator']
shareToken = kitchenSinkFixture.contracts['ShareToken']
accountFingerprint = longTo32Bytes(11)
affiliateFingerprint = longTo32Bytes(12)
account = kitchenSinkFixture.accounts[0]
affiliate = kitchenSinkFixture.accounts[1]
affiliates.setFingerprint(accountFingerprint, sender=account)
affiliates.setFingerprint(affiliateFingerprint, sender=affiliate)
affiliates.setReferrer(affiliate)
# Confirm affiliate fees begin at 0 for the referrer
assert market.affiliateFeesAttoCash(affiliate) == 0
numSets = 10
cost = numSets * market.getNumTicks()
cash.faucet(cost)
shareToken.buyCompleteSets(market.address, account, numSets)
shareToken.sellCompleteSets(market.address, account, account, numSets, accountFingerprint)
expectedAffiliateFees = cost * .0025
expectedAffiliateFees *= .8
assert market.affiliateFeesAttoCash(affiliate) == expectedAffiliateFees
    # If we pass the affiliate fingerprint, the affiliate fees do not apply and remain what they were before the complete set sale
cash.faucet(cost)
shareToken.buyCompleteSets(market.address, account, numSets)
shareToken.sellCompleteSets(market.address, account, account, numSets, affiliateFingerprint)
assert market.affiliateFeesAttoCash(affiliate) == expectedAffiliateFees
def test_affiliate_validator(kitchenSinkFixture, universe, cash):
affiliates = kitchenSinkFixture.contracts['Affiliates']
affiliateValidator = kitchenSinkFixture.contracts['AffiliateValidator']
shareToken = kitchenSinkFixture.contracts['ShareToken']
market = kitchenSinkFixture.createReasonableYesNoMarket(universe, affiliateValidator = affiliateValidator.address)
accountFingerprint = longTo32Bytes(11)
affiliateFingerprint = longTo32Bytes(12)
account = kitchenSinkFixture.accounts[0]
affiliate = kitchenSinkFixture.accounts[1]
affiliateValidatorOperator = kitchenSinkFixture.accounts[5]
affiliateValidatorOperatorPrivKey = kitchenSinkFixture.privateKeys[5]
affiliates.setFingerprint(accountFingerprint, sender=account)
affiliates.setFingerprint(affiliateFingerprint, sender=affiliate)
affiliates.setReferrer(affiliate)
accountKey = longTo32Bytes(21)
salt = 0
accountHash = affiliateValidator.getKeyHash(accountKey, salt)
# A bad signature will be rejected
with raises(TransactionFailed):
affiliateValidator.addKey(accountKey, salt, longTo32Bytes(0), longTo32Bytes(0), 8, sender=account)
# This includes being signed by a non operator. So the same sig will fail initially but work once the signer is approved as an operator
r, s, v = signHash(accountHash, affiliateValidatorOperatorPrivKey)
with raises(TransactionFailed):
affiliateValidator.addKey(accountKey, salt, r, s, v, sender=account)
    # Successfully add the key for the trader account
affiliateValidator.addOperator(affiliateValidatorOperator)
affiliateValidator.addKey(accountKey, salt, r, s, v, sender=account)
# Re-using a salt will not work
with raises(TransactionFailed):
affiliateValidator.addKey(accountKey, salt, r, s, v, sender=account)
affiliateKey = longTo32Bytes(22)
salt += 1
affiliateHash = affiliateValidator.getKeyHash(affiliateKey, salt)
r, s, v = signHash(affiliateHash, affiliateValidatorOperatorPrivKey)
affiliateValidator.addKey(affiliateKey, salt, r, s, v, sender=affiliate)
# Confirm affiliate fees begin at 0 for the referrer
assert market.affiliateFeesAttoCash(affiliate) == 0
numSets = 10
cost = numSets * market.getNumTicks()
cash.faucet(cost)
shareToken.buyCompleteSets(market.address, account, numSets)
shareToken.sellCompleteSets(market.address, account, account, numSets, accountFingerprint)
expectedAffiliateFees = cost * .0025
expectedAffiliateFees *= .8
assert market.affiliateFeesAttoCash(affiliate) == expectedAffiliateFees
    # If we use an account that has registered the same affiliate key as the referrer, the affiliate fees do not apply and remain what they were before the complete set sale
dupeAccount = kitchenSinkFixture.accounts[2]
affiliates.setReferrer(affiliate, sender=dupeAccount)
salt += 1
affiliateHash = affiliateValidator.getKeyHash(affiliateKey, salt)
r, s, v = signHash(affiliateHash, affiliateValidatorOperatorPrivKey)
affiliateValidator.addKey(affiliateKey, salt, r, s, v, sender=dupeAccount)
cash.faucet(cost, sender=dupeAccount)
shareToken.buyCompleteSets(market.address, dupeAccount, numSets, sender=dupeAccount)
shareToken.sellCompleteSets(market.address, dupeAccount, dupeAccount, numSets, accountFingerprint, sender=dupeAccount)
assert market.affiliateFeesAttoCash(affiliate) == expectedAffiliateFees
# It will also not work if the account or the referrer does not have a key registered with the validator
noKeyAccount = kitchenSinkFixture.accounts[3]
affiliates.setReferrer(affiliate, sender=noKeyAccount)
cash.faucet(cost, sender=noKeyAccount)
shareToken.buyCompleteSets(market.address, noKeyAccount, numSets, sender=noKeyAccount)
shareToken.sellCompleteSets(market.address, noKeyAccount, noKeyAccount, numSets, accountFingerprint, sender=noKeyAccount)
assert market.affiliateFeesAttoCash(affiliate) == expectedAffiliateFees
def signHash(hash, private_key):
key = normalize_key(private_key.to_hex())
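    # Prefix the 32-byte hash with the standard Ethereum signed-message header
    # ("\x19Ethereum Signed Message:\n" + message length) before hashing and signing,
    # presumably matching the digest the AffiliateValidator contract recovers on-chain.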
v, r, s = ecsign(sha3("\x19Ethereum Signed Message:\n32".encode('utf-8') + hash), key)
return zpad(bytearray_to_bytestr(int_to_32bytearray(r)), 32), zpad(bytearray_to_bytestr(int_to_32bytearray(s)), 32), v
``` |
{
"source": "JoeyL6/Assorted_projects",
"score": 4
} |
#### File: Assorted_projects/MD1_Queue_Modeling/mytypes.py
```python
class Queue(object):
def __init__(self, max_capacity = None):
'''
Initialize a queue.
If max_capacity is None, then the queue
has no maximum capacity.
'''
self.__queue = []
self.__max_capacity = max_capacity
@property
def max_capacity(self):
''' Getter for max_capacity '''
return self.__max_capacity
@property
def length(self):
'''
Returns the length of the queue
'''
return len(self.__queue)
@property
def front(self):
'''
Return the value at the front of the queue, *without*
removing it from the queue.
Returns: Value at the front of the queue
'''
return self.__queue[0]
def is_empty(self):
'''
Returns True if the queue contains no elements,
and False otherwise.
'''
return len(self.__queue) == 0
def enqueue(self, value):
'''
Enqueue a value in the queue (i.e., add the value
at the back of the queue).
Returns: True if the value was enqueue; False if
it wasn't enqueued because the queue has reached
its maximum capacity.
'''
if self.max_capacity is not None and self.length == self.max_capacity:
return False
else:
self.__queue.append(value)
return True
def dequeue(self):
'''
Dequeue a value from the queue (i.e., remove the
element at the front of the queue, and remove it
from the queue)
Returns: The dequeued value
'''
return self.__queue.pop(0)
def __repr__(self):
'''
Returns a string representation of the queue.
'''
s = ""
for v in reversed(self.__queue):
s += " --> " + str(v)
s += " -->"
return s
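# Hedged usage sketch (not part of the original module): a bounded queue in action.
if __name__ == "__main__":
    q = Queue(max_capacity=2)
    print(q.enqueue("customer-1"))   # True
    print(q.enqueue("customer-2"))   # True
    print(q.enqueue("customer-3"))   # False: the queue is at max capacity
    print(q.front)                   # customer-1
    print(q.dequeue())               # customer-1
    print(q.is_empty())              # False, one element remains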
``` |
{
"source": "joeylamcy/gchp",
"score": 2
} |
#### File: ESMPy/examples/exampletest.py
```python
try:
from unittest import SkipTest
except ImportError:
from nose import SkipTest
from ESMF.test.base import TestBase, attr
import ESMF.api.constants as constants
class TestExamples(TestBase):
def test_helloworld(self):
from . import hello_world
# # ESMF IO does not work in mpiuni mode
# def test_cubed_sphere_to_mesh_regrid(self):
# if constants._ESMF_COMM == constants._ESMF_COMM_MPIUNI:
# raise SkipTest('ESMF must be built with MPI for test')
# else:
# from . import cubed_sphere_to_mesh_regrid
# ESMF IO does not work in mpiuni mode
# only example, not in documentation
def test_field_read(self):
if constants._ESMF_COMM == constants._ESMF_COMM_MPIUNI:
raise SkipTest('ESMF must be built with MPI for test')
else:
from . import field_read
# only example, not in documentation
def test_grid_create_peridim_mask(self):
from . import grid_create_peridim_mask
def test_grid_locstream_regrid(self):
from . import grid_locstream_regrid
def test_locstream_grid_regrid(self):
from . import locstream_grid_regrid
def test_mesh_locstream_regrid(self):
from . import mesh_locstream_regrid
# ESMF IO does not work in mpiuni mode
def test_read_write_weight_file(self):
if constants._ESMF_COMM == constants._ESMF_COMM_MPIUNI:
raise SkipTest('ESMF must be built with MPI for test')
else:
from . import read_write_weight_file
# ESMF IO does not work in mpiuni mode
def test_regrid_from_file(self):
if constants._ESMF_COMM == constants._ESMF_COMM_MPIUNI:
raise SkipTest('ESMF must be built with MPI for test')
else:
from . import regrid_from_file
# only example, not in documentation
@attr('slow')
def test_tripole_regrid(self):
from . import tripole_regrid
# only example, not in documentation
def test_ugrid_latlon_regrid(self):
from . import ugrid_latlon_regrid
# only example, not in documentation
def test_ungridded_dimension_regrid(self):
from . import ungridded_dimension_regrid
```
#### File: ESMF/api/regrid.py
```python
from ESMF.api import constants
from ESMF.api.field import *
class Regrid(object):
"""
The :class:`~ESMF.api.regrid.Regrid` object represents a regridding operator between two :class:`Fields <ESMF.api.field.Field>`. The
creation of this object is analogous to ESMF_FieldRegridStore(), and
calling this object corresponds to ESMF_FieldRegrid().
    ESMF_FieldRegridRelease() is called when the :class:`~ESMF.api.regrid.Regrid` object goes out of
    scope (this only happens when the :class:`~ESMF.api.esmpymanager.Manager` goes out of scope; there is also a
    destroy() call for explicit deallocation of the :class:`~ESMF.api.regrid.Regrid`).
For more information about the ESMF Regridding functionality, please see
the `ESMF Regrid documentation
<http://www.earthsystemmodeling.org/esmf_releases/public/ESMF_7_1_0r/ESMF_refdoc/node5.html#SECTION05012000000000000000>`_.
The following arguments are used to create a handle to a Regridding
operation between two :class:`Fields <ESMF.api.field.Field>`.
*REQUIRED:*
:param Field srcfield: source :class:`~ESMF.api.field.Field` associated with an underlying :class:`~ESMF.api.grid.Grid`,
:class:`~ESMF.api.mesh.Mesh` or :class:`~ESMF.api.locstream.LocStream`.
:param Field dstfield: destination :class:`~ESMF.api.field.Field` associated with an underlying
:class:`~ESMF.api.grid.Grid`, :class:`~ESMF.api.mesh.Mesh` or :class:`~ESMF.api.locstream.LocStream`. The data in this :class:`~ESMF.api.field.Field` may be overwritten
by this call.
*OPTIONAL:*
:param string filename: path to the output netCDF weight file
:param ndarray src_mask_values: a numpy array of values that should be
considered masked value on the source :class:`~ESMF.api.field.Field`.
:param ndarray dst_mask_values: a numpy array of values that should be
considered masked value on the destination :class:`~ESMF.api.field.Field`.
:param RegridMethod regrid_method: specifies which
:attr:`~ESMF.api.constants.RegridMethod` to use. If ``None``, defaults
to :attr:`~ESMF.api.constants.RegridMethod.BILINEAR`.
:param PoleMethod pole_method: specifies which type of artificial pole
to construct on the source :class:`~ESMF.api.grid.Grid` for regridding. If
``None``, defaults to: :attr:`~ESMF.api.constants.PoleMethod.NONE` for
regridmethod == :attr:`~ESMF.api.constants.RegridMethod.CONSERVE`, or
:attr:`~ESMF.api.constants.PoleMethod.ALLAVG` for
regridmethod != :attr:`~ESMF.api.constants.RegridMethod.CONSERVE`.
:param int regrid_pole_npoints: specifies how many points to average over
if polemethod == :attr:`~ESMF.api.constants.PoleMethod.ALLAVG`.
:param LineType line_type: select the path of the line that connects two
points on the surface of a sphere. This in turn controls the path along
which distances are calculated and the shape of the edges that make up a
cell. If ``None``, defaults to:
:attr:`~ESMF.api.constants.LineType.GREAT_CIRCLE` for
regridmethod == :attr:`~ESMF.api.constants.RegridMethod.CONSERVE`, or
:attr:`~ESMF.api.constants.LineType.CART` for
regridmethod != :attr:`~ESMF.api.constants.RegridMethod.CONSERVE`.
:param NormType norm_type: control which type of normalization to do when
generating conservative regridding weights. If ``None``, defaults to
:attr:`~ESMF.api.constants.NormType.DSTAREA`.
:param ExtrapMethod extrap_method: Specify which extrapolation method to use on
unmapped destination points after regridding.
:param int extrap_num_src_pnts: The number of source points to use for the
extrapolation methods that use more than one source point
(e.g. :attr:`~ESMF.api.constants.ExtrapMethod.NEAREST_IDAVG`). If not
specified, defaults to 8.
:param float extrap_dist_exponent: The exponent to raise the distance to when
calculating weights for the :attr:`~ESMF.api.constants.ExtrapMethod.NEAREST_IDAVG`
extrapolation method. A higher value reduces the influence of more distant
points. If not specified, defaults to ``2.0``.
:param UnmappedAction unmapped_action: specifies which action to take if a
destination point is found which does not map to any source point. If
``None``, defaults to :attr:`~ESMF.api.constants.UnmappedAction.ERROR`.
:param bool ignore_degenerate: Ignore degenerate cells when checking the
input :class:`Grids <ESMF.api.grid.Grid>` or :class:`Meshes <ESMF.api.mesh.Mesh>`
for errors. If this is set to True, then the regridding proceeds, but
degenerate cells will be skipped. If set to False, a degenerate cell produces
an error. This currently only applies to :attr:`~ESMF.api.constants.RegridMethod.CONSERVE`,
other regrid methods currently always skip degenerate cells. If ``None``, defaults
to ``False``.
:param ndarray src_frac_field: return a numpy array of values containing
weights corresponding to the amount of each :class:`~ESMF.api.field.Field`
value which contributes to the total mass of the :class:`~ESMF.api.field.Field`.
:param ndarray dst_frac_field: return a numpy array of values containing
weights corresponding to the amount of each :class:`~ESMF.api.field.Field`
value which contributes to the total mass of the :class:`~ESMF.api.field.Field`.
:param bool factors: If ``True``, return the factor and factor index list
when calling into ``ESMF``'s regrid store method. These lists are converted
to NumPy arrays and attached to the regrid object. The factor arrays
are retrievable via :meth:`~ESMF.api.regrid.get_factors` or :meth:`~ESMF.api.regrid.get_weights_dict`.
See the respective documentation on those methods for additional information.
For more information on how ``ESMF`` treats factor retrieval see the
documentation for `ESMF_FieldRegridStore <http://www.earthsystemmodeling.org/esmf_releases/last_built/ESMF_refdoc/node5.html#SECTION050366000000000000000>`_.
"""
@initialize
def __init__(self, srcfield=None, dstfield=None, filename=None, src_mask_values=None,
dst_mask_values=None, regrid_method=None, pole_method=None,
regrid_pole_npoints=None, line_type=None, norm_type=None, extrap_method=None,
extrap_num_src_pnts=None, extrap_dist_exponent=None, unmapped_action=None,
ignore_degenerate=None, create_rh=None, src_frac_field=None,
dst_frac_field=None, factors=False):
# Confirm the ESMF compiler will suport in-memory factor retrieval
if factors and not constants._ESMF_USE_INMEM_FACTORS:
raise RuntimeError("in-memory factors only supported with GNU (gfortran)")
# Routehandle storage
self._routehandle = 0
# Factor storage - only used when "factors=True"
self._factor_list = None
self._factor_index_list = None
self._num_factors = None
# We need to reference the pointers for deallocation
self._ptr_fl = None
self._ptr_fil = None
# Convert source and destination mask values to NumPy arrays if they
# are present.
if src_mask_values is not None:
src_mask_values = np.array(src_mask_values, dtype=np.int32)
if dst_mask_values is not None:
dst_mask_values = np.array(dst_mask_values, dtype=np.int32)
# Write weights to file if requested.
if filename is not None:
if constants._ESMF_COMM == constants._ESMF_COMM_MPIUNI:
msg = "Regrid(filename) requires PIO and does not work if ESMF has " \
"not been built with MPI support"
raise ImportError(msg)
self._routehandle = ESMP_FieldRegridStoreFile(
srcfield,
dstfield,
filename,
srcMaskValues=src_mask_values,
dstMaskValues=dst_mask_values,
regridmethod=regrid_method,
polemethod=pole_method,
regridPoleNPnts=regrid_pole_npoints,
lineType=line_type,
normType=norm_type,
unmappedaction=unmapped_action,
ignoreDegenerate=ignore_degenerate,
createRH=create_rh,
srcFracField=src_frac_field,
dstFracField=dst_frac_field
)
else:
# Initialize the factor array pointers if we are returning factors.
if factors:
fl = ct.POINTER(ct.c_double)()
fil = ct.POINTER(ct.c_int)()
num_factors = ct.c_int(0) # This is an int*
else:
fl = None
fil = None
num_factors = None
self._routehandle = ESMP_FieldRegridStore(
srcfield,
dstfield,
srcMaskValues=src_mask_values,
dstMaskValues=dst_mask_values,
regridmethod=regrid_method,
polemethod=pole_method,
regridPoleNPnts=regrid_pole_npoints,
lineType=line_type,
normType=norm_type,
extrapMethod=extrap_method,
extrapNumSrcPnts=extrap_num_src_pnts,
extrapDistExponent=extrap_dist_exponent,
unmappedaction=unmapped_action,
ignoreDegenerate=ignore_degenerate,
factorList=fl,
factorIndexList=fil,
numFactors=num_factors,
srcFracField=src_frac_field,
dstFracField=dst_frac_field
)
# If we are returning factors, store them and cast/convert from
# ctypes
if factors:
self._handle_factors_(fil, fl, num_factors)
self._srcfield = srcfield
self._dstfield = dstfield
self._src_mask_values = src_mask_values
self._dst_mask_values = dst_mask_values
self._regrid_method = regrid_method
self._pole_method = pole_method
self._regrid_pole_npoints = regrid_pole_npoints
self._norm_type = norm_type
self._extrap_method = extrap_method
self._extrap_num_src_pnts = extrap_num_src_pnts
self._extrap_dist_exponent = extrap_dist_exponent
self._unmapped_action = unmapped_action
self._ignore_degenerate = ignore_degenerate
self._src_frac_field = src_frac_field
self._dst_frac_field = dst_frac_field
# for arbitrary metadata
self._meta = {}
# regist with atexit
import atexit; atexit.register(self.__del__)
self._finalized = False
def __call__(self, srcfield, dstfield, zero_region=None):
"""
Call a regridding operation from srcfield to dstfield.
*REQUIRED:*
:param Field srcfield: the :class:`~ESMF.api.field.Field` of source data to regrid.
:param Field dstfield: the :class:`~ESMF.api.field.Field` to hold the regridded data.
*OPTIONAL:*
:param Region zero_region: specify which region of the field indices
will be zeroed out before adding the values resulting from the
interpolation. If ``None``, defaults to
:attr:`~ESMF.api.constants.Region.TOTAL`.
:return: dstfield
"""
# call into the ctypes layer
ESMP_FieldRegrid(srcfield, dstfield,
self._routehandle, zeroregion=zero_region)
return dstfield
def __del__(self):
self.destroy()
def __repr__(self):
string = ("Regrid:\n"
" routehandle = %r\n"
" src_mask_values = %r\n"
" dst_mask_values = %r\n"
" regrid_method = %r\n"
" unmapped_action = %r\n"
" src_frac_field = %r\n"
" dst_frac_field = %r\n"
" srcfield = %r\n"
" dstfield = %r\n"
%
(self._routehandle,
self.src_mask_values,
self.dst_mask_values,
self.regrid_method,
self.unmapped_action,
self.src_frac_field,
self.dst_frac_field,
self.srcfield,
self.dstfield))
return string
@property
def dstfield(self):
return self._dstfield
@property
def dst_frac_field(self):
return self._dst_frac_field
@property
def dst_mask_values(self):
return self._dst_mask_values
@property
def extrap_method(self):
return self._extrap_method
@property
def extrap_num_src_pnts(self):
return self._extrap_num_src_pnts
@property
def extrap_dist_exponent(self):
return self._extrap_dist_exponent
@property
def finalized(self):
"""
:rtype: bool
:return: Indicate if the underlying ESMF memory for this object has
been deallocated.
"""
return self._finalized
@property
def ignore_degenerate(self):
return self._ignore_degenerate
@property
def meta(self):
return self._meta
@property
def norm_type(self):
return self._norm_type
@property
def pole_method(self):
return self._pole_method
@property
def regrid_method(self):
return self._regrid_method
@property
def regrid_pole_npoints(self):
return self._regrid_pole_npoints
@property
def routehandle(self):
return self._routehandle
@property
def srcfield(self):
return self._srcfield
@property
def src_frac_field(self):
return self._src_frac_field
@property
def src_mask_values(self):
return self._src_mask_values
@property
def struct(self):
"""
:rtype: pointer
:return: A pointer to the underlying ESMF allocation for this
:class:`~ESMF.api.regrid.Regrid`.
"""
return self.struct
@property
def unmapped_action(self):
return self._unmapped_action
def copy(self):
"""
Copy a :class:`~ESMF.api.regrid.Regrid` in an ESMF-safe manner.
:return: A :class:`~ESMF.api.regrid.Regrid` shallow copy.
"""
# shallow copy
ret = copy(self)
# don't call ESMF destructor twice on the same shallow Python object
ret._finalized = True
return ret
def destroy(self):
"""
Release the memory associated with a :class:`~ESMF.api.regrid.Regrid`.
"""
# This detects if the object has made it through initialization
# before the destroy method has been called
if hasattr(self, '_finalized'):
if not self._finalized:
ESMP_FieldRegridRelease(self.routehandle)
# Also destroy factor allocations in Fortran
if self._ptr_fl is not None:
numfac = ct.c_int(self._num_factors)
self._factor_list = None
self._factor_index_list = None
self._num_factors = None
ESMP_FieldRegridReleaseFactors(self._ptr_fl,
self._ptr_fil,
numfac)
self._ptr_fl = None
self._ptr_fil = None
self._finalized = True
def get_factors(self, deep_copy=False):
"""
Return factor and factor index arrays. These arrays will only be
available if the ``Regrid`` object was initialized with ``factors=True``.
See the `ESMF documentation <http://www.earthsystemmodeling.org/esmf_releases/last_built/ESMF_refdoc/node5.html#SECTION050366000000000000000>`_
for additional information on these arrays (see below for indexing in
Python though).
>>> factors, factors_index = get_factors(...)
The first tuple element ``factors`` will have shape ``(m,)`` where
``m`` is the number of factors or weights. It will be ``dtype(float64)``.
        The second tuple element ``factors_index`` will have shape ``(m, 2)``
where ``m`` is the number of factors or weights. The source/col indices
are selected by ``factors_index[:, 0]``. The destination/row indices
are selected by ``factors_index[:, 1]``. It will be ``dtype(int32)``.
.. note:: If ``deep_copy=True``, array memory is C contiguous according
to NumPy array flags (``<array>.flags``).
.. warning:: Remember to call :meth:`~ESMF.api.regrid.destroy` to deallocate
memory associated with a regrid operation. This will be called by
the Python garbage collector. However, if numerous regridding operations
are called in a tight loop, a memory leak will occur without a call
to ``destroy``.
:param bool deep_copy: If ``True``, make deep copies of the returned
arrays. If ``False`` (the default), the returned arrays will reference
the underlying ``ESMF`` memory.
:return: tuple of NumPy array objects
"""
factor_list = self._factor_list
factor_index_list = self._factor_index_list
if deep_copy:
factor_list = factor_list.copy()
factor_index_list = factor_index_list.copy()
return factor_list, factor_index_list
def get_weights_dict(self, deep_copy=False):
"""
Return a dictionary mapping that is more user-friendly for weight/factor
retrieval. Please read the documentation for :meth:`~ESMF.api.regrid.get_factors`
before using this function.
=========== =======================
Key Value
=========== =======================
``weights`` Weight value array
``row_dst`` Destination/row indices
``col_src`` Source/col indices
=========== =======================
.. note:: If ``deep_copy=True``, array memory is C contiguous according
to NumPy array flags (``<array>.flags``).
:param bool deep_copy: If ``True``, make deep copies of the returned
arrays. If ``False`` (the default), the returned arrays will reference
the underlying ``ESMF`` memory.
:return: dict
"""
fl, fil = self.get_factors()
col = fil[:, 0].flatten() # Source indices
row = fil[:, 1].flatten() # Destination indices
if deep_copy:
row = row.copy()
col = col.copy()
fl = fl.copy()
ret = {'row_dst': row, 'col_src': col, 'weights': fl}
return ret
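        # Hedged illustration (the scipy usage and the 1-based index handling below are
        # assumptions, not ESMPy API): the returned mapping can be assembled into a
        # sparse weight matrix, e.g.
        #   from scipy.sparse import coo_matrix
        #   w = regrid.get_weights_dict(deep_copy=True)
        #   weights = coo_matrix((w['weights'], (w['row_dst'] - 1, w['col_src'] - 1)))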
def _handle_factors_(self, fil, fl, num_factors):
"""Handle factor array creation and referencing."""
self._num_factors = num_factors.value # Hold integer factor count
# Only create arrays if we have any factors. There are no factors when
# grids don't overlap and we are ignoring unmapped.
if self._num_factors > 0:
# Pointer to factor list memory. We need to hold on to this for
# deallocation.
self._ptr_fl = fl
# Cast the pointer to the appropriate size.
cptr_fl = ct.cast(fl, ct.POINTER(ct.c_double * self._num_factors))
self._factor_list = np.frombuffer(cptr_fl.contents,
count=self._num_factors,
dtype=np.float64)
# The factor index list is (m, 2) hence the multiplication
# of the factor count by 2.
self._ptr_fil = fil # Hold onto the pointer for deallocation
cptr_fil = ct.cast(fil,
ct.POINTER(ct.c_int * self._num_factors * 2))
self._factor_index_list = np.frombuffer(cptr_fil.contents,
count=self._num_factors * 2,
dtype=np.int32)
self._factor_index_list = self._factor_index_list.reshape(
self._num_factors, 2)
else:
self._factor_list = np.zeros((0,), dtype=np.float64)
self._factor_index_list = np.zeros((0, 2), dtype=np.int32)
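# Hedged usage sketch for Regrid (illustrative only; srcfield and dstfield are assumed
# to have been built on existing Grids/Meshes elsewhere):
#   regrid = Regrid(srcfield, dstfield,
#                   regrid_method=RegridMethod.BILINEAR,
#                   unmapped_action=UnmappedAction.IGNORE)
#   dstfield = regrid(srcfield, dstfield)
#   regrid.destroy()  # release the ESMF route handle when finished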
class RegridFromFile(object):
"""
The :class:`~ESMF.api.regrid.RegridFromFile` object represents a regridding
operator between two :class:`Fields <ESMF.api.field.Field>` that is read
    from a file. The creation of this object is analogous to ESMF_FieldSMMStore(),
    and calling this object corresponds to ESMF_FieldRegrid(). ESMF_FieldRegridRelease()
    is called when the :class:`~ESMF.api.regrid.RegridFromFile` object goes
    out of scope (this only happens when the :class:`~ESMF.api.esmpymanager.Manager`
    goes out of scope; there is also a destroy() call for explicit deallocation of
    the :class:`~ESMF.api.regrid.RegridFromFile`).
For more information about the ESMF Regridding functionality, please see
the `ESMF Regrid documentation
<http://www.earthsystemmodeling.org/esmf_releases/public/ESMF_7_1_0r/ESMF_refdoc/node5.html#SECTION05012000000000000000>`_.
The following arguments are used to create a handle to a regridding
operation between two :class:`Fields <ESMF.api.field.Field>`.
*REQUIRED:*
:param Field srcfield: source :class:`~ESMF.api.field.Field` associated
with an underlying :class:`~ESMF.api.grid.Grid`, :class:`~ESMF.api.mesh.Mesh`
or :class:`~ESMF.api.locstream.LocStream`.
:param Field dstfield: destination :class:`~ESMF.api.field.Field` associated
with an underlying :class:`~ESMF.api.grid.Grid`, :class:`~ESMF.api.mesh.Mesh`
or :class:`~ESMF.api.locstream.LocStream`. The data in this :class:`~ESMF.api.field.Field`
may be overwritten by this call.
:param string filename: the name of the file from which to retrieve the
weights.
"""
@initialize
def __init__(self, srcfield, dstfield, filename):
self._routehandle = ESMP_FieldSMMStore(srcfield, dstfield, filename)
# Holds arbitrary metadata if needed by the client.
self._meta = {}
# Register with "atexit" to attempt and ensure __del__ is called by
# the Python garbage collector.
import atexit; atexit.register(self.__del__)
self._finalized = False
def __call__(self, srcfield, dstfield, zero_region=None):
"""
Call a regridding operation from srcfield to dstfield.
*REQUIRED:*
:param Field srcfield: the :class:`~ESMF.api.field.Field` of source data to regrid.
:param Field dstfield: the :class:`~ESMF.api.field.Field` to hold the regridded data.
*OPTIONAL:*
:param Region zero_region: specify which region of the field indices
will be zeroed out before adding the values resulting from the
interpolation. If ``None``, defaults to
:attr:`~ESMF.api.constants.Region.TOTAL`.
:return: dstfield
"""
# call into the ctypes layer
ESMP_FieldRegrid(srcfield, dstfield,
self._routehandle, zeroregion=zero_region)
return dstfield
def __del__(self):
self.destroy()
def __repr__(self):
string = "RegridFromFile:\n routehandle = {}\n".format(self._routehandle)
return string
@property
def dstfield(self):
return self._dstfield
@property
def finalized(self):
"""
:rtype: bool
:return: Indicate if the underlying ESMF memory for this object has
been deallocated.
"""
return self._finalized
@property
def meta(self):
"""
        :rtype: dict
        :return: A dictionary of arbitrary client metadata associated with this object.
"""
return self._meta
@property
def routehandle(self):
return self._routehandle
@property
def struct(self):
"""
:rtype: pointer
:return: A pointer to the underlying ESMF allocation for this
:class:`~ESMF.api.regrid.Regrid`.
"""
return self.struct
def copy(self):
"""
Copy a :class:`~ESMF.api.regrid.Regrid` in an ESMF-safe manner.
:return: A :class:`~ESMF.api.regrid.Regrid` shallow copy.
"""
# shallow copy
ret = copy(self)
# don't call ESMF destructor twice on the same shallow Python object
ret._finalized = True
return ret
def destroy(self):
"""
Release the memory associated with the :class:`~ESMF.api.regrid.RegridFromFile`
object.
"""
if hasattr(self, '_finalized'):
if not self._finalized:
ESMP_FieldRegridRelease(self.routehandle)
self._finalized = True
```
#### File: ESMF/test/base_test.py
```python
import unittest
from ESMF.test.base import TestBase
import numpy as np
from ESMF import Manager
class Test(TestBase):
def setup(self):
mg = Manager()
# mg.barrier()
def test_assertNumpyAll_bad_mask(self):
arr = np.ma.array([1,2,3],mask=[True,False,True])
arr2 = np.ma.array([1,2,3],mask=[False,True,False])
self.assertRaises(AssertionError, lambda: self.assertNumpyAll(arr,arr2))
def test_assertNumpyAll_type_differs(self):
arr = np.ma.array([1,2,3],mask=[True,False,True])
arr2 = np.array([1,2,3])
self.assertRaises(AssertionError, lambda: self.assertNumpyAll(arr,arr2))
def tearDown(self):
mg = Manager()
# mg.barrier()
if __name__ == "__main__":
unittest.main()
```
#### File: test/regrid_from_file/run_regrid_from_file_dryrun.py
```python
import sys
import os
from ESMF.test.regrid_from_file.regrid_from_file_consts import DATA_SUBDIR, DATA_URL_ROOT
from ESMF.util.cache_data import cache_data_file
from ESMF.test.regrid_from_file.read_test_cases_from_control_file import read_control_file
def cache_data_files_for_test_cases(test_cases):
# Create data subdirectory if it doesn't exist.
if not os.path.exists(DATA_SUBDIR):
os.mkdir(DATA_SUBDIR)
# For each test case line from the control file parse the line and call
# the test subroutine.
status_ok = True
for test_case in test_cases:
(src_fname, dst_fname, regrid_method, options, mean_err, max_err,
max_area_err) = test_case
src_fname_full = os.path.join(DATA_SUBDIR, src_fname)
dst_fname_full = os.path.join(DATA_SUBDIR, dst_fname)
# run the data file retrieval and regridding through try/except
correct = False
status_ok = cache_data_file(src_fname_full) and cache_data_file(dst_fname_full, DATA_URL_ROOT)
if not status_ok:
break
return status_ok
# Main program: Retrieve data files from a remote server if they do not exist
# locally for each test read from a control file.
def main():
# Read the test case parameters from the control file.
test_cases = read_control_file()
# Retrieve the data files needed for the test cases from the remote server.
status_ok = cache_data_files_for_test_cases(test_cases)
if status_ok:
print ('RESULT: PASS - regrid_from_file_dryrun ok\n\n')
else:
print ('RESULT: FAIL - regrid_from_file_dryrun error\n\n')
if __name__ == '__main__':
sys.exit(main())
```
#### File: test/test_api/test_array.py
```python
from ESMF import *
from ESMF.interface.cbindings import *
from ESMF.test.base import TestBase
import numpy as np
# TODO: test view casting
# TODO: demonstrate Fortran reordering in reshape call
class TestMaskedArray(TestBase):
class ctypesgrid(object):
def __init__(self, maxindex):
'''
:param self: testgrid object
:param maxindex: maxindex of the grid
:type maxindex: np array with dtype = int32
:return:
'''
self.struct = ESMP_GridStruct()
self.maxindex = maxindex
self.rank = len(maxindex)
def get_maskedarray_info(self, dim=0):
typekind = TypeKind.R8
grid = Grid(np.array([100, 100]), coord_sys=CoordSys.CART,
coord_typekind=typekind, staggerloc=[StaggerLoc.CENTER])
grid_row = grid.get_coords(0, staggerloc=StaggerLoc.CENTER)
grid_col = grid.get_coords(1, staggerloc=StaggerLoc.CENTER)
local_size = np.array(grid.upper_bounds[0]) - np.array(grid.lower_bounds[0])
row = np.random.rand(local_size[0], local_size[1])
col = np.random.rand(local_size[0], local_size[1])
grid_row[:] = row
grid_col[:] = col
data = ESMP_GridGetCoordPtr(grid, dim)
lbounds, ubounds = ESMP_GridGetCoordBounds(grid)
mask = [False]*np.prod(ubounds[:]-lbounds[:])
return data, mask, typekind, lbounds, ubounds, grid
def get_array(self):
data, mask, tk, lb, ub, grid = self.get_maskedarray_info()
esmpyarray = MaskedArray(data, mask, tk, ub-lb)
return esmpyarray
def make_maskedarray(self, array, type=TypeKind.R8):
'''
:param self: TestMaskedArray class type
:param array: maxindices of a 2- or 3d array
:type array: np.array of dtype=np.int32
:param type: the type of the esmf buffer
:type type: ESMF.TypeKind
'''
# create manager because we are doing some lower level stuff here without automatic initialization
Manager()
# create a ctypes grid object to hold pointer and other info for ctypes layer
if array.dtype is not np.int32:
array = np.array(array, dtype=np.int32)
esmfalloc = self.ctypesgrid(array)
# create an esmf data allocation to test numpy array with
esmfalloc.struct = ESMP_GridCreateNoPeriDim(esmfalloc.maxindex,
coordSys=CoordSys.CART,
coordTypeKind=type)
ESMP_GridAddCoord(esmfalloc)
dataptr = ESMP_GridGetCoordPtr(esmfalloc, 0)
lb, ub = ESMP_GridGetCoordBounds(esmfalloc)
return dataptr, lb, ub
def test_del(self):
self.esmpyarray = self.get_array()
del(self.esmpyarray)
assert(not hasattr(self, 'esmpyarray'))
def test_copy(self):
esmpyarray = self.get_array()
esmpyarray2 = esmpyarray
self.assertNumpyAll(esmpyarray, esmpyarray2)
assert(np.may_share_memory(esmpyarray, esmpyarray2))
def test_reshape(self):
data, mask, tk, lb, ub, grid = self.get_maskedarray_info()
esmpyarray = MaskedArray(data, mask, tk, ub-lb)
# test reshape
self.assertNumpyAll(np.array(esmpyarray.shape, dtype=np.int32),
np.array(ub - lb, dtype=np.int32))
def test_slice(self):
data, mask, tk, lb, ub, grid = self.get_maskedarray_info()
esmpyarray = MaskedArray(data, mask, tk, ub-lb)
# slice
esmpyarrayslice = esmpyarray[:, 0]
# test slice
assert (esmpyarrayslice.shape == (ub[0] - lb[0]))
def test_slice2(self):
dataptr, lb, ub = self.make_maskedarray(np.array([10, 10], dtype=np.int32))
array0 = MaskedArray(dataptr, None, TypeKind.R8, ub-lb)
local_size = np.array(ub) - np.array(lb)
vals = np.random.rand(local_size[0], local_size[1])
array0.data[:] = vals
self.assertNumpyAll(np.array(array0.shape, dtype=np.int32), local_size)
self.assertNumpyAll(array0[1,1], vals[1,1])
res = array0 * 2
self.assertTrue(np.all(vals * 2 == res))
def test_mul(self):
esmpyarray1 = self.get_array()
esmpyarray2 = self.get_array()
esmpyarray3 = self.get_array()
self.assertNumpyAll(esmpyarray1*5, 5*esmpyarray1)
self.assertNumpyAll(esmpyarray1*esmpyarray2, esmpyarray2*esmpyarray1)
# todo: associate and distributive properties have floating point errors?
self.assertNumpyAllClose((esmpyarray1*(esmpyarray2*esmpyarray3)),
((esmpyarray1*esmpyarray2) * esmpyarray3))
self.assertNumpyAllClose((esmpyarray1*(esmpyarray2+esmpyarray3)),
((esmpyarray1*esmpyarray2)+
(esmpyarray1*esmpyarray3)))
def test_stress(self):
for _ in range(100):
grid = Grid(np.array([100, 100]), coord_sys=CoordSys.CART,
coord_typekind=TypeKind.R8, staggerloc=[StaggerLoc.CENTER])
# get the coordinate pointers and set the coordinates
grid_row = grid.get_coords(0, staggerloc=StaggerLoc.CENTER)
grid_col = grid.get_coords(1, staggerloc=StaggerLoc.CENTER)
local_size = np.array(grid.upper_bounds[0]) - np.array(grid.lower_bounds[0])
row = np.random.rand(local_size[0], local_size[1])
col = np.random.rand(local_size[0], local_size[1])
grid_row[:] = row
grid_col[:] = col
data0 = ESMP_GridGetCoordPtr(grid, 0)
data1 = ESMP_GridGetCoordPtr(grid, 1)
lb, ub = ESMP_GridGetCoordBounds(grid)
mask = [False] * np.prod(ub[:]-lb[:])
esmpy_row = MaskedArray(data0, mask, TypeKind.R8, ub - lb)
esmpy_col = MaskedArray(data1, mask, TypeKind.R8, ub - lb)
self.assertNumpyAll(row, np.array(esmpy_row))
self.assertNumpyAll(col, np.array(esmpy_col))
def test_doublebuffer(self):
typekind = TypeKind.R8
dataptr, lb, ub = self.make_maskedarray(np.array([100,100], dtype=np.int32))
# create ESMPy Arrays using ESMF data allocations
array0 = MaskedArray(dataptr, None, typekind, ub-lb)
local_size = np.array(ub)-np.array(lb)
vals = np.random.rand(local_size[0], local_size[1])
array0.data[:] = vals
array1 = MaskedArray(dataptr, None, typekind, ub-lb)
array2 = MaskedArray(dataptr, None, typekind, ub-lb)
# assert that these numpy mangled esmf allocations are transposed
self.assertNumpyAll(array0, array1)
self.assertNumpyAll(array0, array2)
self.assertNumpyAll(array1, array2)
def test_new(self):
data0, mask, tk, lb1, ub1, grid = self.get_maskedarray_info()
esmpyarray0 = MaskedArray(data0, mask, tk, ub1 - lb1)
data1 = ESMP_GridGetCoordPtr(grid, 1)
lb1, ub1 = ESMP_GridGetCoordBounds(grid)
esmpyarray1 = MaskedArray(data1, mask, tk, ub1 - lb1)
self.assertNumpyAllClose(np.array(esmpyarray0),
np.array(grid.coords[StaggerLoc.CENTER][0]))
self.assertNumpyAllClose(np.array(esmpyarray1),
np.array(grid.coords[StaggerLoc.CENTER][1]))
def test_ownership(self):
data0, mask, tk, lb0, ub0, grid = self.get_maskedarray_info(dim=0)
esmpyarray0 = MaskedArray(data0, mask, tk, ub0 - lb0)
# don't call get_array_info again or it will reset the grid!!
data1 = ESMP_GridGetCoordPtr(grid, 1)
lb1, ub1 = ESMP_GridGetCoordBounds(grid)
esmpyarray1 = MaskedArray(data1, mask, tk, ub1 - lb1)
coords0 = grid.get_coords(0, staggerloc=StaggerLoc.CENTER)
coords1 = grid.get_coords(1, staggerloc=StaggerLoc.CENTER)
# test that the data has the same values and sizes for each grid dimension
self.assertNumpyAll(np.array(esmpyarray0), np.array(coords0))
self.assertNumpyAll(np.array(esmpyarray1), np.array(coords1))
# test ownership
assert (np.may_share_memory(esmpyarray0, coords0))
assert (np.may_share_memory(esmpyarray1, coords1))
```
#### File: ESMF/test/test_cbindings.py
```python
from ESMF import *
from ESMF.interface.cbindings import *
from ESMF.test.base import TestBase, attr
import numpy as np
class TestCbindings(TestBase):
def test_log(self):
Manager()
flush = True
ESMP_LogSet(flush)
def test_vm(self):
# inquire for rank and proc from ESMF Virtual Machine
localpet = local_pet()
petcount = pet_count()
print ('\nlocal_pet = {0}\n'.format(localpet))
print ('\npet_count = {0}\n'.format(petcount))
def test_interfaceint(self):
Narray = np.array([4,5,6], dtype=np.int32)
interfaceint = ESMP_InterfaceInt(Narray)
@expected_failure
def test_interfaceint2(self):
# This test should fail
try:
a = (ct.c_int*3)()
a = [1,2,3]
interfaceint2 = ESMP_InterfaceInt(a)
except:
raise TypeError('FAIL: tuples cannot be used in place of numpy.array')
@expected_failure
def test_interfaceint3(self):
# This test should fail
try:
interfaceint2 = ESMP_InterfaceInt(np.array([1,2,3]))
except:
raise TypeError('FAIL: tuples cannot be used in place of numpy.array')
def test_version_compare(self):
assert(version_compare("ESMF_5_3_0_ESMP_02","ESMF_5_3_0_ESMP_01") == 1)
assert (version_compare("ESMF_5_3_0_ESMP_01",
"ESMF_5_3_1_beta_snapshot_02_ESMP_01") == -1)
assert (version_compare("ESMF_5_3_0_ESMP_01",
"ESMF_5_3_0_beta_snapshot_42_ESMP_01") == 1)
assert (version_compare("ESMF_5_3_0_ESMP_01",
"ESMF_5_3_0_beta_snapshot_37_ESMP_02") == 1)
assert (version_compare("ESMF_5_3_0_ESMP_01",
"ESMF_5_3_1_beta_snapshot_02_ESMP_01") == -1)
assert (version_compare("ESMF_5_3_0_ESMP_01",
"ESMF_6_1_0_beta_snapshot_00_ESMP_01") == -1)
assert (version_compare("ESMF_6_1_0_beta_snapshot_00_ESMP_01",
"ESMF_5_3_1_beta_snapshot_02_ESMP_01") == 1)
assert (version_compare("ESMF_6_1_0_beta_snapshot_00_ESMP_01",
"ESMF_6_1_0_beta_snapshot_00_ESMP_01") == 0)
assert (version_compare("ESMPy_620b10_04",
"ESMF_6_1_0_beta_snapshot_00_ESMP_01") == 1)
```
#### File: ESMF/util/cache_data.py
```python
import os
DATA_URL_ROOT = 'http://www.earthsystemmodeling.org/download/data'
# If fname doesn't exist, retrieve it from the remote server via http.
def cache_data_file(fname, DATA_URL_ROOT=DATA_URL_ROOT):
import sys
    if sys.version_info[0] >= 3:
        from urllib.request import urlopen
        from urllib.error import URLError
    else:
        from urllib2 import urlopen, URLError
from shutil import copyfileobj
status_ok = True
if not os.path.exists(fname):
url = os.path.join(DATA_URL_ROOT, os.path.basename(fname))
print('Retrieving ' + url + '...\n')
try:
req = urlopen(url)
except URLError:
print('Error opening %s' % url)
status_ok = False
else:
try:
with open(fname, 'wb') as fp:
copyfileobj(req, fp)
except:
status_ok = False
return status_ok
def cache_data_files():
# Filenames to download.
datafilelist = ["aggregAtlanticESTOFS.nc",
"GRIDSPEC_ACCESS1.nc",
"ll1deg_grid.nc",
"ll2.5deg_grid.nc",
"mpas_uniform_10242_dual_counterclockwise.nc",
"so_Omon_GISS-E2.nc",
"T42_grid.nc",
]
# Create data subdirectory if it doesn't exist.
datadir = os.path.join('examples', 'data')
if not os.path.exists(datadir):
os.mkdir(datadir)
# Download each test file.
for fname in datafilelist:
# Retrieve the data files needed for the test cases from the remote server.
status_ok = cache_data_file(os.path.join(datadir, fname))
if not status_ok:
raise IOError("Error downloading '{}'".format(fname))
```
#### File: ESMF/util/field_utilities.py
```python
import numpy as np
import ESMF
import ESMF.util.helpers as helpers
import ESMF.api.constants as constants
def compare_fields(field1, field2, itrp_mean_tol, itrp_max_tol, csrv_tol,
dstfracfield=None, mass1=None, mass2=None,
regrid_method=ESMF.RegridMethod.CONSERVE,
uninitval=422397696., mask_values=[0]):
"""
Compare the values of two fields to verify the accuracy of a Regrid. The
Fields should be the same size and have rank = 2 or 3.
:param field1: The Field that received the interpolation values.
:param field2: The Field holding the values of the exact solution.
:param itrp_mean_tol: The mean relative error tolerance.
:param itrp_max_tol: The maximum relative error tolerance.
    :param csrv_tol: The conservation relative error tolerance.
:param dstfracfield:
:param mass1: The mass of Field 1.
:param mass2: The mass of Field 2.
:param regrid_method: The regrid method that was used.
:param uninitval: The uninitialized value for Field1.
:param mask_values: Any masked values to skip when comparing the Fields.
:return:
"""
import numpy.ma as ma
parallel = False
if ESMF.pet_count() > 1:
parallel = True
correct = False
# verify that the fields are the same size
assert field1.data.shape == field2.data.shape, 'compare_fields: Fields must be the same size!'
# deal with default values for fracfield
if dstfracfield is None:
dstfracfield = ma.ones(field1.data.shape)
# compute pointwise error measures
totalErr = 0.0
max_error = 0.0
min_error = 1000000.0
num_nodes = 0
# allow fields of all dimensions
field1_flat = np.ravel(field1.data)
field2_flat = np.ravel(field2.data)
dstfracfield_flat = np.ravel(dstfracfield.data)
# setup mask, no Mask on a Mesh (yet) so need to look at the type first
if ((type(field2.grid) is ESMF.Grid) and
(field2.grid.mask[field2.staggerloc] is not None)):
if (field2.grid.mask[field2.staggerloc] is not None):
field2mask_flat = [True if x in mask_values else False for x in field2.grid.mask[field2.staggerloc].flatten().tolist()]
else:
field2mask_flat = np.ravel(np.zeros_like(field2.data))
for i in range(field2_flat.size):
if ((not field2mask_flat[i]) and
(field1_flat[i] != uninitval) and
(dstfracfield_flat[i] >= 0.999)):
if (field2_flat.data[i] != 0.0):
err = abs(field1_flat[i]/dstfracfield_flat[i] - \
field2_flat[i])/abs(field2_flat[i])
else:
err = abs(field1_flat[i]/dstfracfield_flat[i] - \
field2_flat[i])
num_nodes += 1
totalErr += err
if (err > max_error):
max_error = err
if (err < min_error):
min_error = err
# gather error on processor 0 or set global variables in serial case
mass1_global = 0.
mass2_global = 0.
csrv_error_global = 0
if parallel:
total_error_global = helpers.reduce_val(totalErr)
num_nodes_global = helpers.reduce_val(num_nodes)
max_error_global = helpers.reduce_val(max_error, op=constants.Reduce.MAX)
min_error_global = helpers.reduce_val(min_error, op=constants.Reduce.MIN)
if (mass1 is not None) and (mass2 is not None):
mass1_global = helpers.reduce_val(mass1)
mass2_global = helpers.reduce_val(mass2)
else:
total_error_global = totalErr
num_nodes_global = num_nodes
max_error_global = max_error
min_error_global = min_error
if (mass1 is not None) and (mass2 is not None):
mass1_global = mass1
mass2_global = mass2
# compute relative error measures and compare against tolerance values
itrp_mean = False
itrp_max = False
csrv = False
if ESMF.local_pet() == 0:
if mass1_global == 0.:
csrv_error_global = abs(mass2_global - mass1_global)
else:
csrv_error_global = abs(mass2_global - mass1_global)/abs(mass1_global)
# compute mean relative error
if num_nodes_global != 0:
total_error_global = total_error_global/num_nodes_global
# determine if interpolation and conservation are up to spec
if (total_error_global < itrp_mean_tol):
itrp_mean = True
if (max_error_global < itrp_max_tol):
itrp_max = True
if (csrv_error_global < csrv_tol):
csrv = True
# print out diagnostic information
print ("\n Mean relative error = "+str(total_error_global))
print (" Max relative error = "+str(max_error_global))
print (" Conservation error = "+str(csrv_error_global))
#print (" Min error = "+str(min_error_global))
#print (" srcmass = "+str(mass1_global))
#print (" dstmass = "+str(mass2_global))
# broadcast in parallel case
if parallel:
itrp_mean = helpers.broadcast_val(itrp_mean)
itrp_max = helpers.broadcast_val(itrp_max)
csrv = helpers.broadcast_val(csrv)
total_error_global = helpers.broadcast_val(total_error_global)
csrv_error_global = helpers.broadcast_val(csrv_error_global)
# print pass or fail
if (itrp_mean and itrp_max and csrv):
print ("PET{0} - PASS".format(ESMF.local_pet()))
correct = True
else:
print ("PET{0} - FAIL".format(ESMF.local_pet()))
return total_error_global, csrv_error_global, correct
```
#### File: ESMF/util/itester.py
```python
from collections import namedtuple
import itertools
def itr_row(key, sequence):
for element in sequence:
yield ({key: element})
def iter_product_keywords(keywords, as_namedtuple=True):
if as_namedtuple:
yld_tuple = namedtuple('ITesterKeywords', keywords.keys())
iterators = [itr_row(ki, vi) for ki, vi in keywords.items()]
for dictionaries in itertools.product(*iterators):
yld = {}
for dictionary in dictionaries:
yld.update(dictionary)
if as_namedtuple:
yld = yld_tuple(**yld)
yield yld
```
#### File: ESMF/util/locstream_utilities.py
```python
import sys
try:
import numpy as np
except ImportError:
    raise ImportError('The Numpy library cannot be found!')
try:
    import ESMF
except ImportError:
raise ImportError('The ESMF library cannot be found!')
pi = 3.14159
deg_rad = pi
def create_locstream_16(domask=False):
"""
:param domask: a boolean to tell whether or not to add a mask
:return: LocStream
"""
    if ESMF.pet_count() != 1:
raise ValueError("processor count must be 1 to use this function")
locstream = ESMF.LocStream(16)
locstream["ESMF:X"] = [0.0, 1.5, 2.5, 4.0, 0.0, 1.5, 2.5, 4.0, 0.0, 1.5, 2.5, 4.0, 0.0, 1.5, 2.5, 4.0]
locstream["ESMF:Y"] = [0.0, 0.0, 0.0, 0.0, 1.5, 1.5, 1.5, 1.5, 2.5, 2.5, 2.5, 2.5, 4.0, 4.0, 4.0, 4.0]
if domask:
locstream["ESMF:Mask"] = [1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
return locstream
def create_locstream_16_parallel(domask=False):
"""
:param domask: a boolean to tell whether or not to add a mask
:return: LocStream
"""
    if ESMF.pet_count() != 4:
raise ValueError("processor count must be 4 to use this function")
locstream = None
    if ESMF.local_pet() == 0:
locstream = ESMF.LocStream(4)
locstream["ESMF:X"] = [0.0, 1.5, 0.0, 1.5]
locstream["ESMF:Y"] = [0.0, 0.0, 1.5, 1.5]
if domask:
locstream["ESMF:Mask"] = [1, 0, 0, 1]
    elif ESMF.local_pet() == 1:
locstream = ESMF.LocStream(4)
locstream["ESMF:X"] = [2.5, 4.0, 2.5, 4.0]
locstream["ESMF:Y"] = [0.0, 0.0, 1.5, 1.5]
if domask:
locstream["ESMF:Mask"] = [1, 1, 1, 1]
    elif ESMF.local_pet() == 2:
locstream = ESMF.LocStream(4)
locstream["ESMF:X"] = [0.0, 1.5, 0.0, 1.5]
locstream["ESMF:Y"] = [2.5, 2.5, 4.0, 4.0]
if domask:
locstream["ESMF:Mask"] = [1, 1, 1, 1]
    elif ESMF.local_pet() == 3:
locstream = ESMF.LocStream(4)
locstream["ESMF:X"] = [2.5, 4.0, 2.5, 4.0]
locstream["ESMF:Y"] = [2.5, 2.5, 4.0, 4.0]
if domask:
locstream["ESMF:Mask"] = [1, 1, 1, 1]
return locstream
def create_locstream_spherical_16(coord_sys=ESMF.CoordSys.SPH_DEG, domask=False):
"""
:param coord_sys: the coordinate system of the LocStream
:param domask: a boolean to tell whether or not to add a mask
:return: LocStream
"""
    if ESMF.pet_count() != 1:
raise ValueError("processor count must be 1 to use this function")
locstream = ESMF.LocStream(16, coord_sys=coord_sys)
deg_rad = pi
if coord_sys == ESMF.CoordSys.SPH_DEG:
deg_rad = 180
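    # with SPH_DEG the scale factor is 180, so the points below span lon [0, 360] and lat [-90, 90];
    # otherwise the coordinates stay in radians (lon [0, 2*pi], lat [-pi/2, pi/2])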
locstream["ESMF:Lon"] = [0.0, 0.5*deg_rad, 1.5*deg_rad, 2*deg_rad, 0.0, 0.5*deg_rad, 1.5*deg_rad, 2*deg_rad, 0.0, 0.5*deg_rad, 1.5*deg_rad, 2*deg_rad, 0.0, 0.5*deg_rad, 1.5*deg_rad, 2*deg_rad]
locstream["ESMF:Lat"] = [deg_rad/-2.0, deg_rad/-2.0, deg_rad/-2.0, deg_rad/-2.0, -0.25*deg_rad, -0.25*deg_rad, -0.25*deg_rad, -0.25*deg_rad, 0.25*deg_rad, 0.25*deg_rad, 0.25*deg_rad, 0.25*deg_rad, deg_rad/2.0, deg_rad/2.0, deg_rad/2.0, deg_rad/2.0]
if domask:
locstream["ESMF:Mask"] = np.array([1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.int32)
return locstream
def create_locstream_spherical_16_parallel(coord_sys=ESMF.CoordSys.SPH_DEG, domask=False):
"""
:param coord_sys: the coordinate system of the LocStream
:param domask: a boolean to tell whether or not to add a mask
:return: LocStream
"""
    if ESMF.pet_count() != 4:
raise ValueError("processor count must be 4 to use this function")
deg_rad = pi
if coord_sys == ESMF.CoordSys.SPH_DEG:
deg_rad = 180.0
locstream = None
    if ESMF.local_pet() == 0:
locstream = ESMF.LocStream(4, coord_sys=coord_sys)
locstream["ESMF:Lon"] = [0.0, 0.5*deg_rad, 0.0, 0.5*deg_rad]
locstream["ESMF:Lat"] = [deg_rad/-2.0, deg_rad/-2.0, -0.25*deg_rad, -0.25*deg_rad]
if domask:
locstream["ESMF:Mask"] = np.array([1, 0, 1, 1], dtype=np.int32)
    elif ESMF.local_pet() == 1:
locstream = ESMF.LocStream(4, coord_sys=coord_sys)
locstream["ESMF:Lon"] = [1.5*deg_rad, 2*deg_rad, 1.5*deg_rad, 2*deg_rad]
locstream["ESMF:Lat"] = [deg_rad/-2.0, deg_rad/-2.0, -0.25*deg_rad, -0.25*deg_rad]
if domask:
locstream["ESMF:Mask"] = np.array([0, 1, 1, 1], dtype=np.int32)
    elif ESMF.local_pet() == 2:
locstream = ESMF.LocStream(4, coord_sys=coord_sys)
locstream["ESMF:Lon"] = [0.0, 0.5*deg_rad, 0.0, 0.5*deg_rad]
locstream["ESMF:Lat"] = [0.25*deg_rad, 0.25*deg_rad, deg_rad/2.0, deg_rad/2.0]
if domask:
locstream["ESMF:Mask"] = np.array([1, 1, 1, 1], dtype=np.int32)
    elif ESMF.local_pet() == 3:
locstream = ESMF.LocStream(4, coord_sys=coord_sys)
locstream["ESMF:Lon"] = [1.5*deg_rad, 2*deg_rad, 1.5*deg_rad, 2*deg_rad]
locstream["ESMF:Lat"] = [0.25*deg_rad, 0.25*deg_rad, deg_rad/2.0, deg_rad/2.0]
if domask:
locstream["ESMF:Mask"] = np.array([1, 1, 1, 1], dtype=np.int32)
return locstream
```
#### File: Python/MAPL/Abstract.py
```python
class Method (object):
def __init__(self, func):
self._function = func
def __get__(self, obj, type):
return self.AbstractMethodHelper(self._function, type)
    class AbstractMethodHelper (object):
        def __init__(self, func, cls):
            self._function = func
            self._class = cls
        def __call__(self, *args, **kwargs):
            raise NotImplementedError('Abstract method `' + self._class.__name__ \
                                      + '.' + self._function + '\' called')
```
#### File: Python/MAPL/run.py
```python
from job import Job
class Run(Job):
def __init__(self,ConfigFile,Children=[]):
# Initialize Job specific stuff in base class
# -------------------------------------------
Job.__init__(self,ConfigFile)
self.Children = Children
# -------------------
# Per-segment Methods
# -------------------
def execute(self):
"""Executes the Application for one segment."""
self.initialize()
self.run()
self.finalize()
def initialize(self):
self._initialize()
for child in self.Children:
child.initialize()
self.initialize_()
def run(self):
self._run()
for child in self.Children:
child.run()
self.run_()
def finalize(self):
self._finalize()
for child in self.Children:
child.finalize()
self.finalize_()
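    # each phase runs the parent's pre-child hook (_initialize/_run/_finalize), then every child,
    # and finally the parent's post-child hook (initialize_/run_/finalize_)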
# -----------------
# Per-job Methods
# -----------------
def signin(self):
self._signin()
for child in self.Children:
child.signin()
self.signin_()
def signout(self):
self._signout()
for child in self.Children:
child.signout()
self.signout_()
# ---------------------
# No-op Default Methods
# ---------------------
# No-op pre-child methods
# -----------------------
def _initialize(self): pass
def _run(self): pass
def _finalize(self): pass
def _signin(self): pass
def _signout(self): pass
# No-op post-child methods
# ------------------------
def initialize_(self): pass
def run_(self): pass
def finalize_(self): pass
def signin_(self): pass
def signout_(self): pass
```
#### File: Infrastructure/Trace/gen_trace_metadata.py
```python
from jinja2 import Environment
template_esmci_metadata_c = """
// $Id$
/*
* Standard trace metadata used by all ESMF traces.
*
* Earth System Modeling Framework
* Copyright 2002-2019, University Corporation for Atmospheric Research,
* Massachusetts Institute of Technology, Geophysical Fluid Dynamics
* Laboratory, University of Michigan, National Centers for Environmental
* Prediction, Los Alamos National Laboratory, Argonne National Laboratory,
* NASA Goddard Space Flight Center.
* Licensed under the University of Illinois-NCSA License.
*/
#include <string.h>
#include "ESMCI_Trace.h"
namespace ESMCI {
std::string TraceGetMetadataString() {
std::string metadata_string;
metadata_string = ""
{% for ln in lines %}"{{ln}}\\n"
{% endfor %};
return metadata_string;
}
}
"""
#extern "C" {
# {% for f in cfunc_list %}
# {{f.ret}} __wrap_{{f.name}}({{f.params}});
# {% endfor %}
def gen():
lines = [line.rstrip('\n').replace('"', '\\"') for line in open('include/metadata', 'r')]
template = Environment().from_string(template_esmci_metadata_c)
text = template.render(lines=lines)
f = open('src/ESMCI_TraceMetadata.C', 'w+')
f.write(text)
f.close()
    print('Generated src/ESMCI_TraceMetadata.C')
if __name__ == '__main__':
gen()
``` |
{
"source": "joeylane19/Mini-Amazon",
"score": 3
} |
#### File: app/models/product.py
```python
from flask import flash, current_app as app
import datetime
# Creates a product item
class Product:
def __init__(self, id, seller_id, name, details, price, image, category, quantity):
self.id = id
self.seller_id = seller_id
self.name = name
self.details = details
self.price = price
self.image = image
self.type = category
self.quantity = quantity
self.average_rating = ProductReview.get_avg_rating(id)
    # Gets a specific product (by product id) from a specific seller
@staticmethod
def get(id, seller_id):
rows = app.db.execute('''
SELECT *
FROM Product
WHERE id = :id AND seller_id = :seller_id
''',
id=id,
seller_id = seller_id)
return Product(*(rows[0])) if rows is not None else None
# Gets a specific product from any seller
    @staticmethod
    def getNoSell(id):
rows = app.db.execute('''
SELECT *
FROM Product
WHERE id = :id
''',
id=id)
return Product(*(rows[0])) if rows is not None else None
#get all products
@staticmethod
def get_all():
rows = app.db.execute('''
SELECT *
FROM Product
'''
)
return [Product(*row) for row in rows]
#get name of seller with a specific id
@staticmethod
def get_seller_name(seller_id):
rows = app.db.execute('''
SELECT Product.id, seller_id, name, details, price, image, firstname, lastname
FROM (Users JOIN Product ON Users.id = Product.seller_id)
WHERE Users.id = :seller_id
''',
seller_id=seller_id)
return [Product(*row) for row in rows]
#get all distinct product names
@staticmethod
def get_all_distinct():
rows = app.db.execute('''
SELECT DISTINCT name
FROM Product
'''
)
return [row[0] for row in rows]
#return all products sorted by price in ascending order
@staticmethod
def sort_ascending():
rows = app.db.execute('''
SELECT *
FROM Product
ORDER BY Product.price ASC
'''
)
return [Product(*row) for row in rows]
#return all products sorted by price in descending order
@staticmethod
def sort_descending():
rows = app.db.execute('''
SELECT *
FROM Product
ORDER BY Product.price DESC
'''
)
return [Product(*row) for row in rows]
#add a product to the database
@staticmethod
def add(id, sellerId, name, details, price, image, type, quantity):
try:
rows = app.db.execute("""
SELECT name
FROM Product
"""
)
for row in rows:
if name == row[0]:
flash("Product name is already being used")
return None
rows = app.db.execute("""
INSERT INTO Product(id, seller_id, name, details, price, image, type, quantity)
VALUES(:id, :sellerId, :name, :details, :price, :image, :type, :quantity)
RETURNING id
""",
id=id,
sellerId=int(sellerId),
name=name,
details=details,
price=float(price),
image=image,
type=type,
quantity=int(quantity)
)
id = rows[0][0]
            return Product.get(id, sellerId)
except Exception as e:
return None
#add an existing product to the database
@staticmethod
def add_existing(name, seller_id, quantity):
try:
rows = app.db.execute("""
SELECT *
FROM Product
WHERE name = :name
""",
name=name
)
row = rows[0]
print(row)
id = row[0]
details = row[3]
price = row[4]
image = row[5]
type = row[6]
rows = app.db.execute("""
INSERT INTO Product(id, seller_id, name, details, price, image, type, quantity)
VALUES(:id, :seller_id, :name, :details, :price, :image, :type, :quantity)
RETURNING id
""",
id=id,
seller_id=int(seller_id),
name=name,
details=details,
price=float(price),
image=image,
type=type,
quantity=int(quantity)
)
id = rows[0][0]
            return Product.get(id, seller_id)
except Exception as e:
print(e)
flash("You already sell this product")
return None
#get a user's inventory
@staticmethod
def get_inventory(id):
rows = app.db.execute('''
SELECT *
FROM Product
WHERE Product.seller_id = :id
''',
id=id)
return [Product(*row) for row in rows] if rows is not None else None
#search products based on a keyword
@staticmethod
def search_products(word):
rows = app.db.execute('''
SELECT *
FROM Product
WHERE LOWER(Product.name) LIKE LOWER(Concat('%', :word,'%'))
OR LOWER(Product.details) LIKE LOWER(Concat('%', :word,'%'))
OR LOWER(Product.type) LIKE LOWER(Concat('%', :word,'%'))
''',
word=word)
return [Product(*row) for row in rows] if rows is not None else None
#return all products in a certain category
@staticmethod
def search_categories(word):
rows = app.db.execute('''
SELECT *
FROM Product
WHERE Product.type LIKE Concat('%', :word,'%')
''',
word=word)
return [Product(*row) for row in rows] if rows is not None else None
#remove a product
@staticmethod
def remove(seller_id, id):
print(seller_id, id)
try:
rows = app.db.execute('''
DELETE FROM PRODUCT
WHERE seller_id = :seller_id AND id = :id
''',
seller_id=seller_id,
id=id)
except Exception as e:
return None
#update the quantity of a product sold by a specific seller
def update(seller_id, id, quantity):
try:
rows = app.db.execute('''
UPDATE PRODUCT
SET quantity = :quantity
WHERE seller_id = :seller_id AND id = :id
''',
seller_id=seller_id,
id=id,
quantity=quantity)
except Exception as e:
return None
#get a specific product -- used for determining sellers
@staticmethod
def get_sellers(id):
rows = app.db.execute('''
SELECT *
FROM Product
WHERE Product.id = :id
''',
id=id)
return [Product(*row) for row in rows] if rows is not None else None
class Category:
def __init__(self, name):
self.name = name
#get all categories
@staticmethod
def get_all():
rows = app.db.execute('''
SELECT *
FROM Category
'''
)
return [Category(*row) for row in rows] if rows is not None else None
class ProductReview:
def __init__(self, aid, fname, lname, pid, sid, r, date):
self.author_id = aid
self.fname = fname
self.lname = lname
self.product_id = pid
self.seller_id = sid
self.rating = r
self.date = date
#get average rating of product
@staticmethod
def get_avg_rating(id):
rows = app.db.execute('''
SELECT AVG(rating)
FROM ProductReview
WHERE product_id = :id
''',
id=id)
return rows[0][0]
#get all product reviews for a specific product
def get_all(id):
rows = app.db.execute('''
SELECT author_id, Users.firstname, Users.lastname, product_id, seller_id, rating, date_and_time
FROM ProductReview, Users
WHERE product_id = :id and author_id = Users.id
ORDER BY date_and_time DESC
''',
id=id)
return [ProductReview(*row) for row in rows] if rows is not None else None
#get users who authored something
def get_authored(id):
rows = app.db.execute('''
SELECT author_id, Users.firstname, Users.lastname, product_id, seller_id, rating, date_and_time
FROM ProductReview, Users
WHERE author_id = :id AND USERS.id = :id
ORDER BY date_and_time DESC
''',
id=id)
return [ProductReview(*row) for row in rows] if rows is not None else None
#count reviews for a specific product
def num_reviews(product_id):
rows = app.db.execute('''
SELECT Count(*)
FROM ProductReview
WHERE product_id = :product_id
''',
product_id=product_id)
return rows[0][0]
#add a review
def add_review(author_id, product_id, seller_id, rating, date):
try:
rows = app.db.execute('''
INSERT INTO ProductReview
Values (:author_id, :product_id, :seller_id, :rating, :date)
''',
author_id=author_id,
product_id=product_id,
seller_id=seller_id,
rating=rating,
date=date)
except Exception as e:
return None
#edit a review
def edit_review(author_id, product_id, rating, date):
try:
rows = app.db.execute('''
UPDATE PRODUCTREVIEW
SET rating = :rating, date_and_time = :date
WHERE author_id = :author_id AND product_id = :product_id
''',
author_id=author_id,
product_id=product_id,
rating=rating,
date=date)
except Exception as e:
return None
#delete a review
def delete_review(author_id, product_id):
try:
rows = app.db.execute('''
DELETE FROM PRODUCTREVIEW
WHERE author_id = :author_id AND product_id = :product_id
''',
author_id=author_id,
product_id=product_id)
except Exception as e:
return None
class SellerReview:
def __init__(self, author_id, seller_id, rating, date_and_time):
self.author_id = author_id
self.seller_id = seller_id
self.rating = rating
self.date_and_time = date_and_time
#add a seller review
def add_review(author_id, seller_id, rating, date):
try:
rows = app.db.execute('''
INSERT INTO SellerReview
Values (:author_id, :seller_id, :rating, :date_and_time)
''',
author_id=author_id,
seller_id=seller_id,
rating=rating,
date_and_time=date)
except Exception as e:
return None
#get average seller rating
@staticmethod
def get_avg_rating(id):
rows = app.db.execute('''
SELECT AVG(rating)
FROM SellerReview
WHERE seller_id = :id
''',
id=id)
return rows[0][0]
#update a seller review
def update_review(author_id, seller_id, rating, date):
try:
rows = app.db.execute('''
Update Sellerreview
SET rating = :rating, date_and_time = :date
where author_id = :author_id and seller_id = :seller_id;
''',
author_id=author_id,
seller_id=seller_id,
rating=rating,
date=date)
except Exception as e:
return None
#check if a user has reviewed a seller
def has_reviewed(user_id, seller_id):
rows = app.db.execute('''
SELECT COUNT(*)
FROM SellerReview
WHERE author_id = :user_id AND seller_id = :seller_id
''', user_id=user_id,
seller_id=seller_id,
id=id)
return rows[0][0]
#delete a seller review
def delete(author_id, seller_id):
try:
rows = app.db.execute('''
DELETE FROM SELLERREVIEW
WHERE author_id = :author_id AND seller_id = :seller_id
''',
author_id=author_id,
seller_id=seller_id)
except Exception as e:
return None
#get all seller reviews for a seller
def get_seller(seller_id):
rows = app.db.execute('''
SELECT *
FROM SellerReview
WHERE seller_id = :seller_id
ORDER BY date_and_time DESC
''',
seller_id=seller_id)
return [SellerReview(*row) for row in rows]
#get all reviews by a specific user
    def get_authored(author_id):
rows = app.db.execute('''
SELECT *
FROM SellerReview
WHERE author_id = :author_id
ORDER BY date_and_time DESC
''',
            author_id=author_id)
return [SellerReview(*row) for row in rows]
class Product_Seller:
def __init__(self, id, seller_id, name, details, price, image, category, quantity, firstname, lastname):
self.id = id
self.seller_id = seller_id
self.name = name
self.details = details
self.price = price
self.image = image
self.type = category
self.quantity = quantity
self.firstname = firstname
self.lastname = lastname
self.average_rating = ProductReview.get_avg_rating(id)
#get the name of a seller
@staticmethod
def get_seller_name(id):
rows = app.db.execute('''
SELECT Product.id, seller_id, name, details, price, image, type, quantity, firstname, lastname
FROM (Users JOIN Product ON Users.id = Product.seller_id)
WHERE Product.id = :id
''',
id=id)
return [Product_Seller(*row) for row in rows] if rows is not None else None
``` |
{
"source": "joeylee1125/HANAHv2",
"score": 3
} |
#### File: joeylee1125/HANAHv2/CollectDataFromInternet.py
```python
import datetime
import time
import Spider
import DBOperation
import StaticUtils
import VerdictAnalyser
CASE_TABLE = StaticUtils.case_table
def get_total_number(search_criteria):
wenshu = Spider.WenShu()
wenshu.set_search_criteria(search_criteria)
return wenshu.get_total_item_number()
def download_all_caselist(search_criteria, max_page):
cases = dict()
wenshu = Spider.WenShu()
wenshu.set_search_criteria(search_criteria)
for index in range(1, max_page + 1):
tmp_case_list = wenshu.get_case_list(index)
if not cases:
cases = tmp_case_list
else:
for key, value in tmp_case_list.items():
cases[key] += value
print(f"{cases}")
return cases
def download_case(case_id):
w = Spider.WenShu()
return w.get_case(case_id)
def download_case_list_by_upload_date(year, upload_date):
search_criteria = "案件类型:刑事案件,审判程序:一审,法院地域:四川省,裁判年份:" + year + ",文书类型:判决书,上传日期:" + upload_date + " TO " + upload_date
total_number = get_total_number(search_criteria)
if int(total_number) == 0:
return None
max_page = int(total_number) // 20 if int(total_number) % 20 == 0 else (int(total_number) // 20) + 1
cases = download_all_caselist(search_criteria, max_page)
db_sc_cases = DBOperation.MyDatabase('127.0.0.1', 'root', '082666')
length = len(cases['name'])
for i in range(length):
data = dict()
for key in cases:
if key == 'procedure':
data['trial'] = cases[key][i]
else:
data[key] = cases[key][i]
data['download'] = 'no'
data['upload_date'] = upload_date
doc_id = db_sc_cases.get(StaticUtils.case_table, 'doc_id', 'doc_id=\'{}\''.format(data['doc_id']))
if not doc_id:
fields_list = ["name", "doc_id", "date", "case_id", "trial", "court", "download", "upload_date"]
values = ''
            for key in fields_list:
                # quote strings and convert ints to str, building a value list like ('a','b',...)
                if isinstance(data[key], int):
                    piece = str(data[key])
                else:
                    piece = "'" + data[key] + "'"
                values = (values + "," + piece) if values else ("(" + piece)
            values = values + ")"
fields = ','.join(fields_list)
db_sc_cases.insert(CASE_TABLE, fields, values)
db_sc_cases.commit()
db_sc_cases.close()
def download_case_list_by_upload_period(year, start_date, end_date):
search_criteria = "案件类型:刑事案件,审判程序:一审,法院地域:四川省,裁判年份:{},文书类型:判决书,上传日期:{} TO {}".format(year, start_date, end_date)
total_number = get_total_number(search_criteria)
if int(total_number) == 0:
return None
max_page = int(total_number) // 20 if int(total_number) % 20 == 0 else (int(total_number) // 20) + 1
cases = download_all_caselist(search_criteria, max_page)
db_sc_cases = DBOperation.MyDatabase('127.0.0.1', 'root', '082666')
length = len(cases['name'])
for i in range(length):
data = dict()
for key in cases:
if key == 'procedure':
data['trial'] = cases[key][i]
else:
data[key] = cases[key][i]
data['download'] = 'no'
data['upload_date'] = start_date
doc_id = db_sc_cases.get(StaticUtils.case_table, 'doc_id', 'doc_id=\'{}\''.format(data['doc_id']))
if not doc_id:
fields_list = ["name", "doc_id", "date", "case_id", "trial", "court", "download", "upload_date"]
values = ''
            for key in fields_list:
                # quote strings and convert ints to str, building a value list like ('a','b',...)
                if isinstance(data[key], int):
                    piece = str(data[key])
                else:
                    piece = "'" + data[key] + "'"
                values = (values + "," + piece) if values else ("(" + piece)
            values = values + ")"
fields = ','.join(fields_list)
db_sc_cases.insert(CASE_TABLE, fields, values)
# db_sc_cases.insert(StaticUtils.case_table, data)
db_sc_cases.commit()
db_sc_cases.close()
def get_latest_upload_date():
db_sc_cases = DBOperation.MyDatabase('127.0.0.1', 'root', '082666')
result = db_sc_cases.get_max_record(StaticUtils.case_table, 'upload_date')
db_sc_cases.close()
return result[0]
def download_new_testcases():
db_sc_cases = DBOperation.MyDatabase('127.0.0.1', 'root', '082666')
case_list = db_sc_cases.get(StaticUtils.case_table, 'name, doc_id, court, YEAR(DATE)', 'download=\'no\'')
total = len(case_list)
i = 0
for case in case_list:
case_name, case_doc_id, case_court, case_year = case
print(case_name, case_doc_id, case_court, case_year)
try:
case_text = download_case(case_doc_id)
except Exception as e:
print(e)
db_sc_cases.commit()
print("Sleep 2s ...")
            time.sleep(2)
            continue
if case_text:
verdict = VerdictAnalyser.VerdictAnalyser(case_text)
print(f"{i}/{total} case {case_name} is downloaded.")
db_sc_cases.update(StaticUtils.case_table,
'download', '\'yes\'',
f'doc_id=\'{case_doc_id}\'')
db_sc_cases.update(StaticUtils.case_table,
'content', f'\'{verdict.content}\'',
f'doc_id=\'{case_doc_id}\'')
db_sc_cases.commit()
else:
db_sc_cases.update(StaticUtils.case_table, 'download', '\'empty\'', f'doc_id=\'{case_doc_id}\'')
print(f"{i}/{total} case {case_name} is empty.")
i += 1
db_sc_cases.commit()
db_sc_cases.close()
def main():
#download_new_testcases()
#print(yesterday)
#return None
years = ['2016', '2017', '2018']
#start = '2018-07-18'
#end = '2018-07-19'
#yesterday = datetime.date.today() - datetime.timedelta(1)
date_start = get_latest_upload_date()
date_end = datetime.date.today() - datetime.timedelta(1)
#date_start = datetime.datetime.strptime(start, '%Y-%m-%d')
#date_end = datetime.datetime.strptime(end, '%Y-%m-%d')
while date_start < date_end:
print(f"Download case uploaded by "
f"=================={date_start.year}-{date_start.month}-{date_start.day}=====================")
for year in years:
upload_date = f'{date_start.year}' + '-' + f'{date_start.month:02d}' + '-' + f'{date_start.day:02d}'
download_case_list_by_upload_date(year, upload_date)
#download_new_testcases()
date_start += datetime.timedelta(days=1)
if __name__ == "__main__":
main()
```
#### File: joeylee1125/HANAHv2/ReadCaptchaImage.py
```python
from PIL import Image
import os
import pytesseract
from collections import defaultdict
# Get the pixel value that occurs most often in the image (assumed to be the background)
def get_threshold(image):
pixel_dict = defaultdict(int)
    # dictionary mapping each pixel value to its occurrence count
rows, cols = image.size
for i in range(rows):
for j in range(cols):
pixel = image.getpixel((i, j))
pixel_dict[pixel] += 1
    count_max = max(pixel_dict.values()) # the highest occurrence count
    pixel_dict_reverse = {v: k for k, v in pixel_dict.items()}
    threshold = pixel_dict_reverse[count_max] # the pixel value that occurs most often
return threshold
# Binarize the image according to the threshold
# threshold: pixel threshold
def get_bin_table(threshold):
    # build the grayscale-to-binary mapping table
table = []
for i in range(256):
        rate = 0.1 # keep values within a small range around the threshold
if threshold * (1 - rate) <= i <= threshold * (1 + rate):
table.append(1)
else:
table.append(0)
return table
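# Worked example (not from the original source): with threshold = 200 and rate = 0.1,
# grayscale values in [180, 220] map to 1 and everything else maps to 0.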
# Remove noise points from the binarized image
def cut_noise(image):
    rows, cols = image.size # width and height of the image
    change_pos = [] # positions of detected noise points
    # iterate over every pixel, skipping the border
for i in range(1, rows - 1):
for j in range(1, cols - 1):
            # pixel_set records the black pixels in this point's neighborhood
pixel_set = []
            # the neighborhood is the 3x3 grid centered on this point
for m in range(i - 1, i + 2):
for n in range(j - 1, j + 2):
                    if image.getpixel((m, n)) != 1: # 1 is white, 0 is black
pixel_set.append(image.getpixel((m, n)))
            # if the 3x3 neighborhood contains 4 or fewer black pixels, treat the point as noise
if len(pixel_set) <= 4:
change_pos.append((i, j))
    # set the pixels at the noise positions to 1 (white)
for pos in change_pos:
image.putpixel(pos, 1)
    return image # return the cleaned image
def recognition_1(img_file):
return pytesseract.image_to_string(smooth(process_img(Image.open(img_file))), 'eng')
def recognition_2(img_file):
    image = Image.open(img_file) # open the image file
    imgry = image.convert('L') # convert to grayscale
    # the most frequent pixel value is taken to be the image background
max_pixel = get_threshold(imgry)
    # binarize the image
table = get_bin_table(threshold=max_pixel)
out = imgry.point(table, '1')
    # remove noise (isolated points) from the image
out = cut_noise(out)
    # recognize the digits and letters in the image
text = pytesseract.image_to_string(out)
    # strip special characters from the recognition result
exclude_char_list = ' .:\\|\'\"?![],()~@#$%^&*_+-={};<>/¥'
text = ''.join([x for x in text if x not in exclude_char_list])
return text
def process_img(img, threshold=180):
    '''Binarize the image: 255 is white, 0 is black.'''
    # grayscale conversion
img = img.convert('L')
    # binarization
pixels = img.load()
for x in range(img.width):
for y in range(img.height):
pixels[x, y] = 255 if pixels[x, y] > threshold else 0
return img
def smooth(picture):
    '''Smoothing / noise reduction.
    Takes a binarized image and removes small noise-like points.
    '''
pixels = picture.load()
(width, height) = picture.size
xx = [1, 0, -1, 0]
yy = [0, 1, 0, -1]
for i in range(width):
for j in range(height):
if pixels[i, j] != 255:
count = 0
for k in range(4):
try:
if pixels[i + xx[k], j + yy[k]] == 255:
count += 1
                    except IndexError: # ignore out-of-bounds access
pass
if count > 3:
pixels[i, j] = 255
return picture
def recognition(r_func):
    correct_count = 0 # number of correctly recognized images
    total_count = 0 # total number of images
    # recognize the images in the given directory
    # images are stored in the directory below
dir = '.\\images'
print("*" * 20 + f"{r_func.__name__}" + "*" * 20)
    # iterate over the .png and .jpg files in the directory
for file in os.listdir(dir):
if file.endswith('.png') or file.endswith('.jpg'):
image_path = f"{dir}\\{file}"
            answer = file.split('.')[0] # the file name, i.e. the correct text in the image
            result = r_func(image_path) # the text recognized from the image
print(f"{answer}, {result}")
            if result == answer: # if the recognition is correct, increment correct_count
correct_count += 1
total_count += 1
print('Total count: %d, correct: %d.' % (total_count, correct_count))
def main():
for r_func in [recognition_1, recognition_2]:
recognition(r_func)
if __name__ == "__main__":
main()
``` |
{
"source": "Joey-Lee/FlaskDemo",
"score": 3
} |
#### File: Joey-Lee/FlaskDemo/hello.py
```python
from flask import Flask, url_for
from flask import request
from werkzeug.utils import secure_filename
from flask import abort, redirect
app = Flask(__name__)
@app.route('/hello')
def hello_world():
return 'Hello World!'
@app.route('/')
def index():
return 'Index Page'
@app.route('/user/<username>/')
def show_user_profile(username):
'show the user profile for that user'
return 'User is %s' % username
@app.route('/post/<int:post_id>')
def show_post(post_id):
'show the post with the given id, the id is an integer'
return 'Post %d' % post_id
@app.route('/projects/')
def projects():
return 'The project page'
@app.route('/about')
def about():
return abort(401)
return 'The about page'
@app.route('/login', methods=['GET', 'POST'])
def login():
# error = None
if request.method == 'POST':
if valid_login(request.form['username'], request.form['password']):
return log_the_user_in(request.form['username'])
else:
return "{\n result:0, \n detail:'user name is incorrect'\n}"
#return 'do the login'
else:
return 'show the login form'
pass
def valid_login(username, password):
if username == 'Jerry':
return True
else:
return False
def log_the_user_in(username):
return "{id:1, 'when':'moning', username:'%s'}" % username
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
        f = request.files['the_file']
        try:
            # FileStorage.save() returns None, so its return value cannot signal success
            f.save('/users/Jerry/code/FlaskDemo/' + secure_filename(f.filename))
            return 'upload successful'
        except OSError:
            return 'upload fail.'
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0')
```
#### File: Joey-Lee/FlaskDemo/query.py
```python
import leancloud
from leancloud import Object
leancloud.init('<KEY>', master_key='<KEY>')
class Detail(Object):
"""docstring for """
"""
@property
def id(self):
return self.get('id')
"""
@property
def zi(self):
return self.get('zi')
@property
def wubi(self):
return self.get('wubi')
@property
def bushou(self):
return self.get('bushou')
@property
def bihua(self):
return self.get('bihua')
@property
def pinyin(self):
return self.get('pinyin')
@property
def jiben(self):
return self.get('jiben')
@property
def xiangxi(self):
return self.get('xiangxi')
@property
def py(self):
return self.get('py')
@property
def ucode(self):
return self.get('ucode')
"""
@id.setter
def id(self, value):
return self.set('id', value)
"""
@zi.setter
def zi(self, value):
return self.set('zi', value)
@wubi.setter
def wubi(self, value):
return self.set('wubi', value)
@bushou.setter
def bushou(self, value):
return self.set('bushou', value)
@bihua.setter
def bihua(self, value):
return self.set('bihua', value)
@pinyin.setter
def pinyin(self, value):
return self.set('pinyin', value)
@jiben.setter
def jiben(self, value):
return self.set('jiben', value)
@xiangxi.setter
def xiangxi(self, value):
return self.set('xiangxi', value)
@py.setter
def py(self, value):
return self.set('py', value)
@ucode.setter
def ucode(self, value):
return self.set('ucode', value)
from leancloud import Query
query = Query(Detail)
detail = query.get('55cf129100b042cb01b8a4cc')
print(detail.get('xiangxi'))
``` |
{
"source": "joeylmaalouf/frlg-palette-swapper",
"score": 3
} |
#### File: joeylmaalouf/frlg-palette-swapper/main.py
```python
from PIL import Image
import numpy as np
def get_palette (data):
unique, counts = np.unique(data.reshape(-1, data.shape[2]), axis = 0, return_counts = True)
mask = (unique[:, 3] != 0)
return sorted(zip(unique[mask], counts[mask]), key = lambda x: x[1], reverse = True)
def replace_color (data, src, dst):
red, green, blue, alpha = data.T
src_mask = (red == src[0]) & (green == src[1]) & (blue == src[2])
data[..., :-1][src_mask.T] = dst
return
def swap_palettes (path1, path2):
image1 = Image.open(path1).convert('RGBA')
image2 = Image.open(path2).convert('RGBA')
data1 = np.array(image1)
data2 = np.array(image2)
palette1 = get_palette(data1)
palette2 = get_palette(data2)
for color1, color2 in zip(palette1, palette2):
replace_color(data1, color1[0][:3], color2[0][:3])
replace_color(data2, color2[0][:3], color1[0][:3])
image1 = Image.fromarray(data1)
image2 = Image.fromarray(data2)
return image1, image2
if __name__ == '__main__':
image1, image2 = swap_palettes('sprites/6.png', 'sprites/151.png')
image1.show()
image2.show()
# todo:
# - smarter recoloring than just sorted by frequency? maybe group colors into sub-palettes?
# - input via either args or prompt
# - quickplay (users enter two pokemon to see their swap)
# - advanced (users pick any number of palettes and any number of pokemon to see all the combinations)
# - output sprites in sheet; each row is a new sprite, columns are 1x, 2x, 4x
``` |
{
"source": "JoeyLr/Blur_Detection",
"score": 3
} |
#### File: JoeyLr/Blur_Detection/blur_ops.py
```python
import numpy as np
import cv2
class BlurOps:
def __init__(self, img):
self.img = img
def gaussian_blur(self, kernel_size=9):
blurred = cv2.GaussianBlur(self.img, ksize=(kernel_size, kernel_size), sigmaX=0, sigmaY=0)
return blurred
def motion_blur(self, degree=12, angle=135):
img = np.array(self.img)
matrix = cv2.getRotationMatrix2D((degree / 2, degree / 2), angle, 1)
motion_blur_kernel = np.diag(np.ones(degree))
motion_blur_kernel = cv2.warpAffine(motion_blur_kernel, matrix, (degree, degree))
motion_blur_kernel = motion_blur_kernel / degree
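        # the kernel is a diagonal line of `degree` ones rotated by `angle` and scaled so it roughly
        # sums to 1; convolving with it below smears the image along that direction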
blurred = cv2.filter2D(img, -1, motion_blur_kernel)
cv2.normalize(blurred, blurred, 0, 255, cv2.NORM_MINMAX)
blurred = np.array(blurred, dtype=np.uint8)
return blurred
if __name__ == "__main__":
image = cv2.imread('./bean-license.png')
blur_operator = BlurOps(image)
    gaussian_blurred = blur_operator.gaussian_blur()
    cv2.imwrite("./gaussian_blur.jpg", gaussian_blurred)
``` |
{
"source": "joeyMckinney/lambdata",
"score": 4
} |
#### File: lambdata/lambdata/oop_examples.py
```python
import pandas as pd
class Bicycle:
"""General represention of a bicycle"""
def __init__(self, brand, size, tire_size, frame_type):
"""Constructor for complex numbers.
Complex numbers are part real, part imaginary.
"""
self.brand = str(brand)
self.size = str(size)
self.tire_size = int(tire_size)
self.frame_type = str(frame_type)
def petaling_fast(self):
return 'zooming'
def condition(self, condition):
return 'my bikes condition is ' + condition
class MountainBike(Bicycle):
"""General represention of a mountain bike"""
def __init__(self, brand, size, tire_size, frame_type, suspension_count, suspension_brand):
super().__init__(brand, size, tire_size, frame_type)
self.suspension_count = int(suspension_count)
self.suspension_brand = str(suspension_brand)
def going_offroad(self):
return 'Mountain bikes are best for dirt roads'
def petaling_fast(self):
return 'Sending it!'
def smooth_ride(self):
if self.suspension_count <= 1:
return 'this is bumpy ride'
else:
return 'this ride is smooth'
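# A minimal usage sketch of the classes above (argument values are hypothetical):
#   mtb = MountainBike('Trek', 'M', 29, 'hardtail', 1, 'Fox')
#   mtb.petaling_fast()  # -> 'Sending it!'
#   mtb.smooth_ride()    # -> 'this is bumpy ride' (suspension_count <= 1)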
if __name__ == '__main__':
print('Import successful')
``` |
{
"source": "joeynavarro/facial_expression_classification",
"score": 2
} |
#### File: facial_expression_classification/model_src/model_executor.py
```python
from __future__ import print_function
import os
import csv
import h5py
import numpy as np
import skimage.io
import argparse
from PIL import Image
import matplotlib.pyplot as plt
from matplotlib.ticker import (AutoMinorLocator, MultipleLocator)
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.utils.data as data
from torch.autograd import Variable
#custom libraries
import model_backend.transformers as transforms
from model_backend import progress_bar
from model_backend.create_confusion_matrix import plot_confusion_matrix
from model_backend.create_train_data import CK
from built_models import *
from sklearn.metrics import confusion_matrix
# ## Data Extraction and Label Creation for CK+ Dataset
# ### Data Dictionary
# Expressions: 0 = anger, 1 = disgust, 2 = fear, 3 = happy, 4 = sadness, 5 = surprise, 6 = contempt
# The dataset holds 135, 177, 75, 207, 84, 249, and 54 images for each expression, respectively.
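# A small sketch (following the label ordering used below) for mapping the integer labels back to
# emotion names when reading results:
#
#     label_names = ['anger', 'disgust', 'fear', 'happy', 'sadness', 'surprise', 'contempt']
#     label_names[3]  # -> 'happy'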
#
# In[2]:
#this code will create an .h5 file that the model will call upon when searching for inputs
emotions = {'anger' : 'anger_path' , 'disgust' : 'disgust_path', 'fear' : 'fear_path',
'happy' : 'happy_path' , 'sadness': 'sadness_path', 'surprise' : 'surprise_path' ,
'contempt' : 'contempt_path'}
# path to image directory
ck_path = '../data/ck+'
# instantiate lists and counter to store data and label information
data_x = []
data_y = []
count = 0
datapath = os.path.join('../data/ck+','ck_data.h5')
if not os.path.exists(os.path.dirname(datapath)):
os.makedirs(os.path.dirname(datapath))
for emo_key, emo_val in emotions.items():
emo_val = os.path.join(ck_path, emo_key)
files = os.listdir(emo_val)
files.sort()
for filename in files:
I = skimage.io.imread(os.path.join(emo_val, filename))
data_x.append(I.tolist())
data_y.append(count)
count += 1
print(f'The pixel data shape is: {np.shape(data_x)}')
print(f'The label data shape is: {np.shape(data_y)}')
#save the pixel and labels in .h5 file in the ck+ data folder for the model to call upon
datafile = h5py.File(datapath, 'w')
datafile.create_dataset("data_pixel", dtype = 'uint8', data=data_x)
datafile.create_dataset("data_label", dtype = 'int64', data=data_y)
datafile.close()
print("Oh happy day!, the image data has been compiled without a hitch!")
# ## Instantiating Command Line Interface Arguements
# In[3]:
parser = argparse.ArgumentParser(description='PyTorch CK+ CNN Training')
parser.add_argument('--model', type=str, default='VGG19', help='CNN architecture')
parser.add_argument('--dataset', type=str, default='CK+', help='dataset')
parser.add_argument('--fold', default=1, type=int, help='k fold number')
parser.add_argument('--bs', default=128, type=int, help='batch_size')
parser.add_argument('--lr', default=0.01, type=float, help='learning rate')
parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
print(parser)
# ## Training Function Definition
# In[4]:
# instantiating global lists to collect data for visualizations
# the lists are the reason I did not load in the functions from a seperate script
train_acc_list_vgg = []
train_loss_list_vgg = []
train_acc_list_rn = []
train_loss_list_rn = []
train_all_pred = []
train_all_targ = []
def train(epoch):
print('This Is Training Epoch: %d' % epoch )
global Train_acc
net.train()
train_loss = 0
correct = 0
total = 0
all_target = []
if epoch > learning_rate_decay_start and learning_rate_decay_start >= 0:
frac = (epoch - learning_rate_decay_start) // learning_rate_decay_every
decay_factor = learning_rate_decay_rate ** frac
current_lr = opt.lr * decay_factor
progress_bar.set_lr(optimizer, current_lr) # set the decayed rate
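        # worked example: with lr = 0.01, decay_start = 0, decay_every = 1 and decay_rate = 0.8,
        # epoch 5 yields current_lr = 0.01 * 0.8**5 ~= 0.0033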
else:
current_lr = opt.lr
print(' ')
print('Learning Rate: %s' % str(current_lr))
print(' ')
for batch_idx, (inputs, targets) in enumerate(trainloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
# apply optimizer
optimizer.zero_grad()
outputs = net(inputs)
# apply crossentropyloss
loss = criterion(outputs, targets)
loss.backward()
progress_bar.clip_gradient(optimizer, 0.1)
optimizer.step()
train_loss += loss.item()
# make prediction
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
#draw progress bar
progress_bar.progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Accuracy: %.3f%% (%d/%d)'
% (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))
# append predicted and target global lists for visualizations
if batch_idx == 0:
train_all_pred.append(predicted)
train_all_targ.append(targets)
else:
train_all_pred.append(torch.cat((train_all_pred[-1], predicted), 0))
train_all_targ.append(torch.cat((train_all_targ[-1], targets), 0))
print(' ')
# appending accuracy and loss to global list for visualizations
# not the test data is scaled according to cutsize for visualizations
for index in range(cut_size):
if opt.model == 'VGG19':
loss_per_run = train_loss / (batch_idx + 1)
train_loss_list_vgg.append(loss_per_run)
acc_per_run = 100. * correct / total
train_acc_list_vgg.append(acc_per_run)
elif opt.model == 'Resnet18':
loss_per_run = train_loss / (batch_idx + 1)
train_loss_list_rn.append(loss_per_run)
acc_per_run = 100. * correct / total
train_acc_list_rn.append(acc_per_run)
Train_acc = 100. * correct / total
# ## Testing Function Definition
# In[5]:
# instantiating global lists to collect data for visualizations
# the lists are the reason I did not load in the functions from a separate script
test_acc_list_vgg = []
test_loss_list_vgg = []
test_acc_list_rn = []
test_loss_list_rn = []
test_all_pred = []
test_all_targ = []
res_best_test_acc = []
vgg_best_test_acc = []
def test(epoch):
print('This Is Testing Epoch: %d' % epoch )
global Test_acc
global best_Test_acc
global best_Test_acc_epoch
net.eval()
Testing_loss = 0
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(testloader):
bs, ncrops, c, h, w = np.shape(inputs)
inputs = inputs.view(-1, c, h, w)
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
# apply optimizer
optimizer.zero_grad()
outputs = net(inputs)
outputs_avg = outputs.view(bs, ncrops, -1).mean(1) # avg over crops
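        # the TenCrop transform turns each test image into 10 crops, so the batch was flattened to
        # (bs * ncrops, c, h, w) above and the 10 per-image predictions are averaged back here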
# apply crossentropyloss
loss = criterion(outputs_avg, targets)
Testing_loss += loss.item()
# make prediction
_, predicted = torch.max(outputs_avg.data, 1)
total += targets.size(0)
correct += predicted.eq(targets.data).cpu().sum()
#draw progress bar
progress_bar.progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Accuracy: %.3f%% (%d/%d)'
% (Testing_loss / (batch_idx + 1), 100. * correct / total, correct, total))
# append predicted and target global lists for visualizations
if batch_idx == 0:
test_all_pred.append(predicted)
test_all_targ.append(targets)
else:
test_all_pred.append(torch.cat((test_all_pred[-1], predicted), 0))
test_all_targ.append(torch.cat((test_all_targ[-1], targets), 0))
# appending accuracy and loss to global list for visualizations
if opt.model == 'VGG19':
loss_per_run = Testing_loss / (batch_idx+1)
test_loss_list_vgg.append(loss_per_run)
acc_per_run = 100. * correct / total
test_acc_list_vgg.append(acc_per_run)
elif opt.model == 'Resnet18':
loss_per_run = Testing_loss / (batch_idx + 1 )
test_loss_list_rn.append(loss_per_run)
acc_per_run = 100.*correct / total
test_acc_list_rn.append(acc_per_run)
print(' ')
# Save checkpoint.
Test_acc = 100. * correct / total
if Test_acc > best_Test_acc:
print(' ')
print('Awesome! Saving This Model..')
print('Check This Out, The Best Test Accuracy So Far Is: %0.3f' % Test_acc + '%!!')
state = {'net': net.state_dict() if use_cuda else net,
'best_Test_acc': Test_acc,
'best_Test_acc_epoch': epoch,
}
best_Test_acc = Test_acc
best_Test_acc_epoch = epoch
if not os.path.isdir('../model_checkpoints/' + opt.dataset + '_' + opt.model):
os.mkdir('../model_checkpoints/' + opt.dataset + '_' + opt.model)
if not os.path.isdir('../model_checkpoints/' + path):
os.mkdir('../model_checkpoints/' + path)
torch.save(state, os.path.join('../model_checkpoints/' + path, 'emoclass_model.t7'))
if opt.model == 'VGG19':
vgg_best_test_acc.append(best_Test_acc)
elif opt.model == 'Resnet18':
res_best_test_acc.append(best_Test_acc)
# ## Neural Network Parameters
# In[6]:
#Epoch choice
total_epoch = 12
start_epoch = 1 # start from epoch 1 or last checkpoint epoch
#Learning Rate Choice by run
learning_rate_decay_start = 0
learning_rate_decay_every = 1
learning_rate_decay_rate = 0.8
# crop size used by the data transforms (not the batch size)
cut_size = 43
#this model is built on a cuda PC, so if you have a mac or a non NVIDIA gpu, sorry not sorry.
use_cuda = torch.cuda.is_available()
# ## Command Line Arguements for VGG19 Convolutional Neural Network Model
# In[7]:
print('===> Reading Command Line Arguments')
opt = parser.parse_args('--model VGG19 --bs 128 --lr 0.01 --fold 10'.split())
# ## Data and Transformer Loader
# In[8]:
print('===> Loading Data Transformers for Augmentation...')
# define data transformers
transform_train = transforms.Compose([
transforms.RandomCrop(cut_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.TenCrop(cut_size),
transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
])
# model loaders use data transformers
print('===> Loading Data For Model...')
trainset = CK(split = 'Training', fold = opt.fold, transform = transform_train)
testset = CK(split = 'Testing', fold = opt.fold, transform = transform_test)
print('===> Preparing Data For Model...')
# Load training and testing data and apply parameters
trainloader = torch.utils.data.DataLoader(trainset, batch_size = opt.bs, shuffle = True, num_workers = 0)
testloader = torch.utils.data.DataLoader(testset, batch_size = 8, shuffle = False, num_workers=0)
print('===> Data Ready For Model Execution...')
# ## Model Loader and Executor
# In[9]:
#Count instantiators
best_Test_acc = 0
best_Test_acc_epoch = 0
print('===> Loading Model Executor...')
# where to save best model
path = os.path.join( opt.dataset + '_' + opt.model)
# Load model
if opt.model == 'VGG19':
net = VGG('VGG19')
# resume from best model if not started
if opt.resume:
# Load checkpoint.
print('===> Continuing From Checkpoint...')
assert os.path.isdir(path), 'ERROR: NO CHECKPOINT DIRECTORY FOUND!!!!'
checkpoint = torch.load(os.path.join('../model_checkpoints/' + path,'emoclass_model.t7'))
net.load_state_dict(checkpoint['net'])
best_Test_acc = checkpoint['best_Test_acc']
best_Test_acc_epoch = checkpoint['best_Test_acc_epoch']
start_epoch = best_Test_acc_epoch + 1
else:
print(' ')
print('===> Building Model...')
print(' ')
# initialize cuda!! Note, this is not an option it is a requirement.
if use_cuda == True:
net.cuda()
print('===> Preparing Optimizers For Model...')
# initialize loss and optimizer functions
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr = opt.lr, momentum = 0.9, weight_decay = 5e-4)
print(' ')
print('===> Running Model...')
print(' ')
# start training and testing
for epoch in range(start_epoch, total_epoch + 1):
train(epoch)
print(' ')
    print('Epoch Training Done.')
print(' ')
test(epoch)
print(' ')
print('Epoch Testing Done')
print(' ')
print('===> Calculating Confusion Matrix For Model...')
# Compute confusion matrix
vgg_matrix = confusion_matrix(test_all_targ[-1].data.cpu().numpy(), test_all_pred[-1].cpu().numpy())
# append for visualization in case no other appends have been made
if opt.model == 'VGG19':
vgg_best_test_acc.append(best_Test_acc)
print("===> Best Test Accuracy: %0.3f" % best_Test_acc)
print("===> Best Test Accuracy Occured on Epoch: %d" % best_Test_acc_epoch)
# delete cuda memory cache to prevent memory errors
print('===> Clearing CUDA Memory Cache...')
del trainloader
torch.cuda.empty_cache()
del testloader
torch.cuda.empty_cache()
print('===> Model Execution Complete...')
# ## Command Line Arguements for ResNet18 Convolutional Neural Network Model
# In[10]:
print('===> Reading Command Line Arguments')
opt = parser.parse_args('--model Resnet18 --bs 128 --lr 0.01 --fold 10'.split())
# ## Data and Transformer Loader
# In[11]:
print('===> Loading Data Transformers for Augmentation...')
# define data transformers
transform_train = transforms.Compose([
transforms.RandomCrop(cut_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.TenCrop(cut_size),
transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
])
# model loaders use data transformers
print('===> Loading Data For Model...')
trainset = CK(split = 'Training', fold = opt.fold, transform = transform_train)
testset = CK(split = 'Testing', fold = opt.fold, transform = transform_test)
print('===> Preparing Data For Model...')
# Load training and testing data and apply parameters
trainloader = torch.utils.data.DataLoader(trainset, batch_size = opt.bs, shuffle = True, num_workers = 0)
testloader = torch.utils.data.DataLoader(testset, batch_size = 8, shuffle = False, num_workers=0)
print('===> Data Ready For Model Execution...')
# ## Model Loader and Executor
# In[12]:
#Count instantiators
best_Test_acc = 0
best_Test_acc_epoch = 0
print('===> Loading Model Executor...')
# where to save best model
path = os.path.join( opt.dataset + '_' + opt.model)
# Load model
if opt.model == 'Resnet18':
net = ResNet18()
# resume from best model if not started
if opt.resume:
# Load checkpoint.
print(' ')
print('===> Continuing From Checkpoint...')
print(' ')
assert os.path.isdir(path), 'ERROR: NO CHECKPOINT DIRECTORY FOUND!!!!'
checkpoint = torch.load(os.path.join('../model_checkpoints/' + path,'emoclass_model.t7'))
net.load_state_dict(checkpoint['net'])
best_Test_acc = checkpoint['best_Test_acc']
best_Test_acc_epoch = checkpoint['best_Test_acc_epoch']
start_epoch = best_Test_acc_epoch + 1
else:
print(' ')
print('===> Building Model...')
print(' ')
# initialize cuda!! Note, this is not an option it is a requirement.
if use_cuda == True:
net.cuda()
print('===> Preparing Optimizers For Model...')
# initialize loss and optimizer functions
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr = opt.lr, momentum = 0.9, weight_decay = 5e-4)
print(' ')
print('===> Running Model...')
print(' ')
# start training and testing
for epoch in range(start_epoch, total_epoch + 1):
train(epoch)
print(' ')
    print('Epoch Training Done.')
print(' ')
test(epoch)
print(' ')
print('Epoch Testing Done.')
print(' ')
print('===> Calculating Confusion Matrix For Model...')
# Compute confusion matrix
res_matrix = confusion_matrix(test_all_targ[-1].data.cpu().numpy(), test_all_pred[-1].cpu().numpy())
# append for visualization in case no other appends have been made
if opt.model == 'Resnet18':
res_best_test_acc.append(best_Test_acc)
print("===> Best Test Accuracy: %0.3f" % best_Test_acc)
print("===> Best Test Accuracy Occured on Epoch: %d" % best_Test_acc_epoch)
# delete cuda memory cache to prevent memory errors
print('===> Clearing CUDA Memory Cache...')
del trainloader
torch.cuda.empty_cache()
del testloader
torch.cuda.empty_cache()
print('===> Model Execution Complete...')
# ## Visualizations
# In[13]:
# Plot normalized confusion matrix
print('===> Creating VGG19 Confusion Matrix')
plt.figure(figsize=(10, 8))
plot_confusion_matrix(vgg_matrix, cmap = plt.cm.Reds, normalize=True,
title= 'VGG19 Convolutional Neural Network Model \n Normalized Confusion Matrix (Model Accuracy: %0.3f%%)' % max(vgg_best_test_acc))
plt.savefig('../model_visualizations/vgg19_model_confusion_matrix.png')
plt.show()
# In[14]:
# Plot normalized confusion matrix
print('===> Creating ResNet18 Confusion Matrix')
plt.figure(figsize=(10, 8))
plot_confusion_matrix(res_matrix, cmap = plt.cm.Greens, normalize=True,
title= 'ResNet18 Convolutional Neural Network Model \n Normalized Confusion Matrix (Model Accuracy: %0.3f%%)' % max(res_best_test_acc))
plt.savefig('../model_visualizations/resnet18_model_confusion_matrix.png')
plt.show()
# ## Loss Model Plots
# In[15]:
print('===> Creating Loss Plot')
fig, ax = plt.subplots(2, 2, figsize = (28, 10))
fig.subplots_adjust(left = None, bottom = None, right = None, top = None, wspace = None, hspace = 0.8)
#first plot
tral_vgg, = ax[0, 0].plot( train_loss_list_vgg)
tst_vgg_tr, = ax[0, 0].plot(test_loss_list_vgg[:6709], c = 'red' )
ax[0, 0].xaxis.set_major_locator(MultipleLocator(516))
ax[0, 0].yaxis.set_major_locator(MultipleLocator(0.5))
ax[0, 0].grid(which='major', color='#CCCCCC', linestyle='--')
ax[0, 0].set_xticklabels([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], fontsize = 16)
ax[0, 0].set_yticklabels([ '-0.5','0', '0.5', '1.0', '1.5', '2.0', '2.5','3.0'],fontsize=16)
ax[0, 0].set_title('VGG19 Model Loss On CK+ Data For 12 Epochs', size = 30, pad = 15, fontname = 'Gill Sans MT')
ax[0, 0].legend([tral_vgg, tst_vgg_tr], ['Training', 'Testing'], prop={'size': 16} )
ax[0, 0].set_xlabel('Epoch', size = 25, labelpad = 12, fontname = 'Gill Sans MT')
ax[0, 0].set_ylabel('Loss Rate', size = 25, labelpad = 5, fontname = 'Gill Sans MT')
for tick in ax[0, 0].get_xticklabels():
tick.set_fontname("Gill Sans MT")
for tick in ax[0, 0].get_yticklabels():
tick.set_fontname("Gill Sans MT")
#second plot
tral_rn, = ax[0, 1].plot( train_loss_list_rn, c = 'olivedrab')
tst_rn_tr, = ax[0, 1].plot(test_loss_list_rn[:6709], c = 'mediumorchid')
ax[0, 1].xaxis.set_major_locator(MultipleLocator(516))
ax[0, 1].yaxis.set_major_locator(MultipleLocator(0.5))
ax[0, 1].grid(which='major', color='#CCCCCC', linestyle='--')
ax[0, 1].set_xticklabels([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], fontsize = 16)
ax[0, 1].set_yticklabels([ '-0.5','0', '0.5', '1.0', '1.5', '2.0', '2.5','3.0', '3.5'],fontsize=16)
ax[0, 1].set_title('ResNet18 Model Loss On CK+ for 12 Epochs', size = 30, pad = 15, fontname = 'Gill Sans MT')
ax[0, 1].legend([tral_rn, tst_rn_tr], ['Training', 'Testing'], prop={'size': 16} )
ax[0, 1].set_xlabel('Epoch', size = 25, labelpad = 12, fontname = 'Gill Sans MT')
ax[0, 1].set_ylabel('Loss Rate', size = 25, labelpad = 5, fontname = 'Gill Sans MT')
for tick in ax[0, 1].get_xticklabels():
tick.set_fontname("Gill Sans MT")
for tick in ax[0, 1].get_yticklabels():
tick.set_fontname("Gill Sans MT")
#third plot
tral_vgg, = ax[1, 0].plot( train_loss_list_vgg)
tral_rn, = ax[1, 0].plot( train_loss_list_rn, c = 'olivedrab')
ax[1, 0].xaxis.set_major_locator(MultipleLocator(516))
ax[1, 0].yaxis.set_major_locator(MultipleLocator(0.5))
ax[1, 0].grid(which='major', color='#CCCCCC', linestyle='--')
ax[1, 0].set_xticklabels([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], fontsize = 16)
ax[1, 0].set_yticklabels([ '-0.5','0', '0.5', '1.0', '1.5', '2.0', '2.5','3.0'],fontsize=16)
ax[1, 0].set_title('Model Loss on CK+ Training Data For 12 Epochs \n [Lowest Loss] VGG19: %0.3f | ResNet18: %0.3f' % ( min(train_loss_list_vgg), min(train_loss_list_rn)), size = 30, pad = 15, fontname = 'Gill Sans MT')
ax[1, 0].legend([tral_vgg, tral_rn], ['VGG19', 'ResNet18'], prop={'size': 16} )
ax[1, 0].set_xlabel('Epoch', size = 25, labelpad = 12, fontname = 'Gill Sans MT')
ax[1, 0].set_ylabel('Loss Rate', size = 25, labelpad = 5, fontname = 'Gill Sans MT')
for tick in ax[1, 0].get_xticklabels():
tick.set_fontname("Gill Sans MT")
for tick in ax[1, 0].get_yticklabels():
tick.set_fontname("Gill Sans MT")
#fourth plot
vgg_test, = ax[1, 1].plot(test_loss_list_vgg[:6709], c = 'red' )
rn_test, =ax[1, 1].plot(test_loss_list_rn[:6709], c = 'mediumorchid')
ax[1, 1].xaxis.set_major_locator(MultipleLocator(516))
ax[1, 1].yaxis.set_major_locator(MultipleLocator(0.5))
ax[1, 1].grid(which='major', color='#CCCCCC', linestyle='--')
ax[1, 1].set_yticklabels([ '-0.5','0', '0.5', '1.0', '1.5', '2.0', '2.5', '3.0', '3.5'],fontsize=16)
ax[1, 1].set_xticklabels([ -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13], fontsize = 16)
ax[1, 1].set_title('Model Loss on CK+ Testing Data For 12 Epochs \n [Lowest Loss] VGG19: %0.3f | ResNet18: %0.3f' % ( min(test_loss_list_vgg), min(test_loss_list_rn)), size = 30, pad = 15, fontname = 'Gill Sans MT')
ax[1, 1].legend([vgg_test, rn_test], ['VGG19', 'ResNet18'], prop={'size': 16})
ax[1, 1].set_xlabel('Epoch', size = 25, labelpad = 12, fontname = 'Gill Sans MT')
ax[1, 1].set_ylabel('Loss Rate', size = 25, labelpad = 5, fontname = 'Gill Sans MT')
for tick in ax[1, 1].get_xticklabels():
tick.set_fontname("Gill Sans MT")
for tick in ax[1, 1].get_yticklabels():
tick.set_fontname("Gill Sans MT")
plt.savefig('../model_visualizations/model_loss_viz.png');
# ## Accuracy Model Plots
# In[16]:
print('===> Creating Accuracy Plot')
fig, ax = plt.subplots(2, 2, figsize = (28, 10))
fig.subplots_adjust(left = None, bottom = None, right = None, top = None, wspace = None, hspace = 0.8)
#first plot
tral_vgg, = ax[0, 0].plot(train_acc_list_vgg, c = 'mediumpurple')
tst_vgg_tr, = ax[0, 0].plot(test_acc_list_vgg[:6709], c = 'olive' )
ax[0, 0].xaxis.set_major_locator(MultipleLocator(516))
ax[0, 0].yaxis.set_major_locator(MultipleLocator(10))
ax[0, 0].grid(which='major', color='#CCCCCC', linestyle='--')
ax[0, 0].set_xticklabels([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13], fontsize = 16)
ax[0, 0].set_yticklabels(['-1' ,'0', '10', '20', '30', '40', '50', '60', '70', '80', '90', '100'],fontsize=16)
ax[0, 0].set_title('VGG19 Model Accuracy For 12 Epochs', size = 30, pad = 15, fontname = 'Gill Sans MT')
ax[0, 0].legend([tral_vgg, tst_vgg_tr], ['Training','Testing'], prop={'size': 16} )
ax[0, 0].set_xlabel('Epoch', size = 25, labelpad = 12, fontname = 'Gill Sans MT')
ax[0, 0].set_ylabel('Accuracy in %', size = 25, labelpad = 5, fontname = 'Gill Sans MT')
for tick in ax[0, 0].get_xticklabels():
tick.set_fontname("Gill Sans MT")
for tick in ax[0, 0].get_yticklabels():
tick.set_fontname("Gill Sans MT")
#second plot
tral_rn, = ax[0, 1].plot( train_acc_list_rn, c = 'deeppink')
tst_rn_tr, = ax[0, 1].plot(test_acc_list_rn[:6709], c = 'cadetblue')
ax[0, 1].xaxis.set_major_locator(MultipleLocator(516))
ax[0, 1].yaxis.set_major_locator(MultipleLocator(10))
ax[0, 1].grid(which='major', color='#CCCCCC', linestyle='--')
ax[0, 1].set_xticklabels([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13], fontsize = 16)
ax[0, 1].set_yticklabels(['-1' ,'0', '10', '20', '30', '40', '50', '60', '70', '80', '90', '100'],fontsize=16)
ax[0, 1].set_title('ResNet18 Model Accuracy For 12 Epochs', size = 30, pad = 15, fontname = 'Gill Sans MT')
ax[0, 1].legend([tral_rn, tst_rn_tr], ['Training','Testing'], prop={'size': 16} )
ax[0, 1].set_xlabel('Epoch', size = 25, labelpad = 12, fontname = 'Gill Sans MT')
ax[0, 1].set_ylabel('Accuracy in %', size = 25, labelpad = 5, fontname = 'Gill Sans MT')
for tick in ax[0, 1].get_xticklabels():
tick.set_fontname("Gill Sans MT")
for tick in ax[0, 1].get_yticklabels():
tick.set_fontname("Gill Sans MT")
# third plot
tral_vgg, = ax[1, 0].plot(train_acc_list_vgg, c = 'mediumpurple')
tral_rn, = ax[1, 0].plot( train_acc_list_rn, c = 'deeppink')
ax[1, 0].xaxis.set_major_locator(MultipleLocator(516))
ax[1, 0].yaxis.set_major_locator(MultipleLocator(10))
ax[1, 0].grid(which='major', color='#CCCCCC', linestyle='--')
ax[1, 0].set_xticklabels([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13], fontsize = 16)
ax[1, 0].set_yticklabels([ '10', '20', '30', '40', '50', '60', '70', '80', '90', '100'],fontsize=16)
ax[1, 0].set_title('Model Accuracy on Training Data For 12 Epochs \n [Best Accuracies] VGG19: %0.3f%% | ResNet18: %0.3f%%' % ( max(train_acc_list_vgg), max(train_acc_list_rn)), size = 30, pad = 15, fontname = 'Gill Sans MT')
ax[1, 0].legend([tral_vgg, tral_rn], ['VGG19', 'ResNet18'], prop={'size': 16} )
ax[1, 0].set_xlabel('Epoch', size = 25, labelpad = 12, fontname = 'Gill Sans MT')
ax[1, 0].set_ylabel('Accuracy in %', size = 25, labelpad = 5, fontname = 'Gill Sans MT')
for tick in ax[1, 0].get_xticklabels():
tick.set_fontname("Gill Sans MT")
for tick in ax[1, 0].get_yticklabels():
tick.set_fontname("Gill Sans MT")
#fourth plot
vgg_test, = ax[1, 1].plot(test_acc_list_vgg[:6709], c = 'olive' )
rn_test, =ax[1, 1].plot(test_acc_list_rn[:6709], c = 'cadetblue')
ax[1, 1].xaxis.set_major_locator(MultipleLocator(516))
ax[1, 1].yaxis.set_major_locator(MultipleLocator(10))
ax[1, 1].grid(which='major', color='#CCCCCC', linestyle='--')
ax[1, 1].set_yticklabels(['-1', '0', '10', '20', '30', '40', '50', '60', '70', '80', '90', '100'],fontsize=16)
ax[1, 1].set_xticklabels([ -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13], fontsize = 16)
ax[1, 1].set_title('Model Accuracy on Test Data For 12 Epochs \n [Best Accuracies] VGG19: %0.3f%% | ResNet18: %0.3f%%' % ( max(vgg_best_test_acc), max(res_best_test_acc)), size = 30, pad = 15, fontname = 'Gill Sans MT')
ax[1, 1].legend([vgg_test, rn_test], ['VGG19', 'ResNet18'], prop={'size': 16})
ax[1, 1].set_xlabel('Epoch', size = 25, labelpad = 12, fontname = 'Gill Sans MT')
ax[1, 1].set_ylabel('Accuracy in %', size = 25, labelpad = 5, fontname = 'Gill Sans MT')
for tick in ax[1, 1].get_xticklabels():
tick.set_fontname("Gill Sans MT")
for tick in ax[1, 1].get_yticklabels():
tick.set_fontname("Gill Sans MT")
plt.savefig('../model_visualizations/model_accuracy_viz.png');
# In[17]:
print('===> Done Running Model Executor')
``` |
{
"source": "JoeyOhman/text-dedup",
"score": 3
} |
#### File: text_dedup/suffix/__init__.py
```python
from typing import List, Any, Tuple
from multiprocessing import Manager
from ctypes import c_char_p
import multiprocessing as mp
from numpy.lib.stride_tricks import sliding_window_view
import numpy as np
def similar(x: int, y: int, S: Any, k: int) -> bool:
"""Whether S[x:x+k] is the same as S[y:y+k].
Parameters
----------
x : int
[description]
y : int
[description]
S : Any
[description]
k : int
[description]
Returns
-------
bool
[description]
"""
if x == y:
return True
return (
x + k <= len(S.value)
and y + k <= len(S.value)
and S.value[x : x + k] == S.value[y : y + k]
)
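# Illustrative example (not part of the original module): with S.value = "abcabc",
# similar(0, 3, S, 3) compares S.value[0:3] ("abc") with S.value[3:6] ("abc") and returns True.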
def group(x: str, patterns: List[str]) -> List[int]:
"""Find patterns that are present in string x.
Parameters
----------
x : str
A document string
    patterns : List[str]
        Patterns to search for
Returns
-------
List[int]
List of indices of which patterns are present in string x
"""
result = []
for idx, pattern in enumerate(patterns):
if pattern in x:
result.append(idx)
return result
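# Illustrative example (hypothetical data): with x = "this is a sentence" and
# patterns = ["sentence", "xyz"], only the first pattern occurs in x, so
# group(x, patterns) would return [0].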
class SuffixArray:
def __init__(self, k: int = 50):
self.array = []
self.k = k
def fit_transform(self, data: List[str]) -> Tuple[List[str], np.ndarray]:
"""Find duplicate substrings in the data.
Parameters
----------
data : List[str]
List of documents.
Returns
-------
Tuple[List[str], np.ndarray]
List of duplicate substrings and a matrix where each row is a document and each column is a substring.
Examples
--------
>>> array = SuffixArray(k = 9)
>>> duplicates, groups = array.fit_transform(["This is a sentence.", "This is another sentences.", "This is a question.", "hello world"] * 10)
>>> assert len(duplicates) == groups.shape[1], "Invalid number of columns"
>>> assert groups.shape[0] == 40, "Invalid number of rows"
"""
S = "".join(data)
suffixes = []
for i in range(len(S)):
suffixes.append(S[i:])
self.array = np.argsort(suffixes)
# Find duplicated substrings
manager = Manager()
shared = manager.Value(c_char_p, S)
with mp.Pool(mp.cpu_count()) as pool:
results = pool.starmap(
similar,
[(x, y, shared, self.k) for x, y in sliding_window_view(self.array, 2)],
)
duplicates = []
for idx, dup in zip(self.array, results):
if dup:
duplicates.append(S[idx : idx + self.k])
# Find duplicated documents
try:
from multiprocessing import shared_memory
shared = shared_memory.ShareableList(duplicates)
except ImportError as e:
print(
f"The following error was: \n{e}\n\n"
+ "This was likely raised since you are not running python 3.8 or higher."
+ " Continuing without a shared memory file which is likely be inefficient."
)
shared = duplicates
with mp.Pool(mp.cpu_count()) as pool:
results = pool.starmap(group, [(d, shared) for d in data])
        # Only a ShareableList owns a shared memory segment to release; in the
        # ImportError fallback, `shared` is a plain list with no `shm` attribute.
        if hasattr(shared, "shm"):
            shared.shm.close()
            shared.shm.unlink()
        del shared
groups = np.zeros((len(data), len(duplicates)), dtype=bool)
for i, x in enumerate(results):
for y in x:
groups[i, y] = 1
return duplicates, groups
``` |
{
"source": "joeyparis/serverless-aws-iam-access-key-auto-rotation",
"score": 2
} |
#### File: src/access_key_auto_rotation/key_actions.py
```python
import json
from config import Config, log
from aws_partitions import get_partition_for_region, get_iam_region,\
get_partition_regions
config = Config()
def log_actions(action_queue, dryrun=False):
if not action_queue:
log.info("No actions to be taken on this account.")
return
for action_spec in action_queue:
action = action_spec['action']
key_metadata = action_spec['key']
access_key_id = key_metadata["AccessKeyId"]
reason = action_spec['reason'].value
if action == 'ROTATE':
if dryrun:
log.info(
f"Would create new key to replace {access_key_id}"
f" -- {reason}")
else:
log.info(
f"Creating new key to replace {access_key_id}"
f" -- {reason}")
elif action == 'DEACTIVATE':
if dryrun:
log.info(
f"Would deactivate {access_key_id}"
f" -- {reason}")
else:
log.info(
f"Deactivating {access_key_id}"
f" -- {reason}")
elif action == 'DELETE':
if dryrun:
log.info(
f"Would delete {access_key_id}"
f" -- {reason}")
else:
log.info(
f"Deleting {access_key_id}"
f" -- {reason}")
def execute_actions(action_queue, account_session):
for action_spec in action_queue:
action = action_spec['action']
key_metadata = action_spec['key']
if action == 'ROTATE':
rotate_key(key_metadata, account_session)
elif action == 'DEACTIVATE':
deactivate_key(key_metadata, account_session)
elif action == 'DELETE':
delete_key(key_metadata, account_session)
def rotate_key(key_metadata, account_session):
user_name = key_metadata['UserName']
access_key_id = key_metadata['AccessKeyId']
log.info(f'Rotating user {user_name} key {access_key_id}')
iam_client = account_session.client('iam')
sts_client = account_session.client('sts')
# get account id and region from session
account_id = sts_client.get_caller_identity()["Account"]
my_region = account_session.region_name
# use default iam regions to store secret
partition = get_partition_for_region(my_region)
iam_region = get_iam_region(partition)
sm_client = account_session.client(
'secretsmanager', region_name=iam_region)
replication_regions = get_partition_regions(partition)
# TODO: parameterize this instead of hardcoding
if partition == 'aws-us-gov':
replication_regions = ['us-gov-east-1']
elif partition == 'aws':
replication_regions = ['us-east-2', 'us-west-1', 'us-west-2']
# Create new access key
new_access_key = iam_client.create_access_key(
UserName=user_name)['AccessKey']
new_access_key_str = json.dumps(
new_access_key, indent=4, sort_keys=True, default=str)
secret_name = config.secretNameFormat.format(user_name)
secret_arn = config.secretArnFormat.format(
partition=partition, account_id=account_id, secret_name=secret_name,
region_name=iam_region)
# Create new secret, or store in existing
try:
# will throw error if secret does not yet exist
secret = sm_client.describe_secret(
SecretId=secret_name)
# update secret
sm_client.put_secret_value(SecretId=secret_name,
SecretString=new_access_key_str)
# make sure secret is replicated to all regions
replication_status = secret.get('ReplicationStatus', [])
current_replication_regions = [x['Region'] for x in replication_status]
missing_regions = [x for x in replication_regions
if x not in current_replication_regions]
if missing_regions:
sm_client.replicate_secret_to_regions(
SecretId=secret_name,
AddReplicaRegions=[{'Region': x} for x in missing_regions],
ForceOverwriteReplicaSecret=True
)
except sm_client.exceptions.ClientError as error:
# create if we caught an error on describe
if error.response['Error']['Code'] == 'ResourceNotFoundException':
sm_client.create_secret(
Name=secret_name, Description='Auto-created secret',
SecretString=new_access_key_str,
AddReplicaRegions=[{'Region': x} for x in replication_regions],
ForceOverwriteReplicaSecret=True
)
else:
raise error
user = iam_client.get_user(
UserName=user_name
)['User']
user_arn = user['Arn']
resource_policy_document = config.secretPolicyFormat.format(
user_arn=user_arn)
sm_client.put_resource_policy(SecretId=secret_name,
ResourcePolicy=resource_policy_document,
BlockPublicPolicy=True)
policy_name = 'SecretsAccessPolicy'
try:
iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name)
except iam_client.exceptions.ClientError as error:
# TODO - IAM uses IAM.Client.exceptions.NoSuchEntityException
# Find out if it inherits from ClientError. If it does, this code is probably ok,
# but may need to change ResourceNotFoundException to NoSuchEntityException
if error.response['Error']['Code'] == 'NoSuchEntity':
policy_document = config.iamPolicyFormat.format(
account_id=account_id, secret_arn=secret_arn)
iam_client.put_user_policy(UserName=user_name,
PolicyName=policy_name,
PolicyDocument=policy_document)
else:
raise error
return
def deactivate_key(key_metadata, account_session):
user_name = key_metadata['UserName']
access_key_id = key_metadata['AccessKeyId']
log.info(f'Deactivating user {user_name} key {access_key_id}')
iam_client = account_session.client('iam')
iam_client.update_access_key(UserName=user_name,
AccessKeyId=access_key_id,
Status='Inactive')
def delete_key(key_metadata, account_session):
user_name = key_metadata['UserName']
access_key_id = key_metadata['AccessKeyId']
log.info(f'Deleting user {user_name} key {access_key_id}')
iam_client = account_session.client('iam')
iam_client.delete_access_key(UserName=user_name,
AccessKeyId=access_key_id)
```
#### File: serverless-aws-iam-access-key-auto-rotation/src/check_permissions.py
```python
from typing import Dict, List, Optional
def blocked(
actions: List[str],
resources: Optional[List[str]] = None,
context: Optional[Dict[str, List]] = None
) -> List[str]:
"""test whether IAM user is able to use specified AWS action(s)
Args:
actions (list): AWS action(s) to validate IAM user can use.
resources (list): Check if action(s) can be used on resource(s).
If None, action(s) must be usable on all resources ("*").
context (dict): Check if action(s) can be used with context(s).
If None, it is expected that no context restrictions were set.
Returns:
list: Actions denied by IAM due to insufficient permissions.
"""
if not actions:
return []
actions = list(set(actions))
if resources is None:
resources = ["*"]
_context: List[Dict] = [{}]
if context is not None:
# Convert context dict to list[dict] expected by ContextEntries.
_context = [{
'ContextKeyName': context_key,
'ContextKeyValues': [str(val) for val in context_values],
'ContextKeyType': "string"
} for context_key, context_values in context.items()]
# You'll need to create an IAM client here
results = aws.iam_client().simulate_principal_policy(
PolicySourceArn=consts.IAM_ARN, # Your IAM user's ARN goes here
ActionNames=actions,
ResourceArns=resources,
ContextEntries=_context
)['EvaluationResults']
return sorted([result['EvalActionName'] for result in results
if result['EvalDecision'] != "allowed"])
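# Hypothetical usage sketch (the `aws` and `consts` helpers referenced above are
# assumed to exist elsewhere in this repository; the ARN below is illustrative):
#   denied = blocked(
#       actions=["iam:CreateAccessKey", "iam:DeleteAccessKey"],
#       resources=["arn:aws:iam::123456789012:user/example-user"],
#   )
#   if denied:
#       raise PermissionError(f"Missing permissions for: {denied}")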
``` |
{
"source": "joeypauls/sandbox-editor",
"score": 3
} |
#### File: sandbox-editor/src/__init__.py
```python
import os
here = os.path.abspath(os.path.dirname(__file__))
os.chdir(here)
def hello_world():
return "Hello World"
``` |
{
"source": "joeypoyiwu/signal-scanner-bot",
"score": 3
} |
#### File: signal-scanner-bot/signal_scanner_bot/signal.py
```python
import logging
import subprocess
import traceback
from datetime import datetime
from typing import Dict, List
from . import env
log = logging.getLogger(__name__)
################################################################################
# Private Functions
################################################################################
def _check_group(recipient: str) -> bool:
"""Check whether a supplied recipient is in the phone number or group format."""
if recipient.endswith("=") and len(recipient) in {24, 44}:
# Heuristic: this is usually the pattern of group IDs
return True
elif recipient.startswith("+"):
# Heuristic: this is what phone numbers have to start with
return False
else:
raise ValueError(f"Supplied recipient is invalid: {recipient}")
################################################################################
# Public Functions
################################################################################
def message_timestamp(data: Dict) -> datetime:
"""
Extract the timestamp from a Signal message and convert it to
a proper datetime object.
"""
try:
timestamp_milliseconds = data["timestamp"]
except KeyError as err:
raise KeyError(f"Timestamp field is not present in data: {data}") from err
dt = datetime.fromtimestamp(timestamp_milliseconds / 1000.0)
return dt
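# For example, a Signal payload containing {"timestamp": 1609459200000}
# (milliseconds since the epoch) would be converted to the local-time
# datetime corresponding to 2021-01-01 00:00:00 UTC.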
def list_identities() -> List[str]:
"""
Call the signal-cli `listIdentities` command and return the entire
result as a list of strings.
"""
proc = subprocess.run(
["signal-cli", "-u", str(env.BOT_NUMBER), "listIdentities"],
capture_output=True,
text=True,
)
if proc.stderr:
log.warning(f"STDERR: {proc.stderr}")
if proc.stdout:
return proc.stdout.split("\n")
else:
return []
def trust_identity(phone_number: str, safety_number: str):
"""Call the signal-cli `trust` command for the provided phone + safety numbers."""
proc = subprocess.run(
[
"signal-cli",
"-u",
str(env.BOT_NUMBER),
"trust",
phone_number,
"-v",
f'{safety_number.replace(" ", "")}',
],
capture_output=False,
text=True,
)
if proc.stderr:
log.error(f"STDERR: {proc.stderr}")
if proc.returncode != 0:
log.error(f"Trust call return code: {proc.returncode}")
def send_message(message: str, recipient: str, attachment=None):
"""High level function to send a Signal message to a specified recipient."""
group = _check_group(recipient)
recipient_args = ["-g", recipient] if group else [recipient]
    attachment_args = ["-a", attachment] if attachment else []
log.debug("Sending message")
proc = subprocess.run(
[
"signal-cli",
"-u",
str(env.BOT_NUMBER),
"send",
"-m",
message,
*recipient_args,
            *attachment_args,
],
capture_output=True,
text=True,
)
if proc.stdout:
log.info(f"STDOUT: {proc.stdout}")
if proc.stderr:
log.warning(f"STDERR: {proc.stderr}")
################################################################################
# Panic?!?!?!?!
################################################################################
def panic(err: Exception) -> None:
# We don't really care if this succeeds, particularly if there's an issue
# with the signal config
log.info(f"Panicing, attempting to call home at {env.ADMIN_CONTACT}")
message = f"BOT FAILURE: {err}\n{traceback.format_exc(limit=4)}"
send_message(message, env.ADMIN_CONTACT)
```
#### File: signal-scanner-bot/signal_scanner_bot/transport.py
```python
import asyncio
import logging
import subprocess
from datetime import date, datetime, timedelta
import ujson
from peony import events
from . import env, messages, radio_monitor_alert, signal
log = logging.getLogger(__name__)
################################################################################
# Twitter-to-Queue
################################################################################
def _filter_hashtags(data, filter_hashtag_list):
for input_hashtag in data["entities"]["hashtags"]:
if input_hashtag["text"].lower() in filter_hashtag_list:
return True
return False
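# Illustrative example (hypothetical payload): a tweet such as
#   {"entities": {"hashtags": [{"text": "Protest"}]}}
# matched against filter_hashtag_list = ["protest"] returns True, since the
# tweet-side hashtag text is lower-cased before the membership check.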
async def twitter_to_queue():
log.info("Starting Twitter Event Stream")
stream_obj = env.CLIENT.stream.statuses.filter.post(
follow=",".join(env.TRUSTED_TWEETERS)
)
async with stream_obj as stream:
async for data in stream:
if events.on_connect(data):
log.info("Connected to the stream")
elif events.on_tweet(data) and env.STATE.LISTENING:
if (
_filter_hashtags(data, env.RECEIVE_HASHTAGS)
and data["user"]["id_str"] in env.TRUSTED_TWEETERS
):
await messages.process_twitter_message(data)
################################################################################
# Queue-to-Signal
################################################################################
async def queue_to_signal():
"""Run the queue-to-signal loop. Flushes the entire queue on each call."""
while True:
log.debug("Trying to empty Twitter to Signal queue.")
while not env.TWITTER_TO_SIGNAL_QUEUE.empty():
try:
log.debug("Emptying Twitter to Signal queue.")
message = await env.TWITTER_TO_SIGNAL_QUEUE.get()
signal.send_message(message, env.LISTEN_CONTACT)
env.TWITTER_TO_SIGNAL_QUEUE.task_done()
except asyncio.QueueEmpty:
log.debug("Queue is empty breaking out of async loop.")
except Exception as err:
log.error("Exception occurred, halting queue to signal process")
log.exception(err)
signal.panic(err)
env.STATE.STOP_REQUESTED = True
raise
await asyncio.sleep(1)
################################################################################
# Signal-to-Twitter
################################################################################
async def signal_to_twitter():
"""Run the signal-to-twitter loop."""
try:
while not env.STATE.STOP_REQUESTED:
proc = await asyncio.create_subprocess_shell(
f"signal-cli -u {env.BOT_NUMBER} --output=json receive -t {env.SIGNAL_TIMEOUT}",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
while line := await proc.stdout.readline():
line = line.decode("utf-8").rstrip()
blob = ujson.loads(line)
try:
await messages.process_signal_message(blob, env.CLIENT)
except Exception:
log.error(f"Malformed message: {blob}")
raise
# Check to see if there's any content in stderr
error = (await proc.stderr.read()).decode()
for line in error.split("\n"):
if line.strip():
log.warning(f"STDERR: {line}")
if proc.returncode != 0 and proc.returncode is not None:
log.warning(f"Something went wrong (error code {proc.returncode})")
except Exception as err:
signal.panic(err)
raise
finally:
log.info("Killing signal-cli")
try:
proc.kill()
log.info("signal-cli process killed")
except ProcessLookupError:
log.warning("Failed to kill process, moving on.")
pass
################################################################################
# Comradely Reminder
################################################################################
async def comradely_reminder() -> None:
"""Run the comradely reminder loop."""
# Wait for system to initialize...
await asyncio.sleep(15)
try:
window_start = env.COMRADELY_TIME
# Can't do arithmetic with python time objects...
# So we have to convert it into a datetime, add the timedelta, then swap
# it back to a time object
window_end = (
datetime.combine(date(1, 1, 1), window_start) + timedelta(hours=1)
).time()
while True:
now = datetime.now().time()
log.debug(f"Now: {now.isoformat()} | Start: {window_start.isoformat()}")
# Check if we're currently within a 1-hour time window
if window_start <= now < window_end:
log.debug("Within time window")
await messages.send_comradely_reminder()
# Wait at least 60 minutes for the next check
log.debug("Waiting an hour...")
await asyncio.sleep(60 * 60)
except Exception as err:
log.exception(err)
signal.panic(err)
raise
################################################################################
# SWAT Alert
################################################################################
async def radio_monitor_alert_transport() -> None:
"""Run the radio monitor alert loop."""
# Wait for system to initialize
await asyncio.sleep(15)
while True:
try:
log.debug("Checking for monitored units' radio activity.")
if (
radio_monitor_alert_messages := await radio_monitor_alert.check_radio_calls()
):
log.info(
"Radio activity found for monitored units sending alert to group."
)
log.debug(f"Monitored units are {env.RADIO_MONITOR_UNITS}")
log.debug(f"Alert messages to be sent:\n{radio_monitor_alert_messages}")
for message, audio in radio_monitor_alert_messages:
await messages.send_radio_monitor_alert(message, audio)
# Wait a minute to poll again
log.debug(
f"Sleeping for {env.RADIO_MONITOR_LOOKBACK}s before checking for monitored unit alerts again."
)
await asyncio.sleep(env.RADIO_MONITOR_LOOKBACK)
except Exception as err:
log.exception(err)
signal.panic(err)
raise
``` |
{
"source": "joeypsmith/mc-projects",
"score": 5
} |
#### File: cmsc-135/assignment-7/date_printer.py
```python
inputDate = ""
printDate = ""
#Months list
months = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
#Main function
def main():
#Asks for user input in correct format
inputDate = input("Enter a date in the format mm/dd/yyyy: ")
#Converts date
printDate = convertDate(inputDate)
#Print new date
print(printDate)
#Convert date function
def convertDate(date):
#Gather information. Month is set to integer for indexing month strings.
month = int(date[0:2])-1
day = date[3:5]
year = date[6:10]
#Set return string. Add month as index to months string list
returnDate = months[month]+" "+day+", "+year
#Return string
return returnDate
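#Example: convertDate("07/04/2023") returns "July 04, 2023"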
#Call main function and add exit button
main()
input("press enter")
``` |
{
"source": "joeypy/Algorithms",
"score": 4
} |
#### File: Algorithms/Python/Ordenamiento_por_burbuja_bidireccional.py
```python
import random
def intercambiarValores(array, pos1, pos2):
temporal = array[pos1]
array[pos1] = array[pos2]
array[pos2] = temporal
def ordenamiento(array, size):
while True:
intercambiados = False
for i in range(1, size):
if array[i] < array[i-1]:
intercambiarValores(array, i, i-1)
intercambiados = True
        for i in range(size-1, 0, -1):
if array[i] < array[i-1]:
intercambiarValores(array, i, i-1)
intercambiados = True
if intercambiados == False: break
if __name__ == "__main__":
array = []
size = 10
for i in range(size):
array.append(random.randint(0, 100))
print(array)
ordenamiento(array, size)
print(array)
```
#### File: Algorithms/Python/Ordenamiento_por_insercion.py
```python
import random
def intercambiarValores(array, pos1, pos2):
temporal = array[pos1]
array[pos1] = array[pos2]
array[pos2] = temporal
def ordenamiento(array, size):
for i in range(size):
j = i
while j > 0 and array[j-1] > array[j]:
intercambiarValores(array, j, j-1)
j = j - 1
if __name__ == "__main__":
array = []
size = 10
for i in range(size):
array.append(random.randint(0, 100))
print(array)
ordenamiento(array, size)
print(array)
``` |
{
"source": "joeypy/Blog_django",
"score": 2
} |
#### File: tutorial/blog/views.py
```python
from django.shortcuts import render
from blog.models import Post
# Create your views here.
def home(request):
posts = Post.objects.all()
return render(request, 'blog/home.html', context= {'posts': posts})
def post(request, id):
post = Post.objects.get(id=id)
return render(request, 'blog/post.html', context= {'post': post})
``` |
{
"source": "joeypy/DjangoVue-practice",
"score": 2
} |
#### File: djangoVue/book/models.py
```python
from django.db import models
# Create your models here.
class Book(models.Model):
title = models.CharField(max_length=50)
description = models.TextField()
def __str__(self):
        return self.title
``` |
{
"source": "JoeyRamone/J4KTest",
"score": 3
} |
#### File: scripts/python-phoniebox/PhonieboxConfigChanger.py
```python
import os,sys,signal
#from mpd import MPDClient
import configparser
#from RawConfigParserExtended import RawConfigParserExtended
from Phoniebox import Phoniebox
# get absolute path of this script
dir_path = os.path.dirname(os.path.realpath(__file__))
defaultconfigFilePath = os.path.join(dir_path,'./phoniebox.conf')
def is_int(s):
""" return True if string is an int """
try:
int(s)
return True
except ValueError:
return False
def str2bool(s):
""" convert string to a python boolean """
return s.lower() in ("yes", "true", "t", "1")
def str2num(s):
""" convert string to an int or a float """
try:
return int(s)
except ValueError:
return float(s)
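# e.g. str2bool("Yes") -> True, str2num("3") -> 3, str2num("3.5") -> 3.5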
class PhonieboxConfigChanger(Phoniebox):
def __init__(self,configFilePath=defaultconfigFilePath):
Phoniebox.__init__(self,configFilePath)
def assigncard(self,cardid,uri):
section = cardid
# set uri and cardid for card (section = cardid)
if not section in self.cardAssignments.sections():
self.cardAssignments.add_section(section)
self.cardAssignments.set(section,"cardid",cardid)
self.cardAssignments.set(section,"uri",uri)
# write updated assignments to file
with open(self.config['card_assignments_file'], 'w') as cardAssignmentsFile:
self.cardAssignments.write(cardAssignmentsFile)
def removecard(self,cardid):
section = cardid
if section in self.cardAssignments.sections():
self.cardAssignments.remove_section(section)
# write updated assignments to file
with open(self.config['card_assignments_file'], 'w') as f:
self.cardAssignments.write(f)
def set(self,section,key,value):
try:
num = int(section)
parser = self.cardAssignments
config_file = self.config.get("phoniebox","card_assignments_file")
except ValueError:
parser = self.config
config_file = configFilePath
# update value
try:
parser.set(section,key,value)
self.debug("Set {} = {} in section {}".format(key,value,section))
except configparser.NoSectionError as e:
            raise
# write to file
# with open(config_file, 'w') as f:
# parser.write(f)
def get(self,section,t="ini"):
try:
num = int(section)
parser = self.cardAssignments
except ValueError:
parser = self.config
if t == "json":
print(parser.as_json(section))
elif t == "dict":
print(parser.as_dict(section))
else:
print(parser.print_ini(section))
def print_usage(self):
print("Usage: {} set ".format(sys.argv[0]))
if __name__ == "__main__":
cmdlist = ["assigncard","removecard","set","get"]
    if len(sys.argv) < 2:
sys.exit()
else:
if sys.argv[1] in cmdlist:
configFilePath = defaultconfigFilePath
cmd = sys.argv[1]
shift = 0
else:
configFilePath = sys.argv[1]
cmd = sys.argv[2]
shift = 1
ConfigChanger = PhonieboxConfigChanger(configFilePath)
try:
if cmd == "assigncard":
cardid = sys.argv[2+shift]
uri = sys.argv[3+shift]
ConfigChanger.assigncard(cardid,uri)
elif cmd == "removecard":
cardid = sys.argv[2+shift]
ConfigChanger.removecard(cardid)
elif cmd == "set":
section = sys.argv[2+shift]
key = sys.argv[3+shift]
value = sys.argv[4+shift]
ConfigChanger.set(section,key,value)
elif cmd == "get":
section = sys.argv[2+shift]
try:
t = sys.argv[3+shift]
except:
t = "ini"
ConfigChanger.get(section,t)
else:
# will never be reached
print("supported commands are {} and {}".format(", ".join(cmdlist[:-1]),cmdlist[-1]))
except:
        ConfigChanger.print_usage()
``` |
{
"source": "JoeyRBishop/stock_sentiment_analysis",
"score": 4
} |
#### File: JoeyRBishop/stock_sentiment_analysis/stock_data.py
```python
import pandas as pd
import yfinance as yf
import numpy as np
def main_stock_data(tick="TSLA",start_date="2022-01-01",price="close"):
'''
Get historical stock data from yahoo finance and save it as a csv
Parameters
----------
tick : str, optional
        The ticker. The default is "TSLA".
start_date : str, optional
        The initial start date of the history. The default is "2022-01-01".
price : str, optional
The "price" of the stock this is "subjective" as the price fluxuates
over the day, so there is no "price for that day".
Returns
-------
Saves the historical data as a csv where the file name is defined by the
most current date
'''
ticker = yf.Ticker(tick)
df = ticker.history(start=start_date,interval="1d")
df=df.reset_index()
df["ticker"]=tick
###A personal preference, to make all the headers lower case
df.columns = [x.lower() for x in df.columns]
df=interpolate_missing_stock_data(df,price)
df=percentage_growth(df,price)
df["date"]=df["date"].dt.date
final_date=str(df["date"].max())[:10]
df.to_csv(f"stock_data_{tick}_{final_date}.csv",index=False)
return df
def percentage_growth(df,price):
'''
    Assumes the price is the close price, since the price moves throughout
    the day.
Parameters
----------
df : DataFrame
The historical stock data df
Returns
-------
df : DataFrame
        The dataframe with the added columns {price}_tomorrow and percentage_change
'''
df[f"{price}_tomorrow"]=df[price].shift(-1,fill_value=np.nan)
df["percentage_change"]=(df[f"{price}_tomorrow"]-df[price])*100/df[price]
return df
def interpolate_missing_stock_data(df,price):
'''
As the stock market is closed on weekends and bank holidays, the assumption
is made that the stock linearly increases(decreases) over the days that the
stock market is closed.
Parameters
----------
df : DataFrame
The dataframe that contains the stock data.
price : str
The column that contains the "price"
Returns
-------
df_interpolate : DataFrame
The df that contains the interpolated stock data (over the "closed"
days)
'''
df['date'] = pd.to_datetime(df['date'])
df=df.set_index("date")
df_interpolate = df[[price]].resample('D').max()
df_interpolate[price] = df_interpolate[price]\
.interpolate(method='linear',limit_direction="both")
df_interpolate=df_interpolate.reset_index()
return df_interpolate
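# Example usage (requires network access to Yahoo Finance via yfinance):
#   df = main_stock_data(tick="TSLA", start_date="2022-01-01", price="close")
# which writes stock_data_TSLA_<latest-date>.csv to the working directory.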
``` |
{
"source": "JoeyRead/kafcrapy",
"score": 3
} |
#### File: kafcrapy/kafcrapy/pipeline.py
```python
from . import connection
class KafkaProducerPipeline(object):
"""
Publish serialize item to configured topic
"""
def __init__(self, producer):
self.producer = producer
self.topic = None
def open_spider(self, spider):
if not hasattr(spider, 'produce_item_topic'):
            raise ValueError('produce_item_topic name is not provided')
self.topic = spider.produce_item_topic
def process_item(self, item, spider):
"""
        This method is overridden so the pipeline can publish each scraped item to Kafka.
:param item:
:param spider:
:return:
"""
"""
send(self, topic, value=None, key=None, headers=None, partition=None, timestamp_ms=None):
"""
self.producer.send(topic=self.topic, value=item)
return item
@classmethod
def from_settings(cls, settings):
"""
This
:param settings: the current scrapy spider settings
:return: KafkaProducerPipeline instance
"""
producer = connection.producer_from_settings({})
return cls(producer)
@classmethod
def from_crawler(cls, crawler):
return cls.from_settings(crawler.settings)
def close_spider(self, spider):
if self.producer:
self.producer.close()
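# To enable this pipeline in a Scrapy project, it would typically be registered
# in the project settings, e.g.:
#   ITEM_PIPELINES = {"kafcrapy.pipeline.KafkaProducerPipeline": 300}
# and the spider must define a `produce_item_topic` attribute naming the
# destination Kafka topic.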
```
#### File: kafcrapy/kafcrapy/spider.py
```python
from scrapy import signals
from scrapy.exceptions import DontCloseSpider
from scrapy.spiders import Spider
from . import connection
class KafkaSpiderMixin(object):
def __init__(self):
self.consumer = None
@classmethod
def process_message(cls, message):
if not message:
return None
        return message.value
def setup_kafka_consumer(self):
self.logger.info('setting up with kafka consumer')
if not self.crawler:
raise ValueError("Crawler is required")
if not hasattr(self, 'topic') or not self.topic:
raise ValueError('kafka topic is required')
if self.consumer:
return
settings = self.crawler.settings
self.consumer = connection.consumer_from_settings(topic_name=self.topic, config={})
self.crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)
        # Called just after an item has been scraped, so the crawler keeps running while work remains
self.crawler.signals.connect(self.item_scraped, signal=signals.item_scraped)
def start_requests(self):
return self.next_request()
def next_request(self):
self.logger.info("starting next message_batch")
message_batch = self.consumer.poll(timeout_ms=10 * 1000, max_records=10)
for partition_batch in message_batch.values():
for message in partition_batch:
print("message", message.value)
                url = self.process_message(message)
yield self.make_requests_from_url(url)
def schedule_next_request(self):
for req in self.next_request():
self.crawler.engine.crawl(req, spider=self)
def spider_idle(self):
"""
        Schedule the next request if available, otherwise wait. Raising
        DontCloseSpider prevents the spider from closing when the queue is empty.
:return:
"""
self.schedule_next_request()
raise DontCloseSpider
def item_scraped(self, *args, **kwargs):
"""
        After an item has been scraped, schedule the next request immediately instead of waiting for the scheduler
:param args:
:param kwargs:
:return:
"""
self.schedule_next_request()
def closed(self, reason):
if self.consumer:
self.consumer.close()
self.logger.info('Closing spider name: %s, reason: %s', getattr(self, 'name', None), reason)
class KafkaFeedSpider(KafkaSpiderMixin, Spider):
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
obj: KafkaFeedSpider = super(KafkaFeedSpider, cls).from_crawler(crawler, *args, **kwargs)
obj.setup_kafka_consumer()
return obj
``` |
{
"source": "Joey-Resende/Estudando-Python",
"score": 4
} |
#### File: desafios/mundo03/desafio098.py
```python
from time import sleep
def contador(inicio, fim, passo):
if passo < 0:
passo *= -1
if passo == 0:
passo = 1
print('-=' * 15)
print(f'Contagem de {inicio} até {fim} de {passo} em {passo}')
sleep(1)
if inicio < fim:
cont = inicio
while cont <= fim:
print(f'{cont}', end='...', flush=True)
cont += passo
sleep(0.5)
print('FIM!')
else:
cont = inicio
while cont >= fim:
print(f'{cont}', end='...', flush=True)
cont -= passo
sleep(0.5)
print('FIM!')
contador(1, 10, 1)
contador(10, 0, 2)
print('-=' * 15)
print('Agora e sua vez de personalizar a contagem!')
ini = int(input('Início: '))
fim = int(input('Fim: '))
pas = int(input('Passo: '))
contador(ini, fim, pas)
```
#### File: desafios/mundo03/desafio103.py
```python
def ficha(jog='<Fulaninho>', gol=0):
print(f'O jogador {jog} fez {gol} gol(s) no campeonato.')
print('-' * 30)
n = str(input('Nome do jogador: '))
g = str(input('Números de Gols: '))
if g.isnumeric():
g = int(g)
else:
g = 0
if n.strip() == '':
ficha(gol=g)
else:
ficha(n, g)
```
#### File: outrosEstudos/cursoIntroDunossauro/exercicio_21.py
```python
def eleva_numero(lista_de_numeros, numero_elevado):
lista_resposta = []
for numero in lista_de_numeros:
lista_resposta.append(numero ** numero_elevado)
return lista_resposta
lista_valores = []
for valor in range(10):
lista_valores.append(int(input('Fala um número ai: ')))
dicionario = {
'Lista Padrão': lista_valores,
'Lista Quadrada': eleva_numero(lista_valores, 2),
'Lista Cúbica': eleva_numero(lista_valores, 3)
}
print(dicionario)
``` |
{
"source": "Joey-Resende/PetVet",
"score": 3
} |
#### File: PetVet/users/forms.py
```python
from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
class Userform(UserCreationForm):
email = forms.EmailField(max_length=100)
class Meta:
model = User
        fields = ['username', 'email', 'password1', 'password2']
def clean_email(self):
email = self.cleaned_data['email']
if User.objects.filter(email=email).exists():
raise ValidationError(f'O email {email} já esta em uso')
return email
``` |
{
"source": "Joey-Resende/python-project",
"score": 4
} |
#### File: python-project/sistemaTeste/Banco.py
```python
import sqlite3
class Banco():
def __init__(self):
self.conexao = sqlite3.connect('banco.db')
self.createTable()
def createTable(self):
c = self.conexao.cursor()
c.execute("""create table if not exists petDb (
idpet integer primary key autoincrement,
data text,
hora text,
tutor text,
telefone text,
nomepet text,
especie text,
procedimento text,
sedativo text,
veterinario text,
queixas text)""")
self.conexao.commit()
c.close()
```
#### File: python-project/sistemaTeste/frame02.py
```python
from tkinter import *
from tkinter import ttk
from defmod import *
from datetime import datetime
class application:
def __init__(self, master=None):
self.fonte = ('fira code', 12, 'bold')
self.container1 = Frame(master, bg=cor1)
self.container1.pack(fill='x', pady=(25, 5))
self.container2 = Frame(master, bg=cor1, relief=GROOVE, bd=2)
self.container2.pack(fill='x', padx=20, pady=5, ipady=5)
self.container3 = Frame(master, bg=cor1, relief=GROOVE, bd=2)
self.container3.pack(fill='x', padx=20, pady=5, ipady=5)
self.container4 = Frame(master, bg=cor1, relief=GROOVE, bd=2)
self.container4.pack(fill='x', padx=20, pady=5, ipady=5)
self.container5 = Frame(master, bg=cor1, relief=GROOVE, bd=2)
self.container5.pack(fill='x', padx=20, pady=5, ipady=5)
self.container6 = Frame(master, bg=cor1, relief=GROOVE, bd=2)
self.container6.pack(fill='x', padx=20, pady=5, ipady=5)
self.container7 = Frame(master, bg=cor1, relief=GROOVE, bd=2)
self.container7.pack(fill='x', padx=20, pady=5, ipady=5)
self.container8 = Frame(master, bg=cor1, relief=GROOVE, bd=2)
self.container8.pack(fill='x', padx=20, pady=5, ipady=5)
self.container9 = Frame(master, bg=cor1, relief=GROOVE, bd=2)
self.container9.pack(fill='x', padx=20, pady=5, ipady=5)
self.container10 = Frame(master, bg=cor1)
self.container10.pack(fill='both', expand='yes', padx=10, pady=5)
self.container11 = Frame(master, bg=cor1)
self.container11.pack(fill='x', padx=20, pady=5)
self.container12 = Frame(master, bg=cor1)
self.container12.pack(fill='x', padx=20, pady=5)
self.container13 = Frame(master, bg=cor1)
self.container13.pack(fill='x', side=BOTTOM, padx=20, pady=5)
self.lbltitulo = Label(
self.container1, font=('fira code', 20, 'bold'), text='Clinica Vetérinaria Teste', bg=cor1, fg=cor0)
self.lbltitulo.pack()
dataAtual = datetime.today().strftime('%d/%m/%Y')
self.lbldata = Label(
self.container2, text='Data:', font=self.fonte, width=5, bg=cor1, fg=cor5)
self.lbldata.pack(side=LEFT, padx=(10, 0))
self.lbldata = Label(
self.container2, text=dataAtual, relief=SUNKEN, bg=white, width=11, font=self.fonte)
self.lbldata.pack(side=LEFT)
self.lblhora = Label(
self.container2, text='Hora:', font=self.fonte, width=5, bg=cor1, fg=cor5)
self.lblhora.pack(side=LEFT, padx=(90, 0))
self.txthora = Entry(self.container2, width=6, font=self.fonte)
self.txthora.pack(side=LEFT)
self.lbltutor = Label(
self.container3, text='Tutor:', font=self.fonte, width=6, bg=cor1, fg=cor5)
self.lbltutor.pack(side=LEFT, padx=(10, 0))
self.txttutor = Entry(self.container3, width=31, font=self.fonte)
self.txttutor.pack(side=LEFT)
self.lbltelefone = Label(
self.container4, text='Telefone:', font=self.fonte, width=9, bg=cor1, fg=cor5)
self.lbltelefone.pack(side=LEFT, padx=(10, 0))
self.txttelefone = Entry(self.container4, width=28, font=self.fonte)
self.txttelefone.pack(side=LEFT)
self.lblnomePet = Label(
self.container5, text='Nome do Pet:', font=self.fonte, width=12, bg=cor1, fg=cor5)
self.lblnomePet.pack(side=LEFT, padx=(10, 0))
self.txtnomePet = Entry(self.container5, width=25, font=self.fonte)
self.txtnomePet.pack(side=LEFT)
self.lblespecie = Label(
self.container6, text='Espécie:', font=self.fonte, width=8, bg=cor1, fg=cor5)
self.lblespecie.pack(side=LEFT, padx=(10, 0))
self.txtespecie = Entry(self.container6, width=29, font=self.fonte)
self.txtespecie.pack(side=LEFT)
self.lblprocedimento = Label(
self.container7, text='Procedimento:', font=self.fonte, width=13, bg=cor1, fg=cor5)
self.lblprocedimento.pack(side=LEFT, padx=(10, 0))
self.txtprocedimento = ttk.Combobox(
self.container7, values=('Consulta', 'Retorno', 'Exame', 'Cirúrgia'), width=23, font=self.fonte)
self.txtprocedimento.pack(side=LEFT)
self.lblsedativo = Label(
self.container8, text='Sedativo:', font=self.fonte, width=9, bg=cor1, fg=cor5)
self.lblsedativo.pack(side=LEFT, padx=(10, 0))
self.txtsedativo = ttk.Combobox(
self.container8, values=('NÃO', 'Simples', 'Complexo'), width=27, font=self.fonte)
self.txtsedativo.pack(side=LEFT)
self.lblveterinario = Label(
self.container9, text='Vetérinario:', font=self.fonte, width=12, bg=cor1, fg=cor5)
self.lblveterinario.pack(side=LEFT, padx=(10, 0))
self.txtveterinario = Entry(
self.container9, width=25, font=self.fonte)
self.txtveterinario.pack(side=LEFT)
self.lblqueixa = LabelFrame(self.container10, text='Queixas',
font=self.fonte, width=12, bg=cor1, fg=cor5)
self.lblqueixa.pack(fill='both', expand='yes', padx=10)
self.txtqueixa = Text(self.lblqueixa, height=5, font=self.fonte)
self.txtqueixa.pack(fill='both', expand='yes', padx=5, pady=5)
self.bntinserirPet = Button(self.container11, text='Salvar',
font=self.fonte, cursor='hand2', width=8, bg=cor4, activebackground=cor3, fg=cor0, activeforeground=cor5, highlightbackground=cor3, highlightthickness=1, bd=1)
#self.bntinserirPet['command'] = self.inserirPet
self.bntinserirPet.pack(side=LEFT, padx=(0, 20))
self.bnteditarPet = Button(self.container11, text='Editar',
font=self.fonte, cursor='hand2', width=8, bg=cor4, activebackground=cor3, fg=cor0, activeforeground=cor5, highlightbackground=cor3, highlightthickness=1, bd=1)
# self.bnteditarPet['command'] = self.editarPet
self.bnteditarPet.pack(side=LEFT, padx=(20, 20))
self.bntdeletePet = Button(self.container11, text='Excluir',
font=self.fonte, cursor='hand2', width=8, bg=cor4, activebackground=cor3, fg=cor0, activeforeground=cor5, highlightbackground=cor3, highlightthickness=1, bd=1)
# self.bntdeletePet['command'] = self.deletePet
self.bntdeletePet.pack(side=LEFT, padx=(20, 0))
self.lblmsg = Label(
self.container12, text='Aqui vai mostrar as mensagem do sistema', bg=cor1, fg=cor5)
self.lblmsg['font'] = ('fira code', 12, 'bold', 'italic')
self.lblmsg.pack()
self.lblFooter = Label(
self.container13, text='PetVet® - Cadastro de Pacientes - V 0.0.1', font=self.fonte, bg=cor1, fg=cor3)
self.lblFooter.pack()
def salvarCadastro(self):
pet = petsDb()
pet.hora = self.txthora.get()
pet.tutor = self.txttutor.get()
pet.telefone = self.txttelefone.get()
pet.nomePet = self.txtnomePet.get()
pet.especie = self.txtespecie.get()
pet.procedimento = self.txtprocedimento.get()
pet.sedativo = self.txtsedativo.get()
pet.veterinario = self.txtveterinario.get()
pet.queixas = self.txtqueixas.get()
self.lblmsg["text"] = pet.inserirPet()
self.txthora.delete(0, END)
self.txttutor.delete(0, END)
self.txttelefone.delete(0, END)
self.txtnomePet.delete(0, END)
self.txtespecie.delete(0, END)
self.txtprocedimento.delete(0, END)
self.txtsedativo.delete(0, END)
self.txtveterinario.delete(0, END)
self.txtqueixas.delete(0, END)
def alterarCadastro(self):
pet = petsDb()
pet.hora = self.txthora.get()
pet.tutor = self.txttutor.get()
pet.telefone = self.txttelefone.get()
pet.nomePet = self.txtnomePet.get()
pet.especie = self.txtespecie.get()
pet.procedimento = self.txtprocedimento.get()
pet.sedativo = self.txtsedativo.get()
pet.veterinario = self.txtveterinario.get()
pet.queixas = self.txtqueixas.get()
self.lblmsg["text"] = pet.editarPet()
self.txthora.delete(0, END)
self.txttutor.delete(0, END)
self.txttelefone.delete(0, END)
self.txtnomePet.delete(0, END)
self.txtespecie.delete(0, END)
self.txtprocedimento.delete(0, END)
self.txtsedativo.delete(0, END)
self.txtveterinario.delete(0, END)
self.txtqueixas.delete(0, END)
def excluirCadastro(self):
pet = petsDb()
pet.idPet = self.txtidPet.get()
self.lblmsg["text"] = pet.deletePet()
self.txthora.delete(0, END)
self.txttutor.delete(0, END)
self.txttelefone.delete(0, END)
self.txtnomePet.delete(0, END)
self.txtespecie.delete(0, END)
self.txtprocedimento.delete(0, END)
self.txtsedativo.delete(0, END)
self.txtveterinario.delete(0, END)
self.txtqueixas.delete(0, END)
def buscarCadastro(self):
pet = petsDb()
idPet = self.txtidPet.get()
self.lblmsg["text"] = pet.selectUser(idPet)
self.txtidPet.delete(0, END)
self.txtidPet.insert(INSERT, pet.idPet)
self.txtnome.delete(0, END)
self.txtnome.insert(INSERT, pet.nome)
self.txttelefone.delete(0, END)
self.txttelefone.insert(INSERT, pet.telefone)
self.txtemail.delete(0, END)
self.txtemail.insert(INSERT, pet.email)
self.txtusuario.delete(0, END)
self.txtusuario.insert(INSERT, pet.usuario)
self.txtsenha.delete(0, END)
self.txtsenha.insert(INSERT, pet.senha)
root = Tk()
root.title('PetVet® - Agendamento')
root.minsize(width=450, height=730)
root.maxsize(width=450, height=730)
root.configure(bg=cor1)
application(root)
root.mainloop()
``` |
{
"source": "joeyroth/SearchEngine",
"score": 3
} |
#### File: joeyroth/SearchEngine/search_engine.py
```python
import requests
from bs4 import BeautifulSoup
def downloadUrl(url):
r = requests.get(url)
if r.status_code != 200:
raise Exception("Non-OK status code: {}".format(r.status_code))
return r.text
def parseText(html):
    bs = BeautifulSoup(html, "html.parser")
return bs.select('div.usertext-body')[1].text
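# Example usage (hypothetical URL; the page must contain at least two
# 'div.usertext-body' elements, as on a Reddit comment page):
#   html = downloadUrl("https://www.reddit.com/r/learnpython/comments/example/")
#   print(parseText(html))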
``` |
{
"source": "Joeyrsp/climate-emergency-declarations",
"score": 2
} |
#### File: govtrack/test/test_structures.py
```python
from django.test import TestCase
from django.urls import reverse
from govtrack.models import Country
class NodeTests(TestCase):
def setUp(self):
pass
```
#### File: climate-emergency-declarations/lambda/run_background_task.py
```python
import os
import sys
# look for modules in current dir + /lib
CWD = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(CWD, "lib"))
from django.conf import settings
import ced_bg.settings as app_settings
settings.configure(
INSTALLED_APPS=app_settings.INSTALLED_APPS, DATABASES=app_settings.DATABASES
)
import django
django.setup()
from govtrack.models import Country
def handler(event, context):
print("Called with event data " + str(event))
task = event.get("task", "")
if task == "generate_timeline":
generate_timeline(event, context)
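# A minimal example event for this handler might look like (country code is illustrative):
#   {"task": "generate_timeline", "country_code": "AU"}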
def generate_timeline(event, context):
print("Called with event data " + str(event))
try:
print(
f"Finding country {event['country_code']} from class {Country}={Country.content_type_id()}"
)
country = Country.find_by_code(event["country_code"])
print(f"got country {country}")
country.generate_population_count()
print(f"Finished generating population count for {event['country_code']}")
except KeyError as ex:
print(f"No country code specified: {ex}")
except Country.DoesNotExist as ex:
print(f"No country found with code {event['country_code']} {ex}")
print("All done")
if __name__ == "__main__":
try:
country_code = sys.argv[1]
print(country_code)
generate_timeline({"country_code": country_code}, {})
print("DOne generating timeline")
except:
pass
``` |
{
"source": "joeySeal/mxBak",
"score": 3
} |
#### File: mxBak/libs/process.py
```python
import csv
import os
from .mxaudit import generate_info, write_info
from .mxbak import generate_backup, run_command
from .mxstill import generate_still
class Process(object):
def __init__(self, opts):
self._opts = opts
self._input = self._get_input_data()
if not os.path.exists(self._opts['output_dir']):
os.makedirs(self._opts['output_dir'])
if not os.path.exists(self._opts['images_dir']):
os.makedirs(self._opts['images_dir'])
if not os.path.exists(self._opts['configs_dir']):
os.makedirs(self._opts['configs_dir'])
output = self.process_list(self._input, self._opts)
if self._opts['generateinfo']:
write_info(output, self._opts['info_output_filename'])
@property
def opts(self):
return self._opts
def _get_input_data(self):
result = []
with open(self.opts['input']) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
r = {
'url': row['url'],
'login': row['login'],
'password': row['password'],
}
result.append(r)
return result
def process_list(self, l, opts):
result = []
for item in l:
print('Processing %s' % item['url'])
try:
r = generate_info(item)
result.append(r)
print("Success: %s" % item['url'])
if opts['backupstills']:
print('Backup still to %s' % self._opts['images_dir'])
x = self._opts['x']
y = self._opts['y']
q = self._opts['q']
generate_still(item, folder=self._opts['images_dir'], x=x, y=y, q=q)
if opts['backupconfig']:
                    print('Backup config to %s' % self._opts['configs_dir'])
generate_backup(item, folder=self._opts['configs_dir'])
if opts['runcommand']:
run_command(item)
except IOError as e:
print("IO error '%s' while trying to process URL: %s" % (e, item['url']))
result.append({'url': item['url'], 'status': 'IO ERROR: %s' % e})
except BaseException as e:
print("Unknown error '%s' while trying to process URL: %s" % (e, item['url']))
raise e
return result
```
#### File: mxBak/mxBakOld/main.py
```python
import csv
import re
from urllib import request
import ssl
import os
import datetime
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", type=str, help="specify name of input file, default: 'input.csv'",
default='input.csv')
args = parser.parse_args()
INPUT_FILENAME = args.input
now = datetime.datetime.now()
DIR = 'backups'
CURRENT_DATE = now.strftime("%m%d%Y")
LOGGING_ENABLED = False
def get_input_data():
result = []
with open(INPUT_FILENAME) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
r = {
'url': row['url'],
'login': row['login'],
'password': row['password'],
}
result.append(r)
return result
def _get_html(url, login, password):
url = url + '/admin/m1cam.cfg'
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
p = request.HTTPPasswordMgrWithDefaultRealm()
p.add_password(None, url, login, password)
ssl_handler = request.HTTPSHandler(context=ctx)
auth_handler = request.HTTPBasicAuthHandler(p)
opener = request.build_opener(ssl_handler, auth_handler)
request.install_opener(opener)
result = opener.open(url)
info = result.info()['Content-Disposition']
pattern = r'attachment; filename=\"(.*)\"'
m = re.findall(pattern, info)
fn = m[0]
return result, fn
def get_html(url, login, password):
html = ''
result, fn = _get_html(url, login, password)
html = result.read()
return html, fn
def create_file(html, fn):
dir = os.path.join(DIR, CURRENT_DATE)
if not os.path.exists(dir):
os.makedirs(dir)
path = os.path.join(dir, fn)
with open(path, mode='wb') as f:
f.write(html)
def process_item(item):
html, fn = get_html(item['url'], item['login'], item['password'])
result = {
'url': item['url'],
}
if not html:
return result
create_file(html, fn)
result['status'] = 'OK'
return result
def process_list(l):
result = []
for item in l:
print('Processing %s' % item['url'])
try:
r = process_item(item)
if r:
result.append(r)
print("Success: %s" % item['url'])
except IOError as e:
print("IO error '%s' while trying to process URL: %s" % (e, item['url']))
result.append({'url': item['url'], 'status': 'IO ERROR: %s' % e})
except BaseException as e:
print("Unknown error '%s' while trying to process URL: %s" % (e, item['url']))
raise e
return result
def main():
result = process_list(get_input_data())
if __name__ == "__main__":
main()
``` |
{
"source": "joeyseash/PruneTrain",
"score": 2
} |
#### File: src/custom_arch/custom_resnet50_bt.py
```python
import os
from .arch_utils import layerUtil
k3_s2_p1 = [28, 53]
k1_s2_p0 = [30, 55]
k3_s1_p1 = [1, 3, 7, 10, 13, 16, 19, 22, 25, 28, 32, 35, 38, 41, 44, 47, 50,
53, 57, 60, 63, 66, 69, 72, 75]
arch = {}
for i in range(1, 77):
conv_idx = (i-1)*2
bn_idx = conv_idx +1
if i in k3_s2_p1:
arch[conv_idx] = {'name':'conv'+str(i), 'kernel_size':3, 'stride':2, 'padding':1, 'bias':False}
elif i in k1_s2_p0:
arch[conv_idx] = {'name':'conv'+str(i), 'kernel_size':1, 'stride':2, 'padding':0, 'bias':False}
elif i in k3_s1_p1:
arch[conv_idx] = {'name':'conv'+str(i), 'kernel_size':3, 'stride':1, 'padding':1, 'bias':False}
else:
arch[conv_idx] = {'name':'conv'+str(i), 'kernel_size':1, 'stride':1, 'padding':0, 'bias':False}
arch[bn_idx] = {'name':'bn'+str(i)}
arch[152] = {'name':'avgpool', 'num':8}
arch[153] = {'name':'relu'}
arch[154] = {'name':'fc', 'out_chs':'num_classes'}
def _genDenseArchResNet50BT(model, out_f_dir1, out_f_dir2, arch_name, dense_chs, chs_map, is_gating=False):
# File heading
ctx = 'import torch.nn as nn\n'
ctx += 'import torch\n'
ctx += '__all__ = [\'resnet50_bt_flat\']\n'
ctx += 'class ResNet50BT(nn.Module):\n'
ctx += '\tdef __init__(self, num_classes=10):\n'
ctx += '\t\tsuper(ResNet50BT, self).__init__()\n'
lyr = layerUtil(model, dense_chs)
# Layer definition
for idx in sorted(arch):
ctx += lyr.getLayerDef(arch[idx])
# Architecture sequential
ctx += '\tdef forward(self, x):\n'
ctx += lyr.forward('conv1')
ctx += lyr.forward('bn1')
ctx += lyr.forward('relu', o='_x')
if chs_map != None: chs_map0, chs_map1, chs_map2 = chs_map[0], chs_map[1], chs_map[2]
else: chs_map0, chs_map1, chs_map2 = None, None, None
if is_gating:
ctx += lyr.empty_ch(i='_x')
ctx += lyr.merge('conv1', chs_map0, i='_x', o='_x')
ctx += lyr.resnet_module_pool(chs_map0, chs_map1, is_gating, 2,3,4,5) #1
ctx += lyr.resnet_module(chs_map0, is_gating, 6,7,8) #2
ctx += lyr.resnet_module(chs_map0, is_gating, 9,10,11) #3
ctx += lyr.resnet_module(chs_map0, is_gating, 12,13,14) #4
ctx += lyr.resnet_module(chs_map0, is_gating, 15,16,17) #5
ctx += lyr.resnet_module(chs_map0, is_gating, 18,19,20) #6
ctx += lyr.resnet_module(chs_map0, is_gating, 21,22,23) #7
ctx += lyr.resnet_module(chs_map0, is_gating, 24,25,26) #8
ctx += lyr.resnet_module_pool(chs_map0, chs_map1, is_gating, 27,28,29,30) #9
ctx += lyr.resnet_module(chs_map1, is_gating, 31,32,33) #10
ctx += lyr.resnet_module(chs_map1, is_gating, 34,35,36) #11
ctx += lyr.resnet_module(chs_map1, is_gating, 37,38,39) #12
ctx += lyr.resnet_module(chs_map1, is_gating, 40,41,42) #13
ctx += lyr.resnet_module(chs_map1, is_gating, 43,44,45) #14
ctx += lyr.resnet_module(chs_map1, is_gating, 46,47,48) #15
ctx += lyr.resnet_module(chs_map1, is_gating, 49,50,51) #16
ctx += lyr.resnet_module_pool(chs_map1, chs_map2, is_gating, 52,53,54,55) #17
ctx += lyr.resnet_module(chs_map2, is_gating, 56,57,58) #18
ctx += lyr.resnet_module(chs_map2, is_gating, 59,60,61) #19
ctx += lyr.resnet_module(chs_map2, is_gating, 62,63,64) #20
ctx += lyr.resnet_module(chs_map2, is_gating, 65,66,67) #21
ctx += lyr.resnet_module(chs_map2, is_gating, 68,69,70) #22
ctx += lyr.resnet_module(chs_map2, is_gating, 71,72,73) #23
ctx += lyr.resnet_module(chs_map2, is_gating, 74,75,76) #24
if is_gating:
ctx += lyr.mask('fc', chs_map2, i='_x', o='_x')
ctx += '\t\tx = self.avgpool(_x)\n'
ctx += '\t\tx = x.view(x.size(0), -1)\n'
ctx += lyr.forward('fc')
ctx += '\t\treturn x\n'
# ResNet50BT definition
ctx += 'def resnet50_bt_flat(**kwargs):\n'
ctx += '\tmodel = ResNet50BT(**kwargs)\n'
ctx += '\treturn model\n'
if not os.path.exists(out_f_dir2):
os.makedirs(out_f_dir2)
print ("[INFO] Generating a new dense architecture...")
    with open(os.path.join(out_f_dir1, 'resnet50_bt_flat.py'), 'w') as f_out1:
        f_out1.write(ctx)
    with open(os.path.join(out_f_dir2, arch_name), 'w') as f_out2:
        f_out2.write(ctx)
```
#### File: src/custom/group_lasso_regs.py
```python
import torch
""" A single global group-lasso regularization coefficient
# 1. Exclude depth-wise separable convolution from regularization
# 2. Exclude first layer's input channel and last layer's output from regularization
# 3. Consider multi-layer classifier
# arch: architecture name
# lasso_penalty: group lasso regularization penalty
"""
def get_group_lasso_global(model, arch):
lasso_in_ch = []
lasso_out_ch = []
for name, param in model.named_parameters():
# Lasso added to only the neuronal layers
if ('weight' in name) and any([i for i in ['conv', 'fc'] if i in name]):
if param.dim() == 4:
conv_dw = int(name.split('.')[1].split('conv')[1]) %2 == 0
add_lasso = ('mobilenet' not in arch) or ('mobilenet' in arch and not conv_dw)
# Exclude depth-wise convolution layers from regularization
if add_lasso:
if 'conv1.' not in name:
_in = param.pow(2).sum(dim=[0,2,3])
lasso_in_ch.append( _in )
_out = param.pow(2).sum(dim=[1,2,3])
lasso_out_ch.append( _out )
elif param.dim() == 2:
# Multi-FC-layer based classifier (only fc or fc3 are the last layers)
if ('fc1' in name) or ('fc2' in name):
lasso_out_ch.append( param.pow(2).sum(dim=[1]) )
lasso_in_ch.append( param.pow(2).sum(dim=[0]) )
_lasso_in_ch = torch.cat(lasso_in_ch).cuda()
_lasso_out_ch = torch.cat(lasso_out_ch).cuda()
lasso_penalty_in_ch = _lasso_in_ch.add(1.0e-8).sqrt().sum()
lasso_penalty_out_ch = _lasso_out_ch.add(1.0e-8).sqrt().sum()
lasso_penalty = lasso_penalty_in_ch + lasso_penalty_out_ch
return lasso_penalty
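# Example usage (a sketch; `lasso_coeff` is a hypothetical hyper-parameter name, not
# defined in this file):
#   penalty = get_group_lasso_global(model, arch)
#   loss = criterion(output, target) + lasso_coeff * penalty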
""" Number of parameter-based per-group regularization coefficient
# 1. Exclude depth-wise separable convolution from regularization
# 2. Exclude first layer's input channel and last layer's output from regularization
# 3. Consider multi-layer classifier
# arch: architecture name
# lasso_penalty: group lasso regularization penalty
"""
def get_group_lasso_group(model, arch):
lasso_in_ch = []
lasso_out_ch = []
lasso_in_ch_penalty = []
lasso_out_ch_penalty = []
for name, param in model.named_parameters():
# Lasso added to only the neuronal layers
if ('weight' in name) and any([i for i in ['conv', 'fc'] if i in name]):
if param.dim() == 4:
conv_dw = int(name.split('.')[1].split('conv')[1]) %2 == 0
add_lasso = ('mobilenet' not in arch) or ('mobilenet' in arch and not conv_dw)
w_num_i_ch = param.shape[0] * param.shape[2] * param.shape[3]
w_num_o_ch = param.shape[1] * param.shape[2] * param.shape[3]
# Exclude depth-wise convolution layers from regularization
if add_lasso:
if 'conv1.' not in name:
_in = param.pow(2).sum(dim=[0,2,3])
lasso_in_ch.append( _in )
penalty_tensor = torch.Tensor(param.shape[1]).cuda()
lasso_in_ch_penalty.append( penalty_tensor.new_full([param.shape[1]], w_num_i_ch) )
_out = param.pow(2).sum(dim=[1,2,3])
lasso_out_ch.append( _out )
penalty_tensor = torch.Tensor(param.shape[0]).cuda()
lasso_out_ch_penalty.append( penalty_tensor.new_full([param.shape[0]], w_num_o_ch) )
elif param.dim() == 2:
w_num_i_ch = param.shape[0]
w_num_o_ch = param.shape[1]
if ('fc1' in name) or ('fc2' in name):
lasso_out_ch.append( param.pow(2).sum(dim=[1]) )
penalty_tensor = torch.Tensor(param.shape[0]).cuda()
lasso_out_ch_penalty.append( penalty_tensor.new_full([param.shape[0]], w_num_o_ch) )
lasso_in_ch.append( param.pow(2).sum(dim=[0]) )
penalty_tensor = torch.Tensor(param.shape[1]).cuda()
lasso_in_ch_penalty.append( penalty_tensor.new_full([param.shape[1]], w_num_i_ch) )
_lasso_in_ch = torch.cat(lasso_in_ch).cuda()
_lasso_out_ch = torch.cat(lasso_out_ch).cuda()
lasso_penalty_in_ch = _lasso_in_ch.add(1.0e-8).sqrt()
lasso_penalty_out_ch = _lasso_out_ch.add(1.0e-8).sqrt()
# Extra penalty using the number of parameters in each group
lasso_in_ch_penalty = torch.cat(lasso_in_ch_penalty).cuda().sqrt()
lasso_out_ch_penalty = torch.cat(lasso_out_ch_penalty).cuda().sqrt()
lasso_penalty_in_ch = lasso_penalty_in_ch.mul(lasso_in_ch_penalty).sum()
lasso_penalty_out_ch = lasso_penalty_out_ch.mul(lasso_out_ch_penalty).sum()
lasso_penalty = lasso_penalty_in_ch + lasso_penalty_out_ch
return lasso_penalty
```
#### File: models/cifar/resnet50_bt_flat.py
```python
import torch.nn as nn
import math
__all__ = ['resnet50_bt_flat']
class ResNet50(nn.Module):
# This should be redefined by the channel count
def __init__(self, num_classes=10):
super(ResNet50, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False, stride=1)
self.bn1 = nn.BatchNorm2d(16)
#1
self.conv2 = nn.Conv2d(16, 16, kernel_size=1, padding=0, bias=False, stride=1)
self.bn2 = nn.BatchNorm2d(16)
self.conv3 = nn.Conv2d(16, 16, kernel_size=3, padding=1, bias=False, stride=1)
self.bn3 = nn.BatchNorm2d(16)
self.conv4 = nn.Conv2d(16, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn4 = nn.BatchNorm2d(64)
self.conv5 = nn.Conv2d(16, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn5 = nn.BatchNorm2d(64)
#2
self.conv6 = nn.Conv2d(64, 16, kernel_size=1, padding=0, bias=False, stride=1)
self.bn6 = nn.BatchNorm2d(16)
self.conv7 = nn.Conv2d(16, 16, kernel_size=3, padding=1, bias=False, stride=1)
self.bn7 = nn.BatchNorm2d(16)
self.conv8 = nn.Conv2d(16, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn8 = nn.BatchNorm2d(64)
#3
self.conv9 = nn.Conv2d(64, 16, kernel_size=1, padding=0, bias=False, stride=1)
self.bn9 = nn.BatchNorm2d(16)
self.conv10 = nn.Conv2d(16, 16, kernel_size=3, padding=1, bias=False, stride=1)
self.bn10 = nn.BatchNorm2d(16)
self.conv11 = nn.Conv2d(16, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn11 = nn.BatchNorm2d(64)
#4
self.conv12 = nn.Conv2d(64, 16, kernel_size=1, padding=0, bias=False, stride=1)
self.bn12 = nn.BatchNorm2d(16)
self.conv13 = nn.Conv2d(16, 16, kernel_size=3, padding=1, bias=False, stride=1)
self.bn13 = nn.BatchNorm2d(16)
self.conv14 = nn.Conv2d(16, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn14 = nn.BatchNorm2d(64)
#5
self.conv15 = nn.Conv2d(64, 16, kernel_size=1, padding=0, bias=False, stride=1)
self.bn15 = nn.BatchNorm2d(16)
self.conv16 = nn.Conv2d(16, 16, kernel_size=3, padding=1, bias=False, stride=1)
self.bn16 = nn.BatchNorm2d(16)
self.conv17 = nn.Conv2d(16, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn17 = nn.BatchNorm2d(64)
#6
self.conv18 = nn.Conv2d(64, 16, kernel_size=1, padding=0, bias=False, stride=1)
self.bn18 = nn.BatchNorm2d(16)
self.conv19 = nn.Conv2d(16, 16, kernel_size=3, padding=1, bias=False, stride=1)
self.bn19 = nn.BatchNorm2d(16)
self.conv20 = nn.Conv2d(16, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn20 = nn.BatchNorm2d(64)
#7
self.conv21 = nn.Conv2d(64, 16, kernel_size=1, padding=0, bias=False, stride=1)
self.bn21 = nn.BatchNorm2d(16)
self.conv22 = nn.Conv2d(16, 16, kernel_size=3, padding=1, bias=False, stride=1)
self.bn22 = nn.BatchNorm2d(16)
self.conv23 = nn.Conv2d(16, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn23 = nn.BatchNorm2d(64)
#8
self.conv24 = nn.Conv2d(64, 16, kernel_size=1, padding=0, bias=False, stride=1)
self.bn24 = nn.BatchNorm2d(16)
self.conv25 = nn.Conv2d(16, 16, kernel_size=3, padding=1, bias=False, stride=1)
self.bn25 = nn.BatchNorm2d(16)
self.conv26 = nn.Conv2d(16, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn26 = nn.BatchNorm2d(64)
#9 (Stage 2)
self.conv27 = nn.Conv2d(64, 32, kernel_size=1, padding=0, bias=False, stride=1)
self.bn27 = nn.BatchNorm2d(32)
self.conv28 = nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False, stride=2)
self.bn28 = nn.BatchNorm2d(32)
self.conv29 = nn.Conv2d(32, 128, kernel_size=1, padding=0, bias=False, stride=1)
self.bn29 = nn.BatchNorm2d(128)
self.conv30 = nn.Conv2d(64, 128, kernel_size=1, padding=0, bias=False, stride=2)
self.bn30 = nn.BatchNorm2d(128)
#10
self.conv31 = nn.Conv2d(128, 32, kernel_size=1, padding=0, bias=False, stride=1)
self.bn31 = nn.BatchNorm2d(32)
self.conv32 = nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False, stride=1)
self.bn32 = nn.BatchNorm2d(32)
self.conv33 = nn.Conv2d(32, 128, kernel_size=1, padding=0, bias=False, stride=1)
self.bn33 = nn.BatchNorm2d(128)
#11
self.conv34 = nn.Conv2d(128, 32, kernel_size=1, padding=0, bias=False, stride=1)
self.bn34 = nn.BatchNorm2d(32)
self.conv35 = nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False, stride=1)
self.bn35 = nn.BatchNorm2d(32)
self.conv36 = nn.Conv2d(32, 128, kernel_size=1, padding=0, bias=False, stride=1)
self.bn36 = nn.BatchNorm2d(128)
#12
self.conv37 = nn.Conv2d(128, 32, kernel_size=1, padding=0, bias=False, stride=1)
self.bn37 = nn.BatchNorm2d(32)
self.conv38 = nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False, stride=1)
self.bn38 = nn.BatchNorm2d(32)
self.conv39 = nn.Conv2d(32, 128, kernel_size=1, padding=0, bias=False, stride=1)
self.bn39 = nn.BatchNorm2d(128)
#13
self.conv40 = nn.Conv2d(128, 32, kernel_size=1, padding=0, bias=False, stride=1)
self.bn40 = nn.BatchNorm2d(32)
self.conv41 = nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False, stride=1)
self.bn41 = nn.BatchNorm2d(32)
self.conv42 = nn.Conv2d(32, 128, kernel_size=1, padding=0, bias=False, stride=1)
self.bn42 = nn.BatchNorm2d(128)
#14
self.conv43 = nn.Conv2d(128, 32, kernel_size=1, padding=0, bias=False, stride=1)
self.bn43 = nn.BatchNorm2d(32)
self.conv44 = nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False, stride=1)
self.bn44 = nn.BatchNorm2d(32)
self.conv45 = nn.Conv2d(32, 128, kernel_size=1, padding=0, bias=False, stride=1)
self.bn45 = nn.BatchNorm2d(128)
#15
self.conv46 = nn.Conv2d(128, 32, kernel_size=1, padding=0, bias=False, stride=1)
self.bn46 = nn.BatchNorm2d(32)
self.conv47 = nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False, stride=1)
self.bn47 = nn.BatchNorm2d(32)
self.conv48 = nn.Conv2d(32, 128, kernel_size=1, padding=0, bias=False, stride=1)
self.bn48 = nn.BatchNorm2d(128)
#16
self.conv49 = nn.Conv2d(128, 32, kernel_size=1, padding=0, bias=False, stride=1)
self.bn49 = nn.BatchNorm2d(32)
self.conv50 = nn.Conv2d(32, 32, kernel_size=3, padding=1, bias=False, stride=1)
self.bn50 = nn.BatchNorm2d(32)
self.conv51 = nn.Conv2d(32, 128, kernel_size=1, padding=0, bias=False, stride=1)
self.bn51 = nn.BatchNorm2d(128)
#17 (Stage 3)
self.conv52 = nn.Conv2d(128, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn52 = nn.BatchNorm2d(64)
self.conv53 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False, stride=2)
self.bn53 = nn.BatchNorm2d(64)
self.conv54 = nn.Conv2d(64, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn54 = nn.BatchNorm2d(256)
self.conv55 = nn.Conv2d(128, 256, kernel_size=1, padding=0, bias=False, stride=2)
self.bn55 = nn.BatchNorm2d(256)
#18
self.conv56 = nn.Conv2d(256, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn56 = nn.BatchNorm2d(64)
self.conv57 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False, stride=1)
self.bn57 = nn.BatchNorm2d(64)
self.conv58 = nn.Conv2d(64, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn58 = nn.BatchNorm2d(256)
#19
self.conv59 = nn.Conv2d(256, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn59 = nn.BatchNorm2d(64)
self.conv60 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False, stride=1)
self.bn60 = nn.BatchNorm2d(64)
self.conv61 = nn.Conv2d(64, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn61 = nn.BatchNorm2d(256)
#20
self.conv62 = nn.Conv2d(256, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn62 = nn.BatchNorm2d(64)
self.conv63 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False, stride=1)
self.bn63 = nn.BatchNorm2d(64)
self.conv64 = nn.Conv2d(64, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn64 = nn.BatchNorm2d(256)
#21
self.conv65 = nn.Conv2d(256, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn65 = nn.BatchNorm2d(64)
self.conv66 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False, stride=1)
self.bn66 = nn.BatchNorm2d(64)
self.conv67 = nn.Conv2d(64, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn67 = nn.BatchNorm2d(256)
#22
self.conv68 = nn.Conv2d(256, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn68 = nn.BatchNorm2d(64)
self.conv69 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False, stride=1)
self.bn69 = nn.BatchNorm2d(64)
self.conv70 = nn.Conv2d(64, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn70 = nn.BatchNorm2d(256)
#23
self.conv71 = nn.Conv2d(256, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn71 = nn.BatchNorm2d(64)
self.conv72 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False, stride=1)
self.bn72 = nn.BatchNorm2d(64)
self.conv73 = nn.Conv2d(64, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn73 = nn.BatchNorm2d(256)
#24
self.conv74 = nn.Conv2d(256, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn74 = nn.BatchNorm2d(64)
self.conv75 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False, stride=1)
self.bn75 = nn.BatchNorm2d(64)
self.conv76 = nn.Conv2d(64, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn76 = nn.BatchNorm2d(256)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(256, num_classes)
self.relu = nn.ReLU(inplace=True)
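        # He (Kaiming) normal initialization: conv weights ~ N(0, sqrt(2/fan_out)); BN scale = 1, bias = 0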
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# This part of architecture remains the same
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
_x = self.relu(x)
#1
x = self.conv2(_x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.bn4(x)
_x = self.conv5(_x)
_x = self.bn5(_x)
_x = _x + x
_x = self.relu(_x)
#2
x = self.conv6(_x)
x = self.bn6(x)
x = self.relu(x)
x = self.conv7(x)
x = self.bn7(x)
x = self.relu(x)
x = self.conv8(x)
x = self.bn8(x)
_x = _x + x
_x = self.relu(_x)
#3
x = self.conv9(_x)
x = self.bn9(x)
x = self.relu(x)
x = self.conv10(x)
x = self.bn10(x)
x = self.relu(x)
x = self.conv11(x)
x = self.bn11(x)
_x = _x + x
_x = self.relu(_x)
#4
x = self.conv12(_x)
x = self.bn12(x)
x = self.relu(x)
x = self.conv13(x)
x = self.bn13(x)
x = self.relu(x)
x = self.conv14(x)
x = self.bn14(x)
_x = _x + x
_x = self.relu(_x)
#5
x = self.conv15(_x)
x = self.bn15(x)
x = self.relu(x)
x = self.conv16(x)
x = self.bn16(x)
x = self.relu(x)
x = self.conv17(x)
x = self.bn17(x)
_x = _x + x
_x = self.relu(_x)
#6
x = self.conv18(_x)
x = self.bn18(x)
x = self.relu(x)
x = self.conv19(x)
x = self.bn19(x)
x = self.relu(x)
x = self.conv20(x)
x = self.bn20(x)
_x = _x + x
_x = self.relu(_x)
#7
x = self.conv21(_x)
x = self.bn21(x)
x = self.relu(x)
x = self.conv22(x)
x = self.bn22(x)
x = self.relu(x)
x = self.conv23(x)
x = self.bn23(x)
_x = _x + x
_x = self.relu(_x)
#8
x = self.conv24(_x)
x = self.bn24(x)
x = self.relu(x)
x = self.conv25(x)
x = self.bn25(x)
x = self.relu(x)
x = self.conv26(x)
x = self.bn26(x)
_x = _x + x
_x = self.relu(_x)
#9 (Stage 2)
x = self.conv27(_x)
x = self.bn27(x)
x = self.relu(x)
x = self.conv28(x)
x = self.bn28(x)
x = self.relu(x)
x = self.conv29(x)
x = self.bn29(x)
_x = self.conv30(_x)
_x = self.bn30(_x)
_x = _x + x
_x = self.relu(_x)
#10
x = self.conv31(_x)
x = self.bn31(x)
x = self.relu(x)
x = self.conv32(x)
x = self.bn32(x)
x = self.relu(x)
x = self.conv33(x)
x = self.bn33(x)
_x = _x + x
_x = self.relu(_x)
#11
x = self.conv34(_x)
x = self.bn34(x)
x = self.relu(x)
x = self.conv35(x)
x = self.bn35(x)
x = self.relu(x)
x = self.conv36(x)
x = self.bn36(x)
_x = _x + x
_x = self.relu(_x)
#12
x = self.conv37(_x)
x = self.bn37(x)
x = self.relu(x)
x = self.conv38(x)
x = self.bn38(x)
x = self.relu(x)
x = self.conv39(x)
x = self.bn39(x)
_x = _x + x
_x = self.relu(_x)
#13
x = self.conv40(_x)
x = self.bn40(x)
x = self.relu(x)
x = self.conv41(x)
x = self.bn41(x)
x = self.relu(x)
x = self.conv42(x)
x = self.bn42(x)
_x = _x + x
_x = self.relu(_x)
#14
x = self.conv43(_x)
x = self.bn43(x)
x = self.relu(x)
x = self.conv44(x)
x = self.bn44(x)
x = self.relu(x)
x = self.conv45(x)
x = self.bn45(x)
_x = _x + x
_x = self.relu(_x)
#15
x = self.conv46(_x)
x = self.bn46(x)
x = self.relu(x)
x = self.conv47(x)
x = self.bn47(x)
x = self.relu(x)
x = self.conv48(x)
x = self.bn48(x)
_x = _x + x
_x = self.relu(_x)
#16
x = self.conv49(_x)
x = self.bn49(x)
x = self.relu(x)
x = self.conv50(x)
x = self.bn50(x)
x = self.relu(x)
x = self.conv51(x)
x = self.bn51(x)
_x = _x + x
_x = self.relu(_x)
#17 (Stage 3)
x = self.conv52(_x)
x = self.bn52(x)
x = self.relu(x)
x = self.conv53(x)
x = self.bn53(x)
x = self.relu(x)
x = self.conv54(x)
x = self.bn54(x)
_x = self.conv55(_x)
_x = self.bn55(_x)
_x = _x + x
_x = self.relu(_x)
#18
x = self.conv56(_x)
x = self.bn56(x)
x = self.relu(x)
x = self.conv57(x)
x = self.bn57(x)
x = self.relu(x)
x = self.conv58(x)
x = self.bn58(x)
_x = _x + x
_x = self.relu(_x)
#19
x = self.conv59(_x)
x = self.bn59(x)
x = self.relu(x)
x = self.conv60(x)
x = self.bn60(x)
x = self.relu(x)
x = self.conv61(x)
x = self.bn61(x)
_x = _x + x
_x = self.relu(_x)
#20
x = self.conv62(_x)
x = self.bn62(x)
x = self.relu(x)
x = self.conv63(x)
x = self.bn63(x)
x = self.relu(x)
x = self.conv64(x)
x = self.bn64(x)
_x = _x + x
_x = self.relu(_x)
#21
x = self.conv65(_x)
x = self.bn65(x)
x = self.relu(x)
x = self.conv66(x)
x = self.bn66(x)
x = self.relu(x)
x = self.conv67(x)
x = self.bn67(x)
_x = _x + x
_x = self.relu(_x)
#22
x = self.conv68(_x)
x = self.bn68(x)
x = self.relu(x)
x = self.conv69(x)
x = self.bn69(x)
x = self.relu(x)
x = self.conv70(x)
x = self.bn70(x)
_x = _x + x
_x = self.relu(_x)
#23
x = self.conv71(_x)
x = self.bn71(x)
x = self.relu(x)
x = self.conv72(x)
x = self.bn72(x)
x = self.relu(x)
x = self.conv73(x)
x = self.bn73(x)
_x = _x + x
_x = self.relu(_x)
#24
x = self.conv74(_x)
x = self.bn74(x)
x = self.relu(x)
x = self.conv75(x)
x = self.bn75(x)
x = self.relu(x)
x = self.conv76(x)
x = self.bn76(x)
_x = _x + x
_x = self.relu(_x)
x = self.avgpool(_x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet50_bt_flat(**kwargs):
model = ResNet50(**kwargs)
return model
```
#### File: models/imagenet/resnet50_flat_ori.py
```python
import torch.nn as nn
import math
__all__ = ['resnet50_flat']
class ResNet50(nn.Module):
# This should be redefined by the channel count
def __init__(self, num_classes=1000):
super(ResNet50, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, padding=3, bias=False, stride=2)
self.bn1 = nn.BatchNorm2d(64)
#1
self.conv2 = nn.Conv2d(64, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn2 = nn.BatchNorm2d(64)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False, stride=1)
self.bn3 = nn.BatchNorm2d(64)
self.conv4 = nn.Conv2d(64, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn4 = nn.BatchNorm2d(256)
self.conv5 = nn.Conv2d(64, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn5 = nn.BatchNorm2d(256)
#2
self.conv6 = nn.Conv2d(256, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn6 = nn.BatchNorm2d(64)
self.conv7 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False, stride=1)
self.bn7 = nn.BatchNorm2d(64)
self.conv8 = nn.Conv2d(64, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn8 = nn.BatchNorm2d(256)
#3
self.conv9 = nn.Conv2d(256, 64, kernel_size=1, padding=0, bias=False, stride=1)
self.bn9 = nn.BatchNorm2d(64)
self.conv10 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False, stride=1)
self.bn10 = nn.BatchNorm2d(64)
self.conv11 = nn.Conv2d(64, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn11 = nn.BatchNorm2d(256)
#4 (Stage 2)
self.conv12 = nn.Conv2d(256, 128, kernel_size=1, padding=0, bias=False, stride=1)
self.bn12 = nn.BatchNorm2d(128)
self.conv13 = nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False, stride=2)
self.bn13 = nn.BatchNorm2d(128)
self.conv14 = nn.Conv2d(128, 512, kernel_size=1, padding=0, bias=False, stride=1)
self.bn14 = nn.BatchNorm2d(512)
self.conv15 = nn.Conv2d(256, 512, kernel_size=1, padding=0, bias=False, stride=2)
self.bn15 = nn.BatchNorm2d(512)
#5
self.conv16 = nn.Conv2d(512, 128, kernel_size=1, padding=0, bias=False, stride=1)
self.bn16 = nn.BatchNorm2d(128)
self.conv17 = nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False, stride=1)
self.bn17 = nn.BatchNorm2d(128)
self.conv18 = nn.Conv2d(128, 512, kernel_size=1, padding=0, bias=False, stride=1)
self.bn18 = nn.BatchNorm2d(512)
#6
self.conv19 = nn.Conv2d(512, 128, kernel_size=1, padding=0, bias=False, stride=1)
self.bn19 = nn.BatchNorm2d(128)
self.conv20 = nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False, stride=1)
self.bn20 = nn.BatchNorm2d(128)
self.conv21 = nn.Conv2d(128, 512, kernel_size=1, padding=0, bias=False, stride=1)
self.bn21 = nn.BatchNorm2d(512)
#7
self.conv22 = nn.Conv2d(512, 128, kernel_size=1, padding=0, bias=False, stride=1)
self.bn22 = nn.BatchNorm2d(128)
self.conv23 = nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=False, stride=1)
self.bn23 = nn.BatchNorm2d(128)
self.conv24 = nn.Conv2d(128, 512, kernel_size=1, padding=0, bias=False, stride=1)
self.bn24 = nn.BatchNorm2d(512)
#8 (Stage 3)
self.conv25 = nn.Conv2d(512, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn25 = nn.BatchNorm2d(256)
self.conv26 = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False, stride=2)
self.bn26 = nn.BatchNorm2d(256)
self.conv27 = nn.Conv2d(256, 1024, kernel_size=1, padding=0, bias=False, stride=1)
self.bn27 = nn.BatchNorm2d(1024)
self.conv28 = nn.Conv2d(512, 1024, kernel_size=1, padding=0, bias=False, stride=2)
self.bn28 = nn.BatchNorm2d(1024)
#9
self.conv29 = nn.Conv2d(1024, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn29 = nn.BatchNorm2d(256)
self.conv30 = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False, stride=1)
self.bn30 = nn.BatchNorm2d(256)
self.conv31 = nn.Conv2d(256, 1024, kernel_size=1, padding=0, bias=False, stride=1)
self.bn31 = nn.BatchNorm2d(1024)
#10
self.conv32 = nn.Conv2d(1024, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn32 = nn.BatchNorm2d(256)
self.conv33 = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False, stride=1)
self.bn33 = nn.BatchNorm2d(256)
self.conv34 = nn.Conv2d(256, 1024, kernel_size=1, padding=0, bias=False, stride=1)
self.bn34 = nn.BatchNorm2d(1024)
#11
self.conv35 = nn.Conv2d(1024, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn35 = nn.BatchNorm2d(256)
self.conv36 = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False, stride=1)
self.bn36 = nn.BatchNorm2d(256)
self.conv37 = nn.Conv2d(256, 1024, kernel_size=1, padding=0, bias=False, stride=1)
self.bn37 = nn.BatchNorm2d(1024)
#12
self.conv38 = nn.Conv2d(1024, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn38 = nn.BatchNorm2d(256)
self.conv39 = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False, stride=1)
self.bn39 = nn.BatchNorm2d(256)
self.conv40 = nn.Conv2d(256, 1024, kernel_size=1, padding=0, bias=False, stride=1)
self.bn40 = nn.BatchNorm2d(1024)
#13
self.conv41 = nn.Conv2d(1024, 256, kernel_size=1, padding=0, bias=False, stride=1)
self.bn41 = nn.BatchNorm2d(256)
self.conv42 = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=False, stride=1)
self.bn42 = nn.BatchNorm2d(256)
self.conv43 = nn.Conv2d(256, 1024, kernel_size=1, padding=0, bias=False, stride=1)
self.bn43 = nn.BatchNorm2d(1024)
#14 (Stage 4)
self.conv44 = nn.Conv2d(1024, 512, kernel_size=1, padding=0, bias=False, stride=1)
self.bn44 = nn.BatchNorm2d(512)
self.conv45 = nn.Conv2d(512, 512, kernel_size=3, padding=1, bias=False, stride=2)
self.bn45 = nn.BatchNorm2d(512)
self.conv46 = nn.Conv2d(512, 2048, kernel_size=1, padding=0, bias=False, stride=1)
self.bn46 = nn.BatchNorm2d(2048)
self.conv47 = nn.Conv2d(1024, 2048, kernel_size=1, padding=0, bias=False, stride=2)
self.bn47 = nn.BatchNorm2d(2048)
#15
self.conv48 = nn.Conv2d(2048, 512, kernel_size=1, padding=0, bias=False, stride=1)
self.bn48 = nn.BatchNorm2d(512)
self.conv49 = nn.Conv2d(512, 512, kernel_size=3, padding=1, bias=False, stride=1)
self.bn49 = nn.BatchNorm2d(512)
self.conv50 = nn.Conv2d(512, 2048, kernel_size=1, padding=0, bias=False, stride=1)
self.bn50 = nn.BatchNorm2d(2048)
#16
self.conv51 = nn.Conv2d(2048, 512, kernel_size=1, padding=0, bias=False, stride=1)
self.bn51 = nn.BatchNorm2d(512)
self.conv52 = nn.Conv2d(512, 512, kernel_size=3, padding=1, bias=False, stride=1)
self.bn52 = nn.BatchNorm2d(512)
self.conv53 = nn.Conv2d(512, 2048, kernel_size=1, padding=0, bias=False, stride=1)
self.bn53 = nn.BatchNorm2d(2048)
self.avgpool_adt = nn.AdaptiveAvgPool2d((1,1))
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.fc = nn.Linear(2048, num_classes)
self.relu = nn.ReLU(inplace=True)
# parameter initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
# This part of architecture remains the same
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
_x = self.maxpool(x)
#1
x = self.conv2(_x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.bn4(x)
_x = self.conv5(_x)
_x = self.bn5(_x)
_x = _x + x
_x = self.relu(_x)
#2
x = self.conv6(_x)
x = self.bn6(x)
x = self.relu(x)
x = self.conv7(x)
x = self.bn7(x)
x = self.relu(x)
x = self.conv8(x)
x = self.bn8(x)
_x = _x + x
_x = self.relu(_x)
#3
x = self.conv9(_x)
x = self.bn9(x)
x = self.relu(x)
x = self.conv10(x)
x = self.bn10(x)
x = self.relu(x)
x = self.conv11(x)
x = self.bn11(x)
_x = _x + x
_x = self.relu(_x)
#4 (Stage 2)
x = self.conv12(_x)
x = self.bn12(x)
x = self.relu(x)
x = self.conv13(x)
x = self.bn13(x)
x = self.relu(x)
x = self.conv14(x)
x = self.bn14(x)
_x = self.conv15(_x)
_x = self.bn15(_x)
_x = _x + x
_x = self.relu(_x)
#5
x = self.conv16(_x)
x = self.bn16(x)
x = self.relu(x)
x = self.conv17(x)
x = self.bn17(x)
x = self.relu(x)
x = self.conv18(x)
x = self.bn18(x)
_x = _x + x
_x = self.relu(_x)
#6
x = self.conv19(_x)
x = self.bn19(x)
x = self.relu(x)
x = self.conv20(x)
x = self.bn20(x)
x = self.relu(x)
x = self.conv21(x)
x = self.bn21(x)
_x = _x + x
_x = self.relu(_x)
#7
x = self.conv22(_x)
x = self.bn22(x)
x = self.relu(x)
x = self.conv23(x)
x = self.bn23(x)
x = self.relu(x)
x = self.conv24(x)
x = self.bn24(x)
_x = _x + x
_x = self.relu(_x)
#8
x = self.conv25(_x)
x = self.bn25(x)
x = self.relu(x)
x = self.conv26(x)
x = self.bn26(x)
x = self.relu(x)
x = self.conv27(x)
x = self.bn27(x)
_x = self.conv28(_x)
_x = self.bn28(_x)
_x = _x + x
_x = self.relu(_x)
#9
x = self.conv29(_x)
x = self.bn29(x)
x = self.relu(x)
x = self.conv30(x)
x = self.bn30(x)
x = self.relu(x)
x = self.conv31(x)
x = self.bn31(x)
_x = _x + x
_x = self.relu(_x)
#10
x = self.conv32(_x)
x = self.bn32(x)
x = self.relu(x)
x = self.conv33(x)
x = self.bn33(x)
x = self.relu(x)
x = self.conv34(x)
x = self.bn34(x)
_x = _x + x
_x = self.relu(_x)
#11
x = self.conv35(_x)
x = self.bn35(x)
x = self.relu(x)
x = self.conv36(x)
x = self.bn36(x)
x = self.relu(x)
x = self.conv37(x)
x = self.bn37(x)
_x = _x + x
_x = self.relu(_x)
#12
x = self.conv38(_x)
x = self.bn38(x)
x = self.relu(x)
x = self.conv39(x)
x = self.bn39(x)
x = self.relu(x)
x = self.conv40(x)
x = self.bn40(x)
_x = _x + x
_x = self.relu(_x)
#13
x = self.conv41(_x)
x = self.bn41(x)
x = self.relu(x)
x = self.conv42(x)
x = self.bn42(x)
x = self.relu(x)
x = self.conv43(x)
x = self.bn43(x)
_x = _x + x
_x = self.relu(_x)
#14 (Stage 4)
x = self.conv44(_x)
x = self.bn44(x)
x = self.relu(x)
x = self.conv45(x)
x = self.bn45(x)
x = self.relu(x)
x = self.conv46(x)
x = self.bn46(x)
_x = self.conv47(_x)
_x = self.bn47(_x)
_x = _x + x
_x = self.relu(_x)
#15
x = self.conv48(_x)
x = self.bn48(x)
x = self.relu(x)
x = self.conv49(x)
x = self.bn49(x)
x = self.relu(x)
x = self.conv50(x)
x = self.bn50(x)
_x = _x + x
_x = self.relu(_x)
#16
x = self.conv51(_x)
x = self.bn51(x)
x = self.relu(x)
x = self.conv52(x)
x = self.bn52(x)
x = self.relu(x)
x = self.conv53(x)
x = self.bn53(x)
_x = _x + x
_x = self.relu(_x)
x = self.avgpool_adt(_x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet50_flat(**kwargs):
model = ResNet50(**kwargs)
return model
```
#### File: src/scripts/calc_per_layer_sparsity.py
```python
import os, sys
from os import listdir
from os.path import isfile, join
from statistics import mean
from collections import OrderedDict
from custom.checkpoint_utils_fp32 import Checkpoint
MB = 1024*1024
out_dir = '/path/to/store/output'
model_dir = '/path/to/model'
check_point_names = [f for f in listdir(model_dir) if isfile(join(model_dir, f)) and 'checkpoint' in f]
temp = []
for check_point_name in check_point_names:
if "checkpoint90.tar" in check_point_name:
temp.append(check_point_name)
check_point_names = temp
print(check_point_names)
dataset = 'imagenet'
arch = "resnet50_flat_01"
target_lyr = 1
threshold = 0.0001
depth = 20
num_classes = 100
gen_figs = True
sparse_val_maps = OrderedDict()
def calcConvSparsity(epochs, out_dir):
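    # For each checkpoint epoch, average the per-layer input/output channel densities
    # into a single value and print one "epoch, density" line per epoch.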
avg_in_by_epoch ={}
avg_out_by_epoch ={}
max_epoch = 0
for e in epochs:
lyrs_density = epochs[e]
list_in = []
list_out = []
for l in lyrs_density:
list_in.append(lyrs_density[l]['in_ch'])
list_out.append(lyrs_density[l]['out_ch'])
avg_in_by_epoch[e] = mean(list_in)
avg_out_by_epoch[e] = mean(list_out)
max_epoch = max(max_epoch, e)
print("========= input channel density ==========")
for e in epochs:
print ("{}, {}".format(e, str(avg_in_by_epoch[e])))
print("========= output channel density ==========")
for e in epochs:
print ("{}, {}".format(e, str(avg_out_by_epoch[e])))
def main():
conv_density_epochs = {}
for idx, check_point_name in enumerate(check_point_names):
print ("Processing check_point: " +os.path.join(model_dir, check_point_name))
model = Checkpoint(arch,
dataset,
os.path.join(model_dir, check_point_name),
num_classes,
depth)
if idx == 0 : model.printParams()
# Generate conv layer sparsity
sparse_bi_map, sparse_val_map, num_lyrs, conv_density, model_size, inf_cost =\
model.getConvStructSparsity(threshold, out_dir+"/out_txt")
sparse_val_maps[idx] = sparse_val_map
conv_density_epochs[model.getEpoch()] = conv_density
print ("==> Model_size: {}, inference_cost: {}".format(model_size / MB, inf_cost))
#if gen_figs:
# plotFilterSparsity(check_point_name, sparse_bi_map, threshold, out_dir, num_lyrs)
#if gen_figs:
# plotLayerSparsity(sparse_val_maps, target_lyr)
calcConvSparsity(conv_density_epochs, out_dir)
if __name__ == "__main__":
main()
``` |
{
"source": "joeysim/redash",
"score": 3
} |
#### File: redash/redash/google_oauth.py
```python
import logging
from flask.ext.login import login_user
import requests
from flask import redirect, url_for, Blueprint
from flask_oauth import OAuth
from redash import models, settings
logger = logging.getLogger('google_oauth')
oauth = OAuth()
request_token_params = {'scope': 'https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile', 'response_type': 'code'}
if settings.GOOGLE_APPS_DOMAIN:
request_token_params['hd'] = settings.GOOGLE_APPS_DOMAIN
else:
logger.warning("No Google Apps domain defined, all Google accounts allowed.")
google = oauth.remote_app('google',
base_url='https://www.google.com/accounts/',
authorize_url='https://accounts.google.com/o/oauth2/auth',
request_token_url=None,
request_token_params=request_token_params,
access_token_url='https://accounts.google.com/o/oauth2/token',
access_token_method='POST',
access_token_params={'grant_type': 'authorization_code'},
consumer_key=settings.GOOGLE_CLIENT_ID,
consumer_secret=settings.GOOGLE_CLIENT_SECRET)
blueprint = Blueprint('google_oauth', __name__)
def get_user_profile(access_token):
headers = {'Authorization': 'OAuth '+access_token}
response = requests.get('https://www.googleapis.com/oauth2/v1/userinfo', headers=headers)
if response.status_code == 401:
logger.warning("Failed getting user profile (response code 401).")
return None
return response.json()
def create_and_login_user(name, email):
try:
user_object = models.User.get(models.User.email == email)
if user_object.name != name:
logger.debug("Updating user name (%r -> %r)", user_object.name, name)
user_object.name = name
user_object.save()
except models.User.DoesNotExist:
logger.debug("Creating user object (%r)", name)
user_object = models.User.create(name=name, email=email, groups=models.User.DEFAULT_GROUPS)
login_user(user_object, remember=True)
@blueprint.route('/oauth/google', endpoint="authorize")
def login():
    # TODO: support a `next` redirect parameter
    callback = url_for('.callback', _external=True)
logger.debug("Callback url: %s", callback)
return google.authorize(callback=callback)
@blueprint.route('/oauth/google_callback', endpoint="callback")
@google.authorized_handler
def authorized(resp):
access_token = resp['access_token']
if access_token is None:
logger.warning("Access token missing in call back request.")
return redirect(url_for('login'))
profile = get_user_profile(access_token)
if profile is None:
return redirect(url_for('login'))
create_and_login_user(profile['name'], profile['email'])
return redirect(url_for('index'))
```
#### File: redash/tests/__init__.py
```python
import logging
from unittest import TestCase
from redash import settings
settings.DATABASE_CONFIG = {
'name': 'circle_test',
'threadlocals': True
}
from redash import models
logging.getLogger('peewee').setLevel(logging.INFO)
class BaseTestCase(TestCase):
def setUp(self):
models.create_db(True, True)
models.init_db()
def tearDown(self):
models.db.close_db(None)
models.create_db(False, True)
``` |
{
"source": "joeysnclr/pokemonText",
"score": 4
} |
#### File: joeysnclr/pokemonText/oldclasses.py
```python
class Battle(object):
def __init__(self, player, comp):
self.player = player
self.comp = comp
self.turns = 0
self.winner = None
self.loser = None
def battleMessage(self):
print('\n' * 5)
self.printBattleStatus()
print('\n' * 5)
input()
return self
def printBattleStatus(self):
print('{:45s}{:45s}'.format('You', 'Opponent'))
print('{:45s}{:45s}\n'.format(str(self.player), str(self.comp)))
return self
def printStartMessage(self):
print('Go {}!!'.format(self.player.name))
print('The Opponent is using {}'.format(self.comp.name))
print('\n' * 5)
self.printBattleStatus()
print()
return self
def checkBattleOver(self):
return self.player.stats['CurrHP'] == 0 or self.comp.stats['CurrHP'] == 0
def battleOver(self):
if self.player.stats['CurrHP'] == 0:
self.winner = self.comp
self.loser = self.player
else:
self.winner = self.player
self.loser = self.comp
print('{} beat {}!'.format(self.winner.name, self.loser.name))
print('\n' * 11)
input()
return self
def playerTurn(self):
self.player.printAttacks()
attackIndex = int(input('Enter desired attack # >> '))
print('\n' * 5)
chosenAttack = self.player.attacks[attackIndex]
self.player.attackOpponent(self.comp, chosenAttack)
return self
def compTurn(self):
computerAttack = self.comp.randomAttack()
self.comp.attackOpponent(self.player, computerAttack)
return self
def start(self):
self.printStartMessage()
turns = 0
while not self.checkBattleOver():
turnIndex = turns % 2
if turnIndex == 0:
self.playerTurn()
else:
# computers turn
self.compTurn()
turns += 1
self.battleMessage()
self.battleOver()
class TrainerBattle2(object):
def __init__(self, user, opponent):
self.user = user
self.opponent = opponent
def isBattleOver(self):
        # True when either trainer has no alive pokemon left
        return not self.user.nextAlivePokemon() or not self.opponent.nextAlivePokemon()
def start(self):
# while both trainers have alive pokemon
#
while not self.isBattleOver():
Battle(self.user.nextAlivePokemon(), self.opponent.nextAlivePokemon()).start()
return self
``` |
{
"source": "joeysnclr/Portfolio",
"score": 2
} |
#### File: joeysnclr/Portfolio/tools.py
```python
import json
def getProjects():
with open('./static/data/projects.json') as file:
projects = json.load(file)
return projects
``` |
{
"source": "joeyspacerocks/pexpo",
"score": 3
} |
#### File: joeyspacerocks/pexpo/pexpo.py
```python
import os
import sys
import StringIO
import argparse
import json
import zipfile
from PIL import Image, ImageColor
import xml.etree.cElementTree as ET
# TODO: compose multiple layers
def compose_image(indexes, archive):
file = 'layer' + str(indexes[0]) + '.png'
return Image.open(StringIO.StringIO(archive.read(file)))
def aggregate_sprites(sprites, data):
for d in data:
for s in d['sprites']:
sprites.append(s)
if 'mask' in s: sprites.append(s['mask'])
def pack_images(filename, data):
sprites = []
aggregate_sprites(sprites, data['anims'])
aggregate_sprites(sprites, data['tiles'])
# sort sprites by height
sprites.sort(key = lambda s: s['image'].size[1], reverse = True)
# pack
dest = Image.new("RGBA", (256, 256))
mask = Image.new("1", (256, 256))
dp = dest.load()
mp = mask.load()
# masq.resize(image_width, 1);
# dest.resize(image_width, 1);
for s in sprites:
idx = s['image'].size[0]
idy = s['image'].size[1]
# assert(idx <= image_width);
found = False
for ty in range(2048):
if found: break
# if(ty + idy > dest.dat.size()) {
# masq.resize(image_width, ty + idy);
# dest.resize(image_width, ty + idy);
# }
for tx in range(dest.size[0] - idx):
if found: break
valid = not(mp[tx, ty] or mp[tx, ty + idy - 1] or mp[tx + idx - 1, ty] or mp[tx + idx - 1, ty + idy - 1])
if valid:
for ity in range(idy):
if not valid: break
for itx in range(idx):
if not valid: break
if mp[tx + itx, ty + ity]:
valid = False
if valid:
dest.paste(s['image'], (tx, ty))
mask.paste(int(True), (tx, ty, tx + idx, ty + idy))
s["x"] = tx
s["y"] = ty
s["w"] = idx
s["h"] = idy
found = True
# write image
dest.save(filename, 'png')
def write_meta(filename, imagefile, data):
root = ET.Element("spritesheet", image=imagefile)
aroot = ET.SubElement(root, "anims")
for a in data['anims']:
anode = ET.SubElement(aroot, "a", name=a['name'])
for s in a["sprites"]:
            snode = ET.SubElement(anode, "s", x=str(s['x']), y=str(s['y']), w=str(s['w']), h=str(s['h']), d=str(s['duration']))
            if 'mask' in s:
                mask = s['mask']
                mnode = ET.SubElement(snode, "mask")
                ET.SubElement(mnode, "s", x=str(mask['x']), y=str(mask['y']), w=str(mask['w']), h=str(mask['h']))
sroot = ET.SubElement(root, "sprites")
for t in data['tiles']:
snode = ET.SubElement(sroot, "sprite", name=t['name'])
for s in t["sprites"]:
mnode = ET.SubElement(snode, "s", x=str(s['x']), y=str(s['y']), w=str(s['w']), h=str(s['h']))
if 'mask' in s:
mask = s['mask']
mnode.set('mx', str(mask['x']))
mnode.set('my', str(mask['y']))
tree = ET.ElementTree(root)
tree.write(filename)
def grab_tiles(data, duration, img, mask, base, count, tw, th):
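    # Tiles are addressed by a linear index into a grid of tw x th cells:
    # tile i sits at column (i % tiles_per_row) and row (i / tiles_per_row).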
img_w = img.size[0]
tpr = img_w / tw
x = (base % tpr) * tw
y = (base / tpr) * th
sprites = []
data['sprites'] = sprites
for i in range(count):
box = (x, y, x + tw, y + th)
sprite = {}
sprites.append(sprite)
sprite['image'] = img.crop(box)
if mask is not None:
sprite['mask'] = { 'image': mask.crop(box) }
sprite['duration'] = duration
x += tw
if x >= img_w:
x = 0
y += th
def generate_tileset(path, file, outpng):
archive = zipfile.ZipFile(os.path.join(path, file), 'r')
src = json.loads(archive.read('docData.json'))
tileset = src['tileset']
tw = tileset['tileWidth']
th = tileset['tileHeight']
per_row = tileset['tilesWide']
tile_count = tileset['numTiles']
iw = per_row * tw
ih = (tile_count / per_row) * th
dest = Image.new("RGBA", (iw, ih))
tx = 0
ty = 0
for i in range(tile_count):
tile_img = Image.open(StringIO.StringIO(archive.read("tile%d.png" % i)))
dest.paste(tile_img, (tx * tw, ty * th))
tx += 1
if tx >= per_row:
tx = 0
ty += 1
dest.save(outpng, 'png')
def compile_sprite_data(data, path, file):
archive = zipfile.ZipFile(os.path.join(path, file), 'r')
src = json.loads(archive.read('docData.json'))
canvas = src['canvas']
anims = src['animations']
w = canvas['width']
h = canvas['height']
tw = canvas['tileWidth']
th = canvas['tileHeight']
if tw == 0 or tw > w: tw = w
if th == 0 or th > h: th = h
# compose all visible layers, except for the magic 'mask' layer
layers = []
masks = []
for i, layer in canvas['layers'].items():
if not layer['hidden']:
if layer['name'] == 'mask':
masks.append(i)
else:
layers.append(i)
img = compose_image(layers, archive)
if len(masks) > 0:
mask = compose_image(masks, archive)
else:
mask = None
name = os.path.splitext(file)[0]
if len(anims) > 0:
print ' - ' + name + ' - export animations (' + str(len(anims)) + ')'
for ai in anims.keys():
anim = anims[ai]
base = anim['baseTile']
length = anim['length']
duration = anim['frameDuration']
out = {}
out['name'] = name + '-' + anim['name']
grab_tiles(out, duration, img, mask, base, length, tw, th)
data['anims'].append(out)
else:
print ' - ' + name + ' - export tilemap'
out = { 'name': name }
grab_tiles(out, 0, img, mask, 0, (w / tw) * (h / th), tw, th)
data['tiles'].append(out)
return data
def main(script, argv):
parser = argparse.ArgumentParser(description='Export PNGs and meta-data from PyxelEdit files.')
parser.add_argument('path', help='path to pyxel files (directory or single file)', metavar='<path>')
parser.add_argument('-t', '--tileset', help='generate tileset instead of spritesheet', action='store_true', dest='tileset')
parser.add_argument('-o', '--out', help='filename of assembled PNG', required=True, metavar='<file>', dest='outpng')
args = parser.parse_args()
path = args.path
if args.tileset:
generate_tileset(os.path.dirname(path), os.path.basename(path), args.outpng)
else:
data = { 'anims':[], 'tiles':[] }
if os.path.isfile(path):
compile_sprite_data(data, os.path.dirname(path), os.path.basename(path))
else:
for i in os.listdir(path):
if i.endswith(".pyxel"):
compile_sprite_data(data, path, i)
pack_images(args.outpng, data)
write_meta(os.path.splitext(args.outpng)[0] + '.xml', os.path.basename(args.outpng), data)
if __name__ == '__main__':
sys.exit(main(sys.argv[0], sys.argv[1:]))
``` |
{
"source": "joeyspacerocks/pm",
"score": 2
} |
#### File: pm/teenypm/teenypm.py
```python
from sys import argv
import os
import os.path
import sqlite3
import time
from datetime import datetime, timedelta
from pprint import pprint
import math
import re
import humanize
import argparse
import importlib.util
from collections.abc import MutableMapping
import uuid
from rich import box
from rich.console import Console
from rich.table import Table, Column
from rich.style import Style
from rich.theme import Theme
__version__ = '0.1.8'
DEFAULT_EDITOR = 'vi +<line>'
active_plugins = []
class Entry:
def __init__(self, id, state, msg, points, remote_id, tags, history, deadline):
self.id = id
self.state = state
self.open = state != 'done'
self.msg = msg
self.points = points
self.remote_id = remote_id
self.tags = tags
self.history = history
self.deadline = deadline
for e in history:
if e.event == 'create':
self.created = e.date
elif e.event == 'done':
self.done = e.date
def summary(self):
parts = list(filter(lambda line: line != '', self.msg.split('\n')))
if len(parts) > 1:
return '{} [bold white on blue][[+]]'.format(parts[0])
elif len(parts) > 0:
return parts[0]
else:
            return '<empty description>'
def displayid(self):
if self.remote_id:
return '[id.local]{:>4}[/] [id.remote]..{:0>2}[/]'.format(str(self.id), self.remote_id)
else:
return '[id.local]{:>4}[/]'.format(str(self.id))
class Event:
def __init__(self, entry, event, date):
self.entry = entry
self.event = event
self.date = date
class Config(MutableMapping):
def __init__(self, db):
self.storage = dict()
self.db = db
c = db.cursor()
for row in c.execute('SELECT key, value FROM config'):
self[row['key']] = row['value']
def __getitem__(self, key):
return self.storage[key]
def __setitem__(self, key, item):
self.storage[key] = item
c = self.db.cursor()
c.execute('INSERT INTO config(key, value) VALUES(?, ? ) ON CONFLICT(key) DO UPDATE SET value=?', (key, item, item))
self.db.commit()
def __delitem__(self, key):
del self.storage[key]
c = self.db.cursor()
c.execute('DELETE FROM config WHERE key = ?', (key,))
self.db.commit()
def __iter__(self):
return iter(self.storage)
def __len__(self):
return len(self.storage)
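# Config behaves like a dict backed by the sqlite `config` table; writes are
# persisted immediately, e.g. (sketch):
#   config = Config(db)
#   config['plugin.github'] = 'true'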
class TeenyPM():
def __init__(self, config):
self.config = config
def fetch_entries(self, tags, id):
return active_plugins[0].fetch_issues(self.config, tags, id)
def add_entry(self, tags, msg, points):
e = Entry(None, 'backlog', msg, points, None, tags, [], None)
for p in reversed(active_plugins):
p.add_entry(self.config, e)
return e
def edit_entry(self, issue, msg):
for p in reversed(active_plugins):
id = p.update_entry(self.config, issue, msg)
def feature_tag(self, tag):
for p in reversed(active_plugins):
id = p.add_feature(self.config, tag)
def unfeature_tag(self, tag):
for p in reversed(active_plugins):
id = p.remove_feature(self.config, tag)
def start_entry(self, issue, deadline = None):
for p in reversed(active_plugins):
p.start_entry(self.config, issue, deadline)
def end_entry(self, issue):
for p in reversed(active_plugins):
p.end_entry(self.config, issue)
def backlog_entry(self, issue):
for p in reversed(active_plugins):
p.backlog_entry(self.config, issue)
def tag_entry(self, issue, tag):
for p in reversed(active_plugins):
p.tag_entry(self.config, issue, tag)
def untag_entry(self, issue, tag):
for p in reversed(active_plugins):
p.untag_entry(self.config, issue, tag)
def remove_entry(self, issue):
for p in reversed(active_plugins):
p.remove_entry(self.config, issue)
def init_db():
filename = 'pm.db'
if not os.path.isfile(filename):
print('No teenypm database found - creating new one: ' + filename)
db = sqlite3.connect(filename, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
db.row_factory = sqlite3.Row
c = db.cursor()
schema_version = c.execute('PRAGMA user_version').fetchone()[0]
if schema_version == 0:
c.execute('CREATE TABLE IF NOT EXISTS entry (msg TEXT, points INT, state TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS tag (tag TEXT, entry INT)')
c.execute('CREATE TABLE IF NOT EXISTS history (entry INT, event TEXT, date INT)')
c.execute('CREATE TABLE IF NOT EXISTS feature (tag TEXT)')
c.execute('CREATE TABLE IF NOT EXISTS deadline (entry INT, date INT)')
c.execute('CREATE TABLE IF NOT EXISTS config (key TEXT PRIMARY KEY, value TEXT)')
c.execute('PRAGMA user_version = 1')
schema_version += 1
if schema_version == 1:
c.execute('ALTER TABLE entry ADD COLUMN remote_id TEXT')
c.execute('PRAGMA user_version = 2')
schema_version += 1
if schema_version == 2:
c.execute('INSERT INTO config (key, value) VALUES(?, ?)', ('project.id', str(uuid.uuid4())))
c.execute('PRAGMA user_version = 3')
schema_version += 1
db.commit()
return db
def display_date(date, full_date):
if full_date:
return date.strftime('%Y-%m-%d %H:%M')
else:
now = datetime.now()
return humanize.naturaltime(now - date)
def show_entries(tpm, console, args):
tags = args.tags or []
if tags and ((tags.startswith('PM') and tags[2:].isdigit()) or tags.isdigit()):
show_full_entry(console, tpm.fetch_entries((), tags)[0])
else:
show_entries_internal(tpm, console, tags, args.all, args.dates)
def doing_entries(tpm, console, args):
show_entries_internal(tpm, console, [], False, args.dates, True)
def show_entries_internal(tpm, console, tags, all, full_dates, started = False):
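    # Entries are grouped into buckets keyed by the first of their tags that is
    # flagged as a feature; entries without a feature tag fall into 'misc'.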
total = 0
open = 0
entries = tpm.fetch_entries(tags, None)
features = active_plugins[0].fetch_features(tpm.config)
buckets = {}
for e in entries:
total += 1
if not all and not e.open:
continue
if started and not e.state == 'doing':
continue
if e.open:
open += 1
bt = 'misc'
for t in list(e.tags):
if t in features:
e.tags.remove(t)
bt = t
break
if bt in buckets:
buckets[bt].append(e)
else:
buckets[bt] = [e]
now = datetime.now().strftime('%Y-%m-%d %H:%M')
console.print('[white][bold]{}[/bold]/{}[/white] issues [dim]| {} | teenypm v{}'.format(open, total, now, __version__), highlight=False)
table = Table(
"id",
"tags",
Column("msg", style = "msg"),
Column("dates", justify = 'right'),
"points",
show_header = False,
show_edge = False,
box = box.SIMPLE,
padding = [0, 0, 0, 1]
)
for b in buckets:
bstyle = 'bucket.done'
for e in buckets[b]:
if e.open:
bstyle = 'bucket.open'
break
table.add_row('{} ({})'.format(b, len(buckets[b])), None, None, None, None, style = bstyle)
for e in buckets[b]:
row_style = None
if all and not e.open:
row_style = Style(dim = True)
dates = 'closed {}'.format(display_date(e.done, full_dates))
elif e.state == 'doing':
row_style = 'state.doing'
now = datetime.now()
if e.deadline:
if now > e.deadline:
dates = '[date.overdue]due {}'.format(display_date(e.deadline, full_dates))
else:
dates = '[date.soon]{}'.format(display_date(e.deadline, full_dates))
else:
dates = '[date.created]{}'.format('{}'.format(display_date(e.created, full_dates)))
else:
dates = '[date.created]{}'.format('{}'.format(display_date(e.created, full_dates)))
tags = ['[tag.default]{}[/]'.format(t) if t != 'bug' or e.deadline else '[tag.bug]bug[/]' for t in sorted(e.tags)]
display_tags = ','.join(tags)
msg = e.summary()
if e.points > 1:
                points = '[points]{}[/]'.format(e.points)
else:
points = ''
table.add_row(e.displayid(), display_tags, e.summary(), dates, points, style = row_style)
console.print(table)
def show_full_entry(console, e):
    tags = ['[tag.default]{}[/]'.format(t) if t != 'bug' or e.deadline else '[tag.bug]bug[/]' for t in sorted(e.tags)]
display_tags = ','.join(tags)
dates = e.created.strftime('%Y-%m-%d %H:%M')
if not e.open:
dates += ' -> ' + e.done.strftime('%Y-%m-%d %H:%M')
console.print(('{} | {} | [date.created]{}[/] | [points]{}').format(e.displayid(), display_tags, dates, e.points))
console.print('[msg]' + e.msg)
def show_tags(tpm, console, args):
c = tpm.db.cursor()
for row in c.execute('SELECT tag, COUNT(*) as count FROM tag GROUP BY tag ORDER BY tag'):
console.print('[tag.default]{}[/] - [msg]{}[/]'.format(row['tag'], row['count']))
def add_entry(tpm, console, args):
msg = args.desc
if args.edit:
content = from_editor(msg, 0)
if content != None:
msg = ''.join(content)
tags = args.tag.split(',') if args.tag else []
e = tpm.add_entry(tags, msg, args.points)
console.print('Added {}: [msg]{}'.format(e.displayid(), e.summary()))
def edit_entry(tpm, console, args):
content = from_editor(args.issue.msg, 0)
if content != None:
msg = ''.join(content)
tpm.edit_entry(args.issue, msg)
console.print('Modified {}: [msg]{}'.format(args.issue.displayid(), args.issue.summary()))
def feature_tag(tpm, console, args):
tag = args.tag
if args.remove:
tpm.unfeature_tag(tag)
console.print('Tag [tag]{}[/] is no longer a feature'.format(tag))
else:
tpm.feature_tag(tag)
console.print('Tag [tag]{}[/] is now a feature'.format(tag))
def start_entry(tpm, console, args):
id = args.id
tf = None
if args.timeframe:
import dateparser # bad style, but dateparser very slow to import
now = datetime.now()
tf_str = args.timeframe
tf = dateparser.parse(tf_str, settings={'RELATIVE_BASE': now}).replace(hour=23, minute=59, second=0)
if tf < now:
console.print("[error]ERROR: time flows inexorably forwards.\nPromising to complete an issue in the past will bring you nothing but despair.")
quit()
tpm.start_entry(args.issue, tf)
console.print('Started {}'.format(args.issue.displayid()))
if tf:
console.print('Your deadline is midnight [date.soon]{}'.format(tf.strftime('%Y-%m-%d')))
def backlog_entry(tpm, console, args):
tpm.backlog_entry(args.issue)
console.print('Moved {} to backlog'.format(args.issue.displayid()))
def end_entry(tpm, console, args):
tpm.end_entry(args.issue)
console.print('Ended {}'.format(args.issue.displayid()))
def end_entry_and_commit(tpm, console, args):
end_entry(tpm, console, args)
os.system('git commit -a -m "{}"'.format('PM{:04} - {}'.format(args.issue.id, args.issue.msg)))
os.system('git lg -n 1')
def tag_entry(tpm, console, args):
tag = args.tag
id = args.id
issue = args.issue
if args.remove:
if tag in issue.tags:
tpm.untag_entry(issue, tag)
console.print('Untagged {} with [tag.default]{}'.format(issue.displayid(), tag))
else:
console.print('{} wasn\'t tagged with [tag.default]{}'.format(issue.displayid(), tag))
else:
if tag not in issue.tags:
tpm.tag_entry(issue, tag)
console.print('Tagged {} with [tag.default]{}'.format(issue.displayid(), tag))
else:
console.print('{} already tagged with [tag.default]{}'.format(issue.displayid(), tag))
def remove_entry(tpm, console, args):
tpm.remove_entry(args.issue)
console.print('Deleted {}'.format(args.issue.displayid()))
def from_editor(start_text, start_line):
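    # The EDITOR environment variable may contain <file> and <line> placeholders:
    # <line> is replaced with the starting cursor line and <file> with the temp file
    # path; if <file> is absent, the temp file name is appended (e.g. "vi +<line>").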
tmp_file = '_pm_.txt'
if start_text:
f = open(tmp_file, "w")
f.write(start_text)
f.close()
ed_cmd = os.getenv('EDITOR', DEFAULT_EDITOR).replace('<line>', str(start_line))
if '<file>' in ed_cmd:
ed_cmd = ed_cmd.replace('<file>', tmp_file)
else:
ed_cmd += ' ' + tmp_file
os.system(ed_cmd)
if not os.path.isfile(tmp_file):
return []
with open(tmp_file) as f:
content = [line for line in list(f) if not line.startswith('#')]
if len(content)>0:
content[-1] = content[-1].rstrip('\n')
os.remove(tmp_file)
return content
def make_a_plan(tpm, console, args):
tag = args.tag
help_text = '# One line for each issue, with optional tags and points.\n# <desc> [[<tag>,...]] [points]\n# For example:\n# Sort out the thing there [bug] 2\n\n'
content = from_editor(help_text, help_text.count('\n') + 1)
for line in content:
line = line.strip()
m = re.match(r"^(?P<msg>.+?)\s*(\[(?P<tags>[^\]]+)\])?\s*(?P<points>\d+)?$", line)
if m:
task = m.groupdict()
if task['tags']:
tags = task['tags'].split(',')
else:
tags = []
tags.append('task')
if tag:
tags.append(tag)
if task['points']:
points = task['points']
else:
points = 1
e = tpm.add_entry(tags, task['msg'], points)
            console.print('Added {}: [msg]{}'.format(e.displayid(), e.summary()))
def sync(config, force):
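    # Two-way sync sketch: unless forced, sync at most once per hour; local issues
    # without a remote_id are pushed to the remote plugin, and remote issues whose
    # remote_id is unknown locally are pulled into the local store.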
now = int(time.time())
if not force:
last_sync = int(config.get('last.sync', 0))
if now - last_sync < 60 * 60:
return
config['last.sync'] = now
if len(active_plugins) == 1:
return
p1 = active_plugins[0]
p2 = active_plugins[1]
local_lookup = {}
local_issues = p1.fetch_issues(config)
remote_issues = p2.fetch_issues(config)
for issue in local_issues:
if issue.remote_id:
local_lookup[issue.remote_id] = issue
elif issue.msg != '':
p2.add_entry(config, issue)
print('Local issue pushed: {} - {}'.format(issue.displayid(), issue.summary()))
for issue in remote_issues:
if issue.remote_id not in local_lookup:
p1.add_entry(config, issue)
print('GitHub issue pulled: GH #{} - {}'.format(issue.remote_id, issue.summary()))
def map_id(id):
if id.startswith('PM'):
return id[2:]
return id
def remote_plugin(tpm, console, args):
plugin = import_plugin(args.plugin)
if not plugin:
console.print('[error]ERROR: plugin [white bold]{}[/] not found - available plugins are:'.format(args.plugin))
for ap in available_plugins():
console.print(' - [white]' + ap)
exit(0)
config = tpm.config
plugin_cp = 'plugin.' + args.plugin
plugin_enabled = plugin_cp in config
if args.remove:
if not plugin_enabled:
console.print('Remote [remote]{}[/] has not been setup'.format(args.plugin))
else:
plugin.remove(config)
del config[plugin_cp]
            active_plugins[:] = [p for p in active_plugins if p.__name__ != plugin.__name__]
console.print('Removed [remote]{}[/] remote'.format(args.plugin))
else:
if plugin_enabled:
console.print('Remote [remote]{}[/] is already configured'.format(args.plugin))
else:
if plugin.setup(config):
config[plugin_cp] = 'true'
                active_plugins.append(plugin)
console.print('Remote [remote]{}[/] has been set up .. syncing issues ..'.format(args.plugin))
sync(config, True)
def available_plugins():
plugins_dir = os.path.join(os.path.dirname(__file__), 'plugins')
plugins = {}
for f in [f for f in os.listdir(plugins_dir) if f.endswith('.py')]:
plugins[f.split('.')[0]] = os.path.join(plugins_dir, f)
return plugins
def import_plugin(p):
plugins = available_plugins()
if p not in plugins:
return None
spec = importlib.util.spec_from_file_location("plugins." + p, plugins[p])
plugin = importlib.util.module_from_spec(spec)
spec.loader.exec_module(plugin)
return plugin
def activate_plugins(config):
active_plugins.append(import_plugin('local'))
for key in config.keys():
if key.startswith('plugin.'):
active_plugins.append(import_plugin(key.split('.')[1]))
def main():
db = init_db()
config = Config(db)
tpm = TeenyPM(config)
activate_plugins(tpm.config)
parser = argparse.ArgumentParser(description="teenypm - a teeny, tiny CLI project manager | v" + __version__)
parser.add_argument('-a', '--all', help='Show all issues, even closed', action="store_true")
parser.add_argument('-d', '--dates', help='Show full dates', action="store_true")
parser.add_argument('-s', '--force-sync', help='Force a sync with remote store', action="store_true")
subparsers = parser.add_subparsers(title='subcommands', metavar="<command>", help='sub-command help')
p_show = subparsers.add_parser('show', help='show issues')
    p_show.add_argument('tags', nargs="?", type=str, help='Filter by comma-separated tags')
p_show.add_argument('-a', '--all', help='Show all issues, even closed', action="store_true")
p_show.add_argument('-d', '--dates', help='Show full dates', action="store_true")
p_show.set_defaults(func=show_entries)
p_show = subparsers.add_parser('doing', help='show issues in progress')
p_show.add_argument('-d', '--dates', help='Show full dates', action="store_true")
p_show.set_defaults(func=doing_entries)
p_add = subparsers.add_parser('add', help='add an issue')
p_add.add_argument('desc', type=str, help='issue description')
p_add.add_argument('points', type=int, nargs='?', default=1, help='effort points (defaults to 1)')
    p_add.add_argument('-t', '--tag', type=str, help='comma-separated tags')
    p_add.add_argument('-e', '--edit', help='open an editor to write a longer description', action="store_true")
p_add.set_defaults(func=add_entry)
p_edit = subparsers.add_parser('edit', help='edit an issue description')
p_edit.add_argument('id', type=str, help='issue id')
p_edit.set_defaults(func=edit_entry)
p_remove = subparsers.add_parser('rm', help='remove an issue')
p_remove.add_argument('id', type=str, help='issue id')
p_remove.set_defaults(func=remove_entry)
p_plan = subparsers.add_parser('plan', help='make a plan')
p_plan.add_argument('tag', type=str, nargs='?', help='tag to add to all issues')
p_plan.set_defaults(func=make_a_plan)
p_start = subparsers.add_parser('start', help='mark an issue as started')
p_start.add_argument('id', type=str, help='issue id')
p_start.add_argument('timeframe', type=str, nargs='?', help='promised timeframe')
p_start.set_defaults(func=start_entry)
p_backlog = subparsers.add_parser('backlog', help='return an issue to the backlog')
p_backlog.add_argument('id', type=str, help='issue id')
p_backlog.set_defaults(func=backlog_entry)
p_end = subparsers.add_parser('end', help='mark an issue as ended')
p_end.add_argument('id', type=str, help='issue id')
p_end.set_defaults(func=end_entry)
# tag management
p_tags = subparsers.add_parser('tags', help='list tags')
p_tags.set_defaults(func=show_tags)
p_tag = subparsers.add_parser('tag', help='tag an issue')
p_tag.add_argument('tag', type=str, help='tag')
p_tag.add_argument('id', type=str, help='issue id')
p_tag.add_argument('-r', '--remove', help='remove tag from issue', action='store_true')
p_tag.set_defaults(func=tag_entry)
p_feature = subparsers.add_parser('feature', help='flag a tag as a feature')
p_feature.add_argument('tag', type=str, help='tag to feature')
p_feature.add_argument('-r', '--remove', help='remove feature flag from tag', action='store_true')
p_feature.set_defaults(func=feature_tag)
p_commit = subparsers.add_parser('commit', help='mark an issue as ended and git commit changes')
p_commit.add_argument('id', type=str, help='issue id')
p_commit.set_defaults(func=end_entry_and_commit)
p_remote = subparsers.add_parser('remote', help='integrate a remote API')
    p_remote.add_argument('plugin', type=str, help='supported: github')
p_remote.add_argument('-r', '--remove', help='remove remote', action='store_true')
p_remote.set_defaults(func=remote_plugin)
args = parser.parse_args()
console = Console(theme = Theme({
"id.local": "yellow",
"id.remote": "dim white",
"tag.default": "cyan",
"tag.bug": "bold red",
"date.overdue": "bold white on red",
"date.soon": "bold yellow",
"date.created": "dim",
"state.doing": "bold",
"bucket.done": "dim white",
"bucket.open": "bold white",
"points": "cyan",
"msg" : "white",
"error": "red",
"remote": "bold white"
}))
if hasattr(args, 'id'):
args.id = map_id(args.id)
entries = tpm.fetch_entries([], args.id)
if len(entries) == 0:
console.print('[id.local]{:>4}[/] doesn\'t exist'.format(args.id))
exit(0)
args.issue = entries[0]
sync(config, args.force_sync)
if not hasattr(args, 'func'):
show_entries_internal(tpm, console, [], args.all, args.dates)
else:
args.func(tpm, console, args)
db.close()
if __name__ == '__main__':
main()
``` |
{
"source": "joeyster/Travel-Planner",
"score": 3
} |
#### File: Travel-Planner/contents/best_first.py
```python
from address import Address
from google_maps_utility import GoogleMapsUtility
import heapq
import copy
class Best_First():
def __init__(self, start, hit_list, heuristic):
self.hit_list = copy.deepcopy(hit_list)
self.start = start
self.best_distance = 0
self.best_time = 0
self.route = ""
self.total_count = len(hit_list)+1
self.heuristic = heuristic
self.open = []
heapq.heapify(self.open)
self.closed = []
heapq.heapify(self.closed)
self.algorithm(Address(self.start, 0, 0, "", self.hit_list))
def __str__(self):
if self.heuristic == "distance":
return f"~\n~~~\n~~~~~\n\t\tbest route: {self.route}\n\t\tbest distance: {self.meters_to_miles()} miles\n~~~~~\n~~~\n~"
elif self.heuristic == "time":
return f"~\n~~~\n~~~~~\n\t\tbest route: {self.route}\n\t\tbest time: {self.seconds_to_time()}\n~~~~~\n~~~\n~"
def algorithm(self, next_state):
root = next_state
self.hit_list = root.hit_list
if len(root.hit_list) == 0:
#saving data
foo = root
for point in range(self.total_count):
self.route = foo.address + " -> " + self.route
self.best_distance = self.best_distance + foo.distance
self.best_time = self.best_time + foo.time
foo = foo.parent
self.route = self.route[:-4]
return ''
for point in range(len(root.hit_list)):
point_A = root.address
point_B = root.hit_list[point]
connection = GoogleMapsUtility()
distance, time = connection.directionsRequest(point_A, point_B)
interior_list = copy.deepcopy(root.hit_list)
interior_list.remove(point_B)
if self.heuristic == "distance":
heapq.heappush(self.open, (distance, Address(point_B, distance, time, root, interior_list)))
elif self.heuristic == "time":
heapq.heappush(self.open, (time, Address(point_B, distance, time, root, interior_list)))
#add children to root. key = address, value = Address
root.children[point_B] = Address(point_B, distance, time, root, interior_list)
#pop min off min-heap
popped = heapq.heappop(self.open) #(miles/time, Address)
heapq.heappush(self.closed, popped)
addr = popped[1]
#remove explored from list
self.hit_list = addr.hit_list
self.algorithm(addr)
def meters_to_miles(self):
return round((self.best_distance/1000) * 0.62137)
def seconds_to_time(self):
# https://www.w3resource.com/python-exercises/python-basic-exercise-65.php
time = self.best_time
day = time // (24 * 3600)
time = time % (24 * 3600)
hour = time // 3600
time %= 3600
minutes = time // 60
return f"{day} days {hour} hours {minutes} minutes"
``` |
{
"source": "joeystevens00/mormo",
"score": 2
} |
#### File: mormo/mormo/cli.py
```python
import json
from multiprocessing import Process
import sys
import tempfile
import time
import click
import requests
import uvicorn
from mormo.api import app
from mormo.convert import OpenAPIToPostman
from mormo.postman_test import run_newman
@click.group()
def cli():
"""OpenAPI to Postman Collection V2 Conversion."""
pass
@cli.command()
def api():
uvicorn.run(app, host="127.0.0.1", port=8001, log_level="info")
def generate_schema(infile, outfile, test_file, **kwargs):
oas = OpenAPIToPostman(path=infile, test_data_file=test_file, **kwargs)
postman_collection = oas.to_postman_collection_v2()
with open(outfile, 'w') as f:
json.dump(postman_collection.to_dict(), f)
@cli.command()
@click.option('-i', '--in', 'in_file', type=click.Path(),
help='Path to the OpenAPI Schema to convert (YAML or JSON)', required=True)
@click.option('-t', '--test_file', 'test_file', type=click.Path(),
help='Path to test config (YAML or JSON)')
@click.option('-o', '--out', 'out_file', type=click.Path(),
help='The path to write the Postman Collection to.')
@click.option('--test', is_flag=True,
help='Execute the generated schema with newman.')
@click.option('--test_mormo_api', is_flag=True,
help='Spin up a mormo API with the binds in host.')
@click.option('--host', 'host', help='Target API')
@click.option('--verbose', is_flag=True,
help='Verbose option in newman.')
def run(in_file, test_file, out_file, test, test_mormo_api, host, verbose):
"""Generate Postman Collections."""
if not out_file:
temp = tempfile.NamedTemporaryFile()
out_file = temp.name
if test_mormo_api:
test = True
addr, port = host.split('/')[-1].split(':')
port = port or 80
proc = Process(
target=uvicorn.run,
args=(app,),
kwargs={
"host": addr,
"port": int(port),
"log_level": "info",
},
daemon=True,
)
proc.start()
time.sleep(1)
with open(in_file, 'w') as f:
json.dump(requests.get(f'{host}/openapi.json').json(), f)
generate_schema(in_file, out_file, test_file, host=host, verbose=verbose)
if test:
res = run_newman(out_file, host=host, verbose=verbose)
if test_mormo_api:
proc.terminate()
sys.exit(res.code)
@cli.command()
@click.option(
'-c', '--config', 'test_config', type=click.Path(),
help='path to test config (YAML or JSON)',
)
@click.option(
'--test_mormo_api', is_flag=True,
help='spin up a mormo API with the binds in target',
)
@click.option(
'-t', '--target', 'target',
help='Target API with path to schema (e.g. http://localhost:8000/openapi.json)',
)
@click.option(
'-v', '--verbose', is_flag=True, help='verbose option in newman.',
)
def test(test_config, target, test_mormo_api, verbose):
"""Run Mormo Tests."""
temp_out = tempfile.NamedTemporaryFile(suffix='.json')
temp_in = tempfile.NamedTemporaryFile(suffix='.json')
out_file = temp_out.name
in_file = temp_in.name
host = target.split('/')[2:][0]
if test_mormo_api:
test = True
addr, port = host.split(':')
port = port or 80
proc = Process(
target=uvicorn.run,
args=(app,),
kwargs={
"host": addr,
"port": int(port),
"log_level": "info",
},
daemon=True,
)
proc.start()
time.sleep(1)
with open(in_file, 'w') as f:
json.dump(requests.get(target).json(), f)
generate_schema(in_file, out_file, test_config, host=host, verbose=verbose)
res = run_newman(out_file, host=host, verbose=verbose)
if test_mormo_api:
proc.terminate()
sys.exit(res.code)
if __name__ == "__main__":
cli()
```
#### File: mormo/mormo/__init__.py
```python
import logging
import redis
from pydantic import BaseSettings
FAKE_REDIS_SERVER = None
class Settings(BaseSettings):
redis_host: str = '127.0.0.1'
redis_port: int = 6379
testing: bool = False
log_level: str = 'WARNING'
test_data_str_min_length: int = 1
test_data_int_min: int = 1
class Config:
env_file = '.env'
fields = {
'redis_dsn': {
'env': 'redis_url',
},
'testing': {
'env': 'testing'
}
}
def redis_handle():
settings = Settings().dict()
if settings['testing']:
import fakeredis
global FAKE_REDIS_SERVER
if not FAKE_REDIS_SERVER:
FAKE_REDIS_SERVER = fakeredis.FakeServer()
r = fakeredis.FakeRedis(server=FAKE_REDIS_SERVER)
else:
r = redis.Redis(
host=settings['redis_host'],
port=settings['redis_port'],
)
return r
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(Settings().log_level)
from . import cli # noqa: E402, F401
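# Usage sketch (illustrative only): with TESTING=1 in the environment,
# redis_handle() returns an in-memory fakeredis instance backed by a shared
# FakeServer; otherwise it connects to redis_host/redis_port from Settings.
#
#   r = redis_handle()
#   r.set('example-key', 'example-value')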
```
#### File: mormo/schema/postman_collection_v2.py
```python
import enum
import tempfile
from typing import List, Optional, Union, Sequence
from ..model import BaseModel
VERSION = "2.1.0"
EventListen = enum.Enum(
'listen',
[('test', "test"), ('prerequest', "prerequest")],
)
Mode = enum.Enum('mode', [
('raw', 'raw'),
('urlencoded', 'urlencoded'),
('formdata', 'formdata'),
('file', 'file'),
('graphql', 'graphql'),
])
class Url(BaseModel):
path: Sequence[str]
host: Sequence[str]
query: list
variable: list
class OriginalRequest(BaseModel):
method: str
url: Url
body: dict
class Header(BaseModel):
key: str
value: str
class Parameter(BaseModel):
key: str
value: str
disabled: bool = False
description: Optional[Union[str, dict]]
class Response(BaseModel):
id: str
name: str
originalRequest: OriginalRequest
status: str
code: int
header: Sequence[Header]
body: str
cookie: list
_postman_previewlanguage: Optional[str]
class Auth(BaseModel):
type: str
class QueryParam(Parameter):
pass
class RequestBody(BaseModel):
mode: Mode
raw: str
urlencoded: Optional[list]
formdata: Optional[list]
file: Optional[dict]
graphql: Optional[dict]
    disabled: Optional[bool] = False
class Request(BaseModel):
name: str
description: dict
method: str
url: Url
auth: Auth
header: list
body: Optional[RequestBody]
class Script(BaseModel):
id: Optional[str]
type: Optional[str]
exec: Union[str, list]
src: Optional[Union[str, Url]]
name: Optional[str]
def __add__(self, x):
n = self.copy(deep=True)
if isinstance(n.exec, str):
n.exec = [n.exec]
if isinstance(x.exec, str):
x.exec = [x.exec]
n.exec.extend(x.exec)
return n
class Event(BaseModel):
id: Optional[str]
listen: str
disabled: Optional[bool]
script: Script
class Item(BaseModel):
id: str
name: str
request: Request
response: Sequence[Response]
event: List[Event]
class Variable(BaseModel):
id: str
type: str
value: Optional[str]
class Folder(BaseModel):
id: str
name: str
item: Union[Sequence[Item], Item]
event: list
variable: Optional[Sequence[Variable]]
class Description(BaseModel):
content: str
type: str
class Info(BaseModel):
name: str
postman_id: str
schema_: str
description: Description
class Config:
fields = {'postman_id': '_postman_id', 'schema_': 'schema'}
class Collection(BaseModel):
item: Sequence[Union[Item, Folder]]
event: Optional[list]
variable: Optional[Sequence[Variable]]
info: Info
def run(self, **kwargs):
from ..postman_test import run_newman
t = tempfile.NamedTemporaryFile()
self.to_file(t.name)
return run_newman(t.name, **kwargs)
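# Example (illustrative, not part of the original module): Script.__add__
# concatenates the exec snippets of two scripts, which allows separate test or
# pre-request fragments to be merged into one Script:
#
#   a = Script(exec='console.log("one");')
#   b = Script(exec=['console.log("two");'])
#   combined = a + b  # combined.exec == ['console.log("one");', 'console.log("two");']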
```
#### File: mormo/scripts/api_client.py
```python
import argparse
import requests
import json
import os
import yaml
import sys
def is_local_file_path(s):
"""Does not consider paths above cwd to be valid."""
if (
isinstance(s, str)
and s.startswith('./')
and os.path.exists(s)
and os.path.isfile(s)
and os.path.abspath(s).startswith(os.getcwd())
):
return True
def load_file(f, content_type=None):
if f.endswith('.yaml') or f.endswith('.yml') or content_type == 'yaml':
load_f = yaml.safe_load
elif f.endswith('.json') or content_type == 'json':
load_f = json.load
else:
raise ValueError(f"Unknown file type: {f}")
with open(f, 'r') as fp:
return load_f(fp)
def resolve_local_file_refs(test_config):
for path, td_item in test_config.items():
variables = td_item.get('variables')
if isinstance(variables, str) and is_local_file_path(variables):
test_config[path]['variables'] = load_file(variables)
elif isinstance(variables, dict):
for k, v in (td_item.get('variables') or {}).items():
if is_local_file_path(v):
                    test_config[path]['variables'][k] = load_file(v)
return test_config
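# Example (illustrative) entries that the resolver above expands, assuming the
# referenced files exist below the current working directory:
#
#   {"GET /pets": {"variables": "./vars.yaml"}}
#   {"GET /pets": {"variables": {"petId": "./pet_id.json"}}}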
def main():
parser = argparse.ArgumentParser(description='HTTP Service Build Server')
parser.add_argument('--target', required=True, help='URL to OpenAPI schema on the host that will be tested')
parser.add_argument('--test_config', help='Path to test config to use')
parser.add_argument('--mormo_api', help='Host of mormo api to use')
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
req = {
'target': args.target,
'verbose': args.verbose,
}
if args.test_config:
with open(args.test_config, 'r') as f:
d = f.read()
req['test_config'] = resolve_local_file_refs(yaml.safe_load(d))
endpoint = f"{args.mormo_api}/run/test/from_schema"
res = requests.post(endpoint, data=json.dumps(req))
result = res.json()
print(result['result']['stdout'])
print(result['result']['stderr'])
if __name__ == "__main__":
main()
```
#### File: mormo/tests/conftest.py
```python
import random
import os
from types import GeneratorType
from pathlib import Path
import pytest
from mormo import redis_handle
from mormo.convert import OpenAPIToPostman
from mormo.model import BaseModel
from mormo.util import DB, gen_string, hashable_lru
tests_dir_path = Path(__file__).parent.absolute()
def get_test_data(content_type, limit=3):
data_path = str(tests_dir_path) + f'/data/openapi/{content_type}'
d = [f"{data_path}/{f}" for f in os.listdir(data_path)]
if limit and limit < len(d):
random.shuffle(d)
d = d[0:limit]
return d
def pytest_addoption(parser):
parser.addoption(
"--test_file",
help="Execute tests against OpenAPI Schema at path",
)
@hashable_lru
def test_data(paths, collection=False):
for path in paths:
o = OpenAPIToPostman(path=path)
if collection:
o = o.to_postman_collection_v2()
yield o
def pytest_generate_tests(metafunc):
files = [*get_test_data('yaml'), *get_test_data('json')]
if "mormo" in metafunc.fixturenames:
if metafunc.config.getoption("test_file"):
kwargs = {'paths': [metafunc.config.getoption("test_file")]}
else:
kwargs = {'paths': files}
metafunc.parametrize("mormo", test_data(**kwargs))
if "postman_collection" in metafunc.fixturenames:
if metafunc.config.getoption("test_file"):
kwargs = {'paths': [metafunc.config.getoption("test_file")], 'collection': True}
else:
kwargs = {'paths': files, 'collection': True}
metafunc.parametrize("postman_collection", test_data(**kwargs))
if "openapi_schema_file" in metafunc.fixturenames:
if metafunc.config.getoption("test_file"):
td = [
metafunc.config.getoption("test_file"),
]
else:
td = files
metafunc.parametrize("openapi_schema_file", td)
@pytest.fixture
def openapi_schema_paths(mormo):
assert isinstance(mormo.paths, GeneratorType)
yield mormo.paths
def generate_dicts(num):
return [
{gen_string(2): gen_string(5), gen_string(2): gen_string(2)}
for _ in range(num)
]
def generate_dict_expected(num, f):
x = []
for _ in range(num):
d = {gen_string(2): gen_string(5), gen_string(2): gen_string(2)}
x.append(
(f(d), d)
)
return x
@pytest.fixture
def random_dict():
for d in generate_dicts(1):
yield d
@pytest.fixture
def test_dbo(random_dict, redis):
yield (DB(redis, model=BaseModel.construct(**random_dict)), random_dict)
@pytest.fixture(params=[
(BaseModel.construct(**test_dict), test_dict)
for test_dict in generate_dicts(3)
]
)
def test_object(request):
yield request.param
@pytest.fixture
def redis(scope='session'):
os.environ['TESTING'] = '1'
yield redis_handle()
```
#### File: mormo/tests/test_model.py
```python
import tempfile
import json
from mormo.model import BaseModel
from mormo.util import load_db, save_db, cls_from_str, DB
def test_cls_from_str():
assert isinstance(cls_from_str('mormo.model.BaseModel'), type(BaseModel))
def test_base_model(test_object):
test_object, expected = test_object
tmp = tempfile.mktemp()
test_object.to_file(tmp)
with open(tmp, 'r') as f:
assert json.load(f) == expected
def test_base_model_save(test_object, redis):
test_object, expected = test_object
test_dbo = test_object.save()
assert json.loads(test_dbo._get(redis, test_dbo.uid))['data'] == expected
def test_save_db_model_wrapper(test_object, redis):
test_object, expected = test_object
o = save_db(test_object)
assert o.object == expected
assert load_db(o.id) == expected
def test_db_save(test_dbo, redis):
test_dbo, expected = test_dbo
test_dbo.save()
assert json.loads(test_dbo._get(redis, test_dbo.uid))['data'] == expected
def test_load_by_id(test_dbo, redis):
test_dbo, expected = test_dbo
test_dbo.save()
assert test_dbo.model == DB.load_model_from_uid(redis, uid=test_dbo.uid)
assert test_dbo.model == DB(redis, uid=test_dbo.uid).model
def test_get_safe(test_object):
test_object, expected = test_object
k = '*'*10 # longer than any keys generated by test_dict
    assert test_object.get_safe(k) is None
#assert test_object.get_safe(k, {}) == {}
```
#### File: mormo/tests/test_test_runner.py
```python
from hypothesis import given
from hypothesis_jsonschema._from_schema import from_schema
from mormo.schema import TestData, list_of_test_data_to_params, openapi_v3
random_test_data = from_schema({
'type': 'object',
'required': ['key', 'value'],
'properties': {
'key': {'type': 'string', 'minLength': 2},
'value': {'type': 'string', 'minLength': 2},
}
})
@given(random_test_data)
def test_test_data_to_hash(data):
for route in ['POST /ab', 'GET /ab', 'POST /ab/:a']:
d = {data['key']: data['value']}
test_data = []
for k, v in d.items():
for in_ in list(openapi_v3.ParameterIn):
test_data.append(
TestData(route=route, in_=in_, key=k, value=v)
)
v = list_of_test_data_to_params(route, test_data).dict()
for td in test_data:
assert v[td.in_.value][td.key] == d[td.key]
``` |
{
"source": "joeystevens00/OTP",
"score": 3
} |
#### File: OTP/otp/otp.py
```python
import secrets
import string
import sys
VERSION = 0.11
letters = string.ascii_letters
numbers = string.digits
symbols = string.punctuation
ascii_chars = ''.join(chr(x) for x in range(128))
utf_chars = ''.join(chr(x) for x in range(0x110000-1))
def securegen(length, charset=letters):
    """Return a cryptographically secure random string of the given length."""
    s = ""
    for _ in range(length):
        s += secrets.choice(charset)
    return s
class VigenereCipher:
def __init__(self, key, msg, mode='encrypt', charset=letters):
self.key = key
self.msg = msg
mode = self.modedetect(mode)
self.mode = mode
self.charset = charset
if mode == 'encrypt':
self.translated = self.encrypt()
else:
self.translated = self.decrypt()
@classmethod
def modedetect(cls, mode):
modes = { 'e':'encrypt','d':'decrypt'}
parsed_mode = modes.get(mode.lower()[0])
return parsed_mode
@classmethod
def translate_charset(cls, key, msg, mode, charset=letters):
translated = []
keyIndex = 0
for symbol in msg:
if isinstance(symbol, int) and isinstance(msg, bytes):
num = symbol
else:
num = charset.find(symbol)
            if num != -1:  # symbol found in charset
if mode == 'encrypt':
num += charset.find(key[keyIndex])
elif mode == 'decrypt':
num -= charset.find(key[keyIndex])
num %= len(charset) # Handle any wrap around
translated.append(charset[num])
keyIndex += 1
if keyIndex == len(key):
keyIndex = 0
else: # Character not in charset
translated.append(symbol)
return ''.join(translated)
@classmethod
def translate(cls, key, msg, mode):
translated = []
keyIndex = 0
key = key.upper()
for symbol in msg:
num = letters.find(symbol.upper())
            if num != -1:  # symbol found in charset
if mode == 'encrypt':
num += letters.find(key[keyIndex])
elif mode == 'decrypt':
num -= letters.find(key[keyIndex])
num %= len(letters)
if symbol.isupper():
translated.append(letters[num])
elif symbol.islower():
translated.append(letters[num].lower())
keyIndex += 1
if keyIndex == len(key):
keyIndex = 0
else:
translated.append(symbol)
return ''.join(translated)
def encrypt(self):
return self.translate_charset(self.key, self.msg, self.mode, charset=self.charset)
def decrypt(self):
return self.translate_charset(self.key, self.msg, self.mode, charset=self.charset)
def otp(msg, key=None, encrypt=True, charset=letters):
if key is None:
key = securegen(len(msg), charset=charset)
mode = 'e' if encrypt else 'd'
#print(f'otp msg={msg}, key={key}, encrypt={encrypt}')
return VigenereCipher(msg=msg, key=key, mode=mode, charset=charset).translated, key
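if __name__ == '__main__':
    # Usage sketch (not part of the original module): encrypt a message with an
    # auto-generated one-time key and verify the round trip using the default
    # letters charset.
    ciphertext, key = otp('HelloWorld')
    plaintext, _ = otp(ciphertext, key=key, encrypt=False)
    assert plaintext == 'HelloWorld'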
```
#### File: joeystevens00/OTP/setup.py
```python
from setuptools import setup
import os
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = 'Otpadder',
version = "0.0.1",
author = "<NAME>",
license = read("LICENSE"),
keywords = "otp one time pad vigenere cipher",
description='OTP encrypt/decrypt with full ASCII and UTF support',
long_description=read('README'),
setup_requires=['pytest-runner'],
tests_require=['pytest'],
packages = ["otp"],
)
```
#### File: OTP/tests/test_cli.py
```python
from cli import main
import cli
import sys
import pytest
import otp.otp as otp
import os
import random
from otp.util import array_to_dict, dict_key_value_swap
import otp.util as util
pkg = 'cli.py'
sys.argv = [pkg]
output_file = 'unittest-{}.json'.format(otp.securegen(8))
test_file = 'unittest-{}.otp'.format(otp.securegen(8))
test_file_key = 'unittest-{}.otp_key'.format(otp.securegen(8))
test_encrypted_file = 'unittest-{}.otp'.format(otp.securegen(8))
DATA_DIR = "{}/../data".format(os.path.dirname(os.path.abspath(__file__)))
@pytest.fixture
def encrypt_string():
yield sys.argv + ['-m', otp.securegen(100), '-oj', output_file]
os.remove(output_file)
@pytest.fixture
def cleanup_output_file():
yield
os.remove(output_file)
@pytest.fixture
def cleanup_test_file():
yield
os.remove(test_file)
@pytest.fixture
def cleanup_test_encrypted_file():
yield
os.remove(test_encrypted_file)
@pytest.fixture
def cleanup_test_file_key():
yield
os.remove(test_file_key)
@pytest.fixture
def output_file_arg():
return {'output_json':output_file}
@pytest.fixture
def image_file():
return "{}/screenshot.png".format(DATA_DIR)
def new_argv(args):
sys.argv = [pkg] + args
return sys.argv
def execute(args):
main(sys.argv[1:], no_exit=True)
result = cli.CLI(args={**output_file_arg(),**args}).get_message_from_file(file_arg='output_json', mode='r')
return result
def cli_validate(encrypt_argv=['-m', otp.securegen(100)], decrypt_argv=['-m', '{}'], execute_args={}, include_key=True):
"""
Calls main() with encrypt and decrypt arguments.
decrypt_argv supports inserting encrypted_msg by setting -m to {}
"""
encrypt_args = new_argv([*encrypt_argv, '-oj', output_file])
result = execute(execute_args)
encrypted_msg = result['MESSAGE']
errors = result['ERRORS']
key = result['KEY']
e_args = array_to_dict(encrypt_args[1:])
msg = e_args.get('-m')
# If no msg and have a message file then load message file
if not msg and e_args.get('-f'):
args = {'encoding':execute_args.get('encoding', 'utf-8'), 'file_arg':'message_file', 'message_file': e_args.get('-f')}
msg = cli.CLI(args).get_message_from_file()
assert len(key) == len(encrypted_msg)
assert len(encrypted_msg) == len(msg)
assert len(errors) == 0
    decrypt_argv = [i.format(encrypted_msg) if '{}' in i else i for i in decrypt_argv]
key_args = ['-k', key]
if not include_key:
key_args = []
decrypt_args = new_argv(['-d', *decrypt_argv, *key_args, '-oj', output_file])
decrypt_result = execute(execute_args)
assert msg == decrypt_result['MESSAGE']
assert len(decrypt_result['ERRORS']) == 0
assert decrypt_result['KEY'] == key
def test_encrypt(cleanup_output_file):
sys.argv = encrypt_string
cli_validate()
for option, charset in util.charset_options.items():
encoding = None
if option is 'unicode':
encoding = 'utf-16'
cli_validate(
decrypt_argv=['-m', '{}', '--charset', option],
encrypt_argv=['-m', otp.securegen(100, charset=charset), '--charset', option],
execute_args = {'encoding':encoding }
)
def test_nested_operation():
roll = random.randint(20,1000)
makenumber = lambda x : roll
data = {'a':1, 'b':{'c':[1, 2, {'d':{1,2,3}}]}}
expected_result = {'a':roll, 'b':{'c':[roll, roll, {'d':roll}]}}
assert util.nested_operation(data, makenumber) == expected_result
multiply_num = lambda x,y : x*y
assert util.nested_operation([2,3,[4,5]], multiply_num, 2) == [4,6,[8,10]]
def test_encrypt_output_file(cleanup_test_file, cleanup_test_file_key, cleanup_output_file):
cli_validate(
decrypt_argv=['-f', test_file, '--charset', 'unicode', '-kf', test_file_key],
encrypt_argv=['-m', otp.securegen(100, charset=otp.utf_chars), '--charset', 'unicode', '-o', test_file, '-ok', test_file_key],
execute_args = {'encoding':'utf-16' },
include_key = False
)
def test_file_message_passing(cleanup_test_file):
messages = [otp.utf_chars[-1], otp.securegen(256, charset=otp.utf_chars)]
for message in messages:
args = {'encoding':'utf-16', 'file_arg':'message_file', 'message_file':test_file}
cli.CLI(args).store_message_file(test_file, message)
assert message == cli.CLI(args).get_message_from_file()
def test_encrypt_input_file(cleanup_test_file, cleanup_test_file_key, cleanup_test_encrypted_file, cleanup_output_file):
for charset in util.charset_options.keys():
args = {'encoding':util.charset_get_encoding(charset), 'file_arg':'message_file', 'message_file':test_file}
cli.CLI(args).store_message_file(test_file, otp.securegen(100, charset=util.charset_options[charset]))
cli_validate(
decrypt_argv=['-f', test_encrypted_file, '-kf', test_file_key, '--charset', charset],
encrypt_argv=['-f', test_file, '-ok', test_file_key, '-o', test_encrypted_file, '--charset', charset],
execute_args = {'encoding': util.charset_get_encoding(charset)},
include_key = False
)
#
# def test_encrypt_image(image_file):
# encrypted_image_file = "{}/encrypted_image.otp".format(DATA_DIR)
# key_file = "{}/encrypted_image.otp_key".format(DATA_DIR)
# cli_validate(
# decrypt_argv=['-f', encrypted_image_file, '--charset', 'unicode', '-kf', key_file],
# encrypt_argv=['-f', image_file, '--charset', 'unicode', '-ok', key_file, '-o', encrypted_image_file],
# execute_args = {'encoding':'utf-16' },
# include_key = False
# )
# TODO
# Test generation
# Test different output options
# Validate STDOUT?
# Test image file with unicode
``` |
{
"source": "JoeyStrandnes/Applied-IoT-1DT305",
"score": 3
} |
#### File: Applied-IoT-1DT305/Software/lora_callback.py
```python
def lora_cb(lora):
events = lora.events()
if events & LoRa.RX_PACKET_EVENT:
print('Lora packet received')
data = s.recv(64)
print(data)
if events & LoRa.TX_PACKET_EVENT:
print('Lora packet sent')
lora.callback(trigger=(LoRa.RX_PACKET_EVENT | LoRa.TX_PACKET_EVENT), handler=lora_cb)
```
#### File: Applied-IoT-1DT305/Software/lora.py
```python
from network import LoRa
import socket
import time
import ubinascii
import pycom
import json
pycom.heartbeat(False)
with open('config.json') as f:
config = json.load(f)
# Initialise LoRa in LORAWAN mode.
# Please pick the region that matches where you are using the device:
# Asia = LoRa.AS923
# Australia = LoRa.AU915
# Europe = LoRa.EU868
# United States = LoRa.US915
lora = LoRa(mode=LoRa.LORAWAN, region=LoRa.EU868)
#print("DevEUI: " + ubinascii.hexlify(lora.mac()).decode('utf-8').upper())
# create an OTAA authentication parameters
app_eui = ubinascii.unhexlify(config['APP_EUI'])  # application EUI
app_key = ubinascii.unhexlify(config['APP_KEY'])
def connect_lora():
# join a network using OTAA (Over the Air Activation)
lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)
while not lora.has_joined():
print('Not yet joined...')
pycom.rgbled(0xcc00ff)
time.sleep(2)
pycom.rgbled(0x000000)
time.sleep(0.5)
print("Joined network")
for n in range(3):
pycom.rgbled(0x2bff00)
time.sleep(1)
pycom.rgbled(0x000000)
time.sleep(0.5)
# create a LoRa socket
global s
s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
# set the LoRaWAN data rate
s.setsockopt(socket.SOL_LORA, socket.SO_DR, 0)
#s.setblocking(False)
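# Usage sketch (illustrative; typically called from main.py on the device).
# After connect_lora() the module-level socket `s` can be used to send uplinks,
# here with a hypothetical two-byte sensor payload:
#
#   connect_lora()
#   s.setblocking(True)
#   s.send(b'\x01\x02')
#   s.setblocking(False)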
``` |
{
"source": "Joeyt1008/python-dev-tools",
"score": 3
} |
#### File: python_dev_tools/formatters/common.py
```python
import subprocess # noqa: S404
from typing import List
class FormatterNotFound(FileNotFoundError):
"""
Exception to detect that a formatter is not found.
Note that this doesn't occur, except due to an installation error.
"""
class Formatter:
"""Base formatter class."""
name = "Formatter"
path = "/bin/unknownformatter"
cli_args = []
@classmethod
def format_file(cls, filepath: str) -> None:
"""Execute the formatter.
Args:
filepath (str): path of the file to format
"""
try:
cls._format_file(filepath)
except FormatterNotFound:
print(f"Formatter {cls.name} not found: {cls.path}")
@classmethod
def _format_file(cls, filepath: str):
args = [cls.path, *cls.cli_args, filepath]
cls._execute_command(args)
@classmethod
def _execute_command(cls, args: List[str]) -> subprocess.CompletedProcess:
"""Execute the formatter.
Args:
args (list[str]): arguments of the command including command name
Raises:
FormatterNotFound: formatter ``cls.path`` not found in path
Returns:
CompletedProcess: result of the execution
"""
try:
return subprocess.run( # noqa: S603
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
timeout=10,
encoding="utf-8",
)
except FileNotFoundError as exc:
if exc.filename == cls.path:
raise FormatterNotFound
raise
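# Example (not part of the original module): a hypothetical concrete formatter
# built on the base class above. The executable name and CLI flag are assumptions
# and must match a tool available on the local PATH.
class BlackFormatter(Formatter):
    """Formatter that delegates to the `black` executable."""
    name = "black"
    path = "black"
    cli_args = ["--quiet"]
# BlackFormatter.format_file("some_module.py")  # would run: black --quiet some_module.py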
```
#### File: python-dev-tools/tests/test_python_dev_tools.py
```python
import sys
from pathlib import Path
from textwrap import dedent
import python_dev_tools.whataformatter
import python_dev_tools.whatalinter
from python_dev_tools.whataformatter import main as main_formatter
from python_dev_tools.whatalinter import lint, main as main_linter
def test_main_formatter(tmpdir):
"""Test main call."""
p = tmpdir.join("foo.py")
p.write(
dedent(
"""
a = 1
"""
)
)
sys.argv = ["whataformatter", str(p)]
python_dev_tools.whataformatter.__name__ = "__main__"
main_formatter()
# TODO assert file content
# TODO test formatting
def test_main_linter(tmpdir, capsys):
"""Test main call."""
p = tmpdir.join("foo.py")
p.write("a = 1\n")
sys.argv = ["whatalinter", str(p)]
python_dev_tools.whatalinter.__name__ = "__main__"
main_linter()
captured = capsys.readouterr()
assert "Missing docstring in public module" in captured.out
def test_long_line(tmpdir, capsys):
"""Test pycodestyle is working."""
p = tmpdir.join("foo.py")
p.write('"""Docstring."""\n\n"' + 87 * "#" + '"\n')
lint(p)
captured = capsys.readouterr()
assert "line too long (89 > 88 characters)" in captured.out
def test_duplicate_key(tmpdir, capsys):
"""Test pyflakes is working."""
p = tmpdir.join("foo.py")
p.write('"""Docstring."""\n\naaa = {1: 5, 1: 6}\n')
lint(p)
captured = capsys.readouterr()
assert "dictionary key 1 repeated with different values" in captured.out
def test_complexity(tmpdir, capsys):
"""Test McCabe is working."""
p = tmpdir.join("foo.py")
file_content = '"""Docstring."""\n\n'
file_content += dedent(
"""
elements = [open(str(i)) for i in range(10)]
def foo():
\"\"\"Docstring.\"\"\"
if elements[0]:
aaa = 1
elif elements[1]:
aaa = 1
elif elements[2]:
aaa = 1
elif elements[3]:
aaa = 1
elif elements[4]:
aaa = 1
elif elements[5]:
aaa = 1
elif elements[6]:
aaa = 1
elif elements[7]:
aaa = 1
elif elements[8]:
aaa = 1
elif elements[9]:
aaa = 1
print(aaa)
"""
)
p.write(file_content)
lint(p)
captured = capsys.readouterr()
assert "'foo' is too complex (11)" in captured.out
def test_lint_myself(capsys):
"""Test no lint message for this project."""
source_dir = Path("python_dev_tools")
if not source_dir.exists():
# run from inside tests directory
source_dir = Path("../python_dev_tools")
lint(source_dir)
captured = capsys.readouterr()
assert captured.out.replace("../", "") == dedent(
"""\
python_dev_tools/whataformatter.py:0:1: WPS226 Found string constant over-use: PATH > 3
python_dev_tools/whataformatter.py:26:1: WPS213 Found too many expressions: 10 > 9
python_dev_tools/whatalinter.py:0:1: WPS202 Found too many module members: 8 > 7
python_dev_tools/whatalinter.py:13:28: WPS323 Found `%` string formatting
python_dev_tools/whatalinter.py:72:13: WPS420 Found wrong keyword: pass
python_dev_tools/whatalinter.py:72:21: T101 fixme found (TODO)
python_dev_tools/whatalinter.py:93:7: T101 fixme found (TODO)
"""
)
``` |
{
"source": "Joeyt1008/tcex",
"score": 2
} |
#### File: tcex/app_config_object/permutations.py
```python
import json
import logging
import os
import random
import sys
try:
# standard library
import sqlite3
except ImportError:
# only required for local development
pass
from .install_json import InstallJson
from .layout_json import LayoutJson
class Permutations:
"""Permutations Module
Args:
logger (logging.Logger, optional): A instance of Logger. Defaults to None.
"""
def __init__(self, logger=None):
"""Initialize Class properties"""
self.log = logger or logging.getLogger('permutations')
# properties
self._db_conn = None
self._input_names = None
self._input_permutations = None
self._output_permutations = None
self.app_path = os.getcwd()
self.ij = InstallJson()
self.lj = LayoutJson()
self.input_table = 'inputs'
def _gen_permutations(self, index=0, args=None):
"""Iterate recursively over layout.json parameter names to build permutations.
.. NOTE:: Permutations are for layout.json based Apps.
Args:
index (int, optional): The current index position in the layout names list.
args (list, optional): Defaults to None. The current list of args.
"""
if args is None:
args = []
try:
hidden = False
if self.ij.runtime_level.lower() in [
'playbook',
'triggerservice',
'webhooktriggerservice',
]:
name = list(self.lj.parameters_names)[index]
display = self.lj.params_dict.get(name, {}).get('display')
hidden = self.lj.params_dict.get(name, {}).get('hidden', False)
else:
name = list(self.ij.params_dict.keys())[index]
display = False
input_type = self.ij.params_dict.get(name, {}).get('type')
if input_type is None:
self.handle_error(f'No value found in install.json for "{name}".')
if (
self.ij.runtime_level.lower() == 'organization'
or self.validate_layout_display(self.input_table, display)
or hidden
):
if input_type.lower() == 'boolean':
for val in [True, False]:
args.append({'name': name, 'value': val})
self.db_update_record(self.input_table, name, val)
self._gen_permutations(index + 1, list(args))
# remove the previous arg before next iteration
args.pop()
elif input_type.lower() == 'choice':
valid_values = self.ij.expand_valid_values(
self.ij.params_dict.get(name, {}).get('validValues', [])
)
for val in valid_values:
args.append({'name': name, 'value': val})
self.db_update_record(self.input_table, name, val)
self._gen_permutations(index + 1, list(args))
# remove the previous arg before next iteration
args.pop()
else:
args.append({'name': name, 'value': None})
self._gen_permutations(index + 1, list(args))
else:
self._gen_permutations(index + 1, list(args))
except IndexError:
# when IndexError is reached all data has been processed.
self._input_permutations.append(args)
outputs = []
for output_data in self.ij.output_variables:
name = output_data.get('name')
if self.lj.outputs_dict.get(name) is not None:
display = self.lj.outputs_dict.get(name, {}).get('display')
valid = self.validate_layout_display(self.input_table, display)
if display is None or not valid:
continue
outputs.append(output_data)
self._output_permutations.append(outputs)
@property
def db_conn(self):
"""Create a temporary in memory DB and return the connection."""
if self._db_conn is None:
try:
self._db_conn = sqlite3.connect(':memory:')
except sqlite3.Error as e:
self.handle_error(e)
return self._db_conn
def db_create_table(self, table_name, columns):
"""Create a temporary DB table.
Arguments:
table_name (str): The name of the table.
columns (list): List of columns to add to the DB.
"""
formatted_columns = ''
for col in set(columns):
formatted_columns += f""""{col.strip('"').strip("'")}" text, """
formatted_columns = formatted_columns.strip(', ')
create_table_sql = f'CREATE TABLE IF NOT EXISTS {table_name} ({formatted_columns});'
try:
cr = self.db_conn.cursor()
cr.execute(create_table_sql)
except sqlite3.Error as e:
self.handle_error(e)
def db_drop_table(self, table_name):
"""Drop a DB table.
Arguments:
table_name (str): The name of the table.
"""
create_table_sql = f'DROP TABLE IF EXISTS {table_name};'
try:
cr = self.db_conn.cursor()
cr.execute(create_table_sql)
except sqlite3.Error as e:
self.handle_error(e)
def db_insert_record(self, table_name, columns):
"""Insert records into DB.
Args:
table_name (str): The name of the table.
columns (list): List of columns for insert statement.
"""
bindings = ('?,' * len(columns)).strip(',')
values = [None] * len(columns)
try:
sql = f"INSERT INTO {table_name} ({', '.join(columns)}) VALUES ({bindings})"
cur = self.db_conn.cursor()
cur.execute(sql, values)
except sqlite3.OperationalError as e:
raise RuntimeError(f'SQL insert failed - SQL: "{sql}", Error: "{e}"')
def db_update_record(self, table_name, column, value):
"""Insert records into DB.
Args:
table_name (str): The name of the table.
column (str): The column name in which the value is to be updated.
value (str): The value to update in the column.
"""
# escape any single quotes in value
if isinstance(value, str):
value = value.replace('\'', '\\')
elif isinstance(value, bool):
# core expects true/false so we convert bool value to string and lower
value = str(value).lower()
else:
            # no other types can be used in a display clause so skip them
return
# only column defined in install.json can be updated
if column in self.ij.params_dict:
try:
# value should be wrapped in single quotes to be properly parsed
sql = f"UPDATE {table_name} SET {column} = '{value}'"
cur = self.db_conn.cursor()
cur.execute(sql)
except sqlite3.OperationalError as e:
raise RuntimeError(f'SQL update failed - SQL: "{sql}", Error: "{e}"')
def exists(self):
"""Return True if permutation file exists."""
return os.path.isfile(self.filename)
@property
def filename(self):
"""Return all output permutations for current App."""
return os.path.join(self.app_path, 'permutations.json')
@staticmethod
def handle_error(err, halt=True):
"""Print errors message and optionally exit.
Args:
err (str): The error message to print.
halt (bool, optional): Defaults to True. If True the script will exit.
"""
print(err)
if halt:
sys.exit(1)
def init_permutations(self):
"""Process layout.json names/display to get all permutations of args."""
if self._input_permutations is None and self._output_permutations is None:
self._input_permutations = []
self._output_permutations = []
# create db for permutations testing
self.db_create_table(self.input_table, self.ij.params_dict.keys())
self.db_insert_record(self.input_table, self.ij.params_dict.keys())
# only gen permutations if none have been generated previously
self._gen_permutations()
# drop database
self.db_drop_table(self.input_table)
def input_dict(self, permutation_id):
"""Return all input permutation names for provided permutation id.
{'tc_action': 'Append', 'input_strings': None, 'append_chars': None}
Args:
permutation_id (int): The index of the permutation input array.
Returns:
dict: A dict with key / value for each input for the provided permutation id.
"""
input_dict = {}
if self.lj.has_layout:
for permutation in self.input_permutations[permutation_id]:
input_dict.setdefault(permutation.get('name'), permutation.get('value'))
return input_dict
@property
def input_names(self):
"""Return all input permutation names for current App."""
if self._input_names is None and self.lj.has_layout:
self._input_names = []
for permutation in self.input_permutations:
self._input_names.append([p.get('name') for p in permutation])
return self._input_names
@property
def input_permutations(self):
"""Return all input permutations for current App.
self._input_permutations is an array of permutations arrays.
[[<perm obj #1], [<perm obj #2]]
"""
if self._input_permutations is None and self.lj.has_layout:
self.init_permutations()
return self._input_permutations
@property
def output_permutations(self):
"""Return all output permutations for current App."""
if self._output_permutations is None:
self.init_permutations()
return self._output_permutations
def outputs_by_inputs(self, inputs):
"""Return all output based on provided inputs
Args:
inputs (dict): The args/inputs dict.
"""
table = f'temp_{random.randint(100,999)}' # nosec
self.db_create_table(table, self.ij.params_dict.keys())
self.db_insert_record(table, self.ij.params_dict.keys())
for name, val in inputs.items():
self.db_update_record(table, name, val)
outputs = []
# loop through all output variables in install.json
for output_data in self.ij.output_variables:
name = output_data.get('name')
if self.lj.outputs_dict.get(name) is None:
# an output not listed in layout.json should always be shown
valid = True
else:
# all other outputs must be validated
display = self.lj.outputs_dict.get(name, {}).get('display')
valid = self.validate_layout_display(table, display)
if valid:
# valid outputs get added to array
outputs.append(output_data)
# drop database
self.db_drop_table(table)
return outputs
def permutations(self):
"""Process layout.json names/display to get all permutations of args."""
if 'sqlite3' not in sys.modules:
print('The sqlite3 module needs to be build-in to Python for this feature.')
sys.exit(1)
# create db for permutations testing
self.db_create_table(self.input_table, self.ij.params_dict.keys())
self.db_insert_record(self.input_table, self.ij.params_dict.keys())
# only gen permutations if none have been generated previously
if not self._input_permutations and not self._output_permutations:
self._gen_permutations()
# output permutations
self.write_permutations_file()
def validate_input_variable(self, input_name, inputs):
"""Return True if the provided variables display where clause returns results.
Args:
input_name (dict): The input variable name (e.g. tc_action).
inputs (dict): The current name/value dict.
Returns:
bool: True if the display value returns results.
"""
if not self.lj.has_layout or not inputs:
            # always return True if the current App doesn't have a layout.json file
return True
table = f'temp_{random.randint(100,999)}' # nosec
self.db_create_table(table, self.ij.params_dict.keys())
self.db_insert_record(table, self.ij.params_dict.keys())
# APP-98 Added to cover the use case of interdependent variables in the layout.json.
for name, item in self.ij.filter_params_dict(_type='Boolean').items():
self.db_update_record(table, name, item.get('default', False))
for name, val in inputs.items():
self.db_update_record(table, name, val)
lj_data = self.lj.params_dict.get(input_name)
if lj_data is None:
# this shouldn't happen as all ij inputs must be in lj
raise RuntimeError(f'The provided input {input_name} was not found in layout.json.')
display = lj_data.get('display')
# check if provided variable meets display requirements
valid = self.validate_layout_display(table, display)
# cleanup temp table
self.db_drop_table(table)
return valid
def validate_layout_display(self, table, display_condition):
"""Check to see if the display condition passes.
Args:
table (str): The name of the DB table which hold the App data.
display_condition (str): The "where" clause of the DB SQL statement.
Returns:
bool: True if the row count is greater than 0.
"""
display = False
if display_condition is None:
display = True
else:
display_query = f'select count(*) from {table} where {display_condition}' # nosec
try:
cur = self.db_conn.cursor()
cur.execute(display_query.replace('"', ''))
rows = cur.fetchall()
if rows[0][0] > 0:
display = True
except sqlite3.Error as e:
print(f'"{display_query}" query returned an error: ({e}).')
sys.exit(1)
return display
def write_permutations_file(self):
"""Print all valid permutations."""
permutations = []
for index, p in enumerate(self.input_permutations):
permutations.append({'index': index, 'args': p})
with open(self.filename, 'w') as fh:
json.dump(permutations, fh, indent=2, sort_keys=True)
print('All permutations written to the "permutations.json" file.')
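# Usage sketch (illustrative; assumes it is run from an App directory containing
# install.json and, optionally, layout.json):
#
#   permutations = Permutations()
#   permutations.permutations()  # writes permutations.json for all valid arg combinations
#   outputs = permutations.outputs_by_inputs({'tc_action': 'Append'})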
```
#### File: tcex/bin/lib.py
```python
import os
import platform
import shutil
import subprocess # nosec
import sys
from distutils.version import StrictVersion # pylint: disable=no-name-in-module
from urllib.parse import quote
# third-party
import colorama as c
from .bin import Bin
class Lib(Bin):
"""Install Required Modules for App.
Args:
_args (namespace): The argparser args Namespace.
"""
def __init__(self, _args):
"""Initialize Class properties.
Args:
_args (namespace): The argparser args Namespace.
"""
super().__init__(_args)
# properties
self.latest_version = None
self.lib_directory = (
f'lib_{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}'
)
self.requirements_file = 'requirements.txt'
self.static_lib_dir = 'lib_latest'
self.use_temp_requirements_file = False
# update tcex.json
self.tj.update()
def _build_command(self, python_executable, lib_dir_fq, proxy_enabled):
"""Build the pip command for installing dependencies.
Args:
python_executable (str): The fully qualified path of the Python executable.
            lib_dir_fq (str): The fully qualified path of the lib directory.
            proxy_enabled (bool): Whether proxy settings were configured for pip.
Returns:
list: The Python pip command with all required args.
"""
exe_command = [
os.path.expanduser(python_executable),
'-m',
'pip',
'install',
'-r',
self.requirements_file,
'--ignore-installed',
'--quiet',
'--target',
lib_dir_fq,
]
if self.args.no_cache_dir:
exe_command.append('--no-cache-dir')
if proxy_enabled:
# trust the pypi hosts to avoid ssl errors
trusted_hosts = ['pypi.org', 'pypi.python.org', 'files.pythonhosted.org']
for host in trusted_hosts:
exe_command.append('--trusted-host')
exe_command.append(host)
return exe_command
def _configure_proxy(self):
"""Configure proxy settings using environment variables."""
if os.getenv('HTTP_PROXY') or os.getenv('HTTPS_PROXY'):
# TODO: is this appropriate?
# don't change proxy settings if the OS already has them configured.
return True
proxy_enabled = False
if self.args.proxy_host is not None and self.args.proxy_port is not None:
if self.args.proxy_user is not None and self.args.proxy_pass is not None:
proxy_user = quote(self.args.proxy_user, safe='~')
proxy_pass = quote(self.args.proxy_pass, safe='~')
# proxy url with auth
proxy_url = (
f'{proxy_user}:{proxy_pass}@{self.args.proxy_host}:{self.args.proxy_port}'
)
else:
# proxy url without auth
proxy_url = f'{self.args.proxy_host}:{self.args.proxy_port}'
os.putenv('HTTP_PROXY', f'http://{proxy_url}')
os.putenv('HTTPS_PROXY', f'http://{proxy_url}')
print(
f'Using Proxy Server: {c.Fore.CYAN}{self.args.proxy_host}:{self.args.proxy_port}.'
)
proxy_enabled = True
return proxy_enabled
def _create_lib_latest(self):
"""Create the lib_latest symlink for App Builder."""
if platform.system() == 'Windows':
shutil.copytree(f'lib_{self.latest_version}', self.static_lib_dir)
else:
if os.path.islink(self.static_lib_dir):
os.unlink(self.static_lib_dir)
elif os.path.isfile(self.static_lib_dir):
os.rmdir(self.static_lib_dir)
os.symlink(f'lib_{self.latest_version}', self.static_lib_dir)
def _create_temp_requirements(self):
"""Create a temporary requirements.txt.
        This allows testing against a git branch instead of pulling from PyPI.
"""
self.use_temp_requirements_file = True
# Replace tcex version with develop branch of tcex
with open(self.requirements_file) as fh:
current_requirements = fh.read().strip().split('\n')
self.requirements_file = f'temp-{self.requirements_file}'
with open(self.requirements_file, 'w') as fh:
new_requirements = ''
for line in current_requirements:
if not line:
continue
if line.startswith('tcex'):
line = (
'git+https://github.com/ThreatConnect-Inc/tcex.git@'
f'{self.args.branch}#egg=tcex'
)
# print('line', line)
new_requirements += f'{line}\n'
fh.write(new_requirements)
def install_libs(self):
"""Install Required Libraries using pip."""
# check for requirements.txt
if not os.path.isfile(self.requirements_file):
self.handle_error('A requirements.txt file is required to install modules.')
# if branch arg is provide use git branch instead of pypi
if self.args.branch is not None:
self._create_temp_requirements()
# default or current python version
lib_data = [{'python_executable': sys.executable, 'lib_dir': self.lib_directory}]
if self.tj.lib_versions:
# overwrite default with config data
lib_data = self.tj.lib_versions
print(f'{c.Style.BRIGHT}Using "lib" directories defined in tcex.json file.')
# configure proxy settings
proxy_enabled = self._configure_proxy()
# install all requested lib directories
for data in lib_data:
lib_dir = data.get('lib_dir')
lib_dir_fq = os.path.join(self.app_path, lib_dir)
if os.access(lib_dir_fq, os.W_OK):
# remove lib directory from previous runs
shutil.rmtree(lib_dir_fq)
# replace env vars with env val in the python executable
python_executable = os.path.expanduser(data.get('python_executable'))
if not os.path.isfile(python_executable) and not os.path.islink(python_executable):
print(
                    f'{c.Style.BRIGHT}{c.Fore.RED}The Python executable ({python_executable}) '
f'could not be found. Skipping building lib directory for this Python version.'
)
continue
print(f'Building Lib Dir: {c.Style.BRIGHT}{c.Fore.CYAN}{lib_dir_fq}')
exe_command = self._build_command(python_executable, lib_dir_fq, proxy_enabled)
print(f"Running: {c.Style.BRIGHT}{c.Fore.GREEN}{' '.join(exe_command)}")
p = subprocess.Popen(
exe_command,
shell=False, # nosec
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
_, err = p.communicate(timeout=60) # pylint: disable=unused-variable
if p.returncode != 0:
print(f'{c.Style.BRIGHT}{c.Fore.RED}FAIL')
print(f"{c.Style.BRIGHT}{c.Fore.RED}{err.decode('utf-8')}")
sys.exit(f"ERROR: {err.decode('utf-8')}")
# version comparison
try:
python_version = lib_dir.split('_', 1)[1]
except IndexError:
python_version = None
self.handle_error('Could not determine version from lib string.')
# track the latest Python version
if self.latest_version is None:
self.latest_version = python_version
elif StrictVersion(python_version) > StrictVersion(self.latest_version):
self.latest_version = python_version
# cleanup temp file if required
if self.use_temp_requirements_file:
os.remove(self.requirements_file)
# create lib_latest
self._create_lib_latest()
```
#### File: tcex/bin/test.py
```python
import copy
import json
import os
from ..app_config_object.templates import (
CustomTemplates,
DownloadTemplates,
TestProfileTemplates,
ValidationTemplates,
)
from ..profile import Interactive, Profile
from .bin import Bin
class Test(Bin):
"""Create testing files for ThreatConnect Exchange App.
Args:
_args (namespace): The argparser args Namespace.
"""
def __init__(self, _args):
"""Initialize Class properties."""
super().__init__(_args)
# properties
self.profile = Profile(
default_args={}, feature=self.args.feature, name=self.args.profile_name
)
self.custom_templates = CustomTemplates(self.profile, self.args.branch)
self.download_template = DownloadTemplates(self.profile, self.args.branch)
self.profile_interactive = Interactive(self.profile)
self.test_profile_template = TestProfileTemplates(self.profile, self.args.branch)
self.validation_templates = ValidationTemplates(self.profile, self.args.branch)
def add_negative_profile(self, profile_name, inputs, fail_on_error=None):
"""Create a negative profile."""
# build profile name
exit_code = 1 # default exit code is 1
if fail_on_error is not None:
if fail_on_error is False:
exit_code = 0
profile_name = f'{profile_name}_foe_{str(fail_on_error).lower()}'
# get profile data and update with new inputs
profile_data = self.profile.contents
profile_data['exit_codes'] = [exit_code]
profile_data['exit_message'] = None
profile_data['inputs'] = inputs
profile_data['outputs'] = None
profile_data['environments'] = [*self.profile.environments, 'negative']
# create a meaningful profile name
new_profile = Profile(default_args={}, feature=self.args.feature, name=profile_name)
new_profile.add(profile_data=profile_data)
def create_dirs(self):
"""Create tcex.d directory and sub directories."""
for d in [
self.profile.test_directory,
self.profile.feature_directory,
self.profile.directory,
]:
if not os.path.isdir(d):
os.makedirs(d)
# create __init__ files
self.create_dirs_init()
def create_dirs_init(self):
"""Create the __init__.py file under dir."""
for d in [self.profile.test_directory, self.profile.feature_directory]:
if os.path.isdir(d):
with open(os.path.join(d, '__init__.py'), 'a'):
os.utime(os.path.join(d, '__init__.py'), None)
def create_negative_profiles(self):
"""Create negative profiles using interactive profile base."""
for inputs in self.profile.profile_inputs:
for name, value in inputs.get('required', {}).items():
ij_data = self.profile.ij.params_dict.get(name, {})
# create a profile for each pb data type
for pb_data_type in ij_data.get('playbookDataType', []):
for negative_type in self.negative_inputs.get(pb_data_type.lower(), []):
# the value is pre-staged in test_case_playbook_common.py
value = f'#App:1234:{negative_type}!{pb_data_type}'
profile_name = f'negative_{name}_{pb_data_type.lower()}_{negative_type}'
                        # modify a copy so the original is preserved for the next iteration
new_inputs = copy.deepcopy(inputs)
new_inputs['required'][name] = value
if 'fail_on_error' in inputs.get('optional', {}):
# handle fail on error
for b in [False, True]:
new_inputs['optional']['fail_on_error'] = b
self.add_negative_profile(profile_name, new_inputs, b)
else:
self.add_negative_profile(profile_name, new_inputs)
def interactive_profile(self, negative=False):
"""Present interactive profile inputs."""
self.profile_interactive.present()
profile_data = {
'exit_codes': self.profile_interactive.exit_codes,
'inputs': self.profile_interactive.inputs,
'stage': self.profile_interactive.staging_data,
}
self.profile.add(profile_data=profile_data)
if negative:
# if user specified negative arg then create negative test profiles
self.create_negative_profiles()
@staticmethod
def load_legacy_profiles(staging_files):
"""Load staging data to migrate legacy templates."""
staging_data = {}
for sf in staging_files:
with open(sf) as fh:
data = json.load(fh)
for d in data:
staging_data[d.get('variable')] = d.get('data')
return staging_data
def migrate_profile(self):
"""Migrate legacy profile to new framework."""
data = []
profile_file = os.path.join(self.app_path, 'tcex.d', 'profiles', self.args.profile_file)
if os.path.isfile(self.args.profile_file):
with open(self.args.profile_file) as fh:
data = json.load(fh)
elif os.path.isfile(profile_file):
with open(profile_file) as fh:
data = json.load(fh)
else:
self.handle_error(f'Error reading in profile file: {self.args.profile_file}', True)
for d in data:
profile_data = {
'exit_codes': d.get('exit_codes'),
'exit_message': None,
'inputs': d.get('args', {}).get('app'),
'stage': {'kvstore': self.load_legacy_profiles(d.get('data_files', []))},
}
# add profile
self.profile.add(profile_data=profile_data, profile_name=d.get('profile_name'))
@property
def negative_inputs(self):
"""Return dict of negative inputs."""
return {
'binary': ['empty', 'null'],
'binaryarray': ['empty', 'null'],
'keyvalue': ['null'],
'keyvaluearray': ['null'],
'string': ['empty', 'null'],
'stringarray': ['empty', 'null'],
'tcentity': ['null'],
'tcentityarray': ['null'],
}
```
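The negative-profile naming above can be hard to follow from the nested loops alone. Below is a minimal standalone sketch that mirrors the logic of `create_negative_profiles` and `add_negative_profile`; the input name and playbook data types are hypothetical examples, and only the name/value formats are taken from the code above.

```python
# Standalone sketch of the negative-profile naming used above. The input name and
# playbook data types are hypothetical; the "#App:1234:..." value format and the
# "negative_*"/"_foe_*" name formats mirror create_negative_profiles/add_negative_profile.
negative_inputs = {
    'string': ['empty', 'null'],
    'stringarray': ['empty', 'null'],
}


def negative_profiles(input_name, playbook_data_types, fail_on_error=None):
    """Yield (profile_name, staged_value) pairs for a single required input."""
    for pb_data_type in playbook_data_types:
        for negative_type in negative_inputs.get(pb_data_type.lower(), []):
            # the value is pre-staged in test_case_playbook_common.py
            value = f'#App:1234:{negative_type}!{pb_data_type}'
            name = f'negative_{input_name}_{pb_data_type.lower()}_{negative_type}'
            if fail_on_error is not None:
                # fail_on_error variants are suffixed with the boolean value
                name = f'{name}_foe_{str(fail_on_error).lower()}'
            yield name, value


for profile_name, staged_value in negative_profiles('indicator', ['String'], fail_on_error=False):
    print(f'{profile_name} -> {staged_value}')
# negative_indicator_string_empty_foe_false -> #App:1234:empty!String
# negative_indicator_string_null_foe_false -> #App:1234:null!String
```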
#### File: tcex/decorators/on_exception.py
```python
import traceback
# third-party
import wrapt
class OnException:
"""Set exit message on failed execution.
This decorator will catch the generic "Exception" error, log the supplied error message, set
the "exit_message", and exit the App with an exit code of 1.
.. code-block:: python
:linenos:
:lineno-start: 1
@OnException(exit_msg='Failed to process JSON data.')
def my_method(json_data):
json.dumps(json_data)
Args:
exit_msg (str): The message to send to exit method.
exit_enabled (boolean|str, kwargs): Accepts a boolean or string value. If a boolean value
is provided that value will control enabling/disabling this feature. A string
value should reference an item in the args namespace which resolves to a boolean.
The value of this boolean will control enabling/disabling this feature.
write_output (boolean): default True.
If enabled, will call app.write_output() when an exception is raised.
"""
def __init__(self, exit_msg=None, exit_enabled=True, write_output=True):
"""Initialize Class properties"""
self.exit_enabled = exit_enabled
self.exit_msg = exit_msg or 'An exception has been caught. See the logs for more details.'
self.write_output = write_output
@wrapt.decorator
def __call__(self, wrapped, instance, args, kwargs):
"""Implement __call__ function for decorator.
Args:
wrapped (callable): The wrapped function which in turns
needs to be called by your wrapper function.
instance (App): The object to which the wrapped
function was bound when it was called.
args (list): The list of positional arguments supplied
when the decorated function was called.
kwargs (dict): The dictionary of keyword arguments
supplied when the decorated function was called.
Returns:
function: The custom decorator function.
"""
def exception(app, *args, **kwargs): # pylint: disable=inconsistent-return-statements
"""Call the function and handle any exception.
Args:
app (class): The instance of the App class "self".
"""
            # self.exit_enabled (e.g., True or 'fail_on_false') enables/disables this feature
enabled = self.exit_enabled
if not isinstance(self.exit_enabled, bool):
enabled = getattr(app.args, self.exit_enabled)
if not isinstance(enabled, bool): # pragma: no cover
raise RuntimeError(
'The exit_enabled value must be a boolean or resolved to bool.'
)
app.tcex.log.debug(f'Fail enabled is {enabled} ({self.exit_enabled}).')
try:
return wrapped(*args, **kwargs)
except Exception:
app.tcex.log.error(traceback.format_exc())
app.exit_message = self.exit_msg # for test cases
if enabled:
if self.write_output:
app.tcex.playbook.write_output()
if hasattr(app, 'write_output'):
app.write_output()
app.tcex.exit(1, self.exit_msg)
return exception(instance, *args, **kwargs)
```
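For reference, here is a hedged, runnable sketch of how `OnException` behaves when the wrapped method raises. The `StubTcEx`/`StubApp` classes are test doubles invented for this illustration (a real App gets these objects from the tcex framework), and the import path is assumed from the file header above; the tcex package and its `wrapt` dependency must be installed.

```python
# Hedged sketch only: StubTcEx/StubApp are invented stand-ins for the tcex App runtime.
import json
import logging
from types import SimpleNamespace

from tcex.decorators.on_exception import OnException  # import path assumed from the file header


class StubTcEx:
    """Just enough of the tcex object for OnException to run."""

    log = logging.getLogger('stub')
    playbook = SimpleNamespace(write_output=lambda: None)

    @staticmethod
    def exit(code, msg):
        print(f'exit({code}): {msg}')


class StubApp:
    """Stand-in App; exit_enabled='fail_on_error' resolves against args at call time."""

    args = SimpleNamespace(fail_on_error=True)
    tcex = StubTcEx()

    @OnException(exit_msg='Failed to parse JSON data.', exit_enabled='fail_on_error')
    def run(self, json_data):
        json.loads(json_data)  # raises on invalid JSON


StubApp().run('not-json')  # logs the traceback, sets exit_message, then exits with code 1
```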
#### File: tcex/profile/interactive.py
```python
import json
import math
# import os
import re
import sys
from base64 import b64encode
from typing import Optional, Union
# third-party
import colorama as c
# autoreset colorama
c.init(autoreset=True, strip=False)
class Interactive:
"""Testing Profile Interactive Class."""
def __init__(self, profile: object):
"""Initialize Class properties.
Args:
profile (Profile): The profile object to build interactive inputs.
"""
self.profile = profile
# properties
self._inputs = {
'optional': {},
'required': {},
}
self._no_selection_text = 'No Selection'
self._staging_data = {'kvstore': {}}
# self._user_defaults = None
self.collect_type_map = {
'Any': self.collect_string,
'Binary': self.collect_binary,
'BinaryArray': self.collect_binary_array,
'KeyValue': self.collect_key_value,
'KeyValueArray': self.collect_key_value_array,
'String': self.collect_string,
'StringArray': self.collect_string_array,
'TCEntity': self.collect_tcentity,
'TCEntityArray': self.collect_tcentity_array,
}
self.exit_codes = []
self.input_type_map = {
'boolean': self.present_boolean,
'choice': self.present_choice,
'keyvaluelist': self.present_key_value_list,
'multichoice': self.present_multichoice,
'string': self.present_string,
'editchoice': self.present_editchoice,
}
# self.user_defaults_filename = os.path.join('tests', '.user_defaults')
def _default(self, data: dict) -> Union[list, str]: # pylint: disable=unused-argument
"""Return the best option for default.
Args:
data: The install.json params object.
Returns:
list, str: The default value for the input.
"""
if data.get('type').lower() == 'boolean':
default = str(data.get('default', 'false')).lower()
elif data.get('type').lower() == 'choice':
default = 0
valid_values: list = self._expand_valid_values(data.get('validValues', []))
if data.get('name') == 'tc_action':
for vv in valid_values:
if self.profile.feature.lower() == vv.replace(' ', '_').lower():
default = vv
break
else:
default: str = data.get('default')
elif data.get('type').lower() == 'multichoice':
default: str = data.get('default')
if default is not None and isinstance(default, str):
default: list = default.split('|')
else:
default = data.get('default')
# if default is None:
# # set default from user default file
# default = self.user_defaults.get(data.get('name'))
return default
def _expand_valid_values(self, valid_values: list) -> list:
"""Expand supported playbook variables to their full list.
Args:
valid_values (list): The list of valid values for Choice or MultiChoice inputs.
Returns:
list: An expanded list of valid values for Choice or MultiChoice inputs.
"""
valid_values = list(valid_values)
if '${ARTIFACT_TYPES}' in valid_values:
valid_values.remove('${ARTIFACT_TYPES}')
valid_values.extend(
[
'ASN',
'Asset Group ID',
'Certificate File',
'CIDR',
'Credential ID',
'Document Metadata',
'Email Address',
'Email Attachment File',
'Email Attachment File Name',
'Email Body',
'Email Message File',
'Email Subject',
'Event File',
'Exploit ID',
'File Hash',
'Filter ID',
'Hashtag',
'Host',
'Image File',
'IP Address',
'Log File',
'MutEx',
'PCAP File',
'Policy ID',
'Registry Key',
'Results ID',
'Screenshot File',
'Tactic ID',
'Technique ID',
'Ticket ID',
'Timestamp',
'URL',
'User Agent',
'Vulnerability Detection ID',
'Vulnerability ID',
]
)
elif '${GROUP_TYPES}' in valid_values:
valid_values.remove('${GROUP_TYPES}')
valid_values.extend(
[
'Adversary',
'Attack Pattern',
'Campaign',
'Course of Action',
'Document',
'Email',
'Event',
'Incident',
'Intrusion Set',
'Malware',
'Report',
'Signature',
'Tactic',
'Task',
'Threat',
'Tool',
'Vulnerability',
]
)
elif '${INDICATOR_TYPES}' in valid_values:
valid_values.remove('${INDICATOR_TYPES}')
r = self.profile.session.get('/v2/types/indicatorTypes')
if r.ok:
valid_values.extend(
[t.get('name') for t in r.json().get('data', {}).get('indicatorType', {})]
)
elif '${OWNERS}' in valid_values:
valid_values.remove('${OWNERS}')
r = self.profile.session.get('/v2/owners')
if r.ok:
valid_values.extend(
[o.get('name') for o in r.json().get('data', {}).get('owner', {})]
)
elif '${USERS}' in valid_values:
valid_values.remove('${USERS}')
r = self.profile.session.get('/v2/owners/mine/members')
if r.ok:
valid_values.extend(
[o.get('userName') for o in r.json().get('data', {}).get('user', {})]
)
elif '${USER_GROUPS}' in valid_values:
valid_values.remove('${USER_GROUPS}')
            valid_values.extend(['User Group 1', 'User Group 2'])
return valid_values
def _input_value(self, label: str, option_text: Optional[str] = None) -> str:
"""Return user input.
Args:
label: The label to display to the user.
option_text: the Option text to display to the user.
Returns:
str: The value selected by the user.
"""
# update option text to include help message
option_text = option_text or ''
if option_text:
            # add a space for cleanliness in the user display
option_text = f' {option_text}'
print(f'{c.Fore.WHITE}[? for help]')
prompt = f'{c.Fore.MAGENTA}{label}{c.Fore.RESET}{c.Style.BRIGHT}{option_text}: '
input_value = input(prompt).strip() # nosec
# handle special user inputs
if input_value == '?':
self.present_help()
return self._input_value(label, option_text)
return input_value
@staticmethod
def _split_list(data: list) -> tuple:
"""Split a list in two "equal" parts.
Args:
data: The list of data to split into two equal parts.
Returns:
tuple: The two halves of the list.
"""
half: int = math.ceil(len(data) / 2)
return data[:half], data[half:]
def add_input(self, name: str, data: dict, value: str) -> None:
"""Add an input to inputs.
Args:
name: The name of the input.
data: The install.json params object.
value: The value for the input.
"""
if data.get('required', False):
self._inputs['required'].setdefault(name, value)
else:
self._inputs['optional'].setdefault(name, value)
# def add_user_default(self, key, value, data_type=None):
# """Add data to user default."""
# self.user_defaults.setdefault(self.profile.feature, {})
# if data_type is None:
# self.user_defaults[self.profile.feature][key] = value
# else:
# # store the value under the appropriate data type
# self.user_defaults[self.profile.feature].setdefault(key, {})
# self.user_defaults[self.profile.feature][key].setdefault(data_type, value)
# if self.user_defaults.get('base') is None:
# self.user_defaults['base'] = self.user_defaults[self.profile.feature]
def add_staging_data(self, name: str, type_: str, value: str) -> str:
"""Create staging data and return variable value.
Args:
name: The name of the input.
type_: The type of input (Binary, StringArray, etc.)
value: The value to write in the staging data.
Returns:
            str: The newly created variable string.
"""
arg_value = value
if (
self.profile.ij.runtime_level.lower() not in ['triggerservice', 'webhooktriggerservice']
and value is not None
):
arg_value: str = self.profile.ij.create_variable(name, type_)
self._staging_data['kvstore'].setdefault(arg_value, value)
return arg_value
def collect_binary(self, **kwargs) -> str:
"""Collect binary data
Args:
default (str, kwargs): The default value if no value provided by user.
feedback (bool, kwargs): If True user feedback will be printed.
option_text (str, kwargs): The text shown to the user.
required (str, kwargs): If True the user cannot continue until they provide a value.
Returns:
str: The input str from the user.
"""
input_value: str = self._input_value('Input', kwargs.get('option_text'))
if not input_value:
# if no default value and required force user to input again
if kwargs.get('default') is None and kwargs.get('required') is True:
self.print_required()
return self.collect_binary(**kwargs)
if input_value not in [None, '']:
input_data: str = b64encode(input_value.encode()).decode()
feedback = f'{input_value} -> ({input_data})'
else:
input_data: str = kwargs.get('default')
feedback = input_data
# print user feedback
if kwargs.get('feedback', True):
self.print_feedback(feedback)
return input_data
def collect_binary_array(self, **kwargs) -> list:
"""Collect binary array data
Args:
required (str, kwargs): If True the user cannot continue until they provide a value.
Returns:
list: The input list from the user.
"""
input_values = []
required = kwargs.get('required', False)
while True:
input_value = self.collect_binary(feedback=False, required=required)
if not input_value:
break
input_values.append(input_value)
required = False # only the first input is required
if not input_values:
# return None to ensure data doesn't get added to inputs
input_values = None
# print user feedback
self.print_feedback(input_values)
return input_values
def collect_boolean(self, **kwargs) -> bool:
"""Collect binary data
Args:
default (str, kwargs): The default value if no value provided by user.
option_text (str, kwargs): The text shown to the user.
Returns:
            bool: The boolean value selected by the user.
"""
input_value = self._input_value('Input', kwargs.get('option_text'))
if input_value == '':
input_value = kwargs.get('default')
if str(input_value).lower() not in ['0', 'f', 'false', '1', 't', 'true']:
self.print_invalid_bool()
return self.collect_boolean(**kwargs)
        # convert the input value to a proper boolean
input_value = self.profile.utils.to_bool(input_value)
# print user feedback
self.print_feedback(input_value)
return input_value
def collect_editchoice(self, **kwargs) -> str:
"""Collect edit choice data
Args:
default (str, kwargs): The default value if no value provided by user.
option_text (str, kwargs): The text shown to the user.
required (str, kwargs): If True the user cannot continue until they provide a value.
valid_values (str, kwargs): A list of valid values
Returns:
            str: The user's selected choice.
"""
# collect input value from user and set default if required
input_value: str = self._input_value('EditChoice', kwargs.get('option_text')) or kwargs.get(
'default'
)
# ensure input value is provided when input is required
if input_value is None and kwargs.get('required') is True:
self.print_required()
return self.collect_editchoice(**kwargs)
        # if the input value is None then there is no need to continue
if input_value is None:
return input_value
# set valid values
valid_values: list = kwargs.get('valid_values', [])
# convert to int or recollect input
try:
input_value = int(input_value)
is_between = 0 <= input_value <= (len(valid_values) - 1)
if not is_between:
self.print_invalid_index(f'0-{len(valid_values)}')
return self.collect_editchoice(**kwargs)
input_value = valid_values[input_value]
if input_value == self._no_selection_text:
                # special case for when the user selects no selection
input_value = None
except ValueError:
self.print_feedback(f'Using custom input {input_value}.')
# print user feedback
if kwargs.get('feedback', True):
self.print_feedback(input_value)
return input_value
def collect_choice(self, **kwargs) -> str:
"""Collect choice data
Args:
default (str, kwargs): The default value if no value provided by user.
option_text (str, kwargs): The text shown to the user.
required (str, kwargs): If True the user cannot continue until they provide a value.
valid_values (str, kwargs): A list of valid values
Returns:
            str: The user's selected choice.
"""
# collect input value from user and set default if required
input_value: str = self._input_value('Choice', kwargs.get('option_text')) or kwargs.get(
'default'
)
# ensure input value is provided when input is required
if input_value is None and kwargs.get('required') is True:
self.print_required()
return self.collect_choice(**kwargs)
        # if the input value is None then there is no need to continue
if input_value is None:
return input_value
# set valid values
valid_values: list = kwargs.get('valid_values', [])
# convert to int or recollect input
try:
input_value = int(input_value)
except ValueError:
self.print_invalid_index(f'0-{len(valid_values)}')
return self.collect_choice(**kwargs)
# ensure input value is valid
valid_index_values = [i for i, _ in enumerate(valid_values)]
# valid_index_values = list(range(0, len(valid_values) - 1))
if input_value not in valid_index_values:
self.print_invalid_index(f'0-{len(valid_values)}')
return self.collect_choice(**kwargs)
# using index value provided by user, set value to valid value
input_value = valid_values[input_value]
if input_value == self._no_selection_text:
            # special case for when the user selects no selection
input_value = None
# print user feedback
if kwargs.get('feedback', True):
self.print_feedback(input_value)
return input_value
def collect_exit_code(self, **kwargs) -> int:
"""Collect exit codes.
Args:
option_text (str, kwargs): The text shown to the user.
Returns:
            int: The user's provided exit code.
"""
input_value = self._input_value('Code', kwargs.get('option_text'))
if input_value != '':
try:
input_value = int(input_value)
except ValueError:
self.print_invalid_exit_code()
return self.collect_exit_code(**kwargs)
if input_value not in [0, 1, 3]:
self.print_invalid_exit_code()
return self.collect_exit_code(**kwargs)
return input_value
def collect_exit_codes(self, **kwargs) -> list:
"""Collect exit codes.
Returns:
            list: The user's provided exit codes.
"""
input_values = []
while True:
input_value = self.collect_exit_code(**kwargs)
if input_value == '':
break
input_values.append(input_value)
if not input_values:
            # default to exit code 0 when no exit codes were provided
input_values = [0]
# print user feedback
self.print_feedback(input_values)
return input_values
def collect_key_value(self, **kwargs) -> dict:
"""Collect key value data.
Args:
option_text (str, kwargs): The text shown to the user.
required (str, kwargs): If True the user cannot continue until they provide a value.
Returns:
            dict: The user's provided key value input.
"""
input_value = None
key = self._input_value('Key', option_text=kwargs.get('option_text'))
# ensure input value is provided when input is required
if key == '' and kwargs.get('required') is True:
self.print_required()
return self.collect_key_value(**kwargs)
if key != '':
value = self._input_value('Value')
input_value = {'key': key, 'value': value}
else:
input_value = kwargs.get('default')
# print user feedback
if kwargs.get('feedback', True):
self.print_feedback(input_value)
return input_value
def collect_key_value_array(self, **kwargs) -> list:
"""Collect key value array data
Args:
default (str, kwargs): The default value if no value provided by user.
option_text (str, kwargs): The text shown to the user.
required (str, kwargs): If True the user cannot continue until they provide a value.
Returns:
            list: The user's provided list of key value inputs.
"""
input_values = []
required: bool = kwargs.get('required')
while True:
input_value = self.collect_key_value(
default=kwargs.get('default'),
feedback=False,
                option_text=kwargs.get('option_text'),
required=required,
)
if not input_value:
break
input_values.append(input_value)
required = False
if not input_values:
# return None to ensure data doesn't get added to inputs
input_values = None
# print user feedback
self.print_feedback(input_values)
return input_values
def collect_multichoice(self, **kwargs) -> list:
"""Collect multichoice data
Args:
required (str, kwargs): If True the user cannot continue until they provide a value.
valid_values (str, kwargs): A list of valid values
Returns:
            list: The user's provided list of choice inputs.
"""
input_values = []
required = kwargs.get('required', False)
while True:
input_value = self.collect_choice(
feedback=False,
# option_text=kwargs.get('option_text'),
required=required,
valid_values=kwargs.get('valid_values'),
)
if not input_value:
break
input_values.append(input_value)
required = False
input_values = list(set(input_values))
if input_values:
# format multichoice value as pipe delimited string
input_values = '|'.join(input_values)
else:
# return None to ensure data doesn't get added to inputs
input_values = None
# print user feedback
self.print_feedback(input_values)
return input_values
def collect_string(self, **kwargs) -> str:
"""Collect string data
Args:
option_text (str, kwargs): The text shown to the user.
default (str, kwargs): The default value if no value provided by user.
Returns:
str: The user provided input.
"""
input_value = self._input_value('Input', kwargs.get('option_text', ''))
if not input_value:
input_value = kwargs.get('default')
if input_value is None and kwargs.get('required', False) is True:
self.print_required()
return self.collect_string(**kwargs)
# print user feedback
if kwargs.get('feedback', True):
self.print_feedback(input_value)
# APP-622 - handle null/None values
if input_value == 'null':
input_value = None
elif input_value in ['"null"', "'null'"]:
input_value = 'null'
return input_value
def collect_string_array(self, **kwargs) -> list:
"""Collect string data
Args:
required (str, kwargs): If True the user cannot continue until they provide a value.
Returns:
            list: The user provided inputs.
"""
input_values = []
required = kwargs.get('required', False)
while True:
input_value = self.collect_string(feedback=False, required=required)
if not input_value:
break
input_values.append(input_value)
required = False
if not input_values:
# return None to ensure data doesn't get added to inputs
input_values = None
# print user feedback
self.print_feedback(input_values)
return input_values
def collect_tcentity(self, **kwargs) -> dict:
"""Collect tcentity data
Args:
required (str, kwargs): If True the user cannot continue until they provide a value.
Returns:
            dict: The user provided input.
"""
input_value = None
id_ = self._input_value('ID')
if id_:
value = self._input_value('Value')
type_ = self._input_value('Type')
input_value = {'id': id_, 'value': value, 'type': type_}
if input_value is None and kwargs.get('required', False) is True:
self.print_required()
return self.collect_tcentity(**kwargs)
# print user feedback
if kwargs.get('feedback', True):
self.print_feedback(input_value)
return input_value
def collect_tcentity_array(self, **kwargs) -> list:
"""Collect tcentity array data
Args:
required (str, kwargs): If True the user cannot continue until they provide a value.
Returns:
list: The user provided inputs.
"""
input_values = []
required = kwargs.get('required', False)
while True:
input_value = self.collect_tcentity(feedback=False, required=required)
if not input_value:
break
input_values.append(input_value)
required = False
if not input_values:
# return None to ensure data doesn't get added to inputs
input_values = None
# print user feedback
self.print_feedback(input_values)
return input_values
@property
def inputs(self) -> dict:
"""Return inputs dict."""
return self._inputs
def present(self) -> None:
"""Present interactive menu to build profile."""
def params_data() -> tuple:
# handle non-layout and layout based App appropriately
if self.profile.lj.has_layout:
# using inputs from layout.json since they are required to be in order
# (display field can only use inputs previously defined)
for name in self.profile.lj.params_dict:
# get data from install.json based on name
data = self.profile.ij.params_dict.get(name)
yield name, data
                # hidden fields will not be in layout.json so they need to be included manually
for name, data in self.profile.ij.filter_params_dict(hidden=True).items():
yield name, data
else:
for name, data in self.profile.ij.params_dict.items():
yield name, data
inputs = {}
for name, data in params_data():
if data.get('serviceConfig'):
# inputs that are serviceConfig are not applicable for profiles
continue
if not data.get('hidden'):
# each input will be checked for permutations if the App has layout and not hidden
if not self.profile.permutations.validate_input_variable(name, inputs):
continue
# present the input
value: str = self.input_type_map.get(data.get('type').lower())(name, data)
# update inputs
inputs[name] = value
self.present_exit_code()
def present_boolean(self, name: str, data) -> bool:
"""Build a question for boolean input.
Args:
name: The name of the input field.
data: The install.json input param object.
Returns:
bool: The user provided input.
"""
# print header information
self.print_header(data)
default = self._default(data)
valid_values = ['true', 'false']
option_default = 'false'
option_text = ''
options = []
for v in valid_values:
if v.lower() == default.lower():
option_default = v
v = f'[{v}]'
options.append(v)
option_text = f'''({'/'.join(options)})'''
value = self.collect_boolean(default=option_default, option_text=option_text)
# add input
self.add_input(name, data, value)
return value
def present_editchoice(self, name: str, data: dict) -> str:
"""Build a question for editchoice input.
Args:
name: The name of the input field.
data: The install.json input param object.
Returns:
str: The user provided input.
"""
# print header information
self.print_header(data)
default = self._default(data)
option_index = 0
valid_values = self._expand_valid_values(data.get('validValues', []))
if data.get('required', False) is False:
# add option to invalidate defaults
valid_values.insert(0, self._no_selection_text)
# default value needs to be converted to index
if default:
try:
option_index = valid_values.index(default)
except ValueError:
# if "magic" variable (e.g., ${GROUP_TYPES}) was not expanded then use index 0.
                # there is no way to tell if the default value is part of the expansion.
if any(re.match(r'^\${.*}$', v) for v in valid_values):
option_index = 0
else:
print(
f'''{c.Fore.RED}Invalid value of ({default}) for {data.get('name')}, '''
'check that default value and validValues match in install.json.'
)
sys.exit()
option_text = f'[{option_index}]'
# build options list to display to the user in two columns
options = []
for i, v in enumerate(valid_values):
options.append(f'{i}. {v}')
# display options list into two columns
left, right = self._split_list(options)
for i, _ in enumerate(left):
ld = left[i]
try:
rd = right[i]
except IndexError:
rd = ''
print(f'{ld:40} {rd:40}')
# collect user input
value = self.collect_editchoice(
default=option_index, option_text=option_text, valid_values=valid_values
)
# add input
self.add_input(name, data, value)
return value
def present_choice(self, name: str, data: dict) -> str:
"""Build a question for choice input.
Args:
name: The name of the input field.
data: The install.json input param object.
Returns:
str: The user provided input.
"""
# print header information
self.print_header(data)
default = self._default(data)
option_index = 0
valid_values = self._expand_valid_values(data.get('validValues', []))
if data.get('required', False) is False:
# add option to invalidate defaults
valid_values.insert(0, self._no_selection_text)
# default value needs to be converted to index
if default:
try:
option_index = valid_values.index(default)
except ValueError:
# if "magic" variable (e.g., ${GROUP_TYPES}) was not expanded then use index 0.
                # there is no way to tell if the default value is part of the expansion.
if any(re.match(r'^\${.*}$', v) for v in valid_values):
option_index = 0
else:
print(
f'''{c.Fore.RED}Invalid value of ({default}) for {data.get('name')}, '''
'check that default value and validValues match in install.json.'
)
sys.exit()
option_text = f'[{option_index}]'
# build options list to display to the user in two columns
options = []
for i, v in enumerate(valid_values):
options.append(f'{i}. {v}')
# display options list into two columns
left, right = self._split_list(options)
for i, _ in enumerate(left):
ld = left[i]
try:
rd = right[i]
except IndexError:
rd = ''
print(f'{ld:40} {rd:40}')
# collect user input
value = self.collect_choice(
default=option_index, option_text=option_text, valid_values=valid_values
)
# add input
self.add_input(name, data, value)
return value
def present_data_types(self, data_types: list, required: Optional[bool] = False) -> str:
"""Present data types options.
Args:
data_types: A list of optional data types.
required: If False the no selection option will be added.
Returns:
str: The user provided input.
"""
if 'Any' in data_types:
data_types = [
'Binary',
'BinaryArray',
'KeyValue',
'KeyValueArray',
'String',
'StringArray',
'TCEntity',
'TCEntityArray',
]
# add option to not select an index value if input is not required
if required is False:
data_types.insert(0, self._no_selection_text)
# build options list to display to the user in two columns
options = []
for i, v in enumerate(data_types):
options.append(f'{i}. {v}')
left, right = self._split_list(options)
for i, _ in enumerate(left):
ld = left[i]
try:
rd = right[i]
except IndexError:
rd = ''
print(f'{ld:40} {rd:40}')
data_type = None
while not data_type:
index = self._input_value('Type', '[0]') or 0
try:
data_type = data_types[int(index)]
except (IndexError, TypeError, ValueError):
print(
f'{c.Fore.RED}Invalid index of {index} provided. '
                    f'Please provide an integer between 0-{len(data_types) - 1}'
)
sys.exit(1)
return data_type
def present_exit_code(self) -> None:
"""Provide user input for exit code."""
self.print_header({'label': 'Exit Codes'})
self.exit_codes = list(set(self.collect_exit_codes(default=[0], option_text='[0]')))
@staticmethod
def present_help() -> None:
"""Provide user help information."""
print(
f'{c.Fore.CYAN}For String type inputs: \n'
' * A value of null will be treated as an actual null value.\n'
' * Using "null" or \'null\' to insert a string of null.\n'
)
print(f'{c.Fore.CYAN}When done entering array data press enter to continue.')
    def present_key_value_list(self, name: str, data: dict) -> str:
"""Build a question for key value list input.
Args:
name: The name of the input field.
data: The install.json input param object.
Returns:
str: The user provided input.
"""
# print header information
self.print_header(data)
# the default value from install.json or user_data
default = self._default(data) # array of default values
# collect input
input_data = self.collect_key_value_array(default=default, required=data.get('required'))
# create variable
variable = self.add_staging_data(name, 'KeyValueArray', input_data)
# add input to args
self.add_input(name, data, variable)
# user feedback
feedback_data = input_data
if input_data is not None:
feedback_data = json.dumps(feedback_data)
# # update default
# if default is None:
# self.add_user_default(name, input_data)
return variable
def present_multichoice(self, name: str, data: dict) -> list:
"""Build a question for multichoice input.
Args:
name: The name of the input field.
data: The install.json input param object.
Returns:
list: The user provided inputs.
"""
# print header information
self.print_header(data)
default = self._default(data) # array of default values
option_indexes = [0]
valid_values = self._expand_valid_values(data.get('validValues', []))
if data.get('required', False) is False:
# add option to invalidate defaults
valid_values.insert(0, self._no_selection_text)
        # default values will be returned as an array (e.g., one|two -> ['one', 'two']).
# using the valid values array we can look up these values to show as default in input.
if default:
option_indexes = []
for d in default:
try:
option_indexes.append(valid_values.index(d))
except ValueError:
# if "magic" variable (e.g., ${GROUP_TYPES}) was not expanded then skip value.
                    # there is no way to tell if the default value is part of the expansion.
if any(re.match(r'^\${.*}$', v) for v in valid_values):
continue
print(
f'''{c.Fore.RED}Invalid value of ({d}) for {data.get('name')}, check '''
'that default value(s) and validValues match in install.json.'
)
sys.exit()
option_text = f''' [{','.join([str(v) for v in option_indexes])}]'''
# build options list to display to the user in two columns
options = []
for i, v in enumerate(valid_values):
options.append(f'{i}. {v}')
# display options list into two columns
left, right = self._split_list(options)
for i, _ in enumerate(left):
ld = left[i]
try:
rd = right[i]
except IndexError:
rd = ''
print(f'{ld:40} {rd:40}')
# collect user input
values = self.collect_multichoice(
default=option_indexes,
option_text=option_text,
required=data.get('required'),
valid_values=valid_values,
)
# add input
self.add_input(name, data, values)
return values
def present_string(self, name: str, data: dict) -> str:
"""Build a question for string input.
Args:
name: The name of the input field.
data: The install.json input param object.
Returns:
str: The user provided input.
"""
# display header information
self.print_header(data)
# use playbook data types to determine what input to provide (default to String)
data_type = data.get('playbookDataType', ['String'])[0]
if len(data.get('playbookDataType', [])) > 1 or data_type.lower() == 'any':
data_type = self.present_data_types(
data.get('playbookDataType'), required=data.get('required', False)
)
# no need to proceed if there is not valid data type selected.
if data_type == self._no_selection_text:
self.add_input(name, data, None)
self.print_feedback('null')
return None
# the default value from install.json or user_data
default = self._default(data)
option_text = ''
if default is not None:
option_text = f'[{default}]'
# use data_type to properly format collection input
input_value = self.collect_type_map[data_type](
default=default, option_text=option_text, required=data.get('required', False)
)
# add staging data and get variable name
variable = self.add_staging_data(name, data_type, input_value)
# add input
self.add_input(name, data, variable)
# # update default
# if default is None:
# if len(data.get('playbookDataType', [])) > 1 or data_type.lower() == 'any':
# # for inputs that take multiple types we need to store user default with the type
# self.add_user_default(name, input_value, data_type)
# else:
# self.add_user_default(name, input_value)
return variable
@staticmethod
def print_feedback(feedback_value: Union[list, str]) -> None:
"""Print the value used."""
print(f'Using value: {c.Fore.GREEN}{feedback_value}\n')
@staticmethod
def print_header(data: dict) -> None:
"""Enrich the header with metadata.
Args:
data: The install.json input param object.
"""
def _print_metadata(title: str, value: str) -> None:
"""Print the title and value"""
print(f'{c.Fore.CYAN}{title!s:<22}: {c.Fore.RESET}{c.Style.BRIGHT}{value}')
label = data.get('label', 'NO LABEL')
print(f'\n{c.Fore.GREEN}{label}')
# type
_print_metadata('Type', data.get('type'))
# default
default = data.get('default')
if default:
_print_metadata('Default', default)
# note
note = data.get('note', '')[:200]
if note:
_print_metadata('Note', note)
# required
_print_metadata('Required', str(data.get('required', False)).lower())
# hidden
if data.get('hidden'):
_print_metadata('Hidden', 'true')
# Input Types
pbt = ','.join(data.get('playbookDataType', []))
if pbt:
_print_metadata('Playbook Data Types', pbt)
vv = ','.join(data.get('validValues', []))
if vv:
_print_metadata('Valid Values', vv)
print('-' * 50)
@staticmethod
def print_invalid_bool() -> None:
"""Print a invalid bool error."""
print(f'{c.Fore.RED}The provided value is not a boolean value (true/false).\n')
@staticmethod
def print_invalid_exit_code() -> None:
"""Print a invalid exit code error."""
print(f'{c.Fore.RED}The provided value is not a valid exit code (0, 1).\n')
@staticmethod
def print_invalid_index(range_: str) -> None:
"""Print a invalid index error.
Args:
range_: The range of possible value for choice or multichoice selections.
"""
print(
f'{c.Fore.RED}The provided index value is not '
f'valid, please select a valid value between {range_}.\n'
)
@staticmethod
def print_required() -> None:
"""Print a required error."""
print(f'{c.Fore.RED}This input is required, please enter an appropriate value.\n')
@property
    def staging_data(self) -> dict:
"""Return staging data dict."""
return self._staging_data
# @property
# def user_defaults(self):
# """Return user defaults"""
# if self._user_defaults is None:
# user_defaults = {}
# if os.path.isfile(self.user_defaults_filename):
# with open(self.user_defaults_filename, 'r') as fh:
# user_defaults = json.load(fh)
# # use feature defaults
# self._user_defaults = user_defaults.get(self.profile.feature)
# if self._user_defaults is None:
# # use base defaults if not feature defaults found
# self._user_defaults = user_defaults.get('base', {})
# return self._user_defaults
```
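The two-column option display used by `present_choice`, `present_editchoice`, and `present_multichoice` above is easiest to see in isolation. Below is a small standalone sketch of that layout logic (the option values are made up); in the framework itself the class is driven as shown in `Test.interactive_profile` earlier in this document: call `present()`, then read `inputs`, `staging_data`, and `exit_codes` to build the profile.

```python
# Standalone sketch of the two-column option display (mirrors Interactive._split_list
# plus the print loop in the present_* methods). The option values are hypothetical.
import math


def split_list(data: list) -> tuple:
    """Split a list into two roughly equal halves."""
    half = math.ceil(len(data) / 2)
    return data[:half], data[half:]


valid_values = ['No Selection', 'Adversary', 'Campaign', 'Incident', 'Report']
options = [f'{i}. {v}' for i, v in enumerate(valid_values)]
left, right = split_list(options)
for i, ld in enumerate(left):
    rd = right[i] if i < len(right) else ''
    print(f'{ld:40} {rd:40}')
# 0. No Selection                          3. Incident
# 1. Adversary                             4. Report
# 2. Campaign
```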
#### File: group/group_types/adversary.py
```python
from ..group import Group
class Adversary(Group):
"""Unique API calls for Adversary API Endpoints"""
    def __init__(self, ti: 'ThreatIntelligence', **kwargs):
"""Initialize Class properties"""
super().__init__(
ti, sub_type='Adversary', api_entity='adversary', api_branch='adversaries', **kwargs
)
def add_asset(self, asset_type, asset_value):
"""Add an asset to the Adversary
Args:
            asset_type: (str) Either phone, handle, or url.
asset_value: (str) the value for the asset
Returns:
requests.Response: The response from the API call.
"""
if not self.can_update():
self._handle_error(910, [self.type])
asset_methods = {
'handle': self.tc_requests.add_adversary_handle_asset,
'phone': self.tc_requests.add_adversary_phone_asset,
'url': self.tc_requests.add_adversary_url_asset,
}
# handle invalid input
if asset_methods.get(asset_type.lower()) is None:
self._handle_error(
925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]
)
return asset_methods.get(asset_type.lower())(self.unique_id, asset_value)
def add_handle_asset(self, value):
"""Add a Handle asset to the adversary.
Args:
value: The value of the asset
Returns:
requests.Response: The response from the API call.
"""
return self.add_asset('HANDLE', value)
def add_phone_asset(self, value):
"""Add a phone asset to the adversary.
Args:
value: The value of the asset
Returns:
requests.Response: The response from the API call.
"""
return self.add_asset('PHONE', value)
def add_url_asset(self, value):
"""Add a URL asset to the adversary.
Args:
value: The value of the asset
Returns:
requests.Response: The response from the API call.
"""
return self.add_asset('URL', value)
def asset(self, asset_id, asset_type, action='GET'):
"""Get specific Adversary asset type from API
Args:
asset_id: (str) The ID of the asset.
asset_type: (str) Either phone, handle, or url.
action: (str): The HTTP method (e.g., DELETE or GET)
Returns:
requests.Response: The response from the API call.
"""
if not self.can_update():
self._handle_error(910, [self.type])
asset_methods = {
'handle': self.tc_requests.adversary_handle_asset,
'phone': self.tc_requests.adversary_phone_asset,
'url': self.tc_requests.adversary_url_asset,
}
# handle invalid input
if asset_methods.get(asset_type.lower()) is None:
self._handle_error(
925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]
)
return asset_methods.get(asset_type.lower())(self.unique_id, asset_id, action=action)
def assets(self, asset_type=None):
"""Retrieve all of the assets of a given asset_type
Args:
asset_type: (str) Either None, PHONE, HANDLE, or URL
Returns:
requests.Response: The response from the API call.
"""
if not self.can_update():
self._handle_error(910, [self.type])
asset_methods = {
'handle': self.tc_requests.adversary_handle_assets,
'phone': self.tc_requests.adversary_phone_assets,
'url': self.tc_requests.adversary_url_assets,
}
if asset_type is None:
return self.tc_requests.adversary_assets(self.unique_id)
# handle invalid input
if asset_methods.get(asset_type.lower()) is None:
self._handle_error(
925, ['asset_type', 'assets', 'asset_type', 'asset_type', asset_type]
)
return asset_methods.get(asset_type.lower())(self.unique_id)
def delete_asset(self, asset_id, asset_type):
"""Delete the asset with the provided asset_id.
Args:
asset_id: The id of the asset.
asset_type: The asset type.
Returns:
requests.Response: The response from the API call.
"""
return self.asset(asset_id, asset_type=asset_type, action='DELETE')
def delete_handle_asset(self, asset_id):
"""Delete the handle asset with the passed in id
Args:
asset_id: The id of the asset to be deleted
Returns:
requests.Response: The response from the API call.
"""
return self.delete_asset(asset_id, 'HANDLE')
def delete_phone_asset(self, asset_id):
"""Delete the phone asset with the passed in id
Args:
asset_id: The id of the asset to be deleted
Returns:
requests.Response: The response from the API call.
"""
return self.delete_asset(asset_id, 'PHONE')
def delete_url_asset(self, asset_id):
"""Delete the url asset with the passed in id
Args:
asset_id: The id of the asset to be deleted
Returns:
requests.Response: The response from the API call.
"""
return self.delete_asset(asset_id, 'URL')
def get_asset(self, asset_id, asset_type):
"""Get the asset with the provided asset_id & asset_type.
Args:
asset_id: (str) The ID of the asset.
asset_type: (str) Either None, PHONE, HANDLE, or URL
Returns:
requests.Response: The response from the API call.
"""
return self.asset(asset_id, asset_type=asset_type)
def get_handle_asset(self, asset_id):
"""Get the handle asset with the passed in id
Args:
asset_id: The id of the asset.
Returns:
requests.Response: The response from the API call.
"""
return self.get_asset(asset_id, 'HANDLE')
def get_phone_asset(self, asset_id):
"""Get the phone asset with the passed in id
Args:
asset_id: The id of the asset.
Returns:
requests.Response: The response from the API call.
"""
return self.get_asset(asset_id, 'PHONE')
def get_url_asset(self, asset_id):
"""Get the url asset with the passed in id
Args:
asset_id: The id of the asset.
Returns:
requests.Response: The response from the API call.
"""
return self.get_asset(asset_id, 'URL')
# def handle_asset(self, asset_id, action='GET'):
# """Get the handle asset with the passed in id.
# Args:
# asset_id: The id of the asset.
# action: (str): The HTTP method (e.g., DELETE or GET)
# Returns:
# requests.Response: The response from the API call.
# """
# return self.asset(asset_id, 'HANDLE', action=action)
def handle_assets(self):
"""Return all of the handle assets"""
return self.assets(asset_type='HANDLE')
# def phone_asset(self, asset_id, action='GET'):
# """Get the phone asset with the passed in id.
# Args:
# asset_id: The id of the asset.
# action: (str): The HTTP method (e.g., DELETE or GET)
# Returns:
# requests.Response: The response from the API call.
# """
# return self.asset(asset_id, 'PHONE', action=action)
def phone_assets(self):
"""Return all of the phone assets"""
return self.assets(asset_type='PHONE')
# def url_asset(self, asset_id, action='GET'):
# """Get the url asset with the passed in id.
# Args:
# asset_id: The id of the asset.
# action: (str): The HTTP method (e.g., DELETE or GET)
# Returns:
# requests.Response: The response from the API call.
# """
# return self.asset(asset_id, 'URL', action=action)
def url_assets(self):
"""Return all of the url assets"""
return self.assets(asset_type='URL')
```
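A hedged usage sketch for the Adversary asset helpers follows. It assumes a configured `ThreatIntelligence` instance (`ti`) and an adversary that already exists in ThreatConnect, since `can_update()` gates every asset call on `unique_id`; the `name`, `owner`, and `unique_id` keyword arguments are assumptions about the `Group` base class, which is not shown here.

```python
# Hedged sketch: `ti` is assumed to be a configured ThreatIntelligence instance, and the
# name/owner/unique_id kwargs are assumptions about the Group base class (not shown here).
adversary = Adversary(ti, name='example-adversary', owner='Example Org', unique_id=12345)

# add assets of each supported type (phone, handle, url)
adversary.add_handle_asset('@example-handle')
adversary.add_phone_asset('555-0100')
adversary.add_url_asset('https://example.com/malicious')

# retrieve assets, either all of them or by type
all_assets = adversary.assets()
handle_assets = adversary.handle_assets()

# fetch or delete a single asset by id and type (67890 is a hypothetical asset id)
response = adversary.get_asset(67890, 'URL')
adversary.delete_asset(67890, 'URL')
```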
#### File: group/group_types/tactic.py
```python
from ..group import Group
class Tactic(Group):
"""Unique API calls for Tactic API Endpoints
Args:
name (str, kwargs): [Required for Create] The name for this Group.
        owner (str, kwargs): The owner for this Group. Defaults to the default Org when not provided.
"""
def __init__(self, ti: 'ThreatIntelligence', **kwargs):
"""Initialize Class properties."""
super().__init__(ti, sub_type='Tactic', api_entity='tactic', api_branch='tactics', **kwargs)
```
#### File: group/group_types/tool.py
```python
from ..group import Group
class Tool(Group):
"""Unique API calls for Tool API Endpoints
Args:
name (str, kwargs): [Required for Create] The name for this Group.
        owner (str, kwargs): The owner for this Group. Defaults to the default Org when not provided.
"""
def __init__(self, ti: 'ThreatIntelligence', **kwargs):
"""Initialize Class properties."""
super().__init__(ti, sub_type='Tool', api_entity='tool', api_branch='tools', **kwargs)
```
#### File: threat_intelligence/mappings/tag.py
```python
from tcex.utils import Utils
from ..tcex_ti_tc_request import TiTcRequest
class Tag:
"""Unique API calls for Tag API Endpoints
Args:
        ti (ThreatIntelligence): An instance of the ThreatIntelligence class.
        name (str): The name of the tag.
"""
    def __init__(self, ti: 'ThreatIntelligence', name):
"""Initialize Class Properties."""
self._name = name
# properties
self._api_entity = 'tag'
self._api_sub_type = None
self._api_type = None
self._tc_requests = TiTcRequest(ti.session)
self._type = 'tags'
self._utils = Utils()
self.ti = ti
@staticmethod
def is_tag():
"""Return true is instance is a tag object."""
return True
def groups(self, group_type=None, filters=None, owner=None, params=None):
"""Get all groups from a tag.
Args:
            group_type: The type of group to retrieve.
            filters: A filters object to limit the results.
            owner: The owner to query.
            params: Optional query parameters for the request.
"""
if group_type and group_type.lower() == 'task':
group = self.ti.task()
else:
group = self.ti.group(group_type)
return self.tc_requests.groups_from_tag(
group, self.name, filters=filters, owner=owner, params=params
)
def indicators(self, indicator_type=None, filters=None, owner=None, params=None):
"""Get all indicators from a tag.
Args:
            indicator_type: The type of indicator to retrieve.
            filters: A filters object to limit the results.
            owner: The owner to query.
            params: Optional query parameters for the request.
"""
indicator = self.ti.indicator(indicator_type)
yield from self.tc_requests.indicators_from_tag(
indicator, self.name, filters=filters, owner=owner, params=params
)
def victims(self, filters=None, owner=None, params=None):
"""Get all victims from a tag."""
victim = self.ti.victim()
yield from self.tc_requests.victims_from_tag(
victim, self.name, filters=filters, owner=owner, params=params
)
@property
def name(self):
"""Get the tag name."""
return self._name
@property
def tc_requests(self):
"""Get the tc request object"""
return self._tc_requests
@name.setter
def name(self, name):
"""Set the tag name
Args:
name:
Returns:
"""
self._name = name
@tc_requests.setter
def tc_requests(self, tc_requests):
"""Set the tc request object.
        Args:
            tc_requests: The TiTcRequest object to use for API calls.
        """
self._tc_requests = tc_requests
```
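A hedged usage sketch for the Tag helpers follows, again assuming a configured `ThreatIntelligence` instance (`ti`); the tag name and indicator type are example values. Note that `indicators()` and `victims()` are generators, while `groups()` returns the underlying request result directly.

```python
# Hedged sketch: `ti` is assumed to be a configured ThreatIntelligence instance;
# the tag name and indicator type below are example values.
tag = Tag(ti, 'Ransomware')

# indicators() and victims() yield results from the API
for indicator in tag.indicators(indicator_type='Address'):
    print(indicator)

for victim in tag.victims():
    print(victim)

# groups() returns the request result from groups_from_tag()
adversaries = tag.groups(group_type='Adversary')
```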
#### File: tests/case_management/test_artifact_interface.py
```python
import os
import time
from random import randint
# third-party
from dateutil.parser import parse
# first-party
from tcex.case_management.tql import TQL
from .cm_helpers import CMHelper, TestCaseManagement
class TestArtifact(TestCaseManagement):
"""Test TcEx CM Artifact Interface."""
def setup_method(self):
"""Configure setup before all tests."""
self.cm_helper = CMHelper('artifact')
self.cm = self.cm_helper.cm
self.tcex = self.cm_helper.tcex
def teardown_method(self):
"""Configure teardown before all tests."""
if os.getenv('TEARDOWN_METHOD') is None:
self.cm_helper.cleanup()
def test_artifact_api_options(self):
"""Test filter keywords."""
super().obj_api_options()
def test_artifact_code_gen(self):
"""Generate code and docstring from Options methods.
This is not truly a test case, but best place to store it for now.
"""
doc_string, filter_map, filter_class = super().obj_code_gen()
assert doc_string
assert filter_map
print(filter_map)
assert filter_class
def test_artifact_filter_keywords(self):
"""Test filter keywords."""
super().obj_filter_keywords()
def test_artifact_object_properties(self):
"""Test properties."""
super().obj_properties()
def test_artifact_object_properties_extra(self):
"""Test properties."""
super().obj_properties_extra()
def test_artifact_create_by_case_id(self):
"""Test Artifact Creation"""
# create case
case = self.cm_helper.create_case()
# artifact data
artifact_data = {
'case_id': case.id,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
# create artifact
artifact = self.cm.artifact(**artifact_data)
artifact.submit()
# get artifact from API to use in asserts
artifact = self.cm.artifact(id=artifact.id)
artifact.get()
# run assertions on returned data
assert artifact.required_properties # coverage: required_properties
assert artifact.intel_type == artifact_data.get('intel_type')
assert artifact.summary == artifact_data.get('summary')
assert artifact.type == artifact_data.get('type')
assert artifact.field_name is None
def test_artifact_create_by_case_xid(self, request):
"""Test Artifact Creation"""
# create case
case_xid = f'{request.node.name}-{time.time()}'
self.cm_helper.create_case(xid=case_xid)
# artifact data
artifact_data = {
'case_xid': case_xid,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
# create artifact
artifact = self.cm.artifact(**artifact_data)
artifact.submit()
# get single artifact by id
artifact = self.cm.artifact(id=artifact.id)
artifact.get()
# run assertions on returned data
assert artifact.intel_type == artifact_data.get('intel_type')
assert artifact.summary == artifact_data.get('summary')
assert artifact.type == artifact_data.get('type')
def test_artifact_delete_by_id(self):
"""Test Artifact Deletion"""
# create case
case = self.cm_helper.create_case()
# artifact data
artifact_data = {
'case_id': case.id,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
# create artifact
artifact = self.cm.artifact(**artifact_data)
artifact.submit()
# get single artifact by id
artifact = self.cm.artifact(id=artifact.id)
artifact.get()
# delete the artifact
artifact.delete()
# test that artifact is deleted
try:
artifact.get()
assert False
except RuntimeError:
pass
def test_artifact_get_many(self):
"""Test Artifact Get Many"""
# create case
case = self.cm_helper.create_case()
# artifact data
artifact_data = {
'case_id': case.id,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
# create artifact
artifact = self.cm.artifact(**artifact_data)
artifact.submit()
# iterate over all artifact looking for needle
for a in self.cm.artifacts():
if a.summary == artifact_data.get('summary'):
assert artifact.intel_type == artifact_data.get('intel_type')
assert artifact.type == artifact_data.get('type')
break
else:
assert False
def test_artifact_get_single_by_id(self):
"""Test Artifact Get by Id"""
# create case
case = self.cm_helper.create_case()
# artifact data
artifact_data = {
'case_id': case.id,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
# create artifact
artifact = self.cm.artifact(**artifact_data)
artifact.submit()
# get single artifact by id
artifact = self.cm.artifact(id=artifact.id)
artifact.get(params={'result_limit': 10})
# run assertions on returned data
assert str(artifact) # coverage: __str__ method
assert artifact.intel_type == artifact_data.get('intel_type')
assert artifact.summary == artifact_data.get('summary')
assert artifact.type == artifact_data.get('type')
def test_artifact_task_get_single_by_id_properties(self, request):
"""Test Artifact get single attached to task by id"""
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
'workflow_phase': 0,
'workflow_step': 1,
'xid': f'{request.node.name}-{time.time()}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
file_data = (
            '<KEY>IGZ<KEY>'
)
# task data
artifact_data = {
'task_id': task.id,
'task_xid': task.xid,
'source': 'artifact source',
'file_data': f'{file_data}',
'summary': 'pytest test file artifact',
'type': 'Certificate File',
'note_text': 'artifact note text',
}
# create task
artifact = self.cm.artifact()
# add properties
artifact.task_id = artifact_data.get('task_id')
artifact.task_xid = artifact_data.get('task_xid')
artifact.file_data = artifact_data.get('file_data')
artifact.source = artifact_data.get('source')
artifact.summary = artifact_data.get('summary')
artifact.type = artifact_data.get('type')
# add note
note_data = {'text': artifact_data.get('note_text')}
artifact.add_note(**note_data)
artifact.submit()
# get task from API to use in asserts
artifact = self.cm.artifact(id=artifact.id)
artifact.get(all_available_fields=True)
# run assertions on returned data
assert artifact.case_id == case.id
assert artifact.case_xid == case.xid
assert artifact.file_data == file_data
assert artifact.source == artifact_data.get('source')
assert artifact.summary == artifact_data.get('summary')
assert artifact.task.name == task.name
assert artifact.task_id == task.id
assert artifact.task_xid == task.xid
assert artifact.intel_type is None
assert artifact.type == artifact_data.get('type')
for note in artifact.notes:
if note.text == artifact_data.get('note_text'):
break
        else:
            assert False, 'Note not found'
# assert read-only data
assert artifact.analytics_priority_level is None
assert artifact.analytics_score is None
assert artifact.analytics_type is None
assert artifact.artifact_type.name == artifact_data.get('type')
try:
parse(artifact.date_added)
except ValueError:
assert False, 'Invalid date added'
assert artifact.parent_case.id == case.id
# test as_entity
assert artifact.as_entity.get('value') == artifact_data.get('summary')
def test_artifact_case_get_single_by_id_properties(self):
"""Test Artifact get single attached to case by id"""
# create case
case = self.cm_helper.create_case()
file_data = (
            '<KEY>'
)
# task data
artifact_data = {
'case_id': case.id,
'case_xid': case.xid,
'source': 'artifact source',
'file_data': f'{file_data}',
'summary': 'pytest test file artifact',
'type': 'Certificate File',
'note_text': 'artifact note text',
}
# create task
artifact = self.cm.artifact()
# add properties
artifact.case_id = artifact_data.get('case_id')
artifact.case_xid = artifact_data.get('case_xid')
artifact.file_data = artifact_data.get('file_data')
artifact.source = artifact_data.get('source')
artifact.summary = artifact_data.get('summary')
artifact.type = artifact_data.get('type')
# add note
notes = {'data': [{'text': artifact_data.get('note_text')}]}
artifact.notes = notes
artifact.submit()
# get task from API to use in asserts
artifact = self.cm.artifact(id=artifact.id)
artifact.get(all_available_fields=True)
# run assertions on returned data
assert artifact.case_id == artifact_data.get('case_id')
assert artifact.case_xid == artifact_data.get('case_xid')
assert artifact.file_data == file_data
assert artifact.source == artifact_data.get('source')
assert artifact.summary == artifact_data.get('summary')
assert artifact.task is None
assert artifact.task_id is None
assert artifact.task_xid is None
assert artifact.intel_type is None
assert artifact.type == artifact_data.get('type')
for note in artifact.notes:
if note.text == artifact_data.get('note_text'):
break
        else:
            assert False, 'Note not found'
# assert read-only data
assert artifact.analytics_priority_level is None
assert artifact.analytics_score is None
assert artifact.analytics_type is None
assert artifact.artifact_type.name == artifact_data.get('type')
try:
parse(artifact.date_added)
except ValueError:
assert False, 'Invalid date added'
assert artifact.parent_case.id == case.id
# test as_entity
assert artifact.as_entity.get('value') == artifact_data.get('summary')
def test_artifact_get_by_tql_filter_case_id(self):
"""Test Artifact Get by TQL"""
# create case
case = self.cm_helper.create_case()
# artifact data
artifact_data = {
'case_id': case.id,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
# create artifact
artifact = self.cm.artifact(**artifact_data)
artifact.submit()
# retrieve artifacts using TQL
artifacts = self.cm.artifacts()
artifacts.filter.case_id(TQL.Operator.EQ, case.id)
for artifact in artifacts:
assert artifact.summary == artifact_data.get('summary')
assert artifact.type == artifact_data.get('type')
break
else:
assert False, 'No artifact returned for TQL'
def test_artifact_get_by_note_id_filter(self, request):
"""Test Artifact Get by TQL"""
# create case
case = self.cm_helper.create_case()
# artifact data
artifact_data = {
'case_id': case.id,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
# create artifact
artifact = self.cm.artifact(**artifact_data)
artifact.submit()
note_data = {
'text': f'note for artifact in {request.node.name}',
'artifact_id': artifact.id,
}
# add a note to a artifact
note = self.cm.note(**note_data)
note.submit()
artifacts = self.cm.artifacts()
artifacts.filter.note_id(TQL.Operator.EQ, note.id)
assert len(artifacts) == 1
for artifact in artifacts:
assert artifact.id == note_data.get('artifact_id')
assert artifact.summary == artifact_data.get('summary')
def test_artifact_get_by_has_case_filter_id(self):
"""Test Artifact Get by TQL"""
# create case
case = self.cm_helper.create_case()
# artifact data
artifact_data_1 = {
'case_id': case.id,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
artifact_data_2 = {
'case_id': case.id,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
# create artifact
artifact_1 = self.cm.artifact(**artifact_data_1)
artifact_2 = self.cm.artifact(**artifact_data_2)
artifact_1.submit()
artifact_2.submit()
artifacts = self.cm.artifacts()
artifacts.filter.has_case.id(TQL.Operator.EQ, case.id)
assert len(artifacts) == 2
ids = [artifact_1.id, artifact_2.id]
summaries = [artifact_1.summary, artifact_2.summary]
for artifact in artifacts:
assert artifact.id in ids
assert artifact.summary in summaries
ids.remove(artifact.id)
summaries.remove(artifact.summary)
def test_artifact_get_by_has_note_filter_id(self, request):
"""Test Artifact Get by TQL"""
# create case
case = self.cm_helper.create_case()
# artifact data
artifact_data = {
'case_id': case.id,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
# create artifact
artifact = self.cm.artifact(**artifact_data)
artifact.submit()
note_data = {
'text': f'note for artifact in {request.node.name}',
'artifact_id': artifact.id,
}
# add a note to an artifact
note = self.cm.note(**note_data)
note.submit()
artifacts = self.cm.artifacts()
artifacts.filter.has_note.id(TQL.Operator.EQ, note.id)
assert len(artifacts) == 1
for artifact in artifacts:
assert artifact.id == note_data.get('artifact_id')
assert artifact.summary == artifact_data.get('summary')
def test_artifact_get_by_has_task_filter_id(self, request):
"""Test Artifact Get by TQL"""
# create case
case = self.cm_helper.create_case()
# artifact data
artifact_data = {
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
# task data
task_data = {
'case_id': case.id,
'description': f'a description from {request.node.name}',
'name': f'name-{request.node.name}',
}
task = self.cm.task(**task_data)
task.add_artifact(**artifact_data)
task.submit()
task.get(all_available_fields=True)
artifact_id = None
for artifact in task.artifacts:
artifact_id = artifact.id
artifacts = self.cm.artifacts()
artifacts.filter.has_task.id(TQL.Operator.EQ, task.id)
assert len(artifacts) == 1
for artifact in artifacts:
assert artifact.id == artifact_id
assert artifact.summary == artifact_data.get('summary')
# TODO: checking with MJ on what this should be
def test_artifact_get_by_tql_filter_comment_id(self):
"""Test Artifact Get by TQL"""
def test_artifact_get_by_tql_filter_id(self):
"""Test Artifact Get by TQL"""
# create case
case = self.cm_helper.create_case()
# artifact data
artifact_data = {
'case_id': case.id,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
# create artifact
artifact = self.cm.artifact(**artifact_data)
artifact.submit()
# retrieve artifacts using TQL
artifacts = self.cm.artifacts()
artifacts.filter.id(TQL.Operator.EQ, artifact.id)
for artifact in artifacts:
assert artifact.summary == artifact_data.get('summary')
assert artifact.type == artifact_data.get('type')
break
else:
assert False, 'No artifact returned for TQL'
# TODO: this needs some consideration
def test_artifact_get_by_tql_filter_hascase(self):
"""Test Artifact Get by TQL"""
def test_artifact_get_by_tql_filter_source(self):
"""Test Artifact Get by TQL"""
# create case
case = self.cm_helper.create_case()
# artifact data
artifact_data = {
'case_id': case.id,
'intel_type': 'indicator-ASN',
'source': 'pytest',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
# create artifact
artifact = self.cm.artifact(**artifact_data)
artifact.submit()
# retrieve artifacts using TQL
artifacts = self.cm.artifacts()
artifacts.filter.case_id(TQL.Operator.EQ, case.id)
artifacts.filter.source(TQL.Operator.EQ, artifact_data.get('source'))
for artifact in artifacts:
assert artifact.summary == artifact_data.get('summary')
assert artifact.type == artifact_data.get('type')
break
else:
assert False, 'No artifact returned for TQL'
def test_artifact_get_by_tql_filter_summary(self):
"""Test Artifact Get by TQL"""
# create case
case = self.cm_helper.create_case()
# artifact data
artifact_data = {
'case_id': case.id,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
# create artifact
artifact = self.cm.artifact(**artifact_data)
artifact.submit()
# retrieve artifacts using TQL
artifacts = self.cm.artifacts()
artifacts.filter.case_id(TQL.Operator.EQ, case.id)
artifacts.filter.summary(TQL.Operator.EQ, artifact_data.get('summary'))
for artifact in artifacts:
assert artifact.summary == artifact_data.get('summary')
assert artifact.type == artifact_data.get('type')
break
else:
assert False, 'No artifact returned for TQL'
# TODO: MJ working on this for AD-4631
def test_artifact_get_by_tql_filter_task_id(self, request):
"""Test Artifact Get by TQL"""
# create case
case = self.cm_helper.create_case()
# task data
task_data = {
'case_id': case.id,
'name': f'name-{request.node.name}',
}
# create task
task = self.cm.task(**task_data)
task.submit()
# artifact data
artifact_data = {
# 'case_id': case.id,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'task_id': task.id,
'type': 'ASN',
}
# create artifact
artifact = self.cm.artifact(**artifact_data)
artifact.submit()
# retrieve artifacts using TQL
artifacts = self.cm.artifacts()
artifacts.filter.case_id(TQL.Operator.EQ, case.id)
artifacts.filter.task_id(TQL.Operator.EQ, task.id)
for artifact in artifacts:
assert artifact.summary == artifact_data.get('summary')
assert artifact.type == artifact_data.get('type')
break
else:
assert False, 'No artifact returned for TQL'
def test_artifact_get_by_tql_filter_type_name(self):
"""Test Artifact Get by TQL"""
# create case
case = self.cm_helper.create_case()
# artifact data
artifact_data = {
'case_id': case.id,
'intel_type': 'indicator-ASN',
'summary': f'asn{randint(100, 999)}',
'type': 'ASN',
}
# create artifact
artifact = self.cm.artifact(**artifact_data)
artifact.submit()
# retrieve artifacts using TQL
artifacts = self.cm.artifacts()
artifacts.filter.case_id(TQL.Operator.EQ, case.id)
artifacts.filter.type_name(TQL.Operator.EQ, artifact_data.get('type'))
assert str(artifacts) # coverage: __str__ method
for artifact in artifacts:
assert artifact.summary == artifact_data.get('summary')
assert artifact.type == artifact_data.get('type')
break
else:
assert False, 'No artifact returned for TQL'
def test_artifact_update_properties(self):
"""Test updating artifacts properties"""
case = self.cm_helper.create_case()
file_data = (
'FmFpbGVkIHRvIGZpbmQgbGliIGRpcmVjdG9yeSAoWydsaWJfbGF0ZXN0JywgJ2xpYl8yLjcuMTUnXSkuCg=='
)
# artifact data initially
artifact_data = {
'case_id': case.id,
'file_data': f'{file_data}',
'summary': f'asn{randint(100, 999)}',
'type': 'Certificate File',
}
# create artifact
artifact = self.cm.artifact(**artifact_data)
artifact.submit()
# artifact data updated
file_data = (
'<KEY>'
)
# artifact data
artifact_data = {
'source': 'artifact source',
'file_data': f'{file_data}',
'summary': f'asn{randint(100, 999)}',
}
artifact.source = artifact_data.get('source')
artifact.summary = artifact_data.get('summary')
artifact.file_data = artifact_data.get('file_data')
artifact.submit()
artifact.get(all_available_fields=True)
assert artifact.source == artifact_data.get('source')
assert artifact.summary == artifact_data.get('summary')
assert artifact.file_data == artifact_data.get('file_data')
def test_artifact_get_by_tql_filter_fail_tql(self):
"""Test Artifact Get by TQL"""
# retrieve artifacts using TQL
artifacts = self.cm.artifacts()
artifacts.filter.tql('Invalid TQL')
try:
for artifact in artifacts: # pylint: disable=unused-variable
pass
assert False, 'TQL should have failed'
except Exception:
pass
```
#### File: tests/datastore/test_datastore.py
```python
import json
import uuid
class MockPost:
"""Mock tcex session.get() method."""
def __init__(self, data, ok=True):
"""Initialize class properties."""
self.data = data
self._ok = ok
@property
def headers(self):
"""Mock headers property"""
return {'content-type': 'application/json'}
def json(self):
"""Mock json method"""
return self.data
@property
def ok(self):
"""Mock ok property"""
return self._ok
@property
def reason(self):
"""Mock reason property"""
return 'reason'
@property
def status_code(self):
"""Mock status_code property"""
return 500
@property
def text(self):
"""Mock text property"""
return json.dumps(self.data)
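# Note (added sketch, not in the original tests): the tests below swap this
# class in for tcex.session.post via monkeypatch.setattr(tcex.session,
# 'post', mp_post), so failure paths (ok=False) can be exercised without a
# live API.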
class TestDataStore:
"""Test the TcEx DataStore Module."""
data_type = None
def setup_class(self):
"""Configure setup before all tests."""
self.data_type = 'pytest'
@staticmethod
def test_create_index_fail_test(tcex, monkeypatch):
"""Test failure to create an index.
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx.
monkeypatch (_pytest.monkeypatch.MonkeyPatch, fixture): Pytest monkeypatch
"""
# monkeypatch method
def mp_post(*args, **kwargs): # pylint: disable=unused-argument
return MockPost({}, ok=False)
monkeypatch.setattr(tcex.session, 'post', mp_post)
# create index
key = <KEY>
try:
tcex.datastore('local', key)
assert False, 'Failed to catch error on ok=False'
except RuntimeError:
assert True
def test_data_store_local_index(self, tcex):
"""Test creating a local datastore index
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx.
"""
tcex.datastore('local', self.data_type)
@staticmethod
def test_data_store_local_new_index(tcex):
"""Test creating a local datastore index
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx.
"""
data = {'one': 1}
key = str(uuid.uuid4())
rid = 'one'
ds = tcex.datastore('local', key)
results = ds.add(rid=rid, data=data)
assert results.get('_id') == rid
assert results.get('_shards').get('successful') == 1
def test_data_store_local_add(self, tcex):
"""Test local datastore add
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx.
"""
data = {'one': 1}
rid = 'one'
ds = tcex.datastore('local', self.data_type)
results = ds.add(rid=rid, data=data)
assert results.get('_id') == rid
assert results.get('_shards').get('successful') == 1
# This seems to be unsupported in the latest datastore on 6.3.1
# def test_data_store_local_add_no_rid(self, tcex):
# """Test local datastore add with no rid
# Args:
# tcex (TcEx, fixture): An instantiated instance of TcEx.
# """
# data = {'one': 1}
# rid = None
# ds = tcex.datastore('local', self.data_type)
# results = ds.add(rid=rid, data=data)
# assert results.get('_shards').get('successful') == 1
def test_data_store_local_add_fail(self, tcex, monkeypatch):
"""Test failure of data store add
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx.
monkeypatch (_pytest.monkeypatch.MonkeyPatch, fixture): Pytest monkeypatch
"""
rid = None
# monkeypatch method
def mp_post(*args, **kwargs): # pylint: disable=unused-argument
return MockPost({}, ok=False)
# create datastore
ds = tcex.datastore('local', self.data_type)
# patch after datastore created
monkeypatch.setattr(tcex.session, 'post', mp_post)
try:
ds.add(rid=rid, data=None)
assert False
except RuntimeError:
assert True
def test_data_store_local_delete(self, tcex):
"""Test local datastore delete
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx.
"""
rid = 'three'
ds = tcex.datastore('local', self.data_type)
# add entry to be deleted
ds.add(rid, {'delete': 'delete'})
# delete
results = ds.delete(rid=rid)
assert results.get('_id') == rid
assert results.get('_shards').get('successful') == 1
assert results.get('result') == 'deleted'
def test_data_store_local_delete_fail(self, tcex, monkeypatch):
"""Test failure of data store local delete
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx.
monkeypatch (_pytest.monkeypatch.MonkeyPatch, fixture): Pytest monkeypatch
"""
rid = 'fail-test'
# monkeypatch method
def mp_post(*args, **kwargs): # pylint: disable=unused-argument
return MockPost({}, ok=False)
# delete
ds = tcex.datastore('local', self.data_type)
# patch after datastore created
monkeypatch.setattr(tcex.session, 'post', mp_post)
try:
ds.delete(rid=rid)
assert False
except RuntimeError:
assert True
def test_data_store_local_get(self, tcex):
"""Test local datastore get
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx.
"""
data = {'two': 2}
rid = 'two'
ds = tcex.datastore('local', self.data_type)
# add entry to be deleted
ds.add(rid, data)
results = ds.get(rid=rid)
assert results.get('_id') == rid
assert results.get('_source').get('two') == 2
assert results.get('found') is True
# delete
ds.delete(rid)
def test_data_store_local_get_no_rid(self, tcex):
"""Test local datastore get no reid
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx.
"""
ds = tcex.datastore('local', self.data_type)
results = ds.get()
assert results.get('hits') is not None
def test_data_store_local_get_fail(self, tcex, monkeypatch):
"""Test failure of data store local get
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx.
monkeypatch (_pytest.monkeypatch.MonkeyPatch, fixture): Pytest monkeypatch
"""
# monkeypatch method
def mp_post(*args, **kwargs): # pylint: disable=unused-argument
return MockPost({}, ok=False)
# create datastore
ds = tcex.datastore('local', self.data_type)
# patch after datastore created
monkeypatch.setattr(tcex.session, 'post', mp_post)
try:
ds.get()
assert False
except RuntimeError:
assert True
def test_data_store_organization_add(self, tcex):
"""Test organization datastore add
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx.
"""
data = {'one': 1}
rid = 'one'
ds = tcex.datastore('organization', self.data_type)
results = ds.add(rid=rid, data=data)
assert results.get('_id') == rid
assert results.get('_shards').get('successful') == 1
def test_data_store_organization_delete(self, tcex):
"""Test organization datastore delete
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx.
"""
rid = 'three'
ds = tcex.datastore('organization', self.data_type)
# add entry to be deleted
ds.add(rid, {'three': 3})
# delete
results = ds.delete(rid=rid)
assert results.get('_id') == rid
assert results.get('_shards').get('successful') == 1
assert results.get('result') == 'deleted'
def test_data_store_organization_get(self, tcex):
"""Test organization datastore get
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx.
"""
data = {'two': 2}
rid = 'two'
ds = tcex.datastore('organization', self.data_type)
# add entry to get
ds.add(rid, data)
results = ds.get(rid=rid)
assert results.get('_id') == rid
assert results.get('_source').get('two') == 2
assert results.get('found') is True
# delete
ds.delete(rid)
def test_data_store_local_put(self, tcex):
"""Test local datastore put
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx.
"""
data = {'one': 1}
rid = 'one'
ds = tcex.datastore('local', self.data_type)
# add entry to update
ds.add(rid, {'one': 2})
results = ds.put(rid=rid, data=data)
assert results.get('_id') == rid
assert results.get('_shards').get('successful') == 1
def test_data_store_local_put_fail(self, monkeypatch, tcex):
"""Test failure of data store local put
Args:
tcex (TcEx, fixture): An instantiated instance of TcEx.
monkeypatch (_pytest.monkeypatch.MonkeyPatch, fixture): Pytest monkeypatch
"""
# monkeypatch method
def mp_post(*args, **kwargs): # pylint: disable=unused-argument
return MockPost({}, ok=False)
# create datastore
ds = tcex.datastore('local', self.data_type)
# patch after datastore created
monkeypatch.setattr(tcex.session, 'post', mp_post)
try:
ds.update(rid=None, data=None)
assert False
except RuntimeError:
assert True
```
#### File: tests/playbooks/test_playbook_string_types.py
```python
import pytest
# pylint: disable=no-self-use
class TestUtils:
"""Test the TcEx Batch Module."""
@pytest.mark.parametrize(
'variable,value',
[
('#App:0002:s1!String', 1),
('#App:0002:s2!String', '2'),
('#App:0002:s3!String', '3'),
('#App:0002:s4!String', True),
],
)
def test_playbook_string(self, variable, value, tcex):
"""Test the string array method of Playbook module.
Args:
variable (str): The key/variable to create in Key Value Store.
value (str): The value to store in Key Value Store.
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
tcex.playbook.create_string(variable, value)
result = tcex.playbook.read_string(variable)
value = str(value).lower()
assert result == value, f'result of ({result}) does not match ({value})'
tcex.playbook.delete(variable)
assert tcex.playbook.read(variable) is None
@pytest.mark.parametrize(
'variable,value',
[
('#App:0002:s1!String', []),
('#App:0002:s2!String', {}),
('#App:0002:s3!String', b'bytes'),
('#App:0002:b3!WrongType', 'wrong type'),
],
)
def test_playbook_string_fail(self, variable, value, tcex):
"""Test the string array method of Playbook module.
Args:
variable (str): The key/variable to create in Key Value Store.
value (str): The value to store in Key Value Store.
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
try:
tcex.playbook.create_string(variable, value)
assert False, f'{value} is not a valid String value'
except RuntimeError:
assert True
@pytest.mark.parametrize(
'variable,value',
[
('#App:0003:sa1!StringArray', ['1', '1']),
('#App:0003:sa2!StringArray', ['2', '2']),
('#App:0003:sa3!StringArray', ['3', '3']),
('#App:0003:sa4!StringArray', ['4', '4']),
],
)
def test_playbook_string_array(self, variable, value, tcex):
"""Test the string array method of Playbook module.
Args:
variable (str): The key/variable to create in Key Value Store.
value (str): The value to store in Key Value Store.
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
tcex.playbook.create_string_array(variable, value)
result = tcex.playbook.read_string_array(variable)
assert result == value, f'result of ({result}) does not match ({value})'
tcex.playbook.delete(variable)
assert tcex.playbook.read(variable) is None
@pytest.mark.parametrize(
'variable,value',
[
('#App:0003:sa5!StringArray', ('6', '6')),
('#App:0003:sa6!StringArray', map(lambda a: a, ['6', '6'])),
('#App:0003:sa6!StringArray', filter(lambda a: True, ['6', '6'])),
],
)
def test_playbook_string_array_iterables(self, variable, value, tcex):
"""Test the string array method of Playbook module With an iterable as input.
Args:
variable (str): The key/variable to create in Key Value Store.
value (str): The value to store in Key Value Store.
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
tcex.playbook.create_string_array(variable, value)
result = tcex.playbook.read_string_array(variable)
assert result == ['6', '6'], f'result of ({result}) does not match ({list(value)})'
tcex.playbook.delete(variable)
assert tcex.playbook.read(variable) is None
@pytest.mark.parametrize(
'variable,value', [('#App:0003:sa5!StringArray', 'foobar')],
)
def test_playbook_string_array_string(self, variable, value, tcex):
"""Test the string array method of Playbook module with string input (should fail)
Args:
variable (str): The key/variable to create in Key Value Store.
value (str): The value to store in Key Value Store.
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
try:
tcex.playbook.create_string_array(variable, value)
except RuntimeError:
pass # expected
else:
assert False, 'Should have failed.'
@pytest.mark.parametrize(
'variable,value',
[
('#App:0003:sa1!StringArray', ['1', []]),
('#App:0003:sa2!StringArray', ['2', {}]),
('#App:0003:sa3!StringArray', ['3', b'bytes']),
('#App:0002:b3!WrongType', 'wrong type'),
],
)
def test_playbook_string_array_fail(self, variable, value, tcex):
"""Test the string array method of Playbook module.
Args:
variable (str): The key/variable to create in Key Value Store.
value (str): The value to store in Key Value Store.
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
try:
tcex.playbook.create_string_array(variable, value)
assert False, f'{value} is not a valid String Array value'
except RuntimeError:
assert True
#
# Type Specific
#
@pytest.mark.parametrize(
'variable,value', [('#App:0002:s1!String', None)],
)
def test_playbook_string_none(self, variable, value, tcex):
"""Test the string array method of Playbook module.
Args:
variable (str): The key/variable to create in Key Value Store.
value (str): The value to store in Key Value Store.
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
tcex.playbook.create_string(variable, value)
tcex.playbook.read_string(variable)
assert tcex.playbook.read(variable) is None
@pytest.mark.parametrize(
'variable,value', [('#App:0003:sa1!StringArray', None)],
)
def test_playbook_string_array_none(self, variable, value, tcex):
"""Test the string array method of Playbook module.
Args:
variable (str): The key/variable to create in Key Value Store.
value (str): The value to store in Key Value Store.
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
tcex.playbook.create_string_array(variable, value)
tcex.playbook.read_string_array(variable)
assert tcex.playbook.read(variable) is None
def test_playbook_string_read_none(self, tcex):
"""Test the string array method of Playbook module.
Args:
variable (str): The key/variable to create in Key Value Store.
value (str): The value to store in Key Value Store.
tcex (TcEx, fixture): An instantiated instance of TcEx object.
"""
assert tcex.playbook.read_string(None) is None
``` |
{
"source": "JoeyTeng/Algorithm-Selection-for-Classification-Problems-via-Cluster-based-Meta-features",
"score": 2
} |
#### File: JoeyTeng/Algorithm-Selection-for-Classification-Problems-via-Cluster-based-Meta-features/convex_hull_cluster.py
```python
import collections
import functools
import itertools
import json
import logging
import logging.handlers
import multiprocessing.pool
import os
import queue
import sys
import numpy
import scipy.special
import meta_features
PROCESS_COUNT = int(os.cpu_count() / 2)
def _tree():
"""Define a recursive structure of collection.defaultdict(self)."""
return collections.defaultdict(_tree)
def initialize_logger(filename=None, level=logging.DEBUG, filemode='w'):
"""Initialize a logger in module logging.
Args:
filename (string, optional): Defaults to None.
The path of log file
By default, logger will stream to the standard output
level (logging level, optional): Defaults to logging.DEBUG
filemode (string, optional): Defaults to 'w'.
'w' or 'a', overwrite or append
Returns:
tuple: (logger, handler)
"""
log_format = '%(asctime)s %(levelname)s\n' + \
' %(filename)s:%(lineno)s: %(name)s %(message)s'
if filename is None:
handler = logging.StreamHandler()
else:
handler = logging.handlers.RotatingFileHandler(
filename=filename, mode=filemode)
handler.setFormatter(logging.Formatter(log_format))
logger = logging.getLogger('LOG')
logger.addHandler(handler)
logger.setLevel(level)
return logger, handler
def load_dataset(filename):
"""Load data from a csv file.
Args:
filename (string): path of input file.
CSV format
[coordinate, ...] + [label]
Returns:
Dataset: dataset
"""
return [(
lambda point: {
'coordinate': tuple(map(float, point[:-1])),
'label': int(point[-1])})
(string.strip().rstrip().split(','))
for string in open(filename, 'r').read()
.strip().rstrip().split('\n')]
def signed_volume(vertices):
"""Calculate the signed volume of n-dimensional simplex.
The simplex is defined by (n + 1) vertices
Reference:
Wedge Product: http://mathworld.wolfram.com/WedgeProduct.html
Args:
vertices (Vertices): Define the n-d simplex.
Returns:
tuple: (
sign (float):
-1, 0 or 1, the sign of the signed volume,
logvolume (float):
The natural log of the absolute value of the volume)
If the signed volume is zero, then sign will be 0
and logvolume will be -Inf.
In all cases, the signed volume is equal to sign * np.exp(logvolume)
Reference:
From scipy manual
https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.slogdet.html#numpy.linalg.slogdet
"""
dimension = len(vertices[0])
(sign, logvolume) = numpy.linalg.slogdet(
numpy.stack(vertices[1:]) +
numpy.array(vertices[0]) * numpy.ones((dimension, dimension)) * -1)
return (sign, logvolume)
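# Illustration (added sketch, not in the original source): for the 2-D
# simplex (0, 0), (1, 0), (0, 1) the wedge-product matrix is the identity,
# so signed_volume(((0.0, 0.0), (1.0, 0.0), (0.0, 1.0))) returns (1.0, 0.0)
# and the geometric simplex volume is sign * exp(logvolume) / 2! = 0.5.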
def squared_area(vertices):
"""Calculte the squared area of the n-1-d simplex.
Calculate the squared area of (n - 1)-dimensional simplex defined by
n vertices in n-dimensional space
Reference:
Wedge Product: http://mathworld.wolfram.com/WedgeProduct.html
Args:
vertices (Vertices): Define the n-1-d simplex
Returns:
float: The natural log of the squared area of the simplex
Reference:
From scipy manual
https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.slogdet.html#numpy.linalg.slogdet
"""
dimension = len(vertices[0])
matrix = numpy.matrix(
numpy.stack(vertices[1:]) +
numpy.array(vertices[0]) *
numpy.ones((len(vertices) - 1, dimension)) * -1)
logvolume = numpy.linalg.slogdet(matrix * matrix.T)[1] # sign, logvolume
return logvolume
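# Illustration (added sketch, not in the original source): for the segment
# (0, 0) -> (3, 4) the Gram matrix is [[25]], so
# squared_area(((0.0, 0.0), (3.0, 4.0))) returns ln(25) ~= 3.2189,
# i.e. the squared length 5 ** 2.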
def check_inside(face, instance, edge=None, area=None):
"""Check if the instance given is at the inner side of the face.
Args:
face (Vertices): [description]
instance (Vertex): [description]
edge (Vertices, optional): Defaults to None.
By default, edge = face[:-1]
Used to calculate the area and
thus check when instance is on the same plane with the face.
area (float, optional): Defaults to None.
By default, area = squared_area(face)
Returns:
tuple: (
inside (bool),
new face generated with (edge + pivot) (Vertices),
new squared_area calculated using new face (float))
"""
edge = edge or face[:-1]
area = area or squared_area(face)
sign, logvolume = signed_volume(form_face(face, instance))
_face = form_face(edge, instance)
_area = squared_area(_face)
if ((numpy.isclose([numpy.exp(logvolume)], [0]) and _area > area)
or sign < 0):
# outside
return (False, _face, _area)
return (True, _face, _area)
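# Illustration (added sketch, not in the original source): with the 2-D face
# ((0, 0), (1, 0)), the point (0, 1) gives a positive signed volume and is
# reported as inside, while (0, -1) flips the sign and check_inside(...)
# returns False as its first element.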
def check_inside_hull(hull, instance):
"""Check if the instance given is inside the hull.
Args:
hull (list): Faces on the hull
instance (Vertex): [description]
Returns:
bool: If the instance is inside the hull
"""
for face in hull:
if not check_inside(face=face, instance=instance)[0]:
return False
return True
def check_homogeneity(impurities, hull, used_pivots):
"""Check if the hull is homogeneous.
Args:
impurities (Vertices): Instances with different label
hull (list): all the faces of the hull
used_pivots (set): [description]
Returns:
bool: If the convex hull have homogeneity
"""
for instance in impurities:
if check_inside_hull(hull, instance):
return False
return True
def check_convexity(hull, used_pivots):
"""Check if the hull is convex.
Args:
hull (list): Faces on the hull
used_pivots (set): set of turning points on the hull
Returns:
bool: If the hull maintains convexity
"""
for instance in used_pivots:
if not check_inside_hull(hull, instance):
return False
return True
def pivot_on_edge(instances, edge, used_pivots):
"""Search for the next best possible vertex on the hull.
Homogeneity of the hull may not be maintained.
Args:
instances (Vertices): [description]
edge (Vertices): [description]
used_pivots (set): [description]
Recieve:
Homogeneity (bool): If the choice of the vertex will maintain
the homogeneity of the hull
Yields:
tuple:
(None, False): No vertex is found
(pivot (Vertex), homogeneity (bool)): A candidate is returned,
with the side-effect of homogeneity of the hull
(pivot (Vertex)): A candidate is found and
checking of homogeniety is requested
"""
vertices_in_edge = set(edge)
index = 0
length = len(instances)
while index < length and instances[index] in used_pivots:
index += 1
if index == length:
yield (None, False) # Not found
return
homo = {}
homo['pivot'] = instances[index]
homo['face'] = form_face(edge, homo['pivot'])
homo['area'] = squared_area(homo['face'])
homogeneity = False
check = yield (homo['pivot'], )
if check:
homogeneity = True
for instance in instances:
if instance in vertices_in_edge:
# Skip all used pivots in edge to prevent self-orientating
# Skip all instances labelled differently
# Homogeneity test is checked every round
continue
current = {}
current['pivot'] = instance
inside, current['face'], current['area'] = check_inside(
homo['face'], current['pivot'],
edge=edge, area=homo['area'])
if not inside:
check = yield (current['pivot'], )
if check:
# update
homo = current
homogeneity = True
yield (homo['pivot'], homogeneity)
return
def find_next_pivot(instances, hull, edge,
used_pivots, edge_count, impurities):
"""Find next available vertex while ensure the homogeneity.
Iteratively call pivot_on_edge(), check_homogeneity() and check_convexity()
to find the next available vertex on the hull.
Args:
instances (Vertices):
hull (list): Faces of the hull
edge (Vertex):
used_pivots (set):
edge_count (list):
impurities (Vertices):
Returns:
pivot (Vertex):
found (bool):
"""
find_pivot = pivot_on_edge(instances, edge, used_pivots)
pivot = next(find_pivot)
while len(pivot) == 1:
# Find next pivot
# Feedback: if the pivot suggested is a valid choice
if pivot[0] in used_pivots:
# Choose back will always generate a homogeneous hull
# Skip the checking process
pivot = find_pivot.send(True)
continue
check = {}
check['_face'] = form_face(edge, pivot[0])
hull.append(check['_face'])
used_pivots.add(pivot[0])
# Update Edge Count based on new face formed
check['_edges'] = [
tuple(sort_vertices(edge))
for edge in itertools.combinations(
check['_face'], len(check['_face']) - 1)]
for _edge in check['_edges']:
edge_count[_edge] += 1
check['number of face added'] = close_up_hull(
hull, edge_count, used_pivots)
check['homogeneity'] = check_homogeneity(
impurities, hull, used_pivots)
check['convexity'] = check_convexity(hull, used_pivots)
# Revert update
while check['number of face added']:
hull.pop() # close_up
check['number of face added'] -= 1
for _edge in check['_edges']:
edge_count[_edge] -= 1
used_pivots.remove(pivot[0])
hull.pop() # _face
if check['homogeneity'] and check['convexity']:
pivot = find_pivot.send(True)
else:
pivot = find_pivot.send(False)
pivot, found = pivot
if not found or pivot in used_pivots:
# best next choice is used
# stop searching and start closing up
return (pivot, False)
return (pivot, True)
def form_face(edge, pivot):
"""Form face by appending pivot and convert it into a tuple.
Args:
edge (Vertices): [description]
pivot (Vertex): [description]
Returns:
tuple: Face formed
"""
return tuple(list(edge) + [pivot])
def close_up(edge_count, used_pivots):
"""Provide faces required to close up the hull with existing vertices.
Args:
edge_count (dict): [description]
used_pivots (set): [description]
Returns:
list: Faces required.
"""
edges = []
for edge, count in edge_count.items():
if count == 1:
edges.append(edge)
faces = []
lazy_update = collections.defaultdict(int) # default = 0
while edges:
vertices = None
for (i, edge_a), (j, edge_b) in\
itertools.combinations(enumerate(edges), 2):
vertices = set(edge_a).union(set(edge_b))
if len(vertices) == len(edge_a[0]):
edges[i], edges[j], edges[-1], edges[-2] =\
edges[-1], edges[-2], edges[i], edges[j]
edges.pop()
edges.pop()
break
else:
# Cannot find a face, update edges and edges count
updated = False
for edge in lazy_update: # = .keys()
if lazy_update[edge] + edge_count[edge] == 1:
edges.append(edge)
lazy_update[edge] = 2 # Avoid duplicated edges
updated = True
if not updated:
break
continue
face = list(vertices)
for pivot in used_pivots: # = .keys()
if pivot not in vertices:
if not check_inside(face, pivot)[0]:
# det(A) = -det (B) if two cols swap (odd and even)
face[-1], face[-2] = face[-2], face[-1]
break
else:
# This edge is the first edge
return []
faces.append(tuple(face))
for edge in itertools.combinations(tuple(face), len(face) - 1):
lazy_update[tuple(sort_vertices(edge))] += 1
return faces
def close_up_hull(hull, edge_count, used_pivots):
"""Close up the hull.
Second stage.
Add all remaining faces into the hull to form
a closed simplicial complex
Args:
hull (list): All faces of the hull.
edge_count (dict): [description]
used_pivots (set): [description]
Returns:
int: Number of face added
"""
face_added = close_up(edge_count, used_pivots)
if not face_added:
face = list(hull[0])
# det(A) = -det (B) if two cols swap (odd and even)
face[-2], face[-1] = face[-1], face[-2]
face_added = [tuple(face)]
for face in face_added:
hull.append(face)
return len(face_added)
def sort_vertices(*args, **kwargs):
"""Call wrapped sorting function.
A wrapper of sorting function
Using builtin sorted() for now
Args:
same as the wrapped function
Returns
same as the wrapped function
Raises:
same as the wrapped function
"""
return sorted(*args, **kwargs)
def qsort_partition(data, target=1, lhs=0, rhs=None):
"""Find the smallest [target] values in the [data] using [comp] as __lt__.
Complexity: O(n) with quickselect; the current sorting workaround is O(n log n)
Args:
data (Vertices): A list of vertex in tuple type
target (int, optional): Defaults to 1.
[target] smallest values will be returned.
lhs (int, optional): Defaults to 0. Lowest index
rhs (int, optional): Defaults to None. Highest index + 1
comp (func, Currently not supported): Defaults to __builtin__.__lt__.
Customised function used for comparing
Returns:
list: [target] shallow copies of Vertex
"""
# comp is Partially supported: only used in partitioning
# but not in sorting return values
# BUG: Work around instead for now
# comp = (lambda x, y: x < y)
data = list(set(data)) # Remove repeated vertices
# BUG: Work around instead for now
# lhs = lhs or 0
# rhs = len(data) - 1 # Since [data] is updated
# position = -1
# while position != target:
# if position < target:
# lhs = position + 1
# elif position > target:
# rhs = position - 1
# pivot = data[rhs]
# index = lhs
# for i in range(lhs, rhs + 1):
# if comp(data[i], pivot):
# data[i], data[index] = data[index], data[i]
# index += 1
# data[rhs], data[index] = data[index], data[rhs]
# position = index # Return value
# return sort_vertices(data[:target])
return sort_vertices(data)[:target]
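# Illustration (added sketch, not in the original source):
#     >>> qsort_partition([(3,), (1,), (2,), (1,)], target=2)
#     [(1,), (2,)]
# duplicates are removed first, then the smallest [target] vertices are kept.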
def initialize_hull(instances, impurities):
"""Initialize the hull by obtain the first face of the hull.
face: an (n-1)-d structure
Args:
instances (Vertices): Instances with same label
impurities (Vertices): Instances with different label
Returns:
tuple:
dimension (int): Dimension of the space, n
face (tuple): The face obtained
(Vertex, ...)
used_pivots (set): The set of used instances on the hull
set{Vertex}
edge_count (dict): Counting of how many times an edge is used
{edge (Vertices): times (int)}
"""
dimension = len(instances[0])
edge = qsort_partition(instances, target=dimension - 1)
used_pivots = set(edge)
edge_count = collections.defaultdict(int) # default = 0
face = edge
if len(edge) == dimension - 1:
pivot, found = find_next_pivot(
instances, [], edge, used_pivots, edge_count, impurities)
if found:
face = form_face(edge, pivot)
used_pivots.add(pivot)
return (dimension, tuple(face), used_pivots, edge_count)
def queuing_face(face, _queue, edge_count):
"""Push all the possible edges (n-2-d structure) into the queue.
Edges are obtained by making combinations.
No edge will join the queue more than once.
Gurantee the order that the later one in the face
will be excluded first in combinations.
Args:
face (Vertices): A face made of many vertices (n-1)
_queue (Queue): Target queue which supports .push()
edge_count (dict): Counting of how many times an edge is used
{edge (Vertices): times (int)}
"""
for i in range(len(face) - 1, -1, -1):
sub_face = []
for j, element in enumerate(face):
if i != j:
sub_face.append(element)
edge = tuple(sub_face)
sorted_edge = tuple(sort_vertices(edge))
if not edge_count[sorted_edge]:
_queue.put(edge)
edge_count[sorted_edge] += 1
def gift_wrapping(instances, impurities, logger):
"""Use modified gift-wrapping method for convex hull building.
Two stages: Finding new vertex & Close-up
Args:
instances (Vertices): List of instances with same label
impurities (Vertices): List of instances with different label
Returns:
dict:
{
"faces": All the faces,
list: [face]
"vertices": All the vertices
dict: {Vertex: True}
"dimension": Dimension of the hull
int: len(face)
}
"""
instances = sorted(set(instances))
dimension, face, used_pivots, edge_count = initialize_hull(
instances, impurities)
_queue = queue.LifoQueue()
if len(face) == dimension:
queuing_face(face, _queue, edge_count)
hull = []
hull.append(face)
vertices = [coordinate for coordinate in face]
slices = PROCESS_COUNT
all_instances = instances
instances = [
all_instances[
int(len(all_instances) * i / slices):
int(len(all_instances) * (i + 1) / slices)]
for i in range(slices)]
# First stage: find all new pivots
while not _queue.empty():
edge = _queue.get()
if edge_count[edge] > 1:
continue
pool = multiprocessing.pool.Pool(PROCESS_COUNT)
func = functools.partial(
find_next_pivot,
hull=hull, edge=edge, used_pivots=used_pivots,
edge_count=edge_count, impurities=impurities)
result = pool.map(func, instances)
# result = list(map(func, instances))
pool.close()
pool.join()
not_found = [i[0] for i in enumerate(result) if i[1][0] is None]
candidate = [element[0] for element in result if element[0]]
pivot, found = func(candidate)
if found:
pivot, found = func(list(itertools.chain(
*[instances[i] for i in not_found], [pivot])))
if not found:
continue
face = form_face(edge, pivot)
vertices.append(pivot)
used_pivots.add(pivot)
hull.append(face)
queuing_face(face, _queue, edge_count)
logger.debug("gift_wrapping: First stage complete. Starting second.")
# Second stage: close up the hull
if dimension < len(used_pivots):
close_up_hull(hull, edge_count, used_pivots)
logger.debug("gift_wrapping: Second stage complete.")
return {
"faces": hull,
"vertices": used_pivots,
"dimension": dimension}
def map_generate_tuple(*args):
"""Generate a tuple with the results from the func.
Used to assist dict(), map() to generate a dictionary.
Args:
*args (list): [0]:(
key (immutable): key of the generated dict,
func (function): function to be called,
arg (tuple): arguments for func)
Returns:
tuple: (key, func(*arg))
"""
key, func, arg = args[0][0], args[0][1], args[0][2]
return (key, func(*arg))
def clustering(dataset, logger):
"""Calculate all convex hulls.
All hulls will be pure(only contains data points with same label)
Args:
dataset (list): All the instances in the space with label
list of dict objects:
[Point, ...]
logger (logger): logger for logging
Returns:
dict: Clusters obtained separated by labels
label: clusters (list of dict objects)
[{
'vertices' (list): Turning instances on the hull
[Vertex, ...],
'points' (list) : Instances in the hull. Vertices are excluded
[Vertex, ...],
'size' (int): Number of instances covered by the hull
len(['vertices']) + len(['points']),
'volume': The volume of the hull
float(optional)
}, ...]
"""
all_instances = dataset
meta_dataset = collections.defaultdict(list)
for instance in all_instances:
meta_dataset[instance['label']].append(instance['coordinate'])
tasklist = map(
lambda item, meta_dataset=meta_dataset, logger=logger: (
item[0],
clustering_by_label,
(item[1], item[0], meta_dataset, logger)), meta_dataset.items())
# pool = multiprocessing.pool.Pool(PROCESS_COUNT)
# clusters = dict(pool.map(map_generate_tuple, tasklist))
clusters = dict(map(map_generate_tuple, tasklist))
# pool.close()
# pool.join()
return clusters
def clustering_by_label(instances, label, meta_dataset, logger):
"""Obtain all possible clusters with given label.
Args:
instances (Vertices): all instances with given label
label (label): label
meta_dataset (meta_dataset): dict of the whole dataset
logger (logger): logger inherited
Returns:
list: list of all clusters obtained
"""
clusters = []
impurities = {
item[0]: item[1]
for item in meta_dataset.items() if item[0] != label}
impurities = list(itertools.chain(*impurities.values()))
while instances:
# List is not empty
cluster = gift_wrapping(instances, impurities, logger)
found = cluster['dimension'] < len(cluster['vertices'])
_dataset = []
vertices = []
points = []
for vertex in instances:
if vertex in cluster['vertices']:
vertices.append(vertex)
else:
if found and check_inside_hull(cluster['faces'], vertex):
points.append(vertex)
else:
_dataset.append(vertex)
if found:
volume = round(calculate_volume(cluster['faces']), 15)
elif len(cluster['faces'][0]) > 1:
volume = round(numpy.exp(squared_area(cluster['faces'][0])), 15)
else:
volume = 0.0
instances = _dataset
clusters.append({'vertices': vertices,
'points': points,
'size': len(vertices) + len(points),
'volume': volume})
logger.info(
'Clustering: %d clusters found, '
'%d/%d instance processed for label %r',
len(clusters), len(meta_dataset[label]) - len(instances),
len(impurities) + len(meta_dataset[label]), label)
return clusters
def calculate_volume(hull):
"""Calculate the volume of a convex hull.
Args:
hull (list): All faces in the hull.
Returns:
float: Volume calculated.
"""
origin = hull[0][0]
volume = 0.0
for face in hull:
logvolume = signed_volume(form_face(face, origin))[1]
volume += numpy.exp(logvolume)
# n-dimensional simplex = det / n!
volume /= scipy.special.factorial(len(origin))
return volume
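# Illustration (added sketch, not in the original source): a triangular hull
# whose oriented faces are ((0, 0), (1, 0)), ((1, 0), (0, 1)) and
# ((0, 1), (0, 0)) has only the edge opposite the origin contributing a unit
# determinant, so calculate_volume(...) yields 1 / 2! = 0.5, the triangle area.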
def centroid(clusters):
"""Calculate the centroid of the vertices on the convex hulls.
Inner instances are excluded.
Args:
clusters (list): list of clusters
Returns:
list: [vertex, ...]
"""
centroids = list(map(
lambda cluster: tuple(map(
lambda x, cluster=cluster: x / len(cluster['vertices']),
sum(map(
numpy.array,
cluster['vertices'])))),
clusters))
return centroids
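# Illustration (added sketch, not in the original source): a single cluster
# with vertices (0, 0), (2, 0) and (0, 2) yields the centroid
# (2/3, 2/3) ~= (0.667, 0.667).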
def main(argv):
"""Start main function here."""
dataset_filename = argv[0]
clusters_filename = dataset_filename + ".clusters.json"
output_filename = dataset_filename + ".output.json"
log_file = dataset_filename + ".log"
logger, handler = initialize_logger(log_file)
logger.info('Start: Version 1.0.1')
logger.debug('Logger initialized')
logger.debug('sys.argv: %r', sys.argv)
logger.debug('Loading dataset')
dataset = load_dataset(dataset_filename)
logger.info('Dataset loaded')
logger.info('Trying to load clusters from %s', clusters_filename)
clusters = None
try:
clusters = json.load(open(clusters_filename, 'r'))
except FileNotFoundError:
logger.warning('Clusters data file not found')
except json.decoder.JSONDecodeError:
logger.warning('File broken. Not Json Decodable')
if not clusters:
logger.debug('Clustering data points')
clusters = clustering(dataset, logger)
logger.debug(
'Dumping clusters data into json file: %s', clusters_filename)
json.dump(clusters, open(clusters_filename, 'w'))
logger.info('Data points clustered')
logger.debug('Calculating meta-feature indicators')
features = meta_features.meta_features(clusters)
logger.debug(
'Dumping meta-feature indicators into json file: %s',
clusters_filename)
json.dump(features, open(output_filename, 'w'))
logger.info('Meta-feature indicators calculated')
logger.info('Completed')
logger.removeHandler(handler)
if __name__ == '__main__':
main(sys.argv[1:])
```
#### File: JoeyTeng/Algorithm-Selection-for-Classification-Problems-via-Cluster-based-Meta-features/spherical_brute_force.py
```python
import argparse
import collections
import json
import logging
import logging.handlers
import math
import os
import numpy
import meta_features
INFINITESIMAL = 1e-323
PROCESS_COUNT = int(os.cpu_count() / 2)
def initialize_logger(
name='LOG',
filename=None,
level=logging.DEBUG,
filemode='a'):
"""Initialize a logger in module logging.
Args:
name (string, optional): Name of logger. Defaults to 'LOG'.
filename (string, optional): Defaults to None.
The path of log file
By default, logger will stream to the standard output
level (logging level, optional): Defaults to logging.DEBUG
filemode (string, optional): Defaults to 'a'.
'w' or 'a', overwrite or append
Returns:
tuple: (logger, handler)
"""
log_format = '%(asctime)s %(levelname)s\n' + \
' %(filename)s:%(lineno)s: %(name)s: %(message)s'
if filename is None:
handler = logging.StreamHandler()
else:
handler = logging.handlers.RotatingFileHandler(
filename=filename, mode=filemode)
handler.setFormatter(logging.Formatter(log_format))
logger = logging.getLogger(name)
logger.addHandler(handler)
logger.setLevel(level)
return logger, handler
def load_dataset(filename):
"""Load data from a csv file.
Args:
filename (string): path of input file.
CSV format
[coordinate, ...] + [label]
Returns:
Dataset: dataset
"""
return [(
lambda point: {
'coordinate': tuple(map(float, point[:-1])),
'label': int(point[-1])})
(string.strip().rstrip().split(','))
for string in open(filename, 'r').read()
.strip().rstrip().split('\n')]
def initialize_cluster(coordinates):
"""Construct a cluster instance with given coordiante.
A factory function
Args:
coordinates (list): The coordinates that needed to be included.
[Vertex, ...]
Returns:
dict: a cluster initialized with given coordinates
[{
'centroid' (Vertex): centroid of the sphere,
'radius' (float): radius of the sphere,
'points' (list): Instances in the cluster
i.e. distance <= radius
[Vertex, ...],
'size' (int): Number of instances covered by the sphere
len(['points']),
'log-volume' (float): natural log of the volume of the sphere
}]
"""
points = coordinates
_points = list(map(numpy.array, coordinates))
centroid = sum(_points) / len(_points)
radius = max(
map(lambda x, y=centroid: numpy.linalg.norm((x - y)), _points))
return {
'centroid': tuple(centroid),
'radius': radius,
'points': points,
'size': len(points),
'log-volume': calculate_log_volume(len(centroid), radius)
}
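# Illustration (added sketch, not in the original source): the two points
# (0, 0) and (2, 0) give centroid (1.0, 0.0), radius 1.0 and a 2-D
# log-volume of ln(pi * 1 ** 2) ~= 1.1447.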
def calculate_distance(lhs, rhs):
"""Calculate the euclidean distance between 2 points.
Args:
lhs, rhs (Vertex): Coordinates of 2 points
Returns:
float: Euclidean distance between them
"""
return numpy.linalg.norm((numpy.array(lhs) - numpy.array(rhs)))
def calculate_log_volume(dimension, radius):
"""Calculate the log-volume of a sphere with given dimension and radius.
Args:
dimension (int): dimension of the space
radius (float): radius of the sphere
Returns:
float: the log-volume of the sphere
a radius close to zero is replaced by INFINITESIMAL (1e-323)
"""
if (math.isclose(radius, 0)):
radius = INFINITESIMAL
try:
log_volume = ((dimension / 2.0) * math.log(math.pi) + dimension *
math.log(radius) - math.lgamma(dimension / 2.0 + 1))
except ValueError as message:
raise ValueError("".join([
"{0}\n".format(message),
"(({0} / 2.0) * ln(pi) + ({0} * ln({1})".format(dimension, radius),
" - ln(gamma({0} / 2.0 + 1)))".format(dimension)]))
if math.isnan(log_volume):
raise ValueError(
"Volume is NaN: pi ^ " +
"({0} / 2.0) / gamma({0} / 2.0 + 1) * {1} ^ {0}".format(
dimension, radius))
return log_volume
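# Worked example (added sketch, not in the original source): for dimension 2
# and radius 1 the formula reduces to ln(pi) + 2 * ln(1) - ln(gamma(2)) =
# ln(pi) ~= 1.1447, so exp(log_volume) recovers the area pi of a unit disc.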
def float_less_or_equal(lhs, rhs, **kwargs):
"""Determine float A is less than or equal to B using numpy.isclose().
Use numpy.isclose() to determine if A and B are equal
with default tolerance.
Args:
lhs, rhs (float): values that need to be compared
kwargs: kwargs for numpy.isclose()
Returns:
bool: result of comparison.
"""
return numpy.isclose(lhs, rhs, **kwargs) or (lhs < rhs)
def check_inside_cluster(cluster, point):
"""Check if point is inside the cluster.
Args:
cluster (dict): cluster to be checked
{
'centroid' (Vertex): centroid of the cluster,
'radius' (float): radius of the cluster
}
point (Vertex): point to be checked
Returns:
bool: if the point is encompassed by the boundary
"""
return float_less_or_equal(
calculate_distance(cluster['centroid'], point), cluster['radius'])
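# Illustration (added sketch, not in the original source): with centroid
# (0, 0) and radius 1.0, the point (0.6, 0.8) sits on the boundary
# (distance 1.0 up to rounding) and is treated as inside thanks to
# float_less_or_equal().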
def check_homogeneity(cluster, label, clusters):
"""Check homogeneity of the cluster with given clusters.
A homogeneous cluster will not overlap with any other cluster which has
different label, but may overlap with cluster that has the same label.
Which means, there should be no region with ambiguity in
categorisation process.
Args:
cluster (dict): Cluster that need to be checked
{
'centroid' (Vertex): centroid of the cluster,
'radius' (float): radius of the cluster
}
label (): label of the cluster
clusters (dict): list of clusters with labels as keys.
{
label: [cluster, ...]
}
Returns:
bool: if cluster is homogeneous
"""
for _label, _clusters in clusters.items():
if _label == label:
continue
for _cluster in _clusters:
if float_less_or_equal(
calculate_distance(
cluster['centroid'], _cluster['centroid']),
(cluster['radius'] + _cluster['radius'])):
return False
return True
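# Illustration (added sketch, not in the original source): a unit-radius
# sphere at (0, 0) and a differently labelled unit-radius sphere at (3, 0)
# are 3 > 1 + 1 apart, so the check passes; a centre at (1.5, 0) would give
# distance 1.5 <= 2 and fail it.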
def check_homogeneity_instances(indices, dataset):
    """Check that the referenced instances all share one label."""
labels = set(map(lambda x: dataset[x]['label'], indices))
if len(labels) > 1:
return False
return True
def sub_partitions(indices, n, current):
    """Recursively yield groupings of indices into n groups."""
# n (int) is the number of groups
# current (list) is the current grouping
r = len(indices)
# print(indices, n, current)
if n == 1:
yield [list(indices)]
return
if n == r:
for i, index in enumerate(indices):
tmp = [current + [index]]
tmp.extend(list(map(lambda x: [x], indices[:i] + indices[i + 1:])))
yield tmp
return
for other in sub_partitions(indices[1:], n - 1, []):
tmp = [current + [indices[0]]]
tmp.extend(other)
yield tmp
for index in range(1, len(indices)):
indices[1], indices[index] = indices[index], indices[1]
for tmp in sub_partitions(indices[1:], n, current + [indices[0]]):
yield tmp
indices[1], indices[index] = indices[index], indices[1]
return
def partition(indices):
    """Yield all groupings of indices into 1..len(indices) groups."""
r = len(indices)
for n in range(1, r + 1):
for tmp in sub_partitions(indices[:], n, []):
yield tmp
def clustering(dataset, logger):
"""Calculate all spherical clusters.
All spheres will be pure(only contains data points with same label)
Args:
dataset (list): All the instances in the space with label
list of dict objects:
[Point, ...]
logger (logger): logger for logging
Returns:
dict: Clusters obtained separated by labels
label: clusters (list of dict objects)
[{
'centroid' (Vertex): centroid of the sphere,
'radius' (float): radius of the sphere,
'points' (list) : Instances in the cluster
[Vertex, ...],
'size' (int): Number of instances covered by the sphere
len(['points']),
'volume': The volume of the sphere
float(optional)
}, ...]
"""
logger.info('Sorting datasets...')
dataset.sort(key=lambda x: x['coordinate'])
clusters = collections.defaultdict(list)
instances = [instance['coordinate'] for instance in dataset]
count = 0
found_count = 0
minimum = len(instances)
logger.info('Checking clusters...')
for groups in partition(list(range(len(dataset)))):
tmp_clusters = collections.defaultdict(list)
if len(groups) > minimum:
logger.info('Minimum found. #groups: {}'.format(len(groups)))
break
for indices in groups:
cluster = initialize_cluster(list(
map(lambda x: instances[x], indices)))
label = dataset[indices[0]]['label']
if (not check_homogeneity(cluster, label, tmp_clusters)
or not check_homogeneity_instances(indices, dataset)):
break
tmp_clusters[label].append(cluster)
else:
minimum = len(groups)
clusters = tmp_clusters
logger.info('Minimum updated. #{} group'.format(count))
found_count += 1
logger.info(
'One option found. Total till now: {}'.format(found_count))
count += 1
if count % 50 == 0:
logger.info('{} groupings checked'.format(count))
return clusters
def main(args):
"""
Start main function here.
Dispatching all the tasks to process.
"""
log_file = args.log
logger, handler = initialize_logger("Parent", log_file)
logger.info('Start: Version 2.1.1')
logger.debug('Logger initialized')
logger.debug('argparse: %r', args)
logger.removeHandler(handler)
_args = []
for dataset_filename in args.paths:
clusters_filename = dataset_filename + ".clusters.json"
output_filename = dataset_filename + ".output.json"
_args.append(tuple([
dataset_filename,
clusters_filename,
output_filename,
log_file]))
list(map(task_processing, _args))
def task_processing(args): # Take note here!!!
"""Unwrap the args tuple to adapt a function with multiple args to map."""
def worker(
dataset_filename,
clusters_filename,
output_filename,
log_file):
"""Link the submodules to process the data."""
logger, handler = initialize_logger(dataset_filename, log_file)
logger.debug('Logger initialized')
logger.debug('Loading dataset')
dataset = load_dataset(dataset_filename)
logger.info('Dataset loaded')
logger.info('Trying to load clusters from %s', clusters_filename)
clusters = None
try:
clusters = json.load(open(clusters_filename, 'r'))
except FileNotFoundError:
logger.warning('Clusters data file not found')
except json.decoder.JSONDecodeError:
logger.warning('File broken. Not Json Decodable')
if not clusters:
logger.debug('Clustering data points')
clusters = clustering(dataset, logger)
logger.debug(
'Dumping clusters data into json file: %s', clusters_filename)
json.dump(clusters, open(clusters_filename, 'w'))
logger.info('Data points clustered')
logger.debug('Calculating meta-feature indicators')
features = meta_features.meta_features(clusters)
logger.debug(
'Dumping meta-feature indicators into json file: %s',
clusters_filename)
json.dump(features, open(output_filename, 'w'))
logger.info('Meta-feature indicators calculated')
logger.info('Complete')
logger.removeHandler(handler)
return worker(*args)
def traverse(paths):
"""Traverse to collect all the data files."""
print("Starting Traverse Through", flush=True)
files = []
while paths:
path = paths[0]
paths = paths[1:]
for file in os.listdir(path):
if (file.find('.json') == -1
and file.find('.log') == -1
and file.find('.DS_Store') == -1
and file.find('.png') == -1
and file.find('.html') == -1):
files.append('{0}/{1}'.format(path, file))
elif os.path.isdir('{0}/{1}'.format(path, file)):
paths.append('{0}/{1}'.format(path, file))
print("Traverse Completed.", flush=True)
return files
def parse_args():
"""Parse all necessary args."""
parser = argparse.ArgumentParser(
description="Obtain clusters and calculate meta-features")
parser.add_argument('-r', action='store', nargs='+',
default=[], metavar='Directory',
help='Recursively processing all files in the folder')
parser.add_argument('-i', action='store', nargs='+',
default=[], metavar='File',
help='Files that need to be processed')
parser.add_argument('--log', action='store', type=str,
default='spherical_cluster.log', metavar='Log file',
help='Path to the log file')
args = parser.parse_args()
paths = []
if (args.r):
paths = traverse(args.r)
paths.extend(args.i)
paths.sort()
args.paths = paths
return args
if __name__ == '__main__':
args = parse_args()
main(args)
```
#### File: JoeyTeng/Algorithm-Selection-for-Classification-Problems-via-Cluster-based-Meta-features/spherical_cluster.py
```python
import argparse
import collections
import json
import logging
import logging.handlers
import math
import multiprocessing.pool
import os
import numpy
import meta_features
INFINITESIMAL = 1e-323
PROCESS_COUNT = int(os.cpu_count() / 2)
def initialize_logger(
name='LOG',
filename=None,
level=logging.DEBUG,
filemode='a'):
"""Initialize a logger in module logging.
Args:
name (string, optional): Name of logger. Defaults to 'LOG'.
filename (string, optional): Defaults to None.
The path of log file
By default, logger will stream to the standard output
level (logging level, optional): Defaults to logging.DEBUG
filemode (string, optional): Defaults to 'a'.
'w' or 'a', overwrite or append
Returns:
tuple: (logger, handler)
"""
log_format = '%(asctime)s %(levelname)s\n' + \
' %(filename)s:%(lineno)s: %(name)s: %(message)s'
if filename is None:
handler = logging.StreamHandler()
else:
handler = logging.handlers.RotatingFileHandler(
filename=filename, mode=filemode)
handler.setFormatter(logging.Formatter(log_format))
logger = logging.getLogger(name)
logger.addHandler(handler)
logger.setLevel(level)
return logger, handler
def load_dataset(filename):
"""Load data from a csv file.
Args:
filename (string): path of input file.
CSV format
[coordinate, ...] + [label]
Returns:
Dataset: dataset
"""
return [(
lambda point: {
'coordinate': tuple(map(float, point[:-1])),
'label': int(point[-1])})
(string.strip().rstrip().split(','))
for string in open(filename, 'r').read()
.strip().rstrip().split('\n')]
def initialize_cluster(coordinates):
"""Construct a cluster instance with given coordiante.
A factory function
Args:
coordinates (list): The coordinates that needed to be included.
[Vertex, ...]
Returns:
dict: a cluster initialized with given coordinates
[{
'centroid' (Vertex): centroid of the sphere,
'radius' (float): radius of the sphere,
'points' (list): Instances in the cluster
i.e. distance <= radius
[Vertex, ...],
'size' (int): Number of instances covered by the sphere
len(['points']),
'log-volume' (float): natural log of the volume of the sphere
}]
"""
points = coordinates
_points = list(map(numpy.array, coordinates))
centroid = sum(_points) / len(_points)
radius = max(
map(lambda x, y=centroid: numpy.linalg.norm((x - y)), _points))
return {
'centroid': tuple(centroid),
'radius': radius,
'points': points,
'size': len(points),
'log-volume': calculate_log_volume(len(centroid), radius)
}
def calculate_distance(lhs, rhs):
"""Calculate the euclidean distance between 2 points.
Args:
lhs, rhs (Vertex): Coordinates of 2 points
Returns:
float: Euclidean distance between them
"""
return numpy.linalg.norm((numpy.array(lhs) - numpy.array(rhs)))
def calculate_log_volume(dimension, radius):
"""Calculate the log-volume of a sphere with given dimension and radius.
Args:
dimension (int): dimension of the space
radius (float): radius of the sphere
Returns:
float: the log-volume of the sphere
a radius close to zero is replaced by INFINITESIMAL (1e-323)
"""
if (math.isclose(radius, 0)):
radius = INFINITESIMAL
try:
log_volume = ((dimension / 2.0) * math.log(math.pi) + dimension *
math.log(radius) - math.lgamma(dimension / 2.0 + 1))
except ValueError as message:
raise ValueError("".join([
"{0}\n".format(message),
"(({0} / 2.0) * ln(pi) + ({0} * ln({1})".format(dimension, radius),
" - ln(gamma({0} / 2.0 + 1)))".format(dimension)]))
if math.isnan(log_volume):
raise ValueError(
"Volume is NaN: pi ^ " +
"({0} / 2.0) / gamma({0} / 2.0 + 1) * {1} ^ {0}".format(
dimension, radius))
return log_volume
def float_less_or_equal(lhs, rhs, **kwargs):
"""Determine float A is less than or equal to B using numpy.isclose().
Use numpy.isclose() to determine if A and B are equal
with default tolerance.
Args:
lhs, rhs (float): values that need to be compared
kwargs: kwargs for numpy.isclose()
Returns:
bool: result of comparison.
"""
return numpy.isclose(lhs, rhs, **kwargs) or (lhs < rhs)
def check_inside_cluster(cluster, point):
"""Check if point is inside the cluster.
Args:
cluster (dict): cluster to be checked
{
'centroid' (Vertex): centroid of the cluster,
'radius' (float): radius of the cluster
}
point (Vertex): point to be checked
Returns:
bool: if the point is encompassed by the boundary
"""
return float_less_or_equal(
calculate_distance(cluster['centroid'], point), cluster['radius'])
def check_homogeneity(cluster, label, clusters):
"""Check homogeneity of the cluster with given clusters.
    A homogeneous cluster does not overlap with any cluster that has a
    different label, but it may overlap with clusters of the same label.
    In other words, no region should be ambiguous during the
    categorisation process.
Args:
cluster (dict): Cluster that need to be checked
{
'centroid' (Vertex): centroid of the cluster,
'radius' (float): radius of the cluster
}
        label (int): label of the cluster
clusters (dict): list of clusters with labels as keys.
{
label: [cluster, ...]
}
Returns:
bool: if cluster is homogeneous
"""
for _label, _clusters in clusters.items():
if _label == label:
continue
for _cluster in _clusters:
if float_less_or_equal(
calculate_distance(
cluster['centroid'], _cluster['centroid']),
(cluster['radius'] + _cluster['radius'])):
return False
return True
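# Illustrative sketch (added, hypothetical values): two unit-radius clusters centred at
# (0, 0) and (3, 0) do not overlap (distance 3 > 1 + 1), so homogeneity is preserved;
# centred at (0, 0) and (1.5, 0) they do overlap (1.5 <= 2), and check_homogeneity()
# returns False whenever such an overlap involves a different label.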
def clustering(dataset, logger):
"""Calculate all spherical clusters.
All spheres will be pure(only contains data points with same label)
Args:
dataset (list): All the instances in the space with label
list of dict objects:
[Point, ...]
logger (logger): logger for logging
Returns:
dict: Clusters obtained separated by labels
label: clusters (list of dict objects)
[{
'centroid' (Vertex): centroid of the sphere,
'radius' (float): radius of the sphere,
'points' (list) : Instances in the cluster
[Vertex, ...],
'size' (int): Number of instances covered by the sphere
len(['points']),
            'log-volume' (float): natural log of the volume of the sphere
}, ...]
"""
logger.info('Sorting datasets...')
dataset.sort(key=lambda x: x['coordinate'])
logger.info('Initialise clusters...')
clusters = collections.defaultdict(list)
for instance in dataset:
clusters[instance['label']].append(
initialize_cluster((instance['coordinate'], )))
logger.info('Merging clusters...')
logger_count = 0
for label, homo_clusters in clusters.items():
index = 0
while index < len(homo_clusters):
current = homo_clusters[index]
merging_index = -1
distance = float('inf')
for j_index, cluster in enumerate(homo_clusters[index + 1:]):
new_distance = calculate_distance(
current['centroid'], cluster['centroid'])
if new_distance < distance:
merging_index = j_index + index + 1
distance = new_distance
if merging_index == -1:
index += 1
continue
cluster = initialize_cluster(
current['points'] + homo_clusters[merging_index]['points'])
if (check_homogeneity(cluster, label, clusters)):
homo_clusters[merging_index], homo_clusters[-1] =\
homo_clusters[-1], homo_clusters[merging_index]
homo_clusters.pop()
current = cluster
homo_clusters[index] = current
else:
index += 1
logger_count += 1
logger.info('{0}/{1} categories completed'.format(
logger_count, len(clusters.keys())))
return clusters
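# Minimal usage sketch (added, hypothetical data): for
#   dataset = [{'coordinate': (0.0,), 'label': 0}, {'coordinate': (1.0,), 'label': 1}]
# clustering(dataset, logger) yields one single-point cluster per label, since merges
# are only ever attempted between clusters that share a label.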
def main(args):
"""
Start main function here.
Dispatching all the tasks to process.
"""
log_file = args.log
logger, handler = initialize_logger("Parent", log_file)
logger.info('Start: Version 2.1.1')
logger.debug('Logger initialized')
logger.debug('argparse: %r', args)
logger.removeHandler(handler)
_args = []
for dataset_filename in args.paths:
clusters_filename = dataset_filename + ".clusters.json"
output_filename = dataset_filename + ".output.json"
_args.append(tuple([
dataset_filename,
clusters_filename,
output_filename,
log_file]))
pool = multiprocessing.pool.Pool(PROCESS_COUNT)
list(pool.map(task_processing, _args))
pool.close()
pool.join()
def task_processing(args): # Take note here!!!
"""Unwrap the args tuple to adapt a function with multiple args to map."""
def worker(
dataset_filename,
clusters_filename,
output_filename,
log_file):
"""Link the submodules to process the data."""
logger, handler = initialize_logger(dataset_filename, log_file)
logger.debug('Logger initialized')
logger.debug('Loading dataset')
dataset = load_dataset(dataset_filename)
logger.info('Dataset loaded')
logger.info('Trying to load clusters from %s', clusters_filename)
clusters = None
try:
clusters = json.load(open(clusters_filename, 'r'))
except FileNotFoundError:
logger.warning('Clusters data file not found')
except json.decoder.JSONDecodeError:
logger.warning('File broken. Not Json Decodable')
if not clusters:
logger.debug('Clustering data points')
clusters = clustering(dataset, logger)
logger.debug(
'Dumping clusters data into json file: %s', clusters_filename)
json.dump(clusters, open(clusters_filename, 'w'))
logger.info('Data points clustered')
logger.debug('Calculating meta-feature indicators')
features = meta_features.meta_features(clusters)
logger.debug(
'Dumping meta-feature indicators into json file: %s',
clusters_filename)
json.dump(features, open(output_filename, 'w'))
logger.info('Meta-feature indicators calculated')
logger.info('Complete')
logger.removeHandler(handler)
return worker(*args)
def traverse(paths):
"""Traverse to collect all the data files."""
    print("Starting traverse through data directories...", flush=True)
files = []
while paths:
path = paths[0]
paths = paths[1:]
        for file in os.listdir(path):
            full_path = '{0}/{1}'.format(path, file)
            # Recurse into sub-directories first so that directories without an
            # extension are not mistaken for data files.
            if os.path.isdir(full_path):
                paths.append(full_path)
            elif (file.find('.json') == -1
                    and file.find('.log') == -1
                    and file.find('.DS_Store') == -1
                    and file.find('.png') == -1
                    and file.find('.html') == -1):
                files.append(full_path)
print("Traverse Completed.", flush=True)
return files
def parse_args():
"""Parse all necessary args."""
parser = argparse.ArgumentParser(
description="Obtain clusters and calculate meta-features")
parser.add_argument('-r', action='store', nargs='+',
default=[], metavar='Directory',
help='Recursively processing all files in the folder')
parser.add_argument('-i', action='store', nargs='+',
default=[], metavar='File',
help='Files that need to be processed')
parser.add_argument('--log', action='store', type=str,
default='spherical_cluster.log', metavar='Log file',
help='Path to the log file')
args = parser.parse_args()
paths = []
if (args.r):
paths = traverse(args.r)
paths.extend(args.i)
paths.sort()
args.paths = paths
return args
if __name__ == '__main__':
args = parse_args()
main(args)
```
#### File: Algorithm-Selection-for-Classification-Problems-via-Cluster-based-Meta-features/test/test.py
```python
import contextlib
import inspect
import json
import os
import nose.tools
import convex_hull_cluster
import spherical_cluster
STD_PATH = 'test/'
cluster = spherical_cluster
def get_path():
# 0 is the current function
    # 1 is the caller (parent) function
return '{0}{1}'.format(STD_PATH,
inspect.stack()[1].function.replace('_', '.'))
def _test(path):
with contextlib.suppress(FileNotFoundError):
os.remove("{0}.clusters.json".format(path))
cluster.main([path])
nose.tools.assert_equal(
json.load(open("{0}.clusters.json".format(path), 'r')),
json.load(open("{0}.clusters.control.json".format(path))))
os.remove("{0}.clusters.json".format(path))
def test_homo():
path = get_path()
_test(path)
def test_hetro():
path = get_path()
_test(path)
def test_hetro_size():
path = get_path()
_test(path)
def test_hetro_duplication():
path = get_path()
_test(path)
```
#### File: Algorithm-Selection-for-Classification-Problems-via-Cluster-based-Meta-features/utilities/paired_t_test.py
```python
import argparse
import json
import numpy
import scipy.stats
def main(args):
pathA, pathB = args.i
print(pathA, pathB, flush=True)
dataA = numpy.matrix(json.load(open(pathA))).T.tolist()
dataB = numpy.matrix(json.load(open(pathB))).T.tolist()
p_values = []
for index in range(len(dataA)):
output = scipy.stats.ttest_rel(dataA[index], dataB[index])
p_values.append(output[1])
print(numpy.matrix(dataA) - numpy.matrix(dataB))
print(p_values)
def parse_args():
parser = argparse.ArgumentParser(
        description="Run a paired t-test per feature between two result files"
)
parser.add_argument('-i', action='store', nargs='+', default=[],
help='Path to two input json files')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
main(args)
```
#### File: Algorithm-Selection-for-Classification-Problems-via-Cluster-based-Meta-features/utilities/pair_wise_pearson.py
```python
import argparse
import collections
import csv
import scipy.stats.stats
def load(path):
with open(path, newline='') as csvfile:
reader = csv.DictReader(csvfile)
fieldnames = list(reader.fieldnames)[1:] # Omit "Dataset"
data = collections.OrderedDict([(key, list()) for key in fieldnames])
for row in reader:
for field in fieldnames:
data[field].append(float(row[field]))
print("Loaded: {}".format(path), flush=True)
return data
def dump(path, data):
print("Writing into csv file...", flush=True)
with open("{}.paired_r.csv".format(path), 'w', newline='') as csvfile:
writer = csv.writer(csvfile, dialect='excel')
writer.writerows(data)
print("Dump Completed.", flush=True)
def pair_wise_pearson(data):
table = [["Quantity"] + list(data.keys())]
for X in data.items():
table.append(list([X[0]]))
for Y in data.items():
pearsonr = scipy.stats.pearsonr(X[1], Y[1])
r_square = pearsonr[0] ** 2
table[-1].append(r_square)
return table
def main(args):
path = args.i
print(path, flush=True)
data = load(path)
output = pair_wise_pearson(data)
dump(path, output)
def parse_args():
parser = argparse.ArgumentParser(
description="Calculate Pearson r square value pair-wisely"
)
parser.add_argument('-i', action='store', type=str,
help='Path to input csv file (with headers)')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
main(args)
``` |
{
"source": "JoeyTeng/topology-and-meta-learning",
"score": 3
} |
#### File: topology-and-meta-learning/utilities/artificial_datasets.py
```python
import argparse
import bisect
import collections
import itertools
import json
import os
import random
import numpy
import plotly
import sklearn.neighbors
import download_png
INCREMENT = dict(
    # (intersection x, intersection y, angle divisor, random-angle offset, +n for uniform spacing)
    corner=(0, 0, 2, 0, 1),
    side=(),  # placeholder: the 'side' option is not populated yet
centre=(0.5, 0.5, 1, 0.5, 0)
)
class PlotGraph(object):
@classmethod
def __call__(cls, *args, **kwargs):
return cls.run(*args, **kwargs)
@classmethod
def run(cls, path, _data, _layout):
print("Plotting graph of: {}".format(path), flush=True)
data = cls.plot_data_generation(_data)
layout = cls.layout(
"2-D Artificial Dataset",
**_layout)
cls.plot(
path,
data,
layout)
print("Graph Plotted: {}".format(path), flush=True)
@classmethod
def title_generation(cls, title, **kwargs):
return "{}{}".format(
title,
"".join(
["<br>{}: {}".format(key, value)
for key, value in kwargs.items()]))
@classmethod
def plot_data_generation(cls, _data):
return [
plotly.graph_objs.Scatter(
x=_data[0]['x'],
y=_data[0]['y'],
mode='markers',
name='category 0'
),
plotly.graph_objs.Scatter(
x=_data[1]['x'],
y=_data[1]['y'],
mode='markers',
name='category 1'
)
]
@classmethod
def plot_offline(cls, fig, path):
filename = "{}.html".format(path[:-len('.png')])
url = plotly.offline.plot(
fig,
image="png",
image_filename=path[path.rfind('/') + 1:-len('.png')],
filename=filename,
auto_open=False)
destination = path[:path.rfind('/')]
try:
download_png.download(destination, url)
except RuntimeError:
print("RuntimeError occurs when downloading {}".format(url),
flush=True)
return
print("Offline Graph Plotted: {}".format(path), flush=True)
@classmethod
def layout(cls, title, **kwargs):
layout = dict(
title=cls.title_generation(title, **kwargs))
return layout
@classmethod
def plot(cls, path, data, layout):
fig = plotly.graph_objs.Figure(data=data, layout=layout)
cls.plot_offline(fig, path)
def label(point, separators):
count = 0
for separator in separators:
matrix = numpy.matrix([
numpy.array(point) - numpy.array(separator[0]),
numpy.array(separator[1]) - numpy.array(separator[0])])
if numpy.linalg.det(matrix) < 0:
count += 1
if (count % 2):
return 1
return 0
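# Hedged illustration (added, assumed values): with a single separator from (0, 0) to
# (1, 1), the point (1, 0) gives det([[1, 0], [1, 1]]) = 1 >= 0 and is labelled 0,
# while (0, 1) gives det([[0, 1], [1, 1]]) = -1 < 0 and is labelled 1; the label is
# the parity of the number of separators the point falls on the negative side of.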
def intersection(args):
n = args.n # number of linear separators
randomise = args.random
increment = INCREMENT[args.intersection]
if randomise:
angles = numpy.array([random.random() for i in range(n)])
angles = (angles - increment[3]) * numpy.pi / increment[2]
angles = numpy.array(sorted(angles.tolist()))
else:
angles = ((numpy.array(list(range(n))) + 1) / (n + increment[4])
* numpy.pi / increment[2])
separators = []
for angle in angles:
separators.append((
(increment[0], increment[1]), (
numpy.cos(angle) + increment[0],
numpy.sin(angle) + increment[1])))
return separators
def orthogonal(args):
n = args.n # number of linear separators
randomise = args.random
if args.nh == -1 and args.nv != -1:
n_v = args.nv
n_h = n - n_v
elif args.nh != -1 and args.nv == -1:
n_h = args.nh
n_v = n - n_h
elif args.nh != -1 and args.nv != -1:
n_h = args.nh
n_v = args.nv
else:
n_h = n // 2
n_v = n - n_h
if randomise:
distance = [random.random() for i in range(n)]
horizontal = distance[n_v:][:n_h]
vertical = distance[:n_v]
else:
horizontal = (numpy.array(list(range(n_h))) + 1) / (n_h + 1)
vertical = (numpy.array(list(range(n_v))) + 1) / (n_v + 1)
separators = [(
(0.0, y),
(1.0, y)
) for y in horizontal] + [(
(x, 0.0),
(x, 1.0)
) for x in vertical]
return separators
def kNN(args):
n = args.n # number of centroids
# Class h && class v
if args.nh == -1 and args.nv != -1:
n_v = args.nv
n_h = n - n_v
elif args.nh != -1 and args.nv == -1:
n_h = args.nh
n_v = n - n_h
elif args.nh != -1 and args.nv != -1:
n_h = args.nh
n_v = args.nv
else:
n_h = n // 2
n_v = n - n_h
return ([(random.random(), random.random(), 0) for i in range(n_h)] +
[(random.random(), random.random(), 1) for i in range(n_v)])
class kNN_predict(object):
predictor = None
@classmethod
def __call__(cls, point, centroids, initialise=False):
if initialise or not cls.predictor:
cls.predictor = sklearn.neighbors.KNeighborsClassifier(
n_neighbors=len(centroids),
weights='distance')
X = numpy.array([[p[0], p[1]] for p in centroids])
y = numpy.array([p[2] for p in centroids])
cls.predictor.fit(X, y)
return cls.predictor.predict(numpy.array([point]))[0]
def main(args):
path = args.o
number_of_points = int((args.np) ** 0.5)
mode = (
+ (int(args.intersection != ''))
+ (int(args.orthogonal) << 1)
+ (int(args.kNN) << 2))
if mode == 0:
print("Please choose only one mode!")
return None
elif (mode != 1 and
mode != (1 << 1) and
mode != (1 << 2)):
print("Please choose any mode. -h to check details")
return None
points = [coordinate
for coordinate in itertools.product(
range(number_of_points), repeat=2)]
points = numpy.array(points)
points = (points - 0) / (number_of_points - 1 - 0) # Normalization
points = points.tolist()
if args.kNN:
centroids = kNN(args)
labeled_points = [(point[0], point[1], kNN_predict()(point, centroids))
for point in points]
json.dump(centroids, open("{}.centroids.json".format(path), 'w'))
else:
if args.intersection:
separators = intersection(args)
elif args.orthogonal:
separators = orthogonal(args)
labeled_points = [(point[0], point[1], label(point, separators))
for point in points]
json.dump(separators, open("{}.separators.json".format(path), 'w'))
with open(path, 'w') as output:
output.writelines(['{}, {}, {}\n'.format(*point)
for point in labeled_points])
return labeled_points
def plot(points, args):
n = args.n # number of linear separators
randomise = args.random
path = args.save_image_to
if (not path.startswith('/')): # Using relative path instead of absolute
path = '{}/{}'.format(os.getcwd(), path)
data = [collections.defaultdict(list),
collections.defaultdict(list)]
for point in points:
data[point[2]]['x'].append(point[0])
data[point[2]]['y'].append(point[1])
additional_info = dict(
number_of_separators=n,
randomised_angles=randomise,
number_of_points=len(points),
ratio_of_zero_to_all=len(data[0]['x']) / len(points)
)
PlotGraph()(path, data, additional_info)
def parse_args():
parser = argparse.ArgumentParser(
description="Generate 2-D Artificial Datasets using linear separators"
)
parser.add_argument('-n', action='store', type=int, default=0,
help='The number of linear separators in the dataset')
parser.add_argument('--random', action='store_true',
help=' '.join(['state if you want to use randomised',
'angles (interval) for separators']))
parser.add_argument('-o', action='store', type=str, default='data.out',
help='Path to where the generated dataset is stored')
parser.add_argument('--save_image_to', action='store', type=str,
default="{}/data.png".format(os.getcwd()),
help='Path to where the graph plotted is stored')
parser.add_argument('-np', action='store', type=int,
default=900, # A random choice though
help='The number of data instance you want')
parser.add_argument('--intersection', action='store',
choices=['corner', 'side', 'centre'],
default='',
                        help=' '.join([
                            'Use tilted separators. This indicates the point',
                            'of intersection of separators. Default: \'\'']))
parser.add_argument('--orthogonal', action='store_true',
help='Use orthogonal separators instead.')
parser.add_argument('-nh', action='store', type=int, default=-1,
help=' '.join([
'The number of horizontal linear separators',
'in the dataset for orthogonal mode only']))
parser.add_argument('-nv', action='store', type=int, default=-1,
help=' '.join([
'The number of vertical linear separators',
'in the dataset for orthogonal mode only']))
parser.add_argument('--kNN', action='store_true',
help=' '.join([
'Use full-NN based method to assign the class.',
'Assume --random by default.']))
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
points = main(args)
if points:
plot(points, args)
``` |
{
"source": "joeythegod/iot_final",
"score": 3
} |
#### File: joeythegod/iot_final/server.py
```python
from flask import Flask, render_template
from flask import jsonify
from flask import request
# from flask_pymongo import PyMongo
import json
import os
from detect_mask_image import mask_image
app= Flask(__name__)
app.config['UPLOAD_FOLDER'] = "images"
# app.config['MONGO_DBNAME']= 'coordinates'
# app.config['MONGO_URI']= 'mongodb://localhost:27017/coordinates'
# mongo= PyMongo(app)
# @app.route('/post', methods=['POST'])
# def add_coordinate():
# data = None
# label= None
# coordinate= mongo.db.coordinates
# data= request.json["data"]
# label= request.json["label"]
# print(label)
# coordinate.insert({'data': data, 'label': label})
# new_coordinate=coordinate.find_one({'data': data} and {'label':label})
# output = {'data': new_coordinate['data'], 'label':new_coordinate['label']}
# return jsonify({'result':output})
# @app.route('/get', methods=['GET'])
# def get_coordinate():
# coordinate= mongo.db.coordinates
# output= []
# for c in coordinate.find():
# output.append({'data': c['data'], 'label': c['label']})
# return jsonify({'result' : output})
@app.route('/post', methods=['POST'])
def infer():
file = request.files["image"]
save_path = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
file.save(save_path)
results = mask_image(save_path)
return jsonify({'results':results})
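# Hypothetical client-side sketch (not part of this server): the endpoint expects a
# multipart upload named "image", e.g. with the requests library:
#   requests.post("http://<host>/post", files={"image": open("face.jpg", "rb")})
# and the JSON response carries whatever mask_image() returns under the "results" key.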
if __name__== '__main__':
# coordinate= mongo.db.coordinates
# coordinate.delete_many({})
app.run(debug=True, host="0.0.0.0", port= 80)
``` |
{
"source": "joeythesaint/EDAutopilot",
"score": 3
} |
#### File: joeythesaint/EDAutopilot/autopilot.py
```python
import tkinter as tk
import webbrowser
from tkinter import messagebox
import requests
from dev_autopilot import resource_path, RELEASE
from dev_tray import tray
def update():
releases_url = 'https://api.github.com/repos/skai2/EDAutopilot/releases'
response = requests.get(releases_url)
# Raise an exception if the API call fails.
response.raise_for_status()
data = response.json()
try:
latest_release = data[0]['tag_name']
if latest_release and latest_release != RELEASE:
message = "There is a new version of EDAutopilot available!\nWould you like to go to the release download page?"
root = tk.Tk()
root.withdraw()
root.tk.call('wm', 'iconphoto', root._w, tk.PhotoImage(file=resource_path('src/logo.png')))
go_to_update = messagebox.askyesno("ED - Autopilot Update", message)
if go_to_update:
webbrowser.open_new(data[0]['html_url'])
return True
except Exception as e:
print(e)
return False
if __name__ == '__main__':
if not update():
tray()
``` |
{
"source": "joeyuan19/flaming-bear",
"score": 2
} |
#### File: PersonalSite/blog/views.py
```python
from blog.models import Post
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import render_to_string
def create_post(post):
comments = post.comments.all()
    comments = comments.reverse()  # QuerySet.reverse() returns a new queryset; assign it
comments = [
{
'username':comment.username,
'content':comment.content,
'date':comment.date,
'rating':comment.get_rating(),
} for comment in comments[:10]
]
context = {
'title':post.title,
'content':post.content,
'date':post.date,
'comments': comments,
'rating': post.rating_to_string(),
}
return render_to_string(
'blog/post_template.html',
context)
def get_posts(request):
posts = Post.objects.all()
posts = [create_post(post) for post in posts[:10]]
return render_to_response(
'blog/blog_content.html',
{'posts':posts},
context_instance=RequestContext(request)
)
```
#### File: PersonalSite/content/models.py
```python
from django.db import models
from django.utils import timezone
# Create your models here.
class Project(models.Model):
title = models.CharField(max_length=128,blank=True,null=True,default="")
body = models.TextField(blank=True,null=True,default="")
preview = models.ImageField(upload_to="img/project_previews/",blank=True,null=True,default=None)
rel_date = models.CharField(max_length=128,blank=True,null=True,default="")
url = models.URLField(max_length=128,blank=True,null=True,default="")
org_key = models.IntegerField(blank=True,null=True,default=0)
def __repr__(self):
return "<Project title:"+self.title+">"
def __unicode__(self):
return self.__repr__()
def __str__(self):
return self.__repr__()
class ProjectCategory(models.Model):
title = models.CharField(max_length=64)
entries = models.ManyToManyField(Project,blank=True)
org_key = models.IntegerField(blank=True,null=True,default=0)
def __repr__(self):
return "<Project Category title:"+self.title+">"
def __unicode__(self):
return self.__repr__()
def __str__(self):
return self.__repr__()
class Contact(models.Model):
pass
class Resume(models.Model):
title = models.CharField(max_length=128,blank=True,null=True,default="")
location = models.CharField(max_length=128,blank=True,null=True,default="")
description = models.TextField(blank=True,null=True,default="")
relevent_dates = models.CharField(max_length=128,blank=True,null=True,default="")
    sort_date = models.DateTimeField(blank=True,null=True,default=timezone.now)  # pass the callable so the time is taken at save, not at import
def __repr__(self):
return "<Resume title:"+self.title+">"
def __unicode__(self):
return self.__repr__()
def __str__(self):
return self.__repr__()
class ResumeCategory(models.Model):
title = models.CharField(max_length=64)
entries = models.ManyToManyField(Resume,blank=True)
org_key = models.IntegerField(blank=True,null=True,default=0)
def list_entries(self):
return "\n".join([i.__repr__() for i in self.entries.all()])
def get_entries_by_date(self,rev_chron=True):
        if rev_chron:
            return self.entries.all().order_by('-sort_date')
        else:
            return self.entries.all().order_by('sort_date')
def __repr__(self):
return "<Resume Category title:"+self.title+">"
def __unicode__(self):
return self.__repr__()
def __str__(self):
return self.__repr__()
class Friend(models.Model):
name = models.CharField(max_length=64,blank=True,null=True,default="")
title = models.CharField(max_length=128,blank=True,null=True,default="")
Description = models.CharField(max_length=256,blank=True,null=True,default="")
url = models.URLField(max_length=128,blank=True,null=True,default="")
org_key = models.IntegerField(blank=True,null=True,default=0)
def __repr__(self):
return "<Friend name:"+self.name+">"
def __unicode__(self):
return self.__repr__()
def __str__(self):
return self.__repr__()
```
#### File: PersonalSite/PersonalSite/views.py
```python
from django.shortcuts import render
from dajaxice.core import dajaxice_functions
def homepage(request):
return render(request,'index.html')
def test(request):
return render(request,'test2.html')
def presentation(request):
return render(request,'presentation.html')
def homepage_redirect(request):
return render(request,'error.html')
from django.template import RequestContext
from django.shortcuts import render_to_response
from content.models import ResumeCategory
def django_test(request):
categories = [cat for cat in ResumeCategory.objects.all()]
debug = str(len(categories))
canary = "canary"
return render_to_response(
'content/resume_django.html',
{
'debug':debug,
'canary':"Made it!",
'categories':categories,
},
context_instance=RequestContext(request)
)
```
#### File: projects/scripts/ascii.py
```python
from PIL import Image as I
import numpy as np
def asciify(img,width=False,invert=False,vert_scale=False):
tmp = img.convert('L')
X,Y = tmp.size
    # Apply the default width before computing the scale factor.
    if not width:
        width = X
    factor = float(width)/X
    if vert_scale:
        vert_scale = 0.5
    else:
        vert_scale = 1.0
tmp = tmp.resize((int(X*factor),int(Y*factor*vert_scale)))
data = np.array(tmp)
buf = ""
for x in xrange(len(data)):
for y in xrange(len(data[x])):
buf += ascii_hash(data[x][y],invert)
buf += "\n"
return buf
def ascii_hash(n,invert=False):
char_map = "$@B%8&WM#6930QoahkbdpqwmZRO2JUYTKXCLzcvunxrjft/\|(){}[]?=-_+~<>I1li!;:,\"*^`'. "
if invert:
char_map = char_map[::-1]
index = int((float(n)/255.)*(len(char_map)-1))
return char_map[index]
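# Illustrative values (added note): with the default map a black pixel (0) becomes '$'
# and a white pixel (255) becomes ' ', so dark regions render as dense glyphs;
# passing invert=True flips that mapping.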
if __name__ == '__main__':
import sys
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-w","--width",dest="width",default=80)
parser.add_option("-i","--invert",action="store_true",dest="invert",default=False)
parser.add_option("-s","--no-vertical-scale",action="store_false",dest="vert_scale",default=True)
opts,args = parser.parse_args()
if len(args) <= 0:
print "No image provided"
sys.exit(1)
img = I.open(args[0])
print asciify(img,opts.width,opts.invert,opts.vert_scale)
```
#### File: projects/scripts/sudoku3.py
```python
import time
import sys
def rowbyrow():
d = []
for i in range(9):
x = INPUT_PROCESS(i)
d.append(x)
for k,i in enumerate(d):
for j,c in enumerate(i):
d[k][j] = int(c)
return d
def INPUT_PROCESS(i,u=False):
while not u:
x = INPUT(i)
x, u = input_check(x,i)
return x
def INPUT(i):
x = list(raw_input("Row " + str(i+1) + ":\n"))
if ''.join(p for p in x) in ["Q","quit","q","Quit","QUIT"]:
sys.exit(1)
print(x)
return x
def input_check(x,i,u=False):
while not u:
x, u = entry_check(x,i)
x, c = length_check(x,i)
return x, u
def length_check(x,i):
while len(x) != 9:
print("Invalid entry. Please enter the 9 entries from the indicated row using zeroes for blank entries:")
x = INPUT(i)
x, c = input_error(x,i)
return x, c
def entry_check(x,i,c = False,u = True):
for p in x:
try:
h = int(p)
except ValueError:
print("Invalid entry. Each space must be an integer 0-9.")
u = False
return x,u
return x, u
def input_error(x,i):
c = raw_input("Is this correct? (y/n)\n")
while c == "n":
print("Please input the row again: ")
x = INPUT(i)
x,c = input_check(x,i)
return x,c
def puzzprint(n):
    # Print the grid three rows at a time, with box separators between bands.
    print '+ - - - + - - - + - - - +'
    for band in (0, 3, 6):
        for p in range(band, band + 3):
            print '|',
            for i in range(3):
                print n[p][i],
            print '|',
            for i in range(3, 6):
                print n[p][i],
            print '|',
            for i in range(6, 9):
                print n[p][i],
            print '|'
        print '+ - - - + - - - + - - - +'
### Transforms
def transpose(n):
"""Takes a list-style Matrix and gives back the transpose"""
d = [[n[j][i] for j in range(len(n[0]))] for i in range(len(n))]
return d
def box(n):
d = [[] for i in range(len(n))]
m = 0
for Q in range(len(n)):
if 18 <= m < 27:
if 24 <= m < 27:
for i in range(6,9):
m = m + 1
for c in range(6,9):
d[Q].append(n[i][c])
elif 21 <= m < 24:
for i in range(3,6):
m = m + 1
for c in range(6,9):
d[Q].append(n[i][c])
elif 18 <= m < 21:
for i in range(3):
m = m + 1
for c in range(6,9):
d[Q].append(n[i][c])
elif 9 <= m < 18:
if 15 <= m < 18:
for i in range(6,9):
m = m + 1
for c in range(3,6):
d[Q].append(n[i][c])
elif 12 <= m < 15:
for i in range(3,6):
m = m + 1
for c in range(3,6):
d[Q].append(n[i][c])
elif 9 <= m < 12:
for i in range(3):
m = m + 1
for c in range(3,6):
d[Q].append(n[i][c])
elif m < 9:
if 6 <= m < 9:
for i in range(6,9):
m = m + 1
for c in range(3):
d[Q].append(n[i][c])
elif 3 <= m < 6:
for i in range(3,6):
m = m + 1
for c in range(3):
d[Q].append(n[i][c])
elif m < 3:
for i in range(3):
m = m + 1
for c in range(3):
d[Q].append(n[i][c])
return d
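# Behavioural summary (added note, not original documentation): box() regroups the 9x9
# grid so that each output row holds the nine entries of one 3x3 box; applying box()
# three times in total restores the original grid, which is why boxxy() below wraps
# the solving step between box(n) and box(box(n)).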
### useful functions
def ld(x, y):
pos = [i for i in x if i not in y]
return pos
def solved(n):
# Checks if each position has been made into an integer
d = 0
for i in n:
for c in i:
if not type(c) == int:
d = d + 1
if d == 0:
return True
else:
return False
def linecheck(n):
for k,i in enumerate(n):
for j,c in enumerate(i):
if type(c) == list:
n[k][j] = ld(c,i)
return n
def single(puzzle):
    # Goes line by line finding variable cells; for each candidate value in a
    # variable cell, checks whether that cell is the only variable cell in the
    # line where the candidate appears, and if so commits the cell to it.
for line_index, line in enumerate(puzzle):
for variable_index, variable1 in enumerate(line):
if type(variable1) == list:
for possibility in variable1:
count = 0
for variable2 in line:
if type(variable2) == list:
if possibility in variable2:
count = count + 1
if count > 1: break
if count == 1:
puzzle[line_index][variable_index] = possibility
break
return puzzle
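# Hedged illustration (added, hypothetical row): in a row such as
#   [5, [1, 3], [3, 7], 2, ...]
# the candidate 1 appears in exactly one variable cell, so single() commits that cell
# to 1 (the classic "hidden single" rule).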
def confirm(n):
# replaces the variables that have been knocked down to one possibility
for k,i in enumerate(n):
for j,c in enumerate(i):
if type(c) == list:
if len(c) == 1:
n[k][j] = int(c[0])
return n
def step(n):
    # checks lines, eliminating impossible candidates and committing forced values
n = linecheck(n)
n = single(n)
n = confirm(n)
return n
def rc(n):
# column then row
for w in range(2):
n = transpose(n)
n = step(n)
return n
def boxxy(n):
# box
n = box(n)
n = step(n)
n = box(box(n))
return n
def solve(n):
n = rc(n)
n = boxxy(n)
n = confirm(n)
return n
def var(n,t=0):
# Gives coordinates for spot with the least number of variables.
vc = []
v = []
for x1,line in enumerate(n):
for x2,nums in enumerate(line):
if type(nums) == list:
vc.append([len(nums),[x1,x2]])
if len(nums) == 2:
return [len(nums),[x1,x2]]
vc.sort()
m = vc[t]
return m
def bruteforce1(n,xfs):
    # First brute force: this method does not include a backtracking
    # function, as it is the first place where an error can be introduced.
    # Finds the variable with the lowest number of possibilities and
    # cycles through the candidates until the correct one has been found.
m = var(n)
for i in range(m[0]):
n[m[1][0]][m[1][1]] = n[m[1][0]][m[1][1]][i]
u = False
while not solved(n):
n1 = n
n = solve(n)
if bfcondition(n):
# Backtrack: error raised
n = xfs[-1]
m = var(n)
break
if n == n1:
n2 = failsafe(n)
xfs.append(n2)
n, u = bruteforce2(n,xfs)
if solved(n):
break
m = var(n)
if solved(n):
break
return n
def bruteforce2(n,xfs):
    # Finds the variable with the lowest number of possibilities and
    # cycles through the candidates until the correct one has been found.
m = var(n)
for i in range(m[0]):
n[m[1][0]][m[1][1]] = n[m[1][0]][m[1][1]][i]
u = False
while not solved(n):
n1 = n
n = solve(n)
if bfcondition(n):
# backtrack: error raised
n = xfs[-1]
m = var(n)
break
elif n == n1:
# New forced solution needed
n2 = failsafe(n)
xfs.append(n2)
n, u = bruteforce2(n,xfs)
if solved(n):
break
elif bfcondition(n):
n = xfs[-1]
m = var(n)
break
if u:
break
if solved(n):
break
if solved(n):
return n, True
elif not bfcondition(n):
f = xfs[-1]
xfs.pop()
return f, False
else:
return n, True
def bfcondition(n):
for i in n:
for c in i:
if c == []:
return True
for i in n:
for c in i:
if type(c) == int:
if i.count(c) > 1:
return True
for i in box(n):
for c in i:
if type(c) == int:
if i.count(c) > 1:
return True
for i in transpose(n):
for c in i:
if type(c) == int:
if i.count(c) > 1:
return True
return False
def failsafe(n):
    # Copy each row as well, so later in-place edits do not corrupt the saved state.
    n1 = [list(row) for row in n]
    return n1
def puzzle_setup(x,v):
xc = [i for i in range(1,10)]
if v:
print "Here's your puzzle:\n"
puzzprint(x)
xgrid = []
for i in range(9):
dc = []
for i in range(9):
dc.append(xc)
xgrid.append(dc)
for i in range(9):
for p,c in enumerate(x[i]):
if c != 0:
xgrid[i][p] = c
return xgrid
def solve_puzzle(xgrid,v=False):
xgrid = puzzle_setup(xgrid,v)
start = time.clock()
t = 0
while not solved(xgrid):
xgrid1 = failsafe(xgrid)
xgrid = solve(xgrid)
if xgrid == xgrid1:
xgrid2 = failsafe(xgrid)
xfs = [xgrid2]
xgrid = bruteforce1(xgrid,xfs)
end = time.clock()
t = end - start
return t,xgrid
### RUNNING PORTION ###
if __name__ == "__main__":
print("Welcome!")
print("This program solves Sudoku problems \n")
print("Enter the digits in your puzzle row by row.")
    print("At any time, hitting enter is OK instead of typing yes (y).\n")
print("Typing quit during the input process will end the program.")
print("Type a digit for a digit and a 0 (zero) for a blank entry: ")
exit = "y"
while exit != "n":
x = rowbyrow()
t,xgrid = solve_puzzle(x)
        print "Your puzzle has been solved!\n"
print "It took " + str(t) + " secs."
puzzprint(xgrid)
print '\n'
exit = raw_input("Another puzzle? (y/n): ")
```
#### File: PersonalSite/projects/views.py
```python
from django.shortcuts import render_to_response
from django.template import RequestContext
def sudoku_view(request):
return render_to_response(
'sudoku/display.html',
{'alt_title':'Sudoku'},
context_instance=RequestContext(request)
)
def ascii_view(request):
return render_to_response(
'asciiart/display.html',
{'alt_title':'PyAsciiArt'},
context_instance=RequestContext(request)
)
def xkcd_clock_view(request):
return render_to_response(
'xkcd-clock.html',
{'alt_title':'XKCD Clock'},
context_instance=RequestContext(request)
)
def test_view(request):
return render_to_response(
'sudoku/test.html',
{},
context_instance=RequestContext(request)
)
``` |
{
"source": "joeyudongs/UoloNet",
"score": 2
} |
#### File: UNet/scripts/dicom_display.py
```python
import numpy as np
import dicom
import os
import matplotlib.pyplot as plt
from glob import glob
# from mpl_toolkits.mplot3d.art3d import Poly3DCollection
# import scipy.ndimage
# from skimage import morphology
# from skimage import measure
# from skimage.transform import resize
# from sklearn.cluster import KMeans
# from plotly import __version__
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
# from plotly.tools import FigureFactory as FF
# from plotly.graph_objs import *
#init_notebook_mode(connected=True)
def load_scan(path):
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key=lambda x: int(x.InstanceNumber))
try:
slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
except:
slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
for s in slices:
s.SliceThickness = slice_thickness
return slices
def get_pixels_hu(scans):
image = np.stack([s.pixel_array for s in scans])
    # Convert to int16 (the source pixel data is sometimes stored in other types);
    # this is safe because CT values are always well below the int16 range (<32k)
image = image.astype(np.int16)
# Set outside-of-scan pixels to 1
# The intercept is usually -1024, so air is approximately 0
image[image == -2000] = 0
# Convert to Hounsfield units (HU)
intercept = scans[0].RescaleIntercept
slope = scans[0].RescaleSlope
if slope != 1:
image = slope * image.astype(np.float64)
image = image.astype(np.int16)
image += np.int16(intercept)
return np.array(image, dtype=np.int16)
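# Hedged numeric example (added, assumes typical CT rescale values): with RescaleSlope 1
# and RescaleIntercept -1024, a stored pixel value of 1024 maps to 0 HU (water) and 24
# maps to -1000 HU (air), which is roughly the range the histogram below visualises.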
data_path = r'/data/LAMBDA/MDACC-ORC\Training\001\DICOM\CT'
output_path = working_path = r'/data/LAMBDA/MDACC-ORC\Training\001\DICOM\Test/'
g = glob(data_path + '/*.dcm')
# Print out the first 5 file names to verify we're in the right folder.
print ("Total of %d DICOM images.\nFirst 5 filenames:" % len(g))
print('\n'.join(g[:5]))
#
# Loop over the image files and store everything into a list.
#
id = 0
patient = load_scan(data_path)
imgs = get_pixels_hu(patient)
np.save(output_path + "fullimages_%d.npy" % (id), imgs)
file_used=output_path+"fullimages_%d.npy" % id
imgs_to_process = np.load(file_used)
plt.hist(imgs_to_process.flatten(), bins=50, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
id = 0
imgs_to_process = np.load(output_path+'fullimages_{}.npy'.format(id))
def sample_stack(stack, rows=6, cols=6, start_with=10, show_every=3):
fig,ax = plt.subplots(rows,cols,figsize=[12,12])
for i in range(rows*cols):
ind = start_with + i*show_every
ax[int(i/rows),int(i % rows)].set_title('slice %d' % ind)
ax[int(i/rows),int(i % rows)].imshow(stack[ind],cmap='gray')
ax[int(i/rows),int(i % rows)].axis('off')
plt.show()
sample_stack(imgs_to_process)
``` |
{
"source": "joeyv120/vizLayer",
"score": 3
} |
#### File: vizLayer/src/VizLayer.py
```python
from pywinusb import hid
from PySimpleGUI import SystemTray
import ctypes
hllDll = ctypes.WinDLL("User32.dll")
def sample_handler(data):
# print("\nRaw data: {0}".format(data)) # Print raw data for debug
data = [item for item in data if item != 0] # remove blank characters
# print(data)
data = [chr(item) for item in data] # Convert int to chr
# print(data)
icon = 'data\\' + ''.join(data[-1]) + '.png' # read the last if multiple
# try:
tray_layers.Update(filename=icon) # Update the icon on the screen
# except:
# tray_layers.Update(filename='data\\default.png') # Use this on error
def hid_devices():
all_hids = hid.find_all_hid_devices() # Get a list of HID objects
# Convert to a dictionary of Names:Objects
hids_dict = {}
for device in all_hids:
device_name = str(
"{0.vendor_name} {0.product_name}"
"(vID=0x{1:04x}, pID=0x{2:04x})"
"".format(device, device.vendor_id, device.product_id)
)
hids_dict[device_name] = device
return hids_dict
def hid_read(hids_dict, menu_item):
device = hids_dict[menu_item] # Match the selection to the HID object
device.open() # Open the HID device for communication
device.set_raw_data_handler(sample_handler) # Set raw data callback
return device # Return the HID device
def menu_update():
hids_dict = hid_devices() # Get a dictionary of HID devices
device_names = list(hids_dict.keys()) # Pull the names to a list
# Generate a menu list to pass to the icon
menu_items = [
'BLANK',
[
'Refresh',
'Device List',
device_names,
'---',
'E&xit'
]
]
tray_layers.update(menu=menu_items) # Update the icon with the menu list
return hids_dict
# https://stackoverflow.com/questions/21160100/python-3-x-getting-the-state-of-caps-lock-num-lock-scroll-lock-on-windows
# https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes
# https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-getkeystate
def check_locks():
lock_keys = {
'CAP': 0x14,
'NUM': 0x90,
# 'VK_SCROLL': 0x91,
}
lock_states = {k: hllDll.GetKeyState(v) for k, v in lock_keys.items()}
return lock_states
def change_locks(lock_states):
message = ''
if lock_states['CAP'] != 0:
message += 'Caps Lock = ON'
else:
message += 'Caps Lock = OFF'
if lock_states['NUM'] != 0:
message += '\nNum Lock = ON'
else:
message += '\nNum Lock = OFF'
tray_layers.ShowMessage(
title='Lock States',
message=message,
time=(0, 1000),
filename='data\\locks.png',
)
# print(lock_states)
return
if __name__ == "__main__":
# Create the tray icon for layers
tray_layers = SystemTray(
menu=['BLANK', ['Refresh', '---', 'E&xit']],
filename='data\\default.png',
)
# Create the tray icon for locks
# tray_locks = SystemTray(
# # menu=['BLANK', ['Refresh', '---', 'E&xit']],
# filename='data\\default.png',
# )
# tray_locks.hide()
hids_dict = menu_update() # Populate the menu with HID devices
device = None
lock_states_old = None
while True: # The event loop
menu_item = tray_layers.read(timeout=100) # Read the systemtray
if menu_item == 'Exit':
break
elif menu_item == 'Refresh':
hids_dict = menu_update() # Refesh the list of HID devices
elif menu_item in [
None,
'__ACTIVATED__',
'__MESSAGE_CLICKED__',
'__DOUBLE_CLICKED__',
]:
continue # If there was no interaction of consequence
elif menu_item == '__TIMEOUT__':
lock_states_new = check_locks()
if lock_states_new != lock_states_old:
change_locks(lock_states_new)
lock_states_old = lock_states_new
else:
# Otherwise assume a device was selected
try:
device.close() # Try to close any open devices first
except Exception:
pass
finally:
device = hid_read(hids_dict, menu_item) # Open the device
try:
device.close() # Try to close any open devices first
except Exception:
pass
``` |
{
"source": "joeyv821/openpilot",
"score": 3
} |
#### File: car/honda/readconfig.py
```python
import configparser
config_path = '/data/honda_openpilot.cfg'
config_file_r = 'r'
config_file_w = 'w'  # configparser.write() needs a text-mode file in Python 3
def read_config_file(CS):
file_changed = False
configr = configparser.ConfigParser()
try:
configr.read(config_path)
except:
file_changed = True
print ("no config file, creating with defaults...")
config = configparser.RawConfigParser()
config.add_section('OP_CONFIG')
#use_tesla_radar -> CS.useTeslaRadar
try:
CS.useTeslaRadar = configr.getboolean('OP_CONFIG','use_tesla_radar')
except:
CS.useTeslaRadar = False
file_changed = True
config.set('OP_CONFIG', 'use_tesla_radar', CS.useTeslaRadar)
#radar_vin -> CS.radarVIN
try:
CS.radarVIN = configr.get('OP_CONFIG','radar_vin')
except:
CS.radarVIN = " "
file_changed = True
config.set('OP_CONFIG', 'radar_vin', CS.radarVIN)
#radar_offset -> CS.radarOffset
try:
CS.radarOffset = configr.getint('OP_CONFIG','radar_offset')
except:
CS.radarOffset = 0.
file_changed = True
config.set('OP_CONFIG', 'radar_offset', CS.radarOffset)
if file_changed:
with open(config_path, config_file_w) as configfile:
config.write(configfile)
class CarSettings(object):
def __init__(self):
### START OF MAIN CONFIG OPTIONS ###
### Do NOT modify here, modify in /data/bb_openpilot.cfg and reboot
self.useTeslaRadar = False
self.radarVIN = " "
self.radarOffset = 0
#read config file
read_config_file(self)
### END OF MAIN CONFIG OPTIONS ###
def get_value(self,name_of_variable):
    # exec() cannot rebind a local variable in Python 3, so the original pattern
    # always returned None; getattr() does the lookup directly.
    return getattr(self, name_of_variable)
```
#### File: tesla/speed_utils/movingaverage.py
```python
import queue
class MovingAverage:
def __init__(self, length):
self.length = length
self.reset()
def reset(self):
self.queue = queue.Queue(maxsize=self.length)
self.sum = 0
def add(self, sample):
if self.queue.full():
self.sum -= self.queue.get_nowait()
self.queue.put_nowait(sample)
self.sum += sample
return self.sum / self.queue.qsize()
def full(self):
return self.queue.full()
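# Minimal usage sketch (added, illustrative values only):
#   avg = MovingAverage(3)
#   avg.add(1.0)  # -> 1.0
#   avg.add(2.0)  # -> 1.5
#   avg.add(3.0)  # -> 2.0
#   avg.add(4.0)  # -> 3.0, the oldest sample (1.0) has been evicted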
```
#### File: car/tesla/teslacan.py
```python
import struct
from ctypes import create_string_buffer
from common.numpy_fast import clip
IC_LANE_SCALE = 2.0
def add_tesla_crc(msg, msg_len):
"""Calculate CRC8 using 1D poly, FF start, FF end"""
crc_lookup = [
0x00,
0x1D,
0x3A,
0x27,
0x74,
0x69,
0x4E,
0x53,
0xE8,
0xF5,
0xD2,
0xCF,
0x9C,
0x81,
0xA6,
0xBB,
0xCD,
0xD0,
0xF7,
0xEA,
0xB9,
0xA4,
0x83,
0x9E,
0x25,
0x38,
0x1F,
0x02,
0x51,
0x4C,
0x6B,
0x76,
0x87,
0x9A,
0xBD,
0xA0,
0xF3,
0xEE,
0xC9,
0xD4,
0x6F,
0x72,
0x55,
0x48,
0x1B,
0x06,
0x21,
0x3C,
0x4A,
0x57,
0x70,
0x6D,
0x3E,
0x23,
0x04,
0x19,
0xA2,
0xBF,
0x98,
0x85,
0xD6,
0xCB,
0xEC,
0xF1,
0x13,
0x0E,
0x29,
0x34,
0x67,
0x7A,
0x5D,
0x40,
0xFB,
0xE6,
0xC1,
0xDC,
0x8F,
0x92,
0xB5,
0xA8,
0xDE,
0xC3,
0xE4,
0xF9,
0xAA,
0xB7,
0x90,
0x8D,
0x36,
0x2B,
0x0C,
0x11,
0x42,
0x5F,
0x78,
0x65,
0x94,
0x89,
0xAE,
0xB3,
0xE0,
0xFD,
0xDA,
0xC7,
0x7C,
0x61,
0x46,
0x5B,
0x08,
0x15,
0x32,
0x2F,
0x59,
0x44,
0x63,
0x7E,
0x2D,
0x30,
0x17,
0x0A,
0xB1,
0xAC,
0x8B,
0x96,
0xC5,
0xD8,
0xFF,
0xE2,
0x26,
0x3B,
0x1C,
0x01,
0x52,
0x4F,
0x68,
0x75,
0xCE,
0xD3,
0xF4,
0xE9,
0xBA,
0xA7,
0x80,
0x9D,
0xEB,
0xF6,
0xD1,
0xCC,
0x9F,
0x82,
0xA5,
0xB8,
0x03,
0x1E,
0x39,
0x24,
0x77,
0x6A,
0x4D,
0x50,
0xA1,
0xBC,
0x9B,
0x86,
0xD5,
0xC8,
0xEF,
0xF2,
0x49,
0x54,
0x73,
0x6E,
0x3D,
0x20,
0x07,
0x1A,
0x6C,
0x71,
0x56,
0x4B,
0x18,
0x05,
0x22,
0x3F,
0x84,
0x99,
0xBE,
0xA3,
0xF0,
0xED,
0xCA,
0xD7,
0x35,
0x28,
0x0F,
0x12,
0x41,
0x5C,
0x7B,
0x66,
0xDD,
0xC0,
0xE7,
0xFA,
0xA9,
0xB4,
0x93,
0x8E,
0xF8,
0xE5,
0xC2,
0xDF,
0x8C,
0x91,
0xB6,
0xAB,
0x10,
0x0D,
0x2A,
0x37,
0x64,
0x79,
0x5E,
0x43,
0xB2,
0xAF,
0x88,
0x95,
0xC6,
0xDB,
0xFC,
0xE1,
0x5A,
0x47,
0x60,
0x7D,
0x2E,
0x33,
0x14,
0x09,
0x7F,
0x62,
0x45,
0x58,
0x0B,
0x16,
0x31,
0x2C,
0x97,
0x8A,
0xAD,
0xB0,
0xE3,
0xFE,
0xD9,
0xC4,
]
crc = 0xFF
for x in range(0, msg_len, 1):
crc = crc_lookup[crc ^ ord(msg[x])]
crc = crc ^ 0xFF
return crc
def add_tesla_checksum(msg_id, msg):
"""Calculates the checksum for the data part of the Tesla message"""
checksum = ((msg_id) & 0xFF) + int((msg_id >> 8) & 0xFF)
for i in range(0, len(msg), 1):
checksum = (checksum + ord(msg[i])) & 0xFF
return checksum
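# Illustrative arithmetic (added, example bytes assumed): for msg_id 0x551 and a
# two-byte payload 0x01 0x02 the checksum is (0x51 + 0x05 + 0x01 + 0x02) & 0xFF = 0x59,
# i.e. the low byte of the sum of both ID bytes and every payload byte.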
def create_pedal_command_msg(accelCommand, enable, idx, pedalcan):
"""Create GAS_COMMAND (0x551) message to comma pedal"""
msg_id = 0x551
msg_len = 6
msg = create_string_buffer(msg_len)
m1 = 0.050796813
m2 = 0.101593626
d = -22.85856576
if enable == 1:
int_accelCommand = int((accelCommand - d) / m1)
int_accelCommand2 = int((accelCommand - d) / m2)
else:
int_accelCommand = 0
int_accelCommand2 = 0
msg = create_string_buffer(msg_len)
struct.pack_into(
"BBBBB",
msg,
0,
int((int_accelCommand >> 8) & 0xFF),
int_accelCommand & 0xFF,
int((int_accelCommand2 >> 8) & 0xFF),
int_accelCommand2 & 0xFF,
((enable << 7) + idx) & 0xFF,
)
struct.pack_into("B", msg, msg_len - 1, add_tesla_checksum(msg_id, msg))
return [msg_id, 0, msg.raw, pedalcan]
def create_enabled_eth_msg(status):
msg_id = 0x018
msg_len = 1
msg = create_string_buffer(msg_len)
struct.pack_into("B", msg, 0, status)
return [msg_id, 0, msg.raw, 0]
def create_fake_IC_msg():
msg_id = 0x649
msg_len = 8
msg = create_string_buffer(msg_len)
struct.pack_into("BBBBBBBB", msg, 0, 0xFF, 0xFF, 0x01, 0x02, 0x03, 0x04, 0xFF, 0x00)
return [msg_id, 0, msg.raw, 0]
def create_radar_VIN_msg(
radarId,
radarVIN,
radarCAN,
radarTriggerMessage,
useRadar,
radarPosition,
radarEpasType,
):
msg_id = 0x560
msg_len = 8
msg = create_string_buffer(msg_len)
if radarId == 0:
struct.pack_into(
"BBBBBBBB",
msg,
0,
radarId,
radarCAN,
useRadar + (radarPosition << 1) + (radarEpasType << 3),
int((radarTriggerMessage >> 8) & 0xFF),
(radarTriggerMessage & 0xFF),
ord(radarVIN[0]),
ord(radarVIN[1]),
ord(radarVIN[2]),
)
if radarId == 1:
struct.pack_into(
"BBBBBBBB",
msg,
0,
radarId,
ord(radarVIN[3]),
ord(radarVIN[4]),
ord(radarVIN[5]),
ord(radarVIN[6]),
ord(radarVIN[7]),
ord(radarVIN[8]),
ord(radarVIN[9]),
)
if radarId == 2:
struct.pack_into(
"BBBBBBBB",
msg,
0,
radarId,
ord(radarVIN[10]),
ord(radarVIN[11]),
ord(radarVIN[12]),
ord(radarVIN[13]),
ord(radarVIN[14]),
ord(radarVIN[15]),
ord(radarVIN[16]),
)
return [msg_id, 0, msg.raw, 0]
def create_DAS_LR_object_msg(
lane, v1Class, v1Id, v1Dx, v1Dy, v1V, v2Class, v2Id, v2Dx, v2Dy, v2V
):
msg_id = 0x559
msg_len = 8
msg = create_string_buffer(msg_len)
important1 = 0
important2 = 0
if (v1Dx > 0) and (v1Id >= 0):
if lane == 0:
important1 = 1
v1Class += 1
if v1Class == 4:
v1Class = 5
if (v2Dx > 0) and (v2Id >= 0):
# important2 = 1
v2Class += 1
if v2Class == 4:
v2Class = 5
if v1Dx > 0:
v1x = int(clip(v1Dx, 0, 127) / 0.5 / IC_LANE_SCALE) & 0xFF
v1y = int((clip(v1Dy, -22.0, 22.0) + 22.05) / 0.35) & 0x7F
v1v = 0x0F
if v1Dx > 0:
v1v = int((clip(v1V, -30, 26) / IC_LANE_SCALE + 30) / 4) & 0x0F
else:
v1x = 0xFF
v1y = 0x7F
v1v = 0x0F
important1 = 0
v1Class = 0
if v2Dx > 0:
v2x = int(clip(v2Dx, 0, 127) / 0.5 / IC_LANE_SCALE) & 0xFF
v2y = int((clip(v2Dy, -22.0, 22.0) + 22.05) / 0.35) & 0x7F
v2v = 0x0F
if v2Dx > 0:
v2v = int((clip(v2V, -30, 26) / IC_LANE_SCALE + 30) / 4) & 0x0F
else:
v2x = 0xFF
v2y = 0x7F
v2v = 0x0F
important2 = 0
v2Class = 0
struct.pack_into(
"BBBBBBBB",
msg,
0,
lane + (v1Class << 3) + (important1 << 7),
v1x,
v1v + ((v1y << 4) & 0xF0),
int((v1y >> 4) & 0x07) + ((v1Id << 3) & 0xF8),
int((v1Id >> 5) & 0x03)
+ (v2Class << 2)
+ (important2 << 6)
+ ((v2x << 7) & 0x80),
int((v2x >> 1) & 0x7F) + ((v2v << 7) & 0x80),
int((v2v >> 1) & 0x07) + ((v2y << 3) & 0xF8),
int((v2y >> 5) & 0x03) + ((v2Id << 2) & 0xFC),
)
return [msg_id, 0, msg.raw, 0]
def create_fake_DAS_msg2(
hiLoBeamStatus, hiLoBeamReason, ahbIsEnabled, fleet_speed_state
):
msg_id = 0x65A
msg_len = 3
msg = create_string_buffer(msg_len)
struct.pack_into(
"BBB",
msg,
0,
hiLoBeamStatus,
hiLoBeamReason,
(1 if ahbIsEnabled else 0) + (fleet_speed_state << 1),
)
return [msg_id, 0, msg.raw, 0]
def create_fake_DAS_msg(
speed_control_enabled,
speed_override,
apUnavailable,
collision_warning,
op_status,
acc_speed_kph,
turn_signal_needed,
forward_collission_warning,
adaptive_cruise,
hands_on_state,
cc_state,
pcc_available,
alca_state,
acc_speed_limit, # IC cruise speed, kph or mph
legal_speed_limit,
apply_angle,
enable_steer_control,
park_brake_request,
):
msg_id = 0x659 # we will use DAS_udsRequest to send this info to IC
msg_len = 8
msg = create_string_buffer(msg_len)
units_included = 1
c_apply_steer = int(
((int(apply_angle * 10 + 0x4000)) & 0x7FFF) + (enable_steer_control << 15)
)
struct.pack_into(
"BBBBBBBB",
msg,
0,
int(
(speed_control_enabled << 7)
+ (speed_override << 6)
+ (apUnavailable << 5)
+ (collision_warning << 4)
+ op_status
),
int(acc_speed_kph),
int(
(turn_signal_needed << 6)
+ (units_included << 5)
+ (forward_collission_warning << 4)
+ (adaptive_cruise << 3)
+ hands_on_state
),
int((cc_state << 6) + (pcc_available << 5) + alca_state),
int(
acc_speed_limit + 0.5
), # IC rounds current speed, so we need to round cruise speed the same way
int(
(legal_speed_limit & 0x1F) + ((park_brake_request << 5) & 0x20)
), # positions 7 and 6 not used yet
int(c_apply_steer & 0xFF),
int((c_apply_steer >> 8) & 0xFF),
)
return [msg_id, 0, msg.raw, 0]
def create_fake_DAS_obj_lane_msg(
leadDx,
leadDy,
leadClass,
rLine,
lLine,
curv0,
curv1,
curv2,
curv3,
laneRange,
laneWidth,
):
msg_id = 0x557
msg_len = 8
f = IC_LANE_SCALE
f2 = f * f
f3 = f2 * f
if leadDx > 127:
leadDx = 127
if leadDx < 0:
leadDx = 0
if leadDy > 22:
leadDy = 22
if leadDy < -22:
leadDy = -22
tLeadDx = int(leadDx / 0.5)
tLeadDy = int((22.5 + leadDy) / 0.35)
tCurv0 = (int((curv0 + 3.5) / 0.035)) & 0xFF
tCurv1 = (int((clip(curv1 * f, -0.2, 0.2) + 0.2) / 0.0016)) & 0xFF
tCurv2 = (int((clip(curv2 * f2, -0.0025, 0.0025) + 0.0025) / 0.00002)) & 0xFF
tCurv3 = (int((clip(curv3 * f3, -0.00003, 0.00003) + 0.00003) / 0.00000024)) & 0xFF
lWidth = (int((laneWidth - 2.0) / 0.3125)) & 0x0F
msg = create_string_buffer(msg_len)
struct.pack_into(
"BBBBBBBB",
msg,
0,
tLeadDx,
tLeadDy,
(lWidth << 4) + (lLine << 2) + rLine,
tCurv0,
tCurv1,
tCurv2,
tCurv3,
((leadClass & 0x03) << 6) + int(laneRange / 4),
)
return [msg_id, 0, msg.raw, 0]
def create_fake_DAS_sign_msg(
roadSignType, roadSignStopDist, roadSignColor, roadSignControlActive
):
msg_id = 0x556
msg_len = 4
orientation = 0x00 # unknown
arrow = 0x04 # unknown
source = 0x02 # vision
roadSignStopDist_t = ((roadSignStopDist + 20) / 0.2) & 0x3FF
sign1 = ((roadSignType & 0x03) << 6) + (roadSignColor << 3) + 0x04
sign2 = ((roadSignStopDist_t & 0x03) << 6) + int((roadSignType >> 2) & 0xFF)
sign3 = int(roadSignStopDist_t >> 2)
sign4 = (orientation << 6) + (arrow << 3) + (source << 1) + roadSignControlActive
msg = create_string_buffer(msg_len)
struct.pack_into("BBBB", msg, 0, sign1, sign2, sign3, sign4)
return [msg_id, 0, msg.raw, 0]
def create_fake_DAS_warning(
DAS_211_accNoSeatBelt,
DAS_canErrors,
DAS_202_noisyEnvironment,
DAS_doorOpen,
DAS_notInDrive,
enableDasEmulation,
enableRadarEmulation,
stopSignWarning,
stopLightWarning,
DAS_222_accCameraBlind,
DAS_219_lcTempUnavailableSpeed,
DAS_220_lcTempUnavailableRoad,
DAS_221_lcAborting,
DAS_207_lkasUnavailable,
DAS_208_rackDetected,
DAS_025_steeringOverride,
ldwStatus,
useWithoutHarness,
usesApillarHarness,
):
msg_id = 0x554
msg_len = 3
fd = 0
rd = 0
if enableDasEmulation:
fd = 1
if enableRadarEmulation:
rd = 1
wh = 0
if useWithoutHarness:
wh = 1
aph = 0
if usesApillarHarness:
aph = 1
autoPilotAborting = 0 # not used at the moment
warn1 = (
(stopLightWarning << 7)
+ (rd << 6)
+ (fd << 5)
+ (DAS_211_accNoSeatBelt << 4)
+ (DAS_canErrors << 3)
+ (DAS_202_noisyEnvironment << 2)
+ (DAS_doorOpen << 1)
+ DAS_notInDrive
)
warn2 = (
stopSignWarning
+ (DAS_222_accCameraBlind << 1)
+ (DAS_219_lcTempUnavailableSpeed << 2)
+ (DAS_220_lcTempUnavailableRoad << 3)
+ (DAS_221_lcAborting << 4)
+ (DAS_207_lkasUnavailable << 5)
+ (DAS_208_rackDetected << 6)
+ (DAS_025_steeringOverride << 7)
)
warn3 = ldwStatus + (autoPilotAborting << 3) + (wh << 4) + (aph << 5)
msg = create_string_buffer(msg_len)
struct.pack_into("BBB", msg, 0, warn1, warn2, warn3)
return [msg_id, 0, msg.raw, 0]
def create_steering_wheel_stalk_msg(
real_steering_wheel_stalk,
spdCtrlLvr_stat=None,
turnIndLvr_stat=None,
hiBmLvr_stat=None,
hrnSw_psd=None,
):
"""Creates a CAN message from the steering wheel stalks.
    Simulates pressing the cruise control stalk (STW_ACTN_RQ.SpdCtrlLvr_Stat)
and turn signal stalk (STW_ACTN_RQ.TurnIndLvr_Stat)
It is probably best not to flood these messages so that the real
stalk works normally.
Args:
spdCtrlLvr_stat: Int value of dbc entry STW_ACTN_RQ.SpdCtrlLvr_Stat
(allowing us to simulate pressing the cruise stalk up or down)
None means no change.
turnIndLvr_stat: Int value of dbc entry STW_ACTN_RQ.TurnIndLvr_Stat
(allowing us to simulate pressing the turn signal up or down)
None means no change.
hiBmLvr_stat: Int value of dbc entry STW_ACTN_RQ.HiBmLvr_Stat
(allowing us to simulate pressing the highbeam stalk)
None means no change.
hrnSw_psd: Int value of dbc entry STW_ACTN_RQ.hrnSw_Psd
(allowing us to simulate pressing the horn)
None means no change.
real_steering_wheel_stalk: Previous STW_ACTN_RQ message sent by the real
        stalk. When sending these artificial messages for cruise control, we want
to mimic whatever windshield wiper etc settings the car is
currently sending.
"""
msg_id = 0x045 # 69 in hex, STW_ACTN_RQ
msg_len = 8
msg = create_string_buffer(msg_len)
# Do not send messages that conflict with the driver's actual actions on the
# steering wheel stalk. To ensure this, copy all the fields you can from the
# real cruise stalk message.
fake_stalk = real_steering_wheel_stalk.copy()
if spdCtrlLvr_stat is not None:
# if accelerating, override VSL_Enbl_Rq to 1.
if spdCtrlLvr_stat in [4, 16]:
fake_stalk["VSL_Enbl_Rq"] = 1
fake_stalk["SpdCtrlLvr_Stat"] = spdCtrlLvr_stat
if turnIndLvr_stat is not None:
fake_stalk["TurnIndLvr_Stat"] = turnIndLvr_stat
if hiBmLvr_stat is not None:
fake_stalk["HiBmLvr_Stat"] = hiBmLvr_stat
if hrnSw_psd is not None:
fake_stalk["HrnSw_Psd"] = hrnSw_psd
# message count should be 1 more than the previous (and loop after 16)
fake_stalk["MC_STW_ACTN_RQ"] = (int(round(fake_stalk["MC_STW_ACTN_RQ"])) + 1) % 16
# CRC should initially be 0 before a new one is calculated.
fake_stalk["CRC_STW_ACTN_RQ"] = 0
# Set the first byte, containing cruise control
struct.pack_into(
"B",
msg,
0,
int(round(fake_stalk["SpdCtrlLvr_Stat"]))
+ (int(round(fake_stalk["VSL_Enbl_Rq"])) << 6),
)
# Set the 2nd byte, containing DTR_Dist_Rq
struct.pack_into("B", msg, 1, int(fake_stalk["DTR_Dist_Rq"]))
# Set the 3rd byte, containing turn indicator, highbeams, and wiper wash
struct.pack_into(
"B",
msg,
2,
int(round(fake_stalk["TurnIndLvr_Stat"]))
+ (int(round(fake_stalk["HiBmLvr_Stat"])) << 2)
+ (int(round(fake_stalk["WprWashSw_Psd"])) << 4)
+ (int(round(fake_stalk["WprWash_R_Sw_Posn_V2"])) << 6),
)
# Set the 4th byte, containing the car horn (and steering wheel adjust lever)?
struct.pack_into(
"B",
msg,
3,
int(round(fake_stalk["StW_Lvr_Stat"]))
+ (int(round(fake_stalk["StW_Cond_Flt"])) << 3)
+ (int(round(fake_stalk["StW_Cond_Psd"])) << 4)
+ (int(round(fake_stalk["HrnSw_Psd"])) << 6),
)
# Set the 7th byte, containing the wipers and message counter.
struct.pack_into(
"B",
msg,
6,
int(round(fake_stalk["WprSw6Posn"])) + (fake_stalk["MC_STW_ACTN_RQ"] << 4),
)
# Finally, set the CRC for the message. Must be calculated last!
fake_stalk["CRC_STW_ACTN_RQ"] = add_tesla_crc(msg=msg, msg_len=7)
struct.pack_into("B", msg, msg_len - 1, int(fake_stalk["CRC_STW_ACTN_RQ"]))
return [msg_id, 0, msg.raw, 0]
```
#### File: test/process_replay/test_processes.py
```python
import argparse
import os
import requests
import sys
import tempfile
from selfdrive.car.car_helpers import interface_names
from selfdrive.test.process_replay.process_replay import replay_process, CONFIGS
from selfdrive.test.process_replay.compare_logs import compare_logs
from tools.lib.logreader import LogReader
INJECT_MODEL = 0
segments = [
("TESLA", "d3126df386f83c4d|2020-04-22--13-17-39--3"), # TESLA.MODELS
("HONDA", "0375fdf7b1ce594d|2019-06-13--08-32-25--3"), # HONDA.ACCORD
("HONDA", "99c94dc769b5d96e|2019-08-03--14-19-59--2"), # HONDA.CIVIC
("TOYOTA", "77611a1fac303767|2020-02-29--13-29-33--3"), # TOYOTA.COROLLA_TSS2
("GM", "7cc2a8365b4dd8a9|2018-12-02--12-10-44--2"), # GM.ACADIA
("CHRYSLER", "b6849f5cf2c926b1|2020-02-28--07-29-48--13"), # CHRYSLER.PACIFICA
("HYUNDAI", "5b7c365c50084530|2020-04-15--16-13-24--3"), # HYUNDAI.SONATA
#("CHRYSLER", "b6e1317e1bfbefa6|2020-03-04--13-11-40"), # CHRYSLER.JEEP_CHEROKEE
("SUBARU", "7873afaf022d36e2|2019-07-03--18-46-44--0"), # SUBARU.IMPREZA
("VOLKSWAGEN", "76b83eb0245de90e|2020-03-05--19-16-05--3"), # VW.GOLF
# Enable when port is tested and dascamOnly is no longer set
("NISSAN", "fbbfa6af821552b9|2020-03-03--08-09-43--0"), # NISSAN.XTRAIL
]
# ford doesn't need to be tested until a full port is done
excluded_interfaces = ["mock", "ford"]
BASE_URL = "https://commadataci.blob.core.windows.net/openpilotci/"
# run the full test (including checks) when no args given
FULL_TEST = len(sys.argv) <= 1
def get_segment(segment_name, original=True):
route_name, segment_num = segment_name.rsplit("--", 1)
if original:
rlog_url = BASE_URL + "%s/%s/rlog.bz2" % (route_name.replace("|", "/"), segment_num)
else:
process_replay_dir = os.path.dirname(os.path.abspath(__file__))
model_ref_commit = open(os.path.join(process_replay_dir, "model_ref_commit")).read().strip()
rlog_url = BASE_URL + "%s/%s/rlog_%s.bz2" % (route_name.replace("|", "/"), segment_num, model_ref_commit)
req = requests.get(rlog_url)
assert req.status_code == 200, ("Failed to download log for %s" % segment_name)
with tempfile.NamedTemporaryFile(delete=False, suffix=".bz2") as f:
f.write(req.content)
return f.name
def test_process(cfg, lr, cmp_log_fn, ignore_fields=[], ignore_msgs=[]):
if not os.path.isfile(cmp_log_fn):
assert False, ("Failed to open %s" % cmp_log_fn)
else:
print("Opening file [%s]" % cmp_log_fn)
cmp_log_msgs = list(LogReader(cmp_log_fn))
log_msgs = replay_process(cfg, lr)
# check to make sure openpilot is engaged in the route
# TODO: update routes so enable check can run
# failed enable check: honda bosch, hyundai, chrysler, and subaru
if cfg.proc_name == "controlsd" and FULL_TEST and False:
for msg in log_msgs:
if msg.which() == "controlsState":
if msg.controlsState.active:
break
else:
segment = cmp_log_fn.split("/")[-1].split("_")[0]
raise Exception("Route never enabled: %s" % segment)
return compare_logs(cmp_log_msgs, log_msgs, ignore_fields+cfg.ignore, ignore_msgs)
def format_diff(results, ref_commit):
diff1, diff2 = "", ""
diff2 += "***** tested against commit %s *****\n" % ref_commit
failed = False
for segment, result in list(results.items()):
diff1 += "***** results for segment %s *****\n" % segment
diff2 += "***** differences for segment %s *****\n" % segment
for proc, diff in list(result.items()):
diff1 += "\t%s\n" % proc
diff2 += "*** process: %s ***\n" % proc
if isinstance(diff, str):
diff1 += "\t\t%s\n" % diff
failed = True
elif len(diff):
cnt = {}
for d in diff:
diff2 += "\t%s\n" % str(d)
k = str(d[1])
cnt[k] = 1 if k not in cnt else cnt[k] + 1
for k, v in sorted(cnt.items()):
diff1 += "\t\t%s: %s\n" % (k, v)
failed = True
return diff1, diff2, failed
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Regression test to identify changes in a process's output")
# whitelist has precedence over blacklist in case both are defined
parser.add_argument("--whitelist-procs", type=str, nargs="*", default=[],
help="Whitelist given processes from the test (e.g. controlsd)")
parser.add_argument("--whitelist-cars", type=str, nargs="*", default=[],
help="Whitelist given cars from the test (e.g. HONDA)")
parser.add_argument("--blacklist-procs", type=str, nargs="*", default=[],
help="Blacklist given processes from the test (e.g. controlsd)")
parser.add_argument("--blacklist-cars", type=str, nargs="*", default=[],
help="Blacklist given cars from the test (e.g. HONDA)")
parser.add_argument("--ignore-fields", type=str, nargs="*", default=[],
help="Extra fields or msgs to ignore (e.g. carState.events)")
parser.add_argument("--ignore-msgs", type=str, nargs="*", default=[],
help="Msgs to ignore (e.g. carEvents)")
args = parser.parse_args()
cars_whitelisted = len(args.whitelist_cars) > 0
procs_whitelisted = len(args.whitelist_procs) > 0
process_replay_dir = os.path.dirname(os.path.abspath(__file__))
ref_files_dir = os.path.join(process_replay_dir,"ref_files")
try:
ref_commit = open(os.path.join(process_replay_dir, "ref_commit")).read().strip()
except:
print("couldn't find reference commit")
sys.exit(1)
print("***** testing against commit %s *****" % ref_commit)
# check to make sure all car brands are tested
if FULL_TEST:
tested_cars = set(c.lower() for c, _ in segments)
untested = (set(interface_names) - set(excluded_interfaces)) - tested_cars
assert len(untested) == 0, "Cars missing routes: %s" % (str(untested))
results = {}
for car_brand, segment in segments:
if (cars_whitelisted and car_brand.upper() not in args.whitelist_cars) or \
(not cars_whitelisted and car_brand.upper() in args.blacklist_cars):
continue
print("***** testing route segment %s *****\n" % segment)
results[segment] = {}
rlog_fn = get_segment(segment)
lr = LogReader(rlog_fn)
for cfg in CONFIGS:
if (procs_whitelisted and cfg.proc_name not in args.whitelist_procs) or \
(not procs_whitelisted and cfg.proc_name in args.blacklist_procs):
continue
cmp_log_fn = os.path.join(ref_files_dir, "%s_%s_%s.bz2" % (segment, cfg.proc_name, ref_commit))
results[segment][cfg.proc_name] = test_process(cfg, lr, cmp_log_fn, args.ignore_fields, args.ignore_msgs)
os.remove(rlog_fn)
diff1, diff2, failed = format_diff(results, ref_commit)
with open(os.path.join(process_replay_dir, "diff.txt"), "w") as f:
f.write(diff2)
print(diff1)
print("TEST", "FAILED" if failed else "SUCCEEDED")
print("\n\nTo update the reference logs for this test run:")
print("./update_refs.py")
sys.exit(int(failed))
```
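The car and process filters in the main block both follow the rule stated in the argparse comment: a non-empty whitelist takes precedence, otherwise the blacklist is applied. A hedged sketch of that predicate as a standalone helper (not part of the original script):
```python
def is_skipped(name, whitelist, blacklist):
    """Mirror the whitelist-over-blacklist filtering used for cars and processes."""
    if whitelist:            # whitelist has precedence when it is non-empty
        return name not in whitelist
    return name in blacklist

assert is_skipped("controlsd", [], ["controlsd"]) is True
assert is_skipped("controlsd", ["controlsd"], ["radard"]) is False
assert is_skipped("radard", [], []) is False
```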
#### File: selfdrive/tinklad/tinkla_interface.py
```python
from cereal import tinkla
import os
import zmq
import datetime
from selfdrive.tinklad import tinklad
import time
## For helpers:
import traceback
from selfdrive.car.tesla.readconfig import CarSettings
from common.params import Params
LOG_PREFIX = "tinklad client: "
tinklaClient = None
def now_iso8601():
return datetime.datetime.utcnow().replace(microsecond=0).isoformat()+"+0000"
class TinklaClient():
sock = None
pid = None
lastCanErrorTimestamp = 0
lastProcessErrorTimestamp = 0
eventCategoryKeys = tinklad.TinklaInterfaceEventCategoryKeys()
messageTypeKeys = tinklad.TinklaInterfaceMessageKeys()
actions = tinklad.TinklaInterfaceActions()
# Configurable:
# Note: If throttling, events are dropped
shouldThrottleCanErrorEvents = True
shouldThrottleProcessCommErrorEvents = True
# Setting to every 30min for now, because we're getting a bunch of plan, pathPlan issues.
# Should change to around every 1min in the future when this is resolved
throttlingPeriodInSeconds = (60*30) # One event every `throttlingPeriodInSeconds`
def start_client(self):
if os.getpid() == self.pid:
return
try:
self.zctx = zmq.Context()
self.sock = self.zctx.socket(zmq.PUSH)
self.sock.connect("ipc:///tmp/tinklad")
self.pid = os.getpid()
except zmq.ZMQError:
print("Unable to connect to tinklad")
self.sock = None
def setUserInfo(self, info):
self.start_client()
if self.sock is None:
return
info.timestamp = now_iso8601()
message = tinkla.Interface.new_message()
message.version = tinkla.interfaceVersion
message.message.userInfo = info
message.message.userInfo.version = tinkla.interfaceVersion
try:
self.sock.send(message.to_bytes(), zmq.NOBLOCK)
except zmq.error.Again:
# drop :/
pass
def logUserEvent(self, event):
self.start_client()
if self.sock is None:
return
event.timestamp = now_iso8601()
message = tinkla.Interface.new_message()
message.version = tinkla.interfaceVersion
message.message.event = event
message.message.event.version = tinkla.interfaceVersion
try:
self.sock.send(message.to_bytes(), zmq.NOBLOCK)
except zmq.error.Again:
# drop :/
pass
def attemptToSendPendingMessages(self):
self.start_client()
if self.sock is None:
return
message = tinkla.Interface.new_message()
message.version = tinkla.interfaceVersion
message.message.action = self.actions.attemptToSendPendingMessages
try:
self.sock.send(message.to_bytes(), zmq.NOBLOCK)
except zmq.error.Again:
# drop :/
pass
## Helpers:
def logCrashStackTraceEvent(self, openPilotId = None):
if openPilotId is None:
openPilotId = self.openPilotId
event = tinkla.Interface.Event.new_message(
openPilotId=openPilotId,
source="n/a",
category=self.eventCategoryKeys.crash,
name="crash",
)
trace = traceback.format_exc().replace('"', '`').replace("'", '`')
userInfo = "User Handle: %s \nOpenPilotId: %s" % (self.userHandle, self.openPilotId)
gitInfo = "Git Remote: %s\nBranch: %s\nCommit: %s" % (self.gitRemote, self.gitBranch, self.gitHash)
event.value.textValue="%s\n%s\n%s" % (userInfo, gitInfo, trace)
self.logUserEvent(event)
def logCANErrorEvent(self, source, canMessage, additionalInformation, openPilotId = None):
if not self.carSettings.shouldLogCanErrors:
return
if self.shouldThrottleCanErrorEvents:
now = time.time()
if now - self.lastCanErrorTimestamp < self.throttlingPeriodInSeconds:
return
self.lastCanErrorTimestamp = now
if openPilotId is None:
openPilotId = self.openPilotId
event = tinkla.Interface.Event.new_message(
openPilotId=openPilotId,
source=source,
category=self.eventCategoryKeys.canError,
name="CAN Error",
)
canInfo = "Can Message: {0}".format(hex(canMessage))
userInfo = "User Handle: %s \nOpenPilotId: %s" % (self.userHandle, self.openPilotId)
gitInfo = "Git Remote: %s\nBranch: %s\nCommit: %s" % (self.gitRemote, self.gitBranch, self.gitHash)
event.value.textValue="%s\n%s\n%s\n%s" % (userInfo, gitInfo, canInfo, additionalInformation)
self.logUserEvent(event)
def logProcessCommErrorEvent(self, source, processName, count, eventType, openPilotId = None):
if not self.carSettings.shouldLogProcessCommErrors:
return
if self.shouldThrottleProcessCommErrorEvents:
now = time.time()
if now - self.lastProcessErrorTimestamp < self.throttlingPeriodInSeconds:
return
self.lastProcessErrorTimestamp = now
if openPilotId is None:
openPilotId = self.openPilotId
event = tinkla.Interface.Event.new_message(
openPilotId=openPilotId,
source=processName,
category=self.eventCategoryKeys.processCommError,
name="Process Comm Error",
)
additionalInformation = "Process: '%s' \nType: '%s' \nCount: '%d' \nSource: '%s'" % (processName, eventType, count, source)
userInfo = "User Handle: %s \nOpenPilotId: %s" % (self.userHandle, self.openPilotId)
gitInfo = "Git Remote: %s\nBranch: %s\nCommit: %s" % (self.gitRemote, self.gitBranch, self.gitHash)
event.value.textValue="%s\n%s\n%s" % (userInfo, gitInfo, additionalInformation)
self.logUserEvent(event)
def print_msg(self, message):
print(message)
def __init__(self):
try:
params = Params()
except OSError:
params = Params(db="./params")
try:
self.carSettings = CarSettings()
except IOError:
self.carSettings = CarSettings(optional_config_file_path="./bb_openpilot.cfg")
self.openPilotId = params.get("DongleId")
self.userHandle = self.carSettings.userHandle
self.gitRemote = params.get("GitRemote")
self.gitBranch = params.get("GitBranch")
self.gitHash = params.get("GitCommit")
self.start_client()
tinklaClient = self
``` |
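`logCANErrorEvent` and `logProcessCommErrorEvent` above share the same drop-based throttle: if fewer than `throttlingPeriodInSeconds` have elapsed since the last accepted event, the new event is silently discarded rather than queued. A minimal standalone sketch of that policy (class and method names are illustrative, not from the module):
```python
import time

class DropThrottle:
    """Accept at most one event per `period_seconds`; extra events are dropped."""

    def __init__(self, period_seconds=60 * 30):
        self.period_seconds = period_seconds
        self.last_accepted = 0.0

    def allow(self) -> bool:
        now = time.time()
        if now - self.last_accepted < self.period_seconds:
            return False   # drop the event, exactly as the client above does
        self.last_accepted = now
        return True

throttle = DropThrottle(period_seconds=1)
print(throttle.allow(), throttle.allow())  # True False
```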
{
"source": "JoeyValentine/cardiac-mri-visualization",
"score": 2
} |
#### File: JoeyValentine/cardiac-mri-visualization/myocardial_perfusion.py
```python
import sys
import scipy
import pickle
import scipy.misc
import numpy as np
import SimpleITK as sitk
import PyQt5
import pyqtgraph.opengl as gl
import matplotlib.pyplot as plt
import pyqtgraph as pg
from scipy import ndimage
from PyQt5 import QtCore, QtGui
# from pyqtgraph.Qt import QtGui
from pyqtgraph.pgcollections import OrderedDict
Gradients = OrderedDict([
('bw', {'ticks': [(0.0, (0, 0, 0, 255)), (1, (255, 255, 255, 255))], 'mode': 'rgb'}),
('hot', {'ticks': [(0.3333, (185, 0, 0, 255)), (0.6666, (255, 220, 0, 255)), (1, (255, 255, 255, 255)), (0, (0, 0, 0, 255))], 'mode': 'rgb'}),
('jet', {'ticks': [(1, (166, 0, 0, 255)), (0.32247191011235954, (0, 255, 255, 255)), (0.11348314606741573, (0, 68, 255, 255)), (0.6797752808988764, (255, 255, 0, 255)), (0.902247191011236, (255, 0, 0, 255)), (0.0, (0, 0, 166, 255)), (0.5022471910112359, (0, 255, 0, 255))], 'mode': 'rgb'}),
('summer', {'ticks': [(1, (255, 255, 0, 255)), (0.0, (0, 170, 127, 255))], 'mode': 'rgb'} ),
('space', {'ticks': [(0.562, (75, 215, 227, 255)), (0.087, (255, 170, 0, 254)), (0.332, (0, 255, 0, 255)), (0.77, (85, 0, 255, 255)), (0.0, (255, 0, 0, 255)), (1.0, (255, 0, 127, 255))], 'mode': 'rgb'}),
('winter', {'ticks': [(1, (0, 255, 127, 255)), (0.0, (0, 0, 255, 255))], 'mode': 'rgb'})
])
def crop(img, rect):
'''
:param img: image to crop
:param rect: [row_min, col_min, width, height]
:return: cropped image
'''
row_min, col_min, width, height = rect
new_img = img[row_min:row_min+height, col_min:col_min+width]
return new_img
def diff_mask(lhs_mask, rhs_mask):
"""
:param lhs_mask : 2d binary numpy array
:param rhs_mask : 2d binary numpy array
:return : mask set difference, 1 where lhs_mask is 1 and rhs_mask is 0
"""
row_num, col_num = lhs_mask.shape
ret_mask = np.zeros(lhs_mask.shape)
for i in range(row_num):
for j in range(col_num):
if lhs_mask[i, j] == 1 and rhs_mask[i, j] == 0:
ret_mask[i, j] = 1
return ret_mask
def slice_interpolation(mask, num_of_inserted_picture):
mask_diastole_myo = []
_, _, slice_num = mask.shape
for i in range(0, slice_num-1):
mask1 = mask[:, :, i+1] > 0
mask1_1 = mask[:, :, i] > 0
mask1 = mask1.astype(float)
mask1_1 = mask1_1.astype(float)
Di_1_mask1 = scipy.ndimage.morphology.distance_transform_edt(mask1_1) - scipy.ndimage.morphology.distance_transform_edt(1 - mask1_1)
Di_mask1 = scipy.ndimage.morphology.distance_transform_edt(mask1) - scipy.ndimage.morphology.distance_transform_edt(1 - mask1)
mask_diastole_myo.append(mask1_1)
for j in range(1, num_of_inserted_picture + 1):
weight_Di = j / (num_of_inserted_picture + 1)
weight_Di_1 = 1 - weight_Di
image_1 = weight_Di_1 * Di_1_mask1 + weight_Di * Di_mask1
binary_1 = image_1 > 0
binary_1 = binary_1.astype(float)
mask_diastole_myo.append(binary_1)
mask_diastole_myo.append(mask1)
mask_diastole_myo = np.dstack(mask_diastole_myo)
return mask_diastole_myo
def registration(cine_pkl_file_name, lge_pkl_file_name):
"""
:param cine_pkl_file_name: cine pickle file name
:param lge_pkl_file_name: lge pickle file name
:return:
mask_epi_3d :
mask_endo_3d :
scar3d :
"""
SLNO = 6
cine_data = pickle.load(open(cine_pkl_file_name, 'rb'))
lge_data = pickle.load(open(lge_pkl_file_name, 'rb'))
lge_img = lge_data['lge_img']
nSD_N = lge_data['nSD_N']
mask_scar_nSD = lge_data['mask_scar_nSD']
mask_myoseg = lge_data['mask_myoseg']
cine_img = cine_data['cine_img']
mask_diastole_endo_cine = cine_data['mask_diastole_endo']
mask_diastole_epi_cine = cine_data['mask_diastole_epi']
frameno_diastole = cine_data['frameno_diastole']
cine_pixelspacing = cine_data['pixelspacing']
cine_spacingbetweenslices = cine_data['spacingbetweenslices']
# If True, resize the LGE image with scipy.misc.imresize.
# If False, skip the scipy.misc.imresize step.
lge_resize_flag = True
nrow, ncol, _, loop_len = cine_img.shape
mask_scar_nSD_img_stack = []
_, _, loop_len, _ = mask_scar_nSD.shape
if lge_resize_flag:
# interpolation
ret = scipy.misc.imresize(lge_img[:, :, SLNO], 1.56 / cine_pixelspacing, interp='bicubic')
row_min = 30
col_min = 30
cropped_lge_img = crop(ret, [row_min, col_min, ncol, nrow])
mask_myoseg_img = scipy.misc.imresize(mask_myoseg[:, :, SLNO], 1.56 / cine_pixelspacing, interp='bicubic')
for i in range(loop_len):
mask_scar_nSD_img = scipy.misc.imresize(mask_scar_nSD[:, :, i, nSD_N - 2], 1.56 / cine_pixelspacing,
interp='bicubic')
cropped_mask_scar_nSD_img = crop(mask_scar_nSD_img, [row_min, col_min, ncol, nrow])
mask_scar_nSD_img_stack.append(cropped_mask_scar_nSD_img)
mask_scar_nSD_img = scipy.misc.imresize(mask_scar_nSD[:, :, SLNO, nSD_N - 2], 1.56 / cine_pixelspacing,
interp='bicubic')
else:
ret = scipy.misc.imresize(lge_img[:, :, SLNO], 1.56 / cine_pixelspacing, interp='bicubic')
cropped_lge_img = ret
fixed_image = cine_img[:, :, frameno_diastole, SLNO]
moving_image = cropped_lge_img
fixed_image = sitk.GetImageFromArray(fixed_image, isVector=True)
moving_image = sitk.GetImageFromArray(moving_image, isVector=True)
fixed_image_255 = sitk.Cast(sitk.RescaleIntensity(fixed_image), sitk.sitkUInt8)
moving_image_255 = sitk.Cast(sitk.RescaleIntensity(moving_image), sitk.sitkUInt8)
initial_transform = sitk.CenteredTransformInitializer(fixed_image_255,
moving_image_255,
sitk.Euler2DTransform(),
sitk.CenteredTransformInitializerFilter.GEOMETRY)
registration_method = sitk.ImageRegistrationMethod()
# Similarity metric settings.
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
# Optimizer settings.
learning_rate = 1.0 # default = 1.0
registration_method.SetOptimizerAsGradientDescent(learningRate=learning_rate, numberOfIterations=100,
convergenceMinimumValue=1e-6, convergenceWindowSize=10)
registration_method.SetOptimizerScalesFromPhysicalShift()
# Don't optimize in-place, we would possibly like to run this cell multiple times.
registration_method.SetInitialTransform(initial_transform, inPlace=False)
# Connect all of the observers so that we can perform plotting during registration.
final_transform = registration_method.Execute(sitk.Cast(fixed_image_255, sitk.sitkFloat32),
sitk.Cast(moving_image_255, sitk.sitkFloat32))
moving_resampled_scar_255_stack = []
for i in range(len(mask_scar_nSD_img_stack)):
mask_scar_nSD_255 = sitk.GetImageFromArray(mask_scar_nSD_img_stack[i], isVector=True)
mask_scar_nSD_255 = sitk.Cast(sitk.RescaleIntensity(mask_scar_nSD_255), sitk.sitkUInt8)
moving_resampled_scar = sitk.Resample(mask_scar_nSD_255, fixed_image_255, final_transform, sitk.sitkLinear, 0.0,
mask_scar_nSD_255.GetPixelIDValue())
moving_resampled_scar_255 = sitk.GetArrayFromImage(moving_resampled_scar)
moving_resampled_scar_255_stack.append(moving_resampled_scar_255)
moving_resampled_scar_255_stack = np.dstack(moving_resampled_scar_255_stack)
mask_diastole_endo = mask_diastole_endo_cine
mask_diastole_epi = mask_diastole_epi_cine
numOfInsertedPicture = round(cine_spacingbetweenslices / cine_pixelspacing)
mask_diastole_epi = mask_diastole_epi.astype(float)
mask_diastole_endo = mask_diastole_endo.astype(float)
mask_epi_3d = slice_interpolation(mask_diastole_epi[:, :, 3:8], numOfInsertedPicture)
mask_endo_3d = slice_interpolation(mask_diastole_endo[:, :, 3:8], numOfInsertedPicture)
moving_resampled_scar_255_inter_stack = []
# scar interpolation
_, _, loop_len = moving_resampled_scar_255_stack.shape
for i in range(2, loop_len - 3):
scar = moving_resampled_scar_255_stack[:, :, i + 1] > 0
scar_before = moving_resampled_scar_255_stack[:, :, i] > 0
scar = scar.astype(float)
scar_before = scar_before.astype(float)
Di_1 = scipy.ndimage.morphology.distance_transform_edt(
scar_before) - scipy.ndimage.morphology.distance_transform_edt(1 - scar_before)
Di = scipy.ndimage.morphology.distance_transform_edt(scar) - scipy.ndimage.morphology.distance_transform_edt(
1 - scar)
moving_resampled_scar_255_inter_stack.append(scar_before)
for j in range(1, numOfInsertedPicture + 1):
weight_Di = j / (numOfInsertedPicture + 1)
weight_Di_1 = 1 - weight_Di
image = weight_Di_1 * Di_1 + weight_Di * Di
binary = image > 0
binary = binary.astype(float)
moving_resampled_scar_255_inter_stack.append(binary)
moving_resampled_scar_255_inter_stack.append(scar)
moving_resampled_scar_255_inter_stack = np.dstack(moving_resampled_scar_255_inter_stack)
scar3d = moving_resampled_scar_255_inter_stack
return mask_epi_3d, mask_endo_3d, scar3d
def planes_alignment(planes, zLVc, zRVi):
aligned_planes = []
num_of_surfaces = len(planes)
for i in range(num_of_surfaces):
idx = num_of_surfaces - i - 1
xLVc, yLVc = zLVc[idx].real, zLVc[idx].imag
xRVi, yRVi = zRVi[idx].real, zRVi[idx].imag
planes[i].translate(-yLVc, -xLVc, 0)
tan_val = (xRVi - xLVc) / (yRVi - yLVc)
if i != 0:
diff_angle = np.arctan((base_tan_val - tan_val) / (1 + base_tan_val*tan_val))
planes[i].rotate(diff_angle * 180. / np.pi, 0, 0, 1)
else:
base_tan_val = tan_val
aligned_planes.append(planes[i])
return aligned_planes
def get_multiple_planes_nonbinary_colorbar(myo_map, slice_locations, zLVc, zRVi, ymin, ymax, cm):
ret_planes = []
row_num, col_num, num_of_surfaces = myo_map.shape
midslice_loc_est = np.mean(slice_locations)
for i in range(num_of_surfaces-1, -1, -1):
xLVc, yLVc = zLVc[i].real, zLVc[i].imag
xRVi, yRVi = zRVi[i].real, zRVi[i].imag
upslope_values = myo_map[:, :, i]
upslope_values_scaled = np.zeros(upslope_values.shape)
for j in range(row_num):
for k in range(col_num):
upslope_values_scaled[j, k] = (upslope_values[j, k] - ymin) / (ymax - ymin)
colors_rgba = cm.mapToFloat(upslope_values_scaled)
for j in range(row_num):
for k in range(col_num):
if colors_rgba[j, k, 0] == 0 and colors_rgba[j, k, 1] == 0 and colors_rgba[j, k, 2] == 0:
colors_rgba[j, k, 3] = 0
colors_rgba[int(yRVi), int(xRVi), :] = np.array([1., 1., 1., 1.])
colors_rgba[int(yLVc), int(xLVc), :] = np.array([0., 1., 0., 1.])
z = -(slice_locations[i] - midslice_loc_est) * np.ones((row_num, col_num))
item = gl.GLSurfacePlotItem(z=z, colors=colors_rgba.reshape(row_num*col_num, 4), smooth=True, shader='balloon', glOptions='translucent')
ret_planes.append(item)
aligned_ret_planes = planes_alignment(ret_planes, zLVc, zRVi)
return aligned_ret_planes
if __name__ == '__main__':
# PyQt5.QtCore.QCoreApplication.addLibraryPath('.')
# os.environ['QT_QPA_PLATFORM_PLUGIN_PATH'] = 'D:\Anaconda3\envs\py34\Library\plugins\platforms'
directory = "D:\\SimpleITK-Notebooks\\Python"
sys.path.append(directory)
perf_pkl_file_name = 'pkl_data/perfusion_1143____.pkl'
perf_data = pickle.load(open(perf_pkl_file_name, 'rb'))
upslope_map = perf_data['upslope_map']
slice_locations = perf_data['slicelocation']
zLVc = perf_data['LVc']
zRVi = perf_data['RVi']
# Create a PyQt application object.
app = QtGui.QApplication(sys.argv)
win = QtGui.QWidget()
layout = QtGui.QGridLayout()
win.setLayout(layout)
cb = pg.GraphicsLayoutWidget()
ax = pg.AxisItem('left')
ymin = 0.0
ymax = 0.4  # upslope range.
ax.setRange(ymin, ymax)
cb.addItem(ax)
cmap = 'hot'
gw = pg.GradientEditorItem(orientation='right')
GradientMode = Gradients[cmap]
gw.restoreState(GradientMode)
cb.addItem(gw)
view = gl.GLViewWidget()
view.setSizePolicy(cb.sizePolicy())
layout.addWidget(view, 0, 0)
layout.addWidget(cb, 0, 1)
layout.setColumnStretch(1, 0)
layout.setColumnMinimumWidth(1, 120)
layout.setColumnStretch(0, 1)
view.sizeHint = lambda: pg.QtCore.QSize(1700, 800)
cb.sizeHint = lambda: pg.QtCore.QSize(100, 800)
layout.setHorizontalSpacing(0)
win.resize(800, 800)
cm = gw.colorMap()
planes = get_multiple_planes_nonbinary_colorbar(upslope_map, slice_locations, zLVc, zRVi, ymin, ymax, cm)
for plane in planes:
view.addItem(plane)
dst = 100
view.setCameraPosition(distance=dst)
win.show()
print(slice_locations)
cmap0 = 'hot'
fig = plt.figure(3)
for j in range(4):
xLVc, yLVc = zLVc[j].real, zLVc[j].imag
xRVi, yRVi = zRVi[j].real, zRVi[j].imag
ax = plt.subplot(1, 4, j+1)
cax = ax.imshow(upslope_map[:, :, j], cmap=cmap0, clim=[0, 0.4])
plt.plot(xLVc, yLVc, 'ro')
plt.plot(xRVi, yRVi, 'bo')
ax.set_title('upslope map')
cbar = fig.colorbar(cax, ticks=[0, 0.1, 0.2, 0.3, 0.4])
cbar.ax.set_yticklabels(['0', '0.1', '0.2', '0.3', '0.4'])
plt.show()
sys.exit(app.exec_())
``` |
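Both `slice_interpolation` and the scar-interpolation loop above rely on the same shape-based idea: turn each binary mask into a signed distance map (positive inside the shape, negative outside), blend the two maps linearly, and re-threshold at zero to obtain an intermediate contour. A compact hedged sketch of that core step on two toy masks:
```python
import numpy as np
from scipy import ndimage

def interpolate_masks(mask_a, mask_b, weight_b):
    """Shape-based interpolation between two binary masks.
    weight_b in [0, 1]: 0 reproduces mask_a, 1 reproduces mask_b."""
    def signed_distance(mask):
        mask = mask.astype(float)
        return (ndimage.distance_transform_edt(mask)
                - ndimage.distance_transform_edt(1 - mask))
    blended = (1 - weight_b) * signed_distance(mask_a) + weight_b * signed_distance(mask_b)
    return (blended > 0).astype(float)

a = np.zeros((9, 9))
a[3:6, 3:6] = 1                        # small square
b = np.zeros((9, 9))
b[1:8, 1:8] = 1                        # larger square
mid = interpolate_masks(a, b, 0.5)     # intermediate-sized region between the two
```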
{
"source": "JoeyValentine/gen-centerlines",
"score": 2
} |
#### File: JoeyValentine/gen-centerlines/plot_mesh.py
```python
import pyvista as pv
def confirm():
if len(plotter.picked_path.points) == 2:
plotter.close()
if __name__ == '__main__':
vti_file_name = 'level_sets.vti'
data = pv.read(vti_file_name)
vol = data.threshold_percent(30, invert=1)
surf = vol.extract_geometry()
smooth_surf = surf.smooth(n_iter=1000)
plotter = pv.Plotter()
plotter.add_mesh(smooth_surf, style='wireframe', color='black')
plotter.add_key_event('a', confirm)
plotter.enable_path_picking(color='red')
plotter.show()
points = plotter.picked_path.points
print(points)
``` |
{
"source": "joeyvanlierop/battlesnake",
"score": 4
} |
#### File: joeyvanlierop/battlesnake/logic.py
```python
import random
from typing import List, Dict
def avoid_my_neck(my_head: Dict[str, int], my_body: List[dict], possible_moves: List[str]) -> List[str]:
"""
my_head: Dictionary of x/y coordinates of the Battlesnake head.
e.g. {"x": 0, "y": 0}
my_body: List of dictionaries of x/y coordinates for every segment of a Battlesnake.
e.g. [ {"x": 0, "y": 0}, {"x": 1, "y": 0}, {"x": 2, "y": 0} ]
possible_moves: List of strings. Moves to pick from.
e.g. ["up", "down", "left", "right"]
return: The list of remaining possible_moves, with the 'neck' direction removed
"""
my_neck = my_body[1] # The segment of body right after the head is the 'neck'
# Neck is left of head
if my_neck["x"] < my_head["x"]:
possible_moves = remove_direction("left", possible_moves)
# Neck is right of head
elif my_neck["x"] > my_head["x"]:
possible_moves = remove_direction("right", possible_moves)
# Neck is below head
elif my_neck["y"] < my_head["y"]:
possible_moves = remove_direction("down", possible_moves)
# Neck is above head
elif my_neck["y"] > my_head["y"]:
possible_moves = remove_direction("up", possible_moves)
return possible_moves
def avoid_body(my_head: Dict[str, int], body: List[dict], possible_moves: List[str]) -> List[str]:
"""
my_head: Dictionary of x/y coordinates of the Battlesnake head.
e.g. {"x": 0, "y": 0}
body: List of dictionaries of x/y coordinates for every segment of a Battlesnake.
e.g. [ {"x": 0, "y": 0}, {"x": 1, "y": 0}, {"x": 2, "y": 0} ]
possible_moves: List of strings. Moves to pick from.
e.g. ["up", "down", "left", "right"]
return: The list of remaining possible_moves which don't run into my snakes body
"""
up = {"x": my_head["x"], "y": my_head["y"] + 1}
down = {"x": my_head["x"], "y": my_head["y"] - 1}
left = {"x": my_head["x"] - 1, "y": my_head["y"]}
right = {"x": my_head["x"] + 1, "y": my_head["y"]}
# Body is left of head
if left in body:
possible_moves = remove_direction("left", possible_moves)
# Body is right of head
if right in body:
possible_moves = remove_direction("right", possible_moves)
# Body is below head
if down in body:
possible_moves = remove_direction("down", possible_moves)
# Body is above head
if up in body:
possible_moves = remove_direction("up", possible_moves)
return possible_moves
def avoid_board_edge(my_head: Dict[str, int], board_height: int, board_width: int, possible_moves: List[str]) -> List[str]:
"""
my_head: Dictionary of x/y coordinates of the Battlesnake head.
e.g. {"x": 0, "y": 0}
board_height: Integer height of the game board.
board_width: Integer width of the game board.
possible_moves: List of strings. Moves to pick from.
e.g. ["up", "down", "left", "right"]
return: The list of remaining possible_moves, with the 'board edge' direction removed
"""
# Head is on left edge of the board
if my_head["x"] == 0:
possible_moves = remove_direction("left", possible_moves)
# Head is on right edge of the board
elif my_head["x"] == board_width - 1:
possible_moves = remove_direction("right", possible_moves)
# Head is on bottom edge of the board
if my_head["y"] == 0:
possible_moves = remove_direction("down", possible_moves)
# Head is on top edge of the board
elif my_head["y"] == board_height - 1:
possible_moves = remove_direction("up", possible_moves)
return possible_moves
def find_closest_food(my_head: Dict[str, int], foods: List[Dict[str, int]]) -> Dict[str, int]:
"""
my_head: Dictionary of x/y coordinates of the Battlesnake head.
e.g. {"x": 0, "y": 0}
foods: List of dictionaries of x/y coordinates for every food location on the board.
e.g. [ {"x": 0, "y": 0}, {"x": 1, "y": 0}, {"x": 2, "y": 0} ]
return: The food location (x/y dictionary) closest to the Battlesnake head, or None if there is no food on the board.
"""
closest_distance = None
closest_food = None
for food in foods:
x_offset = my_head["x"] - food["x"]
y_offset = my_head["y"] - food["y"]
distance = x_offset ** 2 + y_offset ** 2
if closest_distance is None or distance < closest_distance:
closest_distance = distance
closest_food = food
return closest_food
def move_towards_food(my_head: Dict[str, int], food: Dict[str, int], possible_moves: List[str]) -> List[str]:
"""
my_head: Dictionary of x/y coordinates of the Battlesnake head.
e.g. {"x": 0, "y": 0}
food: Dictionary of x/y coordinates of the food location.
e.g. {"x": 0, "y": 0}
possible_moves: List of strings. Moves to pick from.
e.g. ["up", "down", "left", "right"]
return: The subset of possible_moves that heads towards the given food location
"""
if food is None:
# No food on the board: keep all remaining moves (returning a list keeps the signature consistent)
return possible_moves
towards_moves = []
# Head is left of the food
if my_head["x"] > food["x"]:
if "left" in possible_moves:
towards_moves.append("left")
# Head is right of the food
elif my_head["x"] < food["x"]:
if "right" in possible_moves:
towards_moves.append("right")
# Head is above the food
if my_head["y"] > food["y"]:
if "down" in possible_moves:
towards_moves.append("down")
# Head is below the food
elif my_head["y"] < food["y"]:
if "up" in possible_moves:
towards_moves.append("up")
if len(towards_moves) == 0:
return possible_moves
return towards_moves
def remove_direction(direction: str, possible_moves: List[str]) -> List[str]:
if direction in possible_moves:
possible_moves.remove(direction)
return possible_moves
def choose_move(data: dict) -> str:
"""
data: Dictionary of all Game Board data as received from the Battlesnake Engine.
For a full example of 'data', see https://docs.battlesnake.com/references/api/sample-move-request
return: A String, the single move to make. One of "up", "down", "left" or "right".
Use the information in 'data' to decide your next move. The 'data' variable can be interacted
with as a Python Dictionary, and contains all of the information about the Battlesnake board
for each move of the game.
"""
my_head = data["you"]["head"] # A dictionary of x/y coordinates like {"x": 0, "y": 0}
# A list of x/y coordinate dictionaries like [ {"x": 0, "y": 0}, {"x": 1, "y": 0}, {"x": 2, "y": 0} ]
my_body = data["you"]["body"]
# Log data
print(
f"~~~ Turn: {data['turn']} Game Mode: {data['game']['ruleset']['name']} ~~~")
print(f"All board data this turn: {data}")
print(f"My Battlesnakes head this turn is: {my_head}")
print(f"My Battlesnakes body this turn is: {my_body}")
possible_moves = ["up", "down", "left", "right"]
# Prevent snake from moving back in on its own neck
possible_moves = avoid_my_neck(my_head, my_body, possible_moves)
# Prevent snake from hitting any snakes
snakes = data["board"]["snakes"]
for snake in snakes:
body = snake["body"]
possible_moves = avoid_body(my_head, body, possible_moves)
# Prevent snake from moving off the edge of the board
board_height = data["board"]["height"]
board_width = data["board"]["width"]
possible_moves = avoid_board_edge(
my_head, board_height, board_width, possible_moves)
# Move towards the closest food
food = data["board"]["food"]
closest_food = find_closest_food(my_head, food)
possible_moves = move_towards_food(my_head, closest_food, possible_moves)
# Choose a random direction from the remaining possible_moves to move in, and then return that move
move = random.choice(possible_moves)
# TODO: Explore new strategies for picking a move that are better than random
print(f"{data['game']['id']} MOVE {data['turn']}: {move} picked from all valid options in {possible_moves}")
return move
``` |
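A short hedged example of how the helpers above compose for one turn, using a hand-built 5x5 board instead of a real engine request (assuming the module above is importable as `logic`):
```python
from logic import (avoid_my_neck, avoid_board_edge,
                   find_closest_food, move_towards_food)

head = {"x": 2, "y": 2}
body = [{"x": 2, "y": 2}, {"x": 2, "y": 1}, {"x": 2, "y": 0}]
foods = [{"x": 0, "y": 2}, {"x": 4, "y": 4}]

moves = ["up", "down", "left", "right"]
moves = avoid_my_neck(head, body, moves)         # neck is below the head -> drops "down"
moves = avoid_board_edge(head, 5, 5, moves)      # head is not on an edge -> no change
closest = find_closest_food(head, foods)         # {"x": 0, "y": 2}
moves = move_towards_food(head, closest, moves)  # ["left"]
print(moves)
```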
{
"source": "joeyw526/Personal",
"score": 2
} |
#### File: joeyw526/Personal/organization.py
```python
from sqlalchemy import *
from sqlalchemy import exc
from db import Base, Session
from datetime import datetime, date
from flask import json
from event import Event
from user import User
from orgmember import OrgMember
import organization
from werkzeug.security import generate_password_hash, check_password_hash
class Organization(User):
__tablename__ = 'organizations'
__mapper_args__ = {'polymorphic_identity' : 'organization'}
id = Column(Integer, ForeignKey('users.id'), primary_key=True, nullable=False)
address = Column(String(255), nullable=False)
city = Column(String(30), nullable=False)
state = Column(String(15), nullable=False)
zip = Column(String(5), nullable=False)
mission = Column(String(255), nullable=False)
poc = Column(String(60), nullable=False)
pics = Column(String(5000))
@classmethod
def fromdict(cls, d):
allowed = ('name', 'email', 'passwordhash', 'phone', 'last_active',
'address', 'city', 'state', 'zip', 'mission', 'poc', 'pics')
df = {k: e for k,e in d.items() if k in allowed}
return cls(**df)
def asdict(self):
dict_ = {}
for key in self.__mapper__.c.keys():
result = getattr(self, key)
if isinstance(result, date):
dict_[key] = str(result)
else:
dict_[key] = result
return dict_
def __init__(self, name, email, passwordhash, phone, address, city, state,
zip, mission, poc, pics=None):
# make sure the zip code is valid
if len(zip) != 5 or not(zip.isdigit()):
raise ValueError("a zip code must be 5 digits")
else:
self.zip = zip
self.name = name
self.email = email
self.set_password(<PASSWORD>)
if len(phone) > 10 :
raise ValueError("phone number is too long")
elif len(phone) < 10:
raise ValueError("phone number is too short")
elif phone.isdigit() == False:
raise ValueError("phone number must be a string of digits")
else:
self.phone = phone
self.permissions = 'organization'
self.address = address
self.city = city
self.state = state
self.mission = mission
self.poc = poc
self.last_activity = datetime.now()
self.pics = pics
def __repr__(self):
return "Organization(%s, %s)" % (self.id, self.name)
def set_password(self, password):
self.passwordhash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.passwordhash, password)
def deleteSelf(self, session):
events = session.query(Event).filter_by(org=self.id)
if events:
for e in events:
e.deleteSelf(session)
session.commit()
members = session.query(OrgMember).filter_by(org=self.id)
if members:
for m in members:
try:
session.delete(m)
except:
raise exc.SQLAlchemyError("failed to delete OrgMember " + m.id)
try:
session.delete(self)
session.commit()
except:
raise exc.SQLAlchemyError("failed to delete Organization " + self.id)
session.commit()
#def updateOrg(org_id, update_data):
# session = Session()
# try:
# session.query(Organization).filter_by(id=org_id).update(json.loads(update_data))
# session.commit()
# except:
# session.rollback()
# raise ValueError("id not found")
# finally:
# session.close()
# create an organization from a json object
def createOrganization(json1):
e = Organization.fromdict(json1)
s = Session()
try:
s.add(e)
s.commit()
except:
print("here")
return False
finally:
s.close()
return True
```
#### File: joeyw526/Personal/orgmember.py
```python
from user import User
from db import Base, Session
from sqlalchemy import *
from sqlalchemy.orm import relation, sessionmaker
from datetime import datetime, date
from attendee import Attendee
from werkzeug.security import generate_password_hash, check_password_hash
from flask import json
from sqlalchemy import exc
from event import Event
import organization
class OrgMember(User):
__tablename__ = "orgmembers"
__mapper_args__ = {'polymorphic_identity': 'orgmember'}
id = Column(Integer, ForeignKey('users.id'), primary_key=True, nullable=False)
# the OrgMember will have all User fields
org = Column(Integer, ForeignKey('organizations.id'), nullable=False) # object or id?
poc = Column(Boolean, nullable=False)
@classmethod
def fromdict(cls, d):
allowed = ('name', 'email', 'passwordhash', 'phone', 'last_active', 'birthdate',
'bio', 'gender', 'org', 'poc')
df = {k: v for k, v in d.items() if k in allowed}
return cls(**df)
def asdict(self):
dict_ = {}
for key in self.__mapper__.c.keys():
result = getattr(self, key)
if isinstance(result, date):
dict_[key] = str(result)
else:
dict_[key] = result
return dict_
def __init__(self, name, email, passwordhash, phone, poc, org, birthdate=None,
bio=None, gender=None):
self.name = name
self.email = email
self.set_password(<PASSWORD>)
if len(phone) > 15 :
raise ValueError("phone number is too long")
elif len(phone) < 10:
raise ValueError("phone number is too short")
elif phone.isdigit() == False:
raise ValueError("phone number must be a string of digits")
else:
self.phone = phone
self.poc = poc
self.last_activity = datetime.now()
self.birthdate = birthdate
self.bio = bio
self.gender = gender
self.org = org
def set_password(self, password):
self.passwordhash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.passwordhash, password)
# retrieve an OrgMember by id
def getOrgMember(self, id):
s = Session()
content = s.query(OrgMember).filter_by(id=id).first()
s.close()
if content:
return content
else:
raise ValueError("user does not exist")
def confirmAttendee(self, event, user):
s = Session()
attendee = s.query(Attendee).filter_by(event).filter_by(user).first()
if attendee:
attendee.confirmed = True
s.commit()
s.close()
return True
else:
return False
def validateHour(self, event, user):
s = Session()
attendee = s.query(Attendee).filter_by(event).filter_by(user).first()
if attendee:
attendee.hoursValidated = True
s.commit()
s.close()
return True
else:
return False
def deleteSelf(self, session):
s = session
try:
s.delete(self)
except:
raise exc.SQLAlchemyError("failed to delete orgMember " + self.id)
def link_org(orgmember):
s = Session()
o2_org = orgmember.org
org_m = s.query(OrgMember).filter_by(email=orgmember.email).first()
s.close()
if org_m:
org_id = org_m.id
else :
print (exc.InvalidRequestError("query failed"))
return False
json2 = json.dumps({'poc': org_id})
organization.updateOrg(o2_org, json2)
return True
def createMember(json):
o = OrgMember.fromdict(json)
s = Session()
try:
s.add(o)
s.commit()
except:
return False
finally:
s.close()
o2 = OrgMember.fromdict(json)
if link_org(o2):
return True
else:
return False
```
#### File: joeyw526/Personal/user.py
```python
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from datetime import datetime
from db import Base, Session
# abstract base class for Users
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True, nullable=False)
name = Column(String(75), nullable=False)
email = Column(String(60), nullable=False, unique=True)
passwordhash = Column(String(255), nullable=False)
phone = Column(String(15), nullable=False)
permissions = Column(Enum('volunteer', 'organization', 'admin'), nullable=False)
last_active = Column(DateTime(timezone=False), nullable=True)
token = Column(String(50))
__mapper_args__ = {
'polymorphic_identity':'user',
'polymorphic_on': permissions
}
def __repr__(self):
return "User(%s, %s)" % (self.id, self.name)
```
#### File: joeyw526/Personal/volunteerNeighborhoods.py
```python
from db import Base, Session
from sqlalchemy import *
from sqlalchemy.orm import relation, sessionmaker, relationship
from sqlalchemy import ForeignKey
from sqlalchemy import Enum
from enums import NeighborhoodsEnum
class VolunteerNeighborhoods(Base):
__tablename__ = 'volunteerNeighborhoods'
id = Column(Integer, primary_key=True)
neighborhood = Column(Enum("allston",
"backbay",
"bayvillage",
"beaconhill",
"brighton",
"charlestown",
"chinatown",
"dorchester",
"downtown",
"eastboston",
"fenwaykenmore",
"hyde",
"jamaica",
"mattapan",
"middorchester",
"missionhill",
"northend",
"roslindale",
"roxbury",
"southboston",
"southend",
"westend",
"westroxbury",
"greater", name="neighborhoods_enum"), nullable=False)
volunteer_id = Column(Integer, ForeignKey('volunteers.id'))
# volunteers = relationship("Volunteer", back_populates="volunteerNeighborhoods")
def __init__(self, neighborhood, volunteer_id):
self.neighborhood = neighborhood
self.volunteer_id = volunteer_id
def __repr__(self):
return "<VolunteerNeighborhoods(neighborhood='%s')>" % (self.neighborhood)
def create_v_neighborhood(volunteer_id, neighborhoods):
s = Session()
try:
for n in neighborhoods:
v = VolunteerNeighborhoods(n, volunteer_id)
s.add(v)
s.commit()
except:
s.rollback()
return False
finally:
s.close()
return True
def get_neighborhoods(id):
s = Session()
result = []
q = s.query(VolunteerNeighborhoods).filter_by(volunteer_id=id)
for n in q:
result.append(n.neighborhood)
s.close()
return result
``` |
{
"source": "joey-wang123/SDML",
"score": 2
} |
#### File: SDML/Dataloader/Plantae.py
```python
import numpy as np
from PIL import Image
import os
import io
import json
import glob
import h5py
from torchmeta.transforms import Categorical, ClassSplitter
from torchmeta.utils.data import Dataset, ClassDataset, CombinationMetaDataset
from torchvision.datasets.utils import download_url
from torchmeta.datasets.utils import get_asset
from torchmeta.transforms import ClassSplitter, Categorical, Rotation
from torchvision.transforms import ToTensor, Resize, Compose
from torchmeta.utils.data import BatchMetaDataLoader
import pickle
import torch
class Plantaedata(CombinationMetaDataset):
def __init__(self, root, num_classes_per_task=None, meta_train=False,
meta_val=False, meta_test=False, meta_split=None,
transform=None, target_transform=None, dataset_transform=None,
class_augmentations=None, download=False):
dataset = PlantaeClassDataset(root, meta_train=meta_train, meta_val=meta_val,
meta_test=meta_test, meta_split=meta_split, transform=transform,
class_augmentations=class_augmentations, download=download)
super(Plantaedata, self).__init__(dataset, num_classes_per_task,
target_transform=target_transform, dataset_transform=dataset_transform)
class PlantaeClassDataset(ClassDataset):
folder = 'Plantae'
filename = '{0}_data.hdf5'
filename_labels = '{0}_labels.json'
def __init__(self, root, meta_train=False, meta_val=False, meta_test=False,
meta_split=None, transform=None, class_augmentations=None,
download=False):
super(PlantaeClassDataset, self).__init__(meta_train=meta_train,
meta_val=meta_val, meta_test=meta_test, meta_split=meta_split,
class_augmentations=class_augmentations)
self.root = os.path.join(os.path.expanduser(root), self.folder)
self.transform = transform
self.split_filename = os.path.join(self.root,
self.filename.format(self.meta_split))
self.split_filename_labels = os.path.join(self.root,
self.filename_labels.format(self.meta_split))
self._data_file = None
self._data = None
self._labels = None
self._num_classes = len(self.labels)
def __getitem__(self, index):
label = self.labels[index % self.num_classes]
transform = self.get_transform(index, self.transform)
target_transform = self.get_target_transform(index)
class_dict = torch.load(self.root + '/'+self.meta_split + '/' + '{}.pt'.format(label))
data = class_dict[label]
return PlantaeDataset(index, data, label, transform=transform,
target_transform=target_transform)
@property
def num_classes(self):
return self._num_classes
@property
def labels(self):
if self._labels is None:
with open(self.split_filename_labels, 'r') as f:
self._labels = json.load(f)
return self._labels
class PlantaeDataset(Dataset):
def __init__(self, index, data, label,
transform=None, target_transform=None):
super(PlantaeDataset, self).__init__(index, transform=transform,
target_transform=target_transform)
self.data = data
self.label = label
def __len__(self):
return len(self.data)
def __getitem__(self, index):
image = self.data[index]
target = self.label
if self.target_transform is not None:
target = self.target_transform(target)
return (image, target)
```
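A hedged sketch of how `Plantaedata` would typically be consumed with torchmeta's episode utilities; the `'data'` root path and the shot/way counts below are placeholders, not values from the repository:
```python
from torchmeta.transforms import Categorical, ClassSplitter
from torchmeta.utils.data import BatchMetaDataLoader

dataset = Plantaedata('data',
                      num_classes_per_task=5,
                      meta_train=True,
                      target_transform=Categorical(num_classes=5),
                      dataset_transform=ClassSplitter(shuffle=True,
                                                      num_train_per_class=5,
                                                      num_test_per_class=15))
loader = BatchMetaDataLoader(dataset, batch_size=4, shuffle=True)

for batch in loader:
    train_inputs, train_targets = batch['train']   # support set
    test_inputs, test_targets = batch['test']      # query set
    break
```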
#### File: joey-wang123/SDML/model_filter.py
```python
import torch.nn as nn
import torch
def conv3x3(in_channels, out_channels, **kwargs):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, **kwargs),
nn.BatchNorm2d(out_channels),
nn.ReLU(),
nn.MaxPool2d(2)
)
def conv3x3nopool(in_channels, out_channels, **kwargs):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, **kwargs),
nn.BatchNorm2d(out_channels),
nn.ReLU()
)
def conv3x3nobatch(in_channels, out_channels, **kwargs):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, **kwargs),
nn.ReLU(),
nn.MaxPool2d(2)
)
def conv3x3_2(in_channels, out_channels, **kwargs):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, **kwargs),
nn.ReLU(),
nn.MaxPool2d(2)
)
class PrototypicalNetwork(nn.Module):
def __init__(self, in_channels, out_channels, hidden_size=64, num_tasks = 0, num_block = 1):
super(PrototypicalNetwork, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.hidden_size = hidden_size
self.taskcla = num_tasks
self.num_block = num_block
block_size = int(hidden_size/num_block)
self.conv1 = torch.nn.ModuleList()
for _ in range(self.num_block):
self.conv1.append(conv3x3nobatch(in_channels, block_size))
self.conv2 = torch.nn.ModuleList()
for _ in range(self.num_block):
self.conv2.append(conv3x3nobatch(hidden_size, block_size))
self.conv3 = torch.nn.ModuleList()
for _ in range(self.num_block):
self.conv3.append(conv3x3nobatch(hidden_size, block_size))
self.domain_out = torch.nn.ModuleList()
for _ in range(self.taskcla):
self.task = nn.Sequential(
conv3x3(hidden_size, hidden_size),
conv3x3(hidden_size, out_channels)
)
self.domain_out.append(self.task)
def forward(self, inputs, domain_id, s=1):
catlayer1 = []
for ind in range(self.num_block):
catlayer1.append(self.conv1[ind](inputs.view(-1, *inputs.shape[2:])))
h = torch.cat(catlayer1, 1)
catlayer2 = []
for ind in range(self.num_block):
catlayer2.append(self.conv2[ind](h))
h = torch.cat(catlayer2, 1)
catlayer3 = []
for ind in range(self.num_block):
catlayer3.append(self.conv3[ind](h))
h = torch.cat(catlayer3, 1)
h = self.domain_out[domain_id](h)
return h.view(*inputs.shape[:2], -1)
def set_req_grad(self, domain_id, req_grad):
for i in range(self.taskcla):
if i!= domain_id:
params = list(self.domain_out[i].parameters())
for ind in range(len(params)):
params[ind].requires_grad = req_grad
else:
params = list(self.domain_out[domain_id].parameters())
for ind in range(len(params)):
params[ind].requires_grad = True
return
class PrototypicalNetworkhead1(nn.Module):
def __init__(self, in_channels, out_channels, hidden_size=64, num_tasks = 0, num_block = 1):
super(PrototypicalNetworkhead1, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.hidden_size = hidden_size
self.taskcla = num_tasks
self.num_block = num_block
block_size = int(hidden_size/num_block)
self.conv1 = torch.nn.ModuleList()
for _ in range(self.num_block):
self.conv1.append(conv3x3nobatch(in_channels, block_size))
self.conv2 = torch.nn.ModuleList()
for _ in range(self.num_block):
self.conv2.append(conv3x3nobatch(hidden_size, block_size))
self.conv3 = torch.nn.ModuleList()
for _ in range(self.num_block):
self.conv3.append(conv3x3nobatch(hidden_size, block_size))
self.conv4 = torch.nn.ModuleList()
for _ in range(self.num_block):
self.conv4.append(conv3x3nobatch(hidden_size, block_size))
self.domain_out = torch.nn.ModuleList()
for _ in range(self.taskcla):
self.task = nn.Sequential(
conv3x3(hidden_size, out_channels)
)
self.domain_out.append(self.task)
def forward(self, inputs, domain_id, s=1):
catlayer1 = []
for ind in range(self.num_block):
catlayer1.append(self.conv1[ind](inputs.view(-1, *inputs.shape[2:])))
h = torch.cat(catlayer1, 1)
catlayer2 = []
for ind in range(self.num_block):
catlayer2.append(self.conv2[ind](h))
h = torch.cat(catlayer2, 1)
catlayer3 = []
for ind in range(self.num_block):
catlayer3.append(self.conv3[ind](h))
h = torch.cat(catlayer3, 1)
catlayer4 = []
for ind in range(self.num_block):
catlayer4.append(self.conv4[ind](h))
h = torch.cat(catlayer4, 1)
h = self.domain_out[domain_id](h)
return h.view(*inputs.shape[:2], -1)
def set_req_grad(self, domain_id, req_grad):
for i in range(self.taskcla):
if i!= domain_id:
params = list(self.domain_out[i].parameters())
for ind in range(len(params)):
params[ind].requires_grad = req_grad
else:
params = list(self.domain_out[domain_id].parameters())
for ind in range(len(params)):
params[ind].requires_grad = True
class PrototypicalNetworkinfer(nn.Module):
def __init__(self, in_channels, out_channels, hidden_size=64, num_tasks = 0, num_block = 1):
super(PrototypicalNetworkinfer, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.hidden_size = hidden_size
self.taskcla = num_tasks
self.num_block = num_block
block_size = int(hidden_size/num_block)
self.conv1 = torch.nn.ModuleList()
for _ in range(self.num_block):
self.conv1.append(conv3x3_2(in_channels, block_size))
self.conv2 = torch.nn.ModuleList()
for _ in range(self.num_block):
self.conv2.append(conv3x3_2(hidden_size, block_size))
self.conv3 = torch.nn.ModuleList()
for _ in range(self.num_block):
self.conv3.append(conv3x3_2(hidden_size, block_size))
self.conv4 = torch.nn.ModuleList()
for _ in range(self.num_block):
self.conv4.append(conv3x3_2(hidden_size, block_size))
self.domain_out = torch.nn.ModuleList()
for _ in range(self.taskcla):
self.task = nn.Sequential(
conv3x3(hidden_size, out_channels)
)
self.domain_out.append(self.task)
print('self.taskcla', self.taskcla)
def forward(self, inputs, domain_id, test= False):
catlayer1 = []
for ind in range(self.num_block):
catlayer1.append(self.conv1[ind](inputs.view(-1, *inputs.shape[2:])))
h = torch.cat(catlayer1, 1)
catlayer2 = []
for ind in range(self.num_block):
catlayer2.append(self.conv2[ind](h))
h = torch.cat(catlayer2, 1)
catlayer3 = []
for ind in range(self.num_block):
catlayer3.append(self.conv3[ind](h))
h = torch.cat(catlayer3, 1)
catlayer4 = []
for ind in range(self.num_block):
catlayer4.append(self.conv4[ind](h))
h = torch.cat(catlayer4, 1)
if test:
out_list = []
for id in range(domain_id+1):
htemp = self.domain_out[id](h)
out_list.append(htemp.view(*inputs.shape[:2], -1))
else:
h = self.domain_out[domain_id](h)
out_list = [h.view(*inputs.shape[:2], -1)]
return out_list
def set_req_grad(self, domain_id, req_grad):
for i in range(self.taskcla):
if i!= domain_id:
params = list(self.domain_out[i].parameters())
for ind in range(len(params)):
params[ind].requires_grad = req_grad
else:
params = list(self.domain_out[domain_id].parameters())
for ind in range(len(params)):
params[ind].requires_grad = True
```
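A small hedged smoke test of the multi-head network above, showing the tensor layout its forward pass expects (meta-batch of tasks first, then examples per task); the sizes are illustrative only and assume the classes above are importable:
```python
import torch

model = PrototypicalNetworkhead1(in_channels=3, out_channels=64,
                                 hidden_size=64, num_tasks=3, num_block=4)
inputs = torch.randn(2, 25, 3, 84, 84)   # 2 tasks, 25 images each, 3x84x84
embeddings = model(inputs, domain_id=0)
print(embeddings.shape)                  # torch.Size([2, 25, 256]) with these sizes
```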
#### File: srcbayes/networks/distributions.py
```python
import math
import torch
class VariationalPosterior(torch.nn.Module):
def __init__(self, mu, rho, device):
super(VariationalPosterior, self).__init__()
self.mu = mu.to(device)
self.rho = rho.to(device)
self.device = device
# gaussian distribution to sample epsilon from
self.normal = torch.distributions.Normal(0, 1)
self.sigma = torch.log1p(torch.exp(self.rho)).to(self.device)
def sample(self):
epsilon = self.normal.sample(self.rho.size()).to(self.device)
epsilon = 0.5*epsilon
# reparametrizarion trick for sampling from posterior
posterior_sample = (self.mu + self.sigma * epsilon).to(self.device)
return posterior_sample
def log_prob(self, input):
return (-math.log(math.sqrt(2 * math.pi))
- torch.log(self.sigma)
- ((input - self.mu) ** 2) / (2 * self.sigma ** 2)).sum()
class Prior(torch.nn.Module):
'''
Scaled Gaussian Mixtures for Priors
'''
def __init__(self, args):
super(Prior, self).__init__()
self.sig1 = args.sig1
self.sig2 = args.sig2
self.pi = args.pi
self.device = args.device
self.s1 = torch.tensor([math.exp(-1. * self.sig1)], dtype=torch.float32, device=self.device)
self.s2 = torch.tensor([math.exp(-1. * self.sig2)], dtype=torch.float32, device=self.device)
self.gaussian1 = torch.distributions.Normal(0,self.s1)
self.gaussian2 = torch.distributions.Normal(0,self.s2)
def log_prob(self, input):
input = input.to(self.device)
prob1 = torch.exp(self.gaussian1.log_prob(input))
prob2 = torch.exp(self.gaussian2.log_prob(input))
return (torch.log(self.pi * prob1 + (1.-self.pi) * prob2)).sum()
```
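The two classes above provide the standard Bayes-by-backprop ingredients: a reparameterised Gaussian posterior and a two-component scale-mixture prior. A hedged sketch of how one Monte Carlo sample of the complexity (KL-like) term would be formed from them, assuming the classes are importable; the `args` values are placeholders:
```python
import torch
from types import SimpleNamespace

args = SimpleNamespace(sig1=0.0, sig2=6.0, pi=0.25, device='cpu')

mu = torch.zeros(10, requires_grad=True)
rho = torch.full((10,), -3.0, requires_grad=True)

posterior = VariationalPosterior(mu, rho, args.device)
prior = Prior(args)

w = posterior.sample()                                   # reparameterised weight sample
complexity = posterior.log_prob(w) - prior.log_prob(w)   # one-sample KL estimate
complexity.backward()                                    # gradients reach mu and rho
```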
#### File: joey-wang123/SDML/train_domain_aware.py
```python
import os
import torch
from tqdm import tqdm
from model_filter import PrototypicalNetworkhead1
from torchmeta.utils.prototype import get_prototypes, prototypical_loss
from utils import *
import numpy as np
from datasets import *
import pickle
import random
from Meta_optimizer import *
import math
import time
import copy
import logging
from Welford import Welford
from copy import deepcopy
datanames = ['Quickdraw', 'Aircraft', 'CUB', 'MiniImagenet', 'Omniglot', 'Plantae', 'Electronic', 'CIFARFS', 'Fungi', 'Necessities']
class SequentialMeta(object):
def __init__(self,model, args=None):
self.args = args
self.model=model
self.init_lr=args.lr
self.hyper_lr = args.hyper_lr
self.run_stat = Welford()
self.patience = 5
self.delta = 0.2
self.freeze = False
self.data_counter = {}
self.best_score = {}
self.data_stepdict = {}
self.memory_rep = []
self.patientstep = 100
for name in datanames:
self.data_stepdict[name] = 0
for name in datanames:
self.data_counter[name] = 0
for name in datanames:
self.best_score[name] = None
self.update_lr(domain_id=0, lr=1e-3)
self.meta_optim = Meta_Optimizer(self.optimizer, self.args.hyper_lr, self.args.device, self.args.clip_hyper, self.args.layer_filters)
str_save = '_'.join(datanames)
self.step = 0
self.ELBO = 0.0
self.filepath = os.path.join(self.args.output_folder, 'protonet_Meta_Optimizer{}'.format(str_save), 'Block{}'.format(self.args.num_block), 'shot{}'.format(self.args.num_shot), 'way{}'.format(self.args.num_way))
if not os.path.exists(self.filepath):
os.makedirs(self.filepath)
def train(self, Interval, dataloader_dict, domain_id = None):
self.model.train()
for dataname, dataloader in dataloader_dict.items():
with tqdm(dataloader, total=self.args.num_batches) as pbar:
for batch_idx, batch in enumerate(pbar):
self.model.zero_grad()
train_inputs, train_targets = batch['train']
train_inputs = train_inputs.to(device=self.args.device)
train_targets = train_targets.to(device=self.args.device)
if train_inputs.size(2) == 1:
train_inputs = train_inputs.repeat(1, 1, 3, 1, 1)
train_embeddings = self.model(train_inputs, domain_id)
test_inputs, test_targets = batch['test']
test_inputs = test_inputs.to(device=self.args.device)
test_targets = test_targets.to(device=self.args.device)
if test_inputs.size(2) == 1:
test_inputs = test_inputs.repeat(1, 1, 3, 1, 1)
test_embeddings = self.model(test_inputs, domain_id)
prototypes = get_prototypes(train_embeddings, train_targets, args.num_way)
loss = prototypical_loss(prototypes, test_embeddings, test_targets)
loss.backward(retain_graph=True)
#Reservoir sampling
if self.step < self.args.memory_limit:
savedict = batch
self.memory_rep.append(savedict)
else:
randind = random.randint(0, self.step)
if randind < self.args.memory_limit:
savedict = batch
self.memory_rep[randind] = savedict
self.step = self.step+1
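# Added note (not from the original loop): the update above is standard
# reservoir sampling. The k-th batch seen is stored with probability
# memory_limit / k and, when stored, evicts a uniformly chosen slot, so every
# batch seen so far remains in memory with equal probability memory_limit / k.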
grad_list = []
param_names = []
for name, v in self.model.named_parameters():
if 'domain_out' not in name:
if v.requires_grad:
grad_list.append(v.grad)
param_names.append(name)
first_grad = grad_list
count = self.args.sample
if self.memory_rep:
num_memory = len(self.memory_rep)
if num_memory<count:
selectmemory = self.memory_rep
else:
samplelist = random.sample(range(num_memory), count)
selectmemory = []
for ind in samplelist:
selectmemory.append(self.memory_rep[ind])
# Dynamical freeze mechanism
if self.memory_rep:
memory_dict, summemory_loss = rep_memory_dict(self.args, self.model, selectmemory)
loss += summemory_loss
memory_loss = 0.0
for key in memory_dict:
memory_loss += memory_dict[key]
flat = []
for name, param in self.model.named_parameters():
flat.append(param.view(-1))
flat = torch.cat(flat)
flat_np = flat.cpu().data.numpy()
self.run_stat(flat_np)
if self.data_stepdict[dataname] > 0:
logprob = loss.item()
memory_loss /= len(memory_dict)
logprob += memory_loss
count = self.data_stepdict[dataname]%30
self.ELBO = self.ELBO +(logprob-self.ELBO)/count
if self.data_stepdict[dataname] > 0 and self.data_stepdict[dataname]%30 ==0:
self.ELBO -= math.log2(np.sum(self.run_stat.std))
self.run_stat = Welford()
self.ELBO = 0.0
if self.data_stepdict[dataname] > self.patientstep:
if self.freeze == False:
if self.best_score[dataname] is None:
self.best_score[dataname] = self.ELBO
elif self.ELBO > self.best_score[dataname] + self.delta:
self.data_counter[dataname] = self.data_counter[dataname] + 1
if self.data_counter[dataname] >= self.patience:
self.freeze = True
description = 'Interval_{}_EarlyStopping counter dataname {}: {} out of {}'.format(Interval, dataname, self.data_counter[dataname], self.patience)
print('description', description)
self.update_lr(domain_id, lr=0.0)
else:
self.best_score[dataname] = self.ELBO
else:
self.freeze = False
val_graddict = {}
layer_name = []
for gradient, name in zip(first_grad, param_names):
split_name = name.split('.')
layer = split_name[0]
if layer not in self.args.layer_filters:
if layer not in layer_name:
layer_name.append(layer)
val_graddict[layer] = []
val_graddict[layer].append(gradient.clone().view(-1))
else:
val_graddict[layer].append(gradient.clone().view(-1))
else:
layer_sub = layer+'.'+split_name[1]+'.'+split_name[2]
if layer_sub not in layer_name:
layer_name.append(layer_sub)
val_graddict[layer_sub] = []
val_graddict[layer_sub].append(gradient.clone().view(-1))
else:
val_graddict[layer_sub].append(gradient.clone().view(-1))
for key in val_graddict:
val_graddict[key] = torch.cat(val_graddict[key])
self.optimizer.step()
if self.memory_rep:
self.meta_optim.optimizer = self.optimizer
self.meta_optim.meta_gradient(self.model, val_graddict)
count = self.args.sample
num_memory = len(self.memory_rep)
if num_memory<count:
selectmemory = self.memory_rep
else:
samplelist = random.sample(range(num_memory), count)
selectmemory = []
for ind in samplelist:
selectmemory.append(self.memory_rep[ind])
val_grad = self.rep_grad_new(self.args, selectmemory)
self.meta_optim.meta_step(val_grad)
self.model.zero_grad()
if batch_idx >= args.num_batches:
break
def rep_grad_new(self, args, selectmemory):
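# Recomputes the prototypical loss on the sampled memory tasks and returns its
# gradients w.r.t. the shared (non-domain-head) parameters, grouped per layer in
# the same layout as above, for use in meta_optim.meta_step.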
memory_loss =0
for dataidx, select in enumerate(selectmemory):
memory_train_inputs, memory_train_targets = select['train']
memory_train_inputs = memory_train_inputs.to(device=args.device)
memory_train_targets = memory_train_targets.to(device=args.device)
if memory_train_inputs.size(2) == 1:
memory_train_inputs = memory_train_inputs.repeat(1, 1, 3, 1, 1)
memory_train_embeddings = self.model(memory_train_inputs, dataidx)
memory_test_inputs, memory_test_targets = select['test']
memory_test_inputs = memory_test_inputs.to(device=args.device)
memory_test_targets = memory_test_targets.to(device=args.device)
if memory_test_inputs.size(2) == 1:
memory_test_inputs = memory_test_inputs.repeat(1, 1, 3, 1, 1)
memory_test_embeddings = self.model(memory_test_inputs, dataidx)
memory_prototypes = get_prototypes(memory_train_embeddings, memory_train_targets, args.num_way)
memory_loss += prototypical_loss(memory_prototypes, memory_test_embeddings, memory_test_targets)
param_list = []
param_names = []
for name, v in self.model.named_parameters():
if 'domain_out' not in name:
if v.requires_grad:
param_list.append(v)
param_names.append(name)
val_grad = torch.autograd.grad(memory_loss, param_list)
val_graddict = {}
layer_name = []
for gradient, name in zip(val_grad, param_names):
split_name = name.split('.')
layer = split_name[0]
if layer not in self.args.layer_filters:
if layer not in layer_name:
layer_name.append(layer)
val_graddict[layer] = []
val_graddict[layer].append(gradient.view(-1))
else:
val_graddict[layer].append(gradient.view(-1))
else:
layer_sub = layer+'.'+split_name[1]+'.'+split_name[2]
if layer_sub not in layer_name:
layer_name.append(layer_sub)
val_graddict[layer_sub] = []
val_graddict[layer_sub].append(gradient.view(-1))
else:
val_graddict[layer_sub].append(gradient.view(-1))
for key in val_graddict:
val_graddict[key] = torch.cat(val_graddict[key])
self.model.zero_grad()
memory_loss.detach_()
return val_graddict
def save(self, Interval):
if self.args.output_folder is not None:
filename = os.path.join(self.filepath, 'Interval{0}.pt'.format(Interval))
with open(filename, 'wb') as f:
state_dict = self.model.state_dict()
torch.save(state_dict, f)
def load(self, Interval):
filename = os.path.join(self.filepath, 'Interval{0}.pt'.format(Interval))
print('loading model filename', filename)
self.model.load_state_dict(torch.load(filename))
def valid(self, dataloader_dict, domain_id, Interval):
self.model.eval()
acc_dict = {}
acc_list = []
for dataname, dataloader in dataloader_dict.items():
with torch.no_grad():
with tqdm(dataloader, total=self.args.num_valid_batches) as pbar:
for batch_idx, batch in enumerate(pbar):
self.model.zero_grad()
train_inputs, train_targets = batch['train']
train_inputs = train_inputs.to(device=self.args.device)
train_targets = train_targets.to(device=self.args.device)
if train_inputs.size(2) == 1:
train_inputs = train_inputs.repeat(1, 1, 3, 1, 1)
train_embeddings = self.model(train_inputs, domain_id)
test_inputs, test_targets = batch['test']
test_inputs = test_inputs.to(device=self.args.device)
test_targets = test_targets.to(device=self.args.device)
if test_inputs.size(2) == 1:
test_inputs = test_inputs.repeat(1, 1, 3, 1, 1)
test_embeddings = self.model(test_inputs, domain_id)
prototypes = get_prototypes(train_embeddings, train_targets, self.args.num_way)
accuracy = get_accuracy(prototypes, test_embeddings, test_targets)
acc_list.append(accuracy.cpu().data.numpy())
pbar.set_description('dataname {} accuracy ={:.4f}'.format(dataname, np.mean(acc_list)))
if batch_idx >= self.args.num_valid_batches:
break
avg_accuracy = np.round(np.mean(acc_list), 4)
acc_dict = {dataname:avg_accuracy}
logging.debug('Interval_{}_{}_accuracy_{}'.format(Interval, dataname, avg_accuracy))
return acc_dict
def update_lr(self, domain_id, lr=None):
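# Rebuilds the Adam optimizer with one parameter group per conv block plus a group
# for the remaining "fast" parameters. For the first domain every group uses the
# initial learning rate; for later domains the conv groups use the supplied `lr`,
# so passing lr=0.0 freezes those blocks while the rest keeps training.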
params_dict = []
if domain_id==0:
layer_params = {}
layer_name = []
fast_parameters = []
for name, p in self.model.named_parameters():
if p.requires_grad:
if 'conv' in name:
split_name = name.split('.')
layer = split_name[0]
if layer not in self.args.layer_filters:
if layer not in layer_name:
layer_name.append(layer)
layer_params[layer] = []
layer_params[layer].append(p)
else:
layer_params[layer].append(p)
else:
layer_sub = layer+'.'+split_name[1]+'.'+split_name[2]
if layer_sub not in layer_name:
layer_name.append(layer_sub)
layer_params[layer_sub] = []
layer_params[layer_sub].append(p)
else:
layer_params[layer_sub].append(p)
else:
fast_parameters.append(p)
params_list = []
for key in layer_params:
params_list.append({'params':layer_params[key], 'lr':self.init_lr})
params_list.append({'params':fast_parameters, 'lr':self.init_lr})
self.optimizer = torch.optim.Adam(params_list, lr=self.init_lr)
else:
layer_params = {}
layer_name = []
fast_parameters = []
for name, p in self.model.named_parameters():
if p.requires_grad:
if 'conv' in name:
split_name = name.split('.')
layer = split_name[0]
if layer not in self.args.layer_filters:
if layer not in layer_name:
layer_name.append(layer)
layer_params[layer] = []
layer_params[layer].append(p)
else:
layer_params[layer].append(p)
else:
layer_sub = layer+'.'+split_name[1]+'.'+split_name[2]
if layer_sub not in layer_name:
layer_name.append(layer_sub)
layer_params[layer_sub] = []
layer_params[layer_sub].append(p)
else:
layer_params[layer_sub].append(p)
else:
fast_parameters.append(p)
params_list = []
for key in layer_params:
params_list.append({'params':layer_params[key], 'lr':lr})
params_list.append({'params':fast_parameters, 'lr':self.init_lr})
self.optimizer = torch.optim.Adam(params_list, lr=self.init_lr)
def main(args):
all_accdict = {}
train_loader_list, valid_loader_list, test_loader_list = dataset(args, datanames)
model = PrototypicalNetworkhead1(3,
args.embedding_size,
hidden_size=args.hidden_size, num_tasks=len(datanames), num_block = args.num_block)
model.to(device=args.device)
num_data = len(train_loader_list)
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=1e-3)
each_Interval = args.num_Interval
seqmeta = SequentialMeta(model, args=args)
domain_acc = []
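# Sequential-domain loop: domains are visited one at a time, each trained for
# `num_Interval` intervals, and after every interval the model is evaluated on all
# domains seen so far (test_loader_list[:loaderindex+1]).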
for loaderindex, train_loader in enumerate(train_loader_list):
model.set_req_grad(loaderindex, False)
seqmeta.update_lr(loaderindex, lr = args.lr)
for Interval in range(each_Interval*loaderindex, each_Interval*(loaderindex+1)):
print('Interval {}'.format(Interval))
seqmeta.train(Interval, train_loader, domain_id = loaderindex)
total_acc = 0.0
Interval_acc = []
for index, test_loader in enumerate(test_loader_list[:loaderindex+1]):
test_accuracy_dict = seqmeta.valid(test_loader, domain_id = index, Interval = Interval)
Interval_acc.append(test_accuracy_dict)
acc = list(test_accuracy_dict.values())[0]
total_acc += acc
if Interval == (each_Interval*(loaderindex+1)-1) and index == loaderindex:
domain_acc.append(test_accuracy_dict)
avg_acc = total_acc/(loaderindex+1)
print('average testing accuracy', avg_acc)
all_accdict[str(Interval)] = Interval_acc
with open(seqmeta.filepath + '/stats_acc.pickle', 'wb') as handle:
pickle.dump(all_accdict, handle, protocol=pickle.HIGHEST_PROTOCOL)
if loaderindex>0:
BWT = 0.0
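# Backward transfer: for every previously learned domain, compare its current
# accuracy with the accuracy recorded right after that domain was learned;
# negative values indicate forgetting.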
for index, (best_domain, Interval_domain) in enumerate(zip(domain_acc, Interval_acc)):
best_acc = list(best_domain.values())[0]
each_acc = list(Interval_domain.values())[0]
BWT += each_acc - best_acc
avg_BWT = BWT/index
print('avg_BWT', avg_BWT)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser('Sequential domain meta learning')
parser.add_argument('--data_path', type=str, default='/data/',
help='Path to the folder the data is downloaded to.')
parser.add_argument('--output_folder', type=str, default='output/CVPR/',
help='Path to the output folder for saving the model (optional).')
parser.add_argument('--num-shot', type=int, default=5,
help='Number of examples per class (k in "k-shot", default: 5).')
parser.add_argument('--num-way', type=int, default=5,
help='Number of classes per task (N in "N-way", default: 5).')
parser.add_argument('--embedding-size', type=int, default=64,
help='Dimension of the embedding/latent space (default: 64).')
parser.add_argument('--hidden-size', type=int, default=64,
help='Number of channels for each convolutional layer (default: 64).')
parser.add_argument('--batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for each domain (default: 2).')
parser.add_argument('--MiniImagenet_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for MiniImagenet (default: 2).')
parser.add_argument('--CIFARFS_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for CIFARFS (default: 2).')
parser.add_argument('--CUB_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for CUB (default: 2).')
parser.add_argument('--Aircraft_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for Aircraft (default: 2).')
parser.add_argument('--Omniglot_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for Omniglot (default: 2).')
parser.add_argument('--Plantae_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for Plantae (default: 2).')
parser.add_argument('--Quickdraw_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for Quickdraw (default: 2).')
parser.add_argument('--VGGflower_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for VGGflower (default: 2).')
parser.add_argument('--Fungi_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for Fungi (default: 2).')
parser.add_argument('--Logo_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for Logo (default: 2).')
parser.add_argument('--num_block', type=int, default=4,
help='Number of convolution block.')
parser.add_argument('--num-batches', type=int, default=200,
help='Number of batches the prototypical network is trained over (default: 200).')
parser.add_argument('--num_valid_batches', type=int, default=150,
help='Number of batches the model is tested over (default: 150).')
parser.add_argument('--num-workers', type=int, default=1,
help='Number of workers for data loading (default: 1).')
parser.add_argument('--num_query', type=int, default=10,
help='Number of query examples per class (k in "k-query", default: 10).')
parser.add_argument('--sample', type=int, default=1,
help='Number of memory tasks per iteration.')
parser.add_argument('--memory_limit', type=int, default=10,
help='Number of batches in the memory buffer.')
parser.add_argument('--num_Interval', type=int, default=25,
help='Number of Intervals for meta train.')
parser.add_argument('--valid_batch_size', type=int, default=3,
help='Number of tasks in a mini-batch of tasks for testing (default: 3).')
parser.add_argument('--lr', type=float, default=1e-3,
help='learning rate.')
parser.add_argument('--clip_hyper', type=float, default=10.0)
parser.add_argument('--hyper-lr', type=float, default=1e-4)
parser.add_argument('--layer_filters', type=str, nargs='+', default=['conv1', 'conv2', 'conv3', 'conv4'], help='Names of the convolutional blocks whose gradients are grouped per sub-module.')
args = parser.parse_args()
args.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
main(args)
```
#### File: joey-wang123/SDML/train_MER.py
```python
import os
import torch
from tqdm import tqdm
import logging
from torchmeta.utils.prototype import get_prototypes, prototypical_loss
from torchvision.transforms import ToTensor, Resize, Compose
from model import PrototypicalNetworkJoint
from utils import get_accuracy
import numpy as np
from datasets import *
import pickle
import random
import time
from copy import deepcopy
datanames = ['Quickdraw', 'Aircraft', 'CUB', 'MiniImagenet', 'Omniglot', 'Plantae', 'Electronic', 'CIFARFS', 'Fungi', 'Necessities']
class PNetMER(object):
def __init__(self,model, args=None):
self.args = args
self.model=model
self.memory_rep = []
self.step = 0
self.str_save = '_'.join(datanames)
self.filepath = os.path.join(self.args.output_folder, 'protonet_MER{}'.format(self.str_save), 'shot{}'.format(self.args.num_shot), 'way{}'.format(self.args.num_way))
if not os.path.exists(self.filepath):
os.makedirs(self.filepath)
def train(self,optimizer, dataloader_dict, domain_id = None):
gamma = 1.0
num_steps = 5
beta = 0.03
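# Meta-Experience Replay (MER, Riemer et al. 2019), Reptile-style variant:
# `num_steps` inner passes are made over the current batch (plus one replayed
# memory task when the buffer is non-empty), `beta` is the within-batch Reptile
# interpolation rate and `gamma` the across-batch rate applied at the end of the
# outer step. These values are hard-coded here rather than exposed as CLI flags.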
for dataname, dataloader in dataloader_dict.items():
with tqdm(dataloader, total=args.num_batches) as pbar:
for batch_idx, batch in enumerate(pbar):
self.model.zero_grad()
before = deepcopy(self.model.state_dict())
#Reservoir sampling
if self.step < self.args.memory_limit:
savedict = batch
self.memory_rep.append(savedict)
else:
randind = random.randint(0, self.step)
if randind < self.args.memory_limit:
savedict = batch
self.memory_rep[randind] = savedict
self.step += 1
for step in range(0, num_steps):
weights_before = deepcopy(self.model.state_dict())
train_inputs, train_targets = batch['train']
train_inputs = train_inputs.to(device=self.args.device)
train_targets = train_targets.to(device=self.args.device)
if train_inputs.size(2) == 1:
train_inputs = train_inputs.repeat(1, 1, 3, 1, 1)
test_inputs, test_targets = batch['test']
test_inputs = test_inputs.to(device=self.args.device)
test_targets = test_targets.to(device=self.args.device)
if test_inputs.size(2) == 1:
test_inputs = test_inputs.repeat(1, 1, 3, 1, 1)
for (train_input, train_target, test_input, test_target) in zip(train_inputs, train_targets, test_inputs, test_targets):
self.model.zero_grad()
train_embedding = self.model(train_input.unsqueeze(0), domain_id)
test_embedding = self.model(test_input.unsqueeze(0), domain_id)
prototypes = get_prototypes(train_embedding, train_target.unsqueeze(0), args.num_way)
loss = prototypical_loss(prototypes, test_embedding, test_target.unsqueeze(0))
loss.backward()
optimizer.step()
if self.memory_rep:
select = random.choice(self.memory_rep)
memory_train_inputs, memory_train_targets = select['train']
memory_train_inputs = memory_train_inputs.to(device=self.args.device)
memory_train_targets = memory_train_targets.to(device=self.args.device)
if memory_train_inputs.size(2) == 1:
memory_train_inputs = memory_train_inputs.repeat(1, 1, 3, 1, 1)
memory_test_inputs, memory_test_targets = select['test']
memory_test_inputs = memory_test_inputs.to(device=self.args.device)
memory_test_targets = memory_test_targets.to(device=self.args.device)
if memory_test_inputs.size(2) == 1:
memory_test_inputs = memory_test_inputs.repeat(1, 1, 3, 1, 1)
index = -1
for (memory_train_input, memory_train_target, memory_test_input, memory_test_target) in zip(memory_train_inputs, memory_train_targets, memory_test_inputs, memory_test_targets):
index += 1
if index ==1:
break
self.model.zero_grad()
memory_train_embedding = self.model(memory_train_input.unsqueeze(0))
memory_test_embedding = self.model(memory_test_input.unsqueeze(0))
memory_prototypes = get_prototypes(memory_train_embedding, memory_train_target.unsqueeze(0), args.num_way)
memory_loss = prototypical_loss(memory_prototypes, memory_test_embedding, memory_test_target.unsqueeze(0))
memory_loss.backward()
optimizer.step()
weights_after = self.model.state_dict()
self.model.load_state_dict({name : weights_before[name] + ((weights_after[name] - weights_before[name]) * beta) for name in weights_before})
after = self.model.state_dict()
self.model.load_state_dict({name : before[name] + ((after[name] - before[name]) * gamma) for name in before})
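# The two interpolations above are the Reptile updates: within each inner step the
# weights move a fraction `beta` toward the post-SGD weights, and across the whole
# batch a fraction `gamma` toward the final weights (gamma=1.0 keeps the full
# update).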
if batch_idx >= args.num_batches:
break
def save(self, Interval):
# Save model
if self.args.output_folder is not None:
filename = os.path.join(self.filepath, 'Interval{0}.pt'.format(Interval))
with open(filename, 'wb') as f:
state_dict = self.model.state_dict()
torch.save(state_dict, f)
def load(self, Interval, model):
self.args.output_folder = 'output/datasset/'
str_save = '_'.join(datanames)
filepath = os.path.join(self.args.output_folder, 'protonet_{}'.format(str_save), 'shot{}'.format(args.num_shot), 'way{}'.format(args.num_way))
filename = os.path.join(filepath, 'Interval{0}.pt'.format(Interval))
self.model.load_state_dict(torch.load(filename))
def valid(self, Interval, dataloader_dict, domain_id):
acc_list = []
acc_dict = {}
for dataname, dataloader in dataloader_dict.items():
with torch.no_grad():
with tqdm(dataloader, total=self.args.num_valid_batches) as pbar:
for batch_idx, batch in enumerate(pbar):
self.model.zero_grad()
train_inputs, train_targets = batch['train']
train_inputs = train_inputs.to(device=args.device)
train_targets = train_targets.to(device=args.device)
if train_inputs.size(2) == 1:
train_inputs = train_inputs.repeat(1, 1, 3, 1, 1)
train_embeddings = self.model(train_inputs, domain_id)
test_inputs, test_targets = batch['test']
test_inputs = test_inputs.to(device=args.device)
test_targets = test_targets.to(device=args.device)
if test_inputs.size(2) == 1:
test_inputs = test_inputs.repeat(1, 1, 3, 1, 1)
test_embeddings = self.model(test_inputs, domain_id)
prototypes = get_prototypes(train_embeddings, train_targets,
self.args.num_way)
accuracy = get_accuracy(prototypes, test_embeddings, test_targets)
acc_list.append(accuracy.cpu().data.numpy())
pbar.set_description('dataname {} accuracy ={:.4f}'.format(dataname, np.mean(acc_list)))
if batch_idx >= self.args.num_valid_batches:
break
avg_accuracy = np.round(np.mean(acc_list), 4)
acc_dict = {dataname:avg_accuracy}
logging.debug('Interval_{}_{}_accuracy_{}'.format(Interval, dataname, avg_accuracy))
return acc_dict
def main(args):
all_accdict = {}
train_loader_list, valid_loader_list, test_loader_list = dataset(args, datanames)
model = PrototypicalNetworkJoint(3,
args.embedding_size,
hidden_size=args.hidden_size)
model.to(device=args.device)
model.train()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
each_Interval = args.num_Interval
savemode = 'PNet-MER'
str_save = '_'.join(datanames)
seqmeta = PNetMER(model, args=args)
dataname = []
domain_acc = []
for loaderindex, train_loader in enumerate(train_loader_list):
for Interval in range(each_Interval*loaderindex, each_Interval*(loaderindex+1)):
print('Interval {}'.format(Interval))
dataname.append(list(train_loader.keys())[0])
seqmeta.train(optimizer, train_loader, domain_id = loaderindex)
total_acc = 0.0
Interval_acc = []
for index, test_loader in enumerate(test_loader_list[:loaderindex+1]):
test_accuracy_dict = seqmeta.valid(Interval, test_loader, domain_id = index)
Interval_acc.append(test_accuracy_dict)
acc = list(test_accuracy_dict.values())[0]
total_acc += acc
if Interval == (each_Interval*(loaderindex+1)-1) and index == loaderindex:
domain_acc.append(test_accuracy_dict)
avg_acc = total_acc/(loaderindex+1)
print('average testing accuracy', avg_acc)
seqmeta.save(Interval)
all_accdict[str(Interval)] = Interval_acc
with open(seqmeta.filepath + '/stats_acc.pickle', 'wb') as handle:
pickle.dump(all_accdict, handle, protocol=pickle.HIGHEST_PROTOCOL)
if loaderindex>0:
BWT = 0.0
for index, (best_domain, Interval_domain) in enumerate(zip(domain_acc, Interval_acc)):
best_acc = list(best_domain.values())[0]
each_acc = list(Interval_domain.values())[0]
BWT += each_acc - best_acc
avg_BWT = BWT/index
print('avg_BWT', avg_BWT)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser('Prototypical Networks')
parser.add_argument('--data_path', type=str, default='/data/',
help='Path to the folder the data is downloaded to.')
parser.add_argument('--num-shot', type=int, default=5,
help='Number of examples per class (k in "k-shot", default: 5).')
parser.add_argument('--num-way', type=int, default=5,
help='Number of classes per task (N in "N-way", default: 5).')
parser.add_argument('--embedding-size', type=int, default=64,
help='Dimension of the embedding/latent space (default: 64).')
parser.add_argument('--hidden-size', type=int, default=64,
help='Number of channels for each convolutional layer (default: 64).')
parser.add_argument('--output_folder', type=str, default='output/newsavedir/',
help='Path to the output folder for saving the model (optional).')
parser.add_argument('--MiniImagenet_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for MiniImagenet (default: 2).')
parser.add_argument('--CIFARFS_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for CIFARFS (default: 2).')
parser.add_argument('--CUB_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for CUB (default: 2).')
parser.add_argument('--Aircraft_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for Aircraft (default: 2).')
parser.add_argument('--Omniglot_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for Omniglot (default: 2).')
parser.add_argument('--Plantae_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for Plantae (default: 2).')
parser.add_argument('--VGGflower_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for VGGflower (default: 2).')
parser.add_argument('--Fungi_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for Fungi (default: 2).')
parser.add_argument('--Quickdraw_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for Quickdraw (default: 2).')
parser.add_argument('--Logo_batch_size', type=int, default=2,
help='Number of tasks in a mini-batch of tasks for Logo (default: 2).')
parser.add_argument('--num-batches', type=int, default=200,
help='Number of batches the prototypical network is trained over (default: 200).')
parser.add_argument('--num_valid_batches', type=int, default=150,
help='Number of batches the model is tested over (default: 150).')
parser.add_argument('--num_memory_batches', type=int, default=1,
help='Number of memory batches replayed per step (default: 1).')
parser.add_argument('--num-workers', type=int, default=1,
help='Number of workers for data loading (default: 1).')
parser.add_argument('--num_query', type=int, default=10,
help='Number of query examples per class (k in "k-query", default: 10).')
parser.add_argument('--num_Interval', type=int, default=25,
help='Number of Intervals for meta train.')
parser.add_argument('--valid_batch_size', type=int, default=3,
help='Number of tasks in a mini-batch of tasks for validation (default: 3).')
parser.add_argument('--memory_limit', type=int, default=10,
help='Number of memory tasks.')
parser.add_argument('--gpu', type=int, default=0, help='GPU device')
args = parser.parse_args()
device = 'cuda:{}'.format(args.gpu) if torch.cuda.is_available() else 'cpu'
args.device = torch.device(device)
print('args.device', args.device)
main(args)
``` |
{
"source": "JoeyWangTW/gix-mkrfridays-iot",
"score": 3
} |
#### File: c2d_function/trigger_servo/__init__.py
```python
import logging
import azure.functions as func
from azure.iot.hub import IoTHubRegistryManager
# Note that Azure Key Vault doesn't support underscores and some other special chars;
# we substitute a hyphen for each underscore
CONNECTION_STRING = "{IoTHubConnectionString}"
DEVICE_ID = "{deviceId}"
DEVICE_MESSAGE = "servo"
MESSAGE_COUNT = 1
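# Cloud-to-device (C2D) flow: when this HTTP-triggered function runs, the
# IoTHubRegistryManager pushes the plain-text payload "servo" to the device
# identified by DEVICE_ID; the device-side client is expected to receive the
# message and actuate the servo. The placeholders above are filled in at
# deployment time.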
def iothub_messaging_sample_run():
try:
# Create IoTHubRegistryManager
registry_manager = IoTHubRegistryManager(CONNECTION_STRING)
for i in range(0, MESSAGE_COUNT):
logging.info ( 'Sending message: {0}'.format(i) )
data = DEVICE_MESSAGE
props={}
# # optional: assign system properties
# props.update(messageId = "message_%d" % i)
# props.update(correlationId = "correlation_%d" % i)
# props.update(contentType = "application/json")
# optional: assign application properties
# prop_text = "PropMsg_%d" % i
# props.update(testProperty = prop_text)
registry_manager.send_c2d_message(DEVICE_ID, data, properties=props)
# try:
# # Try Python 2.xx first
# raw_input("Press Enter to continue...\n")
# except:
# pass
# # Use Python 3.xx in the case of exception
# input("Press Enter to continue...\n")
except Exception as ex:
logging.info ( "Unexpected error {0}" % ex )
return
except KeyboardInterrupt:
logging.info ( "IoT Hub C2D Messaging service sample stopped" )
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
logging.info('***NOW EXECUTING C2D***')
iothub_messaging_sample_run()
name = req.params.get('name')
if not name:
try:
req_body = req.get_json()
except ValueError:
pass
else:
name = req_body.get('name')
if name:
return func.HttpResponse(f"Hello, {name}. This HTTP triggered function executed successfully.")
else:
return func.HttpResponse(
"This HTTP triggered function executed successfully. Pass a name in the query string or in the request body for a personalized response.",
status_code=200
)
``` |
{
"source": "joeywhelan/montecarlo",
"score": 4
} |
#### File: joeywhelan/montecarlo/picalc.py
```python
import random
import pandas as pd
import matplotlib.pyplot as plt
import math
def simulation(numPoints):
"""Calculates an approximation to pi via the Monte Carlo method
Args:
numPoints: Number of points to sample
Returns:
Approximation to pi
Raises:
None
"""
in_circle = 0
total = 0
for _ in range(numPoints):
x = random.uniform(0,2)
y = random.uniform(0,2)
d = (x-1)**2 + (y-1)**2
if d <= 1.0:
in_circle = in_circle + 1
total = total + 1
ans = 4 * in_circle/total
return ans
if __name__ == '__main__':
random.seed()
results = {}
for numPoints in [10,100,1000,10000,100000,1000000]:
ans = simulation(numPoints)
results[numPoints] = ans
frame = pd.DataFrame(data=list(results.items()), columns=['NumPoints', 'Result'])
frame['PctError'] = ((frame['Result'] - math.pi) / math.pi).abs() * 100
del frame['Result']
frame.sort_values(by='NumPoints', inplace=True)
frame.reset_index(inplace=True, drop=True)
print(frame)
frame.plot(x='NumPoints', y='PctError', kind='bar', title='Monte Carlo Pi Calculation', color=['b'])
plt.show()
``` |
{
"source": "joeywhelan/Secure-DNS-Converter",
"score": 3
} |
#### File: joeywhelan/Secure-DNS-Converter/sdns.py
```python
import configparser
import socketserver
import requests
import logging.config
from apscheduler.schedulers.background import BackgroundScheduler
from stem import Signal
from stem.control import Controller
GOOGLE_DNS = 'https://dns.google.com/resolve'
PROXIES = {'http': 'socks5://127.0.0.1:9050',
'https': 'socks5://127.0.0.1:9050'}
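# Overall design: a local UDP DNS server accepts standard A queries, resolves them
# through Google's DNS-over-HTTPS endpoint, and routes that HTTPS request through
# the local Tor SOCKS5 proxy; a background job periodically asks the Tor
# controller for a fresh circuit.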
class DNSHandler(socketserver.BaseRequestHandler):
def handle(self):
"""Main function.
Args:
self: Instance reference
Returns:
None
Raises:
None
"""
logging.debug('Entering handle()')
data = self.request[0]
socket = self.request[1]
response = self.__createResponse(data)
socket.sendto(response, self.client_address)
logging.debug('Exiting handle()')
def __processQuestion(self, quesData):
"""Parses the question portion of a DNS request into objects
The domain name is decoded from its length-prefixed label encoding.
Args:
self: Instance reference
quesData: Question portion of the DNS request, in a byte array
Returns:
name: the domain name (byte array)
queryType: DNS query type (string)
question: entire question portion of DNS request (byte array)
Raises:
None
"""
logging.debug('Entering __processQuestion()')
i = 0
name = ''
while True:
count = int.from_bytes(quesData[i:i+1], byteorder='big')
i = i+1
if count == 0:
break
else:
name = name + str(quesData[i:i+count],'utf-8') + '.'
i = i + count
name = name[:-1]
queryType = str(int.from_bytes(quesData[i:i+2], byteorder='big'))
question = quesData[0:i+4]
logging.debug('name: ' + name + ' queryType: ' + queryType)
logging.debug('Exiting __processQuestion()')
return name, queryType, question
def __getFlags(self, data):
"""Parses out the flag bits of the DNS request and creates a flag field for the response
Args:
self: Instance reference
data: parsed JSON response from the Google DNS-over-HTTPS API (dict)
Returns:
flags: new flags field for the DNS response (byte array)
Raises:
None
"""
logging.debug('Entering __getFlags()')
flags = 0b100000 #qr=1, opcode=0000, aa=0
flags = (flags << 1) | data['TC'] #set tc bit
flags = (flags << 1) | data['RD'] #set rd bit
flags = (flags << 1) | data['RA'] #set ra bit
flags = flags << 1 #One zero
flags = (flags << 1) | data['AD'] #set ad bit
flags = (flags << 1) | data['CD'] #set cd bit
flags = ((flags << 4) | data['Status']).to_bytes(2, byteorder='big')
logging.debug('flags: ' + str(flags))
logging.debug('Exiting __getFlags()')
return flags
def __getRecords(self, name):
"""Issues a DNS over HTTPS request to Google with the name from the original DNS request
Args:
self: Instance reference
name: domain name from the DNS request (string)
Returns:
flags: new flags field for the DNS response (byte array)
numbers: number of records field for the DNS response (byte array)
records: DNS response records (byte array)
Raises:
None
"""
logging.debug('Entering __getRecords()')
payload = {'name' : name, 'type' : '1'}
#data = requests.get(GOOGLE_DNS, params=payload).json()
data = requests.get(GOOGLE_DNS, params=payload, proxies=PROXIES).json()
logging.debug(data)
flags = self.__getFlags(data)
records = bytes(0)
count = 0
if 'Answer' in data:
for answer in data['Answer']:
if answer['type'] == 1:
count = count + 1
name = (0xc00c).to_bytes(2, byteorder='big') #RFC departure. Hard-coded offset to domain name in initial question.
rectype = (1).to_bytes(2, byteorder='big')
classtype = (1).to_bytes(2, byteorder='big')
ttl = answer['TTL'].to_bytes(4, byteorder='big')
length = (4).to_bytes(2, byteorder='big') #4 byte IP addresses only
quad = list(map(int, answer['data'].split('.')))
res = bytes(0)
for i in quad:
res = res + i.to_bytes(1, byteorder='big')
records = records + name + rectype + classtype + ttl + length + res
nques = (1).to_bytes(2, byteorder='big') #hard coded to 1
nans = (count).to_bytes(2, byteorder='big')
nath = (0).to_bytes(2, byteorder='big') #hard coded to 0
nadd = (0).to_bytes(2, byteorder='big') #hard coded to 0
numbers = nques + nans + nath + nadd
logging.debug('numbers: ' + str(numbers))
logging.debug('records: ' + str(records))
logging.debug('Exiting __getRecords()')
return flags, numbers, records
def __createResponse(self, data):
"""Verifies the request is a standard, A query and then creates a DNS response
Args:
self: Instance reference
data: byte array respresenting the DNS request
Returns:
response: byte array respresenting the DNS response
Raises:
None
"""
logging.debug('Entering __createResponse()')
tid = data[0:2] #transaction id
opcode = data[2] & 0b01111000 #data[2] is the flags field. bits 2-5 is the opcode
name, queryType, question = self.__processQuestion(data[12:])
if opcode == 0 and queryType == '1': #RFC departure. Only processing standard queries (0) and 'A' query types.
flags, numbers, records = self.__getRecords(name)
response = tid + flags + numbers + question + records
else:
#qr (response), recursion desired, recursion avail bits set. set the rcode to 'not implemented'
flags = ((0b100000011000 << 4) | 4).to_bytes(2, byteorder='big')
numbers = (0).to_bytes(8, byteorder='big')
response = tid + flags + numbers
logging.debug('response: ' + str(response))
logging.debug('Exiting __createResponse()')
return response
def renew():
"""Changes IP Address of Tor circuit
Sends a signal to the local Tor controller to force it to obtain a new IP address
Args:
None
Returns:
None
Raises:
None
"""
with Controller.from_port(port = 9051) as controller:
controller.authenticate(password="<PASSWORD>")
controller.signal(Signal.NEWNYM)
if __name__ == '__main__':
#Set up a background scheduler to periodically change the Tor IP address
scheduler = BackgroundScheduler()
scheduler.add_job(renew, 'interval', hours=1)
scheduler.start()
#Launch UDP server
cfgParser = configparser.ConfigParser()
cfgParser.optionxform = str
cfgParser.read('sdns.cfg')
host = cfgParser.get('ConfigData', 'host')
port = int(cfgParser.get('ConfigData', 'port'))
logging.config.fileConfig('./logging.conf')
logging.debug('Starting server on port {}'.format(port))
server = socketserver.UDPServer((host, port), DNSHandler)
server.serve_forever()
``` |
{
"source": "Joey-Wondersign/Staffjoy-suite-Joey",
"score": 2
} |
#### File: internal/caches/cache.py
```python
from flask import current_app, g, abort
from flask_restful import Resource
from app.apiv2.decorators import permission_sudo
from app.caches import ShiftsCache, SchedulesCache, Schedules2Cache, \
Shifts2Cache, IncidentCache, SessionCache
from app.constants import API_ENVELOPE
KEY_TO_CACHES = {
"shifts": [ShiftsCache, Shifts2Cache],
"schedules": [SchedulesCache, Schedules2Cache],
"incidents": [IncidentCache],
"sessions": [SessionCache]
}
class CacheApi(Resource):
method_decorators = [permission_sudo]
def get(self, cache_key):
"""View number of active keys in cache"""
if cache_key not in KEY_TO_CACHES.keys():
abort(404)
count = 0
for cache in KEY_TO_CACHES[cache_key]:
count += cache.get_key_count()
return {API_ENVELOPE: {"active_keys": count}}
def delete(self, cache_key):
"""Flush target cache"""
if cache_key not in KEY_TO_CACHES.keys():
abort(404)
for cache in KEY_TO_CACHES[cache_key]:
cache.delete_all()
current_app.logger.info("User %s flushed the %s cache through API" %
(g.current_user.id, cache_key))
return {}, 204
```
#### File: internal/tasking/mobius_task.py
```python
from flask_restful import marshal, abort, Resource
from app.models import Schedule2
from app.apiv2.decorators import permission_sudo
from app.apiv2.marshal import tasking_schedule_fields
class MobiusTaskApi(Resource):
method_decorators = [permission_sudo]
def get(self, schedule_id):
""" Peek at a schedule """
s = Schedule2.query.get_or_404(schedule_id)
return marshal(s, tasking_schedule_fields)
def delete(self, schedule_id):
""" Mark a task as done """
s = Schedule2.query.get_or_404(schedule_id)
if s.state != "mobius-processing":
abort(400)
s.transition_to_published()
return "{}", 204
```
#### File: locations/managers/manager.py
```python
from flask import current_app
from flask_restful import marshal, abort, reqparse, Resource, inputs
from app import db
from app.constants import API_ENVELOPE
from app.models import User, Organization, Location
from app.apiv2.decorators import permission_location_manager
from app.apiv2.marshal import user_fields
from app.apiv2.email import alert_email
class LocationManagerApi(Resource):
@permission_location_manager
def delete(self, org_id, location_id, user_id):
"""removes the user from the management position"""
organization = Organization.query.get_or_404(org_id)
location = Location.query.get_or_404(location_id)
user = User.query.get_or_404(user_id)
if not user.is_location_manager(location_id):
return {"message": "User does not exist or is not a manager"}, 404
location.managers.remove(user)
try:
db.session.commit()
except Exception as exception:
db.session.rollback()
current_app.logger.exception(str(exception))
abort(400)
alert_email(
user, "You have been removed as a %s manager at %s" %
(location.name, organization.name),
"You have been removed as a manager at the %s location of %s" %
(location.name, organization.name))
return {}, 204
@permission_location_manager
def get(self, org_id, location_id, user_id):
user = User.query.get_or_404(user_id)
if not user.is_location_manager(location_id):
return {"message": "User does not exist or not a manager"}, 404
response = {
API_ENVELOPE: marshal(user, user_fields),
"resources": [],
}
return response
@permission_location_manager
def patch(self, org_id, location_id, user_id):
organization = Organization.query.get_or_404(org_id)
user = User.query.get_or_404(user_id)
if not user.is_location_manager(location_id):
return {"message": "user does not exist or is not a manager"}, 404
parser = reqparse.RequestParser()
parser.add_argument("activateReminder", type=inputs.boolean)
changes = parser.parse_args(strict=True)
# Filter out null values
changes = dict((k, v) for k, v in changes.iteritems() if v is not None)
if len(changes) == 0:
return {}, 204
if "activateReminder" in changes:
if user.active:
return {"message": "This user is already active"}, 400
user.send_activation_reminder(user, organization.name)
return {}, 204
```
#### File: schedules/shifts/shifts.py
```python
import datetime
import iso8601
from flask import g
from flask_restful import marshal, reqparse, Resource, inputs
from sqlalchemy import asc
from copy import deepcopy
from app.constants import API_ENVELOPE
from app.models import Organization, RoleToUser, Schedule2, Shift2
from app.caches import Shifts2Cache
from app.apiv2.decorators import verify_org_location_role_schedule, \
permission_location_member
from app.apiv2.marshal import shift_fields
class ScheduleShiftsApi(Resource):
@verify_org_location_role_schedule
@permission_location_member
def get(self, org_id, location_id, role_id, schedule_id):
"""
gets data for this schedule - via caching!
"""
parser = reqparse.RequestParser()
parser.add_argument(
"include_summary", type=inputs.boolean, default=False)
parser.add_argument(
"filter_by_published", type=inputs.boolean, default=False)
parser.add_argument("claimable_by_user", type=int)
parameters = parser.parse_args()
# Filter out null values
parameters = dict((k, v) for k, v in parameters.iteritems()
if v is not None)
# claimable_by_user must be the only parameter
if (parameters.get("claimable_by_user") and
(parameters.get("include_summary") or
parameters.get("filter_by_published"))):
return {
"message":
"Cannot return claimable shifts and summary or published shifts in the same query"
}, 400
org = Organization.query.get(org_id)
schedule = Schedule2.query.get_or_404(schedule_id)
shifts = Shifts2Cache.get(schedule_id)
if shifts is None:
shifts = Shift2.query \
.filter(
Shift2.role_id == role_id,
Shift2.stop >= schedule.start,
Shift2.start < schedule.stop,
) \
.order_by(asc(Shift2.start)) \
.all()
shifts = map(lambda shift: marshal(shift, shift_fields), shifts)
Shifts2Cache.set(schedule_id, shifts)
# Filter shifts by ones a user can claim
if "claimable_by_user" in parameters and parameters.get(
"claimable_by_user") > 0:
# 1) Check if user in role
role_to_user = RoleToUser.query\
.filter_by(
user_id=parameters["claimable_by_user"],
role_id=role_id, archived=False)\
.first()
if role_to_user is None:
return {"message": "user is not in the role"}, 400
# 2) reduce shifts to unassigned ones that don't overlap with user in question
if g.current_user.is_sudo(
) or g.current_user.is_org_admin_or_location_manager(org_id,
location_id):
allow_past = True
else:
allow_past = False
shifts = self._filter_overlapping_shifts(
shifts,
role_id,
parameters["claimable_by_user"],
allow_past=allow_past, )
# 3) If org does not allow claiming in excess of caps, filter by hourly caps
if org.is_plan_boss(
) and not org.workers_can_claim_shifts_in_excess_of_max:
shifts = self._filter_allowed_shifts(
shifts, role_id, parameters["claimable_by_user"], schedule)
if parameters.get("filter_by_published"):
shifts = filter(lambda shift: shift.get("published") == True,
shifts)
result = {
API_ENVELOPE: shifts,
}
if parameters.get("include_summary"):
users_summary = {}
for shift in shifts:
user_id = shift.get("user_id")
duration = int((
iso8601.parse_date(shift.get("stop")) - iso8601.parse_date(
shift.get("start"))).total_seconds() / 60)
if user_id in users_summary.keys():
users_summary[user_id]["shifts"] += 1
users_summary[user_id]["minutes"] += duration
else:
if user_id > 0:
name = shift.get("user_name")
else:
name = "Unassigned shifts"
users_summary[user_id] = {
"user_id": user_id,
"user_name": name,
"shifts": 1,
"minutes": duration,
}
result["summary"] = users_summary.values()
return result
def _filter_overlapping_shifts(self,
available_shifts,
role_id,
user_id,
allow_past=False):
"""
Return shifts that do not overlap with existing user shifts
Given available shifts and existing user shifts for a given schedule.
Also filter past shifts
available_shifts must come from cache or be marshalled 1st
"""
filtered_shifts = []
for shift in available_shifts:
start = iso8601.parse_date(shift.get("start")).replace(tzinfo=None)
# check if shift in past
if not allow_past and (datetime.datetime.utcnow() > start):
continue
# shift must be published
if not shift["published"]:
continue
# Check whether the shift is unassigned
if shift["user_id"] > 0:
continue
shift_model_copy = deepcopy(Shift2.query.get(shift["id"]))
shift_model_copy.user_id = user_id
if shift_model_copy.has_overlaps():
continue
filtered_shifts.append(shift)
return filtered_shifts
def _filter_allowed_shifts(self, shifts, role_id, user_id, schedule):
"""
filters a list of shifts to only those that would not exceed the user's caps
needs filter_overlapping_shifts to be run 1st
"""
filtered_shifts = []
for shift in shifts:
shift_model = Shift2.query.get(shift["id"])
if shift_model.is_within_caps(user_id):
filtered_shifts.append(shift)
return filtered_shifts
```
#### File: roles/shiftquery/shiftquery.py
```python
import iso8601
from flask import g
from flask_restful import marshal, reqparse, Resource
from app.constants import MAX_SHIFT_LENGTH, SECONDS_PER_HOUR, API_ENVELOPE
from app.models import Shift2
from app.apiv2.decorators import verify_org_location_role, permission_location_manager
from app.apiv2.marshal import user_fields
class ShiftQueryApi(Resource):
@verify_org_location_role
@permission_location_manager
def get(self, org_id, location_id, role_id):
parser = reqparse.RequestParser()
parser.add_argument("start", type=str, required=True)
parser.add_argument("stop", type=str, required=True)
parameters = parser.parse_args()
allow_past = g.current_user.is_sudo(
) or g.current_user.is_org_admin_or_location_manager(org_id,
location_id)
# start time
try:
start = iso8601.parse_date(parameters.get("start"))
except iso8601.ParseError:
return {
"message": "Start time needs to be in ISO 8601 format"
}, 400
else:
start = (start + start.utcoffset()).replace(tzinfo=None)
# stop time
try:
stop = iso8601.parse_date(parameters.get("stop"))
except iso8601.ParseError:
return {"message": "Stop time needs to be in ISO 8601 format"}, 400
else:
stop = (stop + stop.utcoffset()).replace(tzinfo=None)
# stop can't be before start
if start >= stop:
return {"message": "Stop time must be after start time"}, 400
# shifts are limited to 23 hours in length
if int((stop - start).total_seconds()) > MAX_SHIFT_LENGTH:
return {
"message":
"Shifts cannot be more than %s hours long" %
(MAX_SHIFT_LENGTH / SECONDS_PER_HOUR)
}, 400
# create a shift object - do NOT add to db session though
shift = Shift2(role_id=role_id, start=start, stop=stop)
within_caps, exceeds_caps = shift.get_all_eligible_users(
allow_past=allow_past)
marshal_within = [marshal(user, user_fields) for user in within_caps]
marshal_exceeds = [marshal(user, user_fields) for user in exceeds_caps]
for user in marshal_within:
user["within_caps"] = True
for user in marshal_exceeds:
user["within_caps"] = False
return {API_ENVELOPE: marshal_within + marshal_exceeds}
```
#### File: shifts/users/users.py
```python
from flask import g
from flask_restful import marshal, Resource
from app.constants import API_ENVELOPE
from app.models import Shift2
from app.apiv2.decorators import verify_org_location_role_shift, \
permission_location_manager
from app.apiv2.marshal import user_fields
class ShiftEligibleUsersApi(Resource):
@verify_org_location_role_shift
@permission_location_manager
def get(self, org_id, location_id, role_id, shift_id):
shift = Shift2.query.get(shift_id)
allow_past = g.current_user.is_sudo(
) or g.current_user.is_org_admin_or_location_manager(org_id,
location_id)
within_caps, exceeds_caps = shift.get_all_eligible_users(
allow_past=allow_past)
marshal_within_caps = map(lambda user: marshal(user, user_fields),
within_caps)
marshal_exceeds_caps = map(lambda user: marshal(user, user_fields),
exceeds_caps)
for user in marshal_within_caps:
user["within_caps"] = True
for user in marshal_exceeds_caps:
user["within_caps"] = False
return {API_ENVELOPE: marshal_within_caps + marshal_exceeds_caps}
```
#### File: users/apikeys/apikey.py
```python
from flask import current_app
from flask_restful import marshal, abort, Resource
from app.apiv2.marshal import api_key_fields
from app import db
from app.constants import API_ENVELOPE
from app.models import ApiKey
from app.apiv2.decorators import permission_self, verify_user_api_key
class ApiKeyApi(Resource):
method_decorators = [permission_self, verify_user_api_key]
def get(self, user_id, key_id):
apikey = ApiKey.query.get(key_id)
return {API_ENVELOPE: marshal(apikey, api_key_fields)}
def delete(self, user_id, key_id):
apikey = ApiKey.query.get(key_id)
try:
db.session.delete(apikey)
db.session.commit()
except Exception as exception:
db.session.rollback()
current_app.logger.exception(str(exception))
abort(400)
return {}, 204
```
#### File: Staffjoy-suite-Joey/app/helpers.py
```python
from datetime import datetime
import math
import iso8601
import pytz
from flask import current_app, request
from app.constants import DAY_LENGTH, DAYS_OF_WEEK
def is_native():
"""Tell whether request comes from native"""
if request.cookies.get(
current_app.config.get("NATIVE_COOKIE_NAME")) == "1":
return True
return False
def date_duration(date_obj):
"""
takes an ISO 8601 timestamp string (or a datetime object) and returns a string
describing how much time has passed (e.g. "3 minutes ago")
"""
default_tz = get_default_tz()
if type(date_obj) is not datetime:
date_obj = iso8601.parse_date(date_obj)
date_obj = (date_obj + date_obj.utcoffset()).replace(tzinfo=default_tz)
else:
date_obj = default_tz.localize(date_obj)
now = default_tz.localize(datetime.utcnow())
seconds = int((now - date_obj).total_seconds())
if seconds < 6:
return "Just now"
days = math.floor(seconds / 86400)
seconds = seconds - (days * 86400)
hours = math.floor(seconds / 3600)
seconds = seconds - (hours * 3600)
minutes = math.floor(seconds / 60)
seconds = seconds - (minutes * 60)
result = ""
# Don't show seconds if it's been over 10 min
if minutes < 10 and hours == 0 and days == 0:
result = ("%d sec" % seconds) + result
if minutes > 0 and days == 0:
result = ("%d min " % minutes) + result
if hours > 0:
result = ("%d hr " % hours) + result
if days > 0:
result = ("%d days " % days) + result
return "%s ago" % result
def sorted_sessions(sessions):
"""takes sessions dictionary and returns a sorted list"""
for key in sessions.keys():
sessions[key]["key"] = key
return sorted(
sessions.values(),
key=lambda session: session["last_used"],
reverse=True)
def verify_days_of_week_struct(week, binary=False):
""" Given a dictionary, verify its keys are the correct days of the week and values are lists of 24 integers greater than zero."""
if set(DAYS_OF_WEEK) != set(week.keys()):
return False
# Each day must be a list of ints
for _, v in week.iteritems():
if not isinstance(v, list):
return False
if len(v) != DAY_LENGTH:
return False
# Every item should be an int >= 0
for d in v:
if not isinstance(d, int):
return False
if d < 0:
return False
if d > 1 and binary is True:
return False
return True
def normalize_to_midnight(dt_obj):
"""sets the datetime obj to midnight time"""
return dt_obj.replace(hour=0, minute=0, second=0, microsecond=0)
def check_datetime_is_midnight(dt_obj):
"""returns True if a given datetime object is set for midnight"""
return dt_obj.hour + dt_obj.minute + dt_obj.second + dt_obj.microsecond == 0
def timespans_overlap(a_start, a_stop, b_start, b_stop):
"""
returns True if A intersects with B
A and B are defined by abstract start and stop values, which can be:
- Integers
- Datetimes
"""
# Case 1: B is within A
if a_start <= b_start and a_stop >= b_stop:
return True
# Case 2: B ends during A
if a_start >= b_start and a_start < b_stop:
return True
# Case 3: B starts during A
if a_stop > b_start and a_stop <= b_stop:
return True
return False
def get_default_tz():
"""returns a pytz instance of the default timezone"""
return pytz.timezone(current_app.config.get("DEFAULT_TIMEZONE"))
```
#### File: staffjoy/resources/worker.py
```python
from staffjoy.resource import Resource
from staffjoy.resources.timeclock import Timeclock
from staffjoy.resources.time_off_request import TimeOffRequest
class Worker(Resource):
"""Organization administrators"""
PATH = "organizations/{organization_id}/locations/{location_id}/roles/{role_id}/users/{user_id}"
ID_NAME = "user_id"
def get_timeclocks(self, **kwargs):
return Timeclock.get_all(parent=self, **kwargs)
def get_timeclock(self, id):
return Timeclock.get(parent=self, id=id)
def create_timeclock(self, **kwargs):
return Timeclock.create(parent=self, **kwargs)
def get_time_off_requests(self, **kwargs):
return TimeOffRequest.get_all(parent=self, **kwargs)
def get_time_off_request(self, id):
return TimeOffRequest.get(parent=self, id=id)
def create_time_off_request(self, **kwargs):
return TimeOffRequest.create(parent=self, **kwargs)
```
#### File: organizations/admins/test_admins.py
```python
import pytest
from tests.smoke.organizations.admins.base_admin import BaseAdmin
class TestAdmins(BaseAdmin):
ADDITIONAL_ADMIN_EMAIL = "<EMAIL>"
def setUp(self):
# (if you are copying and pasting, update class title below)
super(TestAdmins, self).setUp()
def tearDown(self):
# (if you are copying and pasting, update class title below)
super(TestAdmins, self).tearDown()
def test_admin_crud_sudo(self):
# sudo creates an admin in the setup, so no need to test again
# delete
self.admin.delete()
with pytest.raises(Exception):
self.organization.get_admin(self.admin.get_id())
def test_admin_crud_admin(self):
self.update_permission_admin()
new_admin = self.organization.create_admin(
email=self.ADDITIONAL_ADMIN_EMAIL)
assert new_admin.data.get("email") == self.ADDITIONAL_ADMIN_EMAIL
# delete
new_admin.delete()
with pytest.raises(Exception):
self.organization.get_admin(new_admin.get_id())
def test_admin_crud_manager(self):
self.update_permission_manager()
with pytest.raises(Exception):
self.organization.create_admin(email=self.ADDITIONAL_ADMIN_EMAIL)
with pytest.raises(Exception):
self.admin.delete()
def test_admin_crud_worker(self):
self.update_permission_worker()
with pytest.raises(Exception):
self.organization.create_admin(email=self.ADDITIONAL_ADMIN_EMAIL)
with pytest.raises(Exception):
self.admin.delete()
```
#### File: roles/recurring_shifts/base_recurring_shift.py
```python
from tests.smoke.organizations.locations.roles.base_role import BaseRole
class BaseRecurringShift(BaseRole):
def setUp(self):
# (if you are copying and pasting, update class title below)
super(BaseRecurringShift, self).setUp()
self.recurring_shift = self.role.create_recurring_shift(
start_day="monday",
start_hour=8,
start_minute=0,
duration_minutes=300,
user_id=self.worker.get_id())
self.other_recurring_shift = self.other_role.create_recurring_shift(
start_day="monday",
start_hour=8,
start_minute=0,
duration_minutes=300, )
def tearDown(self):
# (if you are copying and pasting, update class title below)
super(BaseRecurringShift, self).tearDown()
```
#### File: schedules/shifts/test_schedule_shifts.py
```python
from tests.smoke.organizations.locations.roles.schedules.shifts.base_schedule_shift import BaseScheduleShift
class TestScheduleShifts(BaseScheduleShift):
def setUp(self):
# (if you are copying and pasting, update class title below)
super(TestScheduleShifts, self).setUp()
def tearDown(self):
# (if you are copying and pasting, update class title below)
super(TestScheduleShifts, self).tearDown()
def test_schedule_shifts_crud_sudo(self):
self.update_permission_sudo()
all_shifts = self.schedule.get_schedule_shifts()
claimable_shifts = self.schedule.get_schedule_shifts(
claimable_by_user=self.worker.get_id())
assert len(all_shifts) == 3
assert len(claimable_shifts) == 2
def test_schedule_shifts_crud_admin(self):
self.update_permission_admin()
all_shifts = self.schedule.get_schedule_shifts()
claimable_shifts = self.schedule.get_schedule_shifts(
claimable_by_user=self.worker.get_id())
assert len(all_shifts) == 3
assert len(claimable_shifts) == 2
def test_schedule_shifts_crud_manager(self):
self.update_permission_manager()
all_shifts = self.schedule.get_schedule_shifts()
claimable_shifts = self.schedule.get_schedule_shifts(
claimable_by_user=self.worker.get_id())
assert len(all_shifts) == 3
assert len(claimable_shifts) == 2
def test_schedule_shifts_crud_worker(self):
self.update_permission_worker()
all_shifts = self.schedule.get_schedule_shifts()
claimable_shifts = self.schedule.get_schedule_shifts(
claimable_by_user=self.worker.get_id())
assert len(all_shifts) == 3
assert len(claimable_shifts) == 2
```
#### File: roles/schedules/test_schedules.py
```python
import json
import pytest
from tests.smoke.organizations.locations.roles.schedules.base_schedule import BaseSchedule
class TestSchedules(BaseSchedule):
def setUp(self):
# (if you are copying and pasting, update class title below)
super(TestSchedules, self).setUp()
def tearDown(self):
# (if you are copying and pasting, update class title below)
super(TestSchedules, self).tearDown()
def test_schedules_crud_sudo(self):
self.update_permission_sudo()
# read some schedules in main role
schedules = self.role.get_schedules(
start=self.range_start, end=self.range_stop)
assert 0 < len(schedules) <= 2
# read some schedules in another role
other_schedules = self.other_role.get_schedules(
start=self.range_start, end=self.range_stop)
assert 0 < len(other_schedules) <= 2
# now some patches
schedule = schedules.pop(0)
# schedule starts as initial
schedule.patch(state="unpublished")
assert schedule.data.get("state") == "unpublished"
# add some data to it
schedule.patch(
min_shift_length_hour=4,
max_shift_length_hour=8,
demand=json.dumps(self.demand))
assert schedule.data.get("max_shift_length_hour") == 8
assert schedule.data.get("min_shift_length_hour") == 4
assert schedule.data.get("demand") == self.demand
schedule.patch(state="published")
assert schedule.data.get("state") == "published"
# more schedules to test sending to different automation systems
more_schedules = self.role.get_schedules(start=self.range_stop)
first_schedule = more_schedules.pop(0)
second_schedule = more_schedules.pop(0)
first_schedule.patch(state="unpublished")
second_schedule.patch(state="unpublished")
first_schedule.patch(
state="chomp-queue",
min_shift_length_hour=4,
max_shift_length_hour=8,
demand=json.dumps(self.demand))
second_schedule.patch(state="mobius-queue")
def test_schedules_crud_admin(self):
self.update_permission_admin()
# read some schedules
schedules = self.role.get_schedules(
start=self.range_start, end=self.range_stop)
assert 0 < len(schedules) <= 2
# read some schedules in another role
other_schedules = self.other_role.get_schedules(
start=self.range_start, end=self.range_stop)
assert 0 < len(other_schedules) <= 2
# now some patches
schedule = schedules.pop(0)
# schedule starts as initial
schedule.patch(state="unpublished")
assert schedule.data.get("state") == "unpublished"
# add some data to it
schedule.patch(
min_shift_length_hour=4,
max_shift_length_hour=8,
demand=json.dumps(self.demand))
assert schedule.data.get("max_shift_length_hour") == 8
assert schedule.data.get("min_shift_length_hour") == 4
assert schedule.data.get("demand") == self.demand
schedule.patch(state="published")
assert schedule.data.get("state") == "published"
# more schedules to test sending to different automation systems
more_schedules = self.role.get_schedules(start=self.range_stop)
first_schedule = more_schedules.pop(0)
second_schedule = more_schedules.pop(0)
first_schedule.patch(state="unpublished")
second_schedule.patch(state="unpublished")
first_schedule.patch(
state="chomp-queue",
min_shift_length_hour=4,
max_shift_length_hour=8,
demand=json.dumps(self.demand))
second_schedule.patch(state="mobius-queue")
def test_schedules_crud_manager(self):
self.update_permission_manager()
# read some schedules
schedules = self.role.get_schedules(
start=self.range_start, end=self.range_stop)
assert 0 < len(schedules) <= 2
with pytest.raises(Exception):
self.other_role.get_schedules(
start=self.range_start, end=self.range_stop)
# now some patches
schedule = schedules.pop(0)
# schedule starts as initial
schedule.patch(state="unpublished")
assert schedule.data.get("state") == "unpublished"
# add some data to it
schedule.patch(
min_shift_length_hour=4,
max_shift_length_hour=8,
demand=json.dumps(self.demand))
assert schedule.data.get("max_shift_length_hour") == 8
assert schedule.data.get("min_shift_length_hour") == 4
assert schedule.data.get("demand") == self.demand
schedule.patch(state="published")
assert schedule.data.get("state") == "published"
# more schedules to test sending to different automation systems
more_schedules = self.role.get_schedules(start=self.range_stop)
first_schedule = more_schedules.pop(0)
second_schedule = more_schedules.pop(0)
first_schedule.patch(state="unpublished")
second_schedule.patch(state="unpublished")
first_schedule.patch(
state="chomp-queue",
min_shift_length_hour=4,
max_shift_length_hour=8,
demand=json.dumps(self.demand))
second_schedule.patch(state="mobius-queue")
def test_schedules_crud_worker(self):
self.update_permission_worker()
# read some schedules
schedules = self.role.get_schedules(
start=self.range_start, end=self.range_stop)
assert 0 < len(schedules) <= 2
with pytest.raises(Exception):
self.other_role.get_schedules(
start=self.range_start, end=self.range_stop)
# now some patches
schedule = schedules.pop(0)
# can't patch a schedule as a worker
with pytest.raises(Exception):
schedule.patch(state="unpublished")
with pytest.raises(Exception):
schedule.patch(
min_shift_length_hour=4,
max_shift_length_hour=8,
demand=json.dumps(self.demand))
```
#### File: roles/shift_query/base_shift_query.py
```python
from datetime import datetime, timedelta
from app.helpers import normalize_to_midnight
from tests.smoke.organizations.locations.roles.base_role import BaseRole
class BaseShiftQuery(BaseRole):
def setUp(self):
# (if you are copying and pasting, update class title below)
super(BaseShiftQuery, self).setUp()
today = normalize_to_midnight(datetime.utcnow())
self.query_start = (today + timedelta(days=2, hours=8)).isoformat()
self.query_stop = (today + timedelta(days=2, hours=15)).isoformat()
self.long_stop = (today + timedelta(days=3, hours=8)).isoformat()
# the other assigned
self.coworker = self.role.create_worker(
email="<EMAIL>",
min_hours_per_workweek=20,
max_hours_per_workweek=40)
def tearDown(self):
# (if you are copying and pasting, update class title below)
super(BaseShiftQuery, self).tearDown()
```
#### File: roles/shifts/test_shifts.py
```python
import pytest
from tests.smoke.organizations.locations.roles.shifts.base_shift import BaseShift
class TestShifts(BaseShift):
def setUp(self):
# (if you are copying and pasting, update class title below)
super(TestShifts, self).setUp()
def tearDown(self):
# (if you are copying and pasting, update class title below)
super(TestShifts, self).tearDown()
def test_shifts_crud_sudo(self):
self.update_permission_sudo()
# sudo can view shifts
shifts = self.role.get_shifts(
start=self.range_start, end=self.range_stop)
assert len(shifts) == 2
# sudo can create a new shift in other roles
other_role_shift = self.other_role.create_shift(
start=self.assigned_shift.data.get("start"),
stop=self.assigned_shift.data.get("stop"))
assert other_role_shift.data.get(
"start") == self.assigned_shift.data.get("start")
assert other_role_shift.data.get(
"stop") == self.assigned_shift.data.get("stop")
# all the roles
new_shift = self.role.create_shift(
start=self.assigned_shift.data.get("start"),
stop=self.assigned_shift.data.get("stop"))
assert new_shift.data.get("start") == self.assigned_shift.data.get(
"start")
assert new_shift.data.get("stop") == self.assigned_shift.data.get(
"stop")
# sudo can assign shifts in future
self.unassigned_shift.patch(user_id=self.worker.get_id())
assert self.unassigned_shift.data.get(
"user_id") == self.worker.get_id()
# ... and the past
self.assigned_shift.patch(user_id=self.worker.get_id())
assert self.assigned_shift.data.get("user_id") == self.worker.get_id()
# sudo can modify the start and stop of a shift
new_shift.patch(
start=self.unassigned_shift.data.get("start"),
stop=self.unassigned_shift.data.get("stop"))
assert new_shift.data.get("start") == self.unassigned_shift.data.get(
"start")
assert new_shift.data.get("stop") == self.unassigned_shift.data.get(
"stop")
# patch description
new_description = "This is a good description"
self.assigned_shift.patch(description=new_description)
assert self.assigned_shift.data.get("description") == new_description
# can't be too long
with pytest.raises(Exception):
self.assigned_shift.patch(description=(new_description * 100))
# shifts can be deleted
deleted = self.unassigned_shift.delete()
assert deleted is None
with pytest.raises(Exception):
self.role.get_shift(self.unassigned_shift.get_id())
def test_shifts_crud_admin(self):
self.update_permission_admin()
# admin can view shifts
shifts = self.role.get_shifts(
start=self.range_start, end=self.range_stop)
assert len(shifts) == 2
# admins can create a new shift in other roles
other_role_shift = self.other_role.create_shift(
start=self.assigned_shift.data.get("start"),
stop=self.assigned_shift.data.get("stop"))
assert other_role_shift.data.get(
"start") == self.assigned_shift.data.get("start")
assert other_role_shift.data.get(
"stop") == self.assigned_shift.data.get("stop")
# all the roles
new_shift = self.role.create_shift(
start=self.assigned_shift.data.get("start"),
stop=self.assigned_shift.data.get("stop"))
assert new_shift.data.get("start") == self.assigned_shift.data.get(
"start")
assert new_shift.data.get("stop") == self.assigned_shift.data.get(
"stop")
# admins can assign shifts in future
self.unassigned_shift.patch(user_id=self.worker.get_id())
assert self.unassigned_shift.data.get(
"user_id") == self.worker.get_id()
# ... and the past
self.assigned_shift.patch(user_id=self.worker.get_id())
assert self.assigned_shift.data.get("user_id") == self.worker.get_id()
# admins can modify the start and stop of a shift
new_shift.patch(
start=self.unassigned_shift.data.get("start"),
stop=self.unassigned_shift.data.get("stop"))
assert new_shift.data.get("start") == self.unassigned_shift.data.get(
"start")
assert new_shift.data.get("stop") == self.unassigned_shift.data.get(
"stop")
# patch description
new_description = "This is a good description"
self.assigned_shift.patch(description=new_description)
assert self.assigned_shift.data.get("description") == new_description
# can't be too long
with pytest.raises(Exception):
self.assigned_shift.patch(description=(new_description * 100))
# shifts can be deleted
deleted = self.unassigned_shift.delete()
assert deleted is None
with pytest.raises(Exception):
self.role.get_shift(self.unassigned_shift.get_id())
def test_shifts_crud_manager(self):
self.update_permission_manager()
        # managers can view shifts
shifts = self.role.get_shifts(
start=self.range_start, end=self.range_stop)
assert len(shifts) == 2
# managers cannot create a new shift in other roles
with pytest.raises(Exception):
self.other_role.create_shift(
start=self.assigned_shift.data.get("start"),
stop=self.assigned_shift.data.get("stop"))
# but managers can create within their own role
new_shift = self.role.create_shift(
start=self.assigned_shift.data.get("start"),
stop=self.assigned_shift.data.get("stop"))
assert new_shift.data.get("start") == self.assigned_shift.data.get(
"start")
assert new_shift.data.get("stop") == self.assigned_shift.data.get(
"stop")
# managers can assign shifts in future
self.unassigned_shift.patch(user_id=self.worker.get_id())
assert self.unassigned_shift.data.get(
"user_id") == self.worker.get_id()
# ... and the past
self.assigned_shift.patch(user_id=self.worker.get_id())
assert self.assigned_shift.data.get("user_id") == self.worker.get_id()
# managers can modify the start and stop of a shift
new_shift.patch(
start=self.unassigned_shift.data.get("start"),
stop=self.unassigned_shift.data.get("stop"))
assert new_shift.data.get("start") == self.unassigned_shift.data.get(
"start")
assert new_shift.data.get("stop") == self.unassigned_shift.data.get(
"stop")
# patch description
new_description = "This is a good description"
self.assigned_shift.patch(description=new_description)
assert self.assigned_shift.data.get("description") == new_description
# can't be too long
with pytest.raises(Exception):
self.assigned_shift.patch(description=(new_description * 100))
# shifts can be deleted
deleted = self.unassigned_shift.delete()
assert deleted is None
with pytest.raises(Exception):
self.role.get_shift(self.unassigned_shift.get_id())
def test_shifts_crud_worker(self):
self.update_permission_worker()
# worker cannot create a new shift
with pytest.raises(Exception):
self.role.create_shift(
start=self.assigned_shift.data.get("start"),
stop=self.assigned_shift.data.get("stop"))
# worker can view shifts
shifts = self.role.get_shifts(
start=self.range_start, end=self.range_stop)
assert len(shifts) == 2
# worker can claim an unassigned shift
self.unassigned_shift.patch(user_id=self.worker.get_id())
assert self.unassigned_shift.data.get(
"user_id") == self.worker.get_id()
# worker cannot modify a start or stop of a shift
with pytest.raises(Exception):
self.unassigned_shift.patch(
start=self.assigned_shift.data.get("start"),
stop=self.assigned_shift.data.get("stop"))
# worker cannot reassign a shift
with pytest.raises(Exception):
self.assigned_shift.patch(user_id=self.worker.get_id())
# patch description banned too
with pytest.raises(Exception):
self.assigned_shift.patch(description="hello")
# deleting is banned too
with pytest.raises(Exception):
self.unassigned_shift.delete()
```
#### File: locations/roles/test_roles.py
```python
import pytest
from tests.smoke.organizations.locations.roles.base_role import BaseRole
class TestRoles(BaseRole):
def setUp(self):
# (if you are copying and pasting, update class title below)
super(TestRoles, self).setUp()
def tearDown(self):
# (if you are copying and pasting, update class title below)
super(TestRoles, self).tearDown()
# Modify Role
def test_role_crud_sudo(self):
self.update_permission_sudo()
new_min_hours_per_workday = 2
self.role.patch(min_hours_per_workday=new_min_hours_per_workday)
assert self.role.data.get(
"min_hours_per_workday") == new_min_hours_per_workday
# delete
self.role.delete()
refetch = self.location.get_role(self.role.get_id())
assert refetch.data.get("archived") is True
def test_role_crud_admin(self):
self.update_permission_admin()
# post
new_name = "Fisherman"
new_role = self.location.create_role(name=new_name)
assert new_role.data.get("name") == new_name
# in other location
new_role = self.other_location.create_role(name=new_name)
assert new_role.data.get("name") == new_name
# patch - works within all locations
new_min_hours_per_workday = 2
self.role.patch(min_hours_per_workday=new_min_hours_per_workday)
assert self.role.data.get(
"min_hours_per_workday") == new_min_hours_per_workday
self.other_role.patch(min_hours_per_workday=new_min_hours_per_workday)
assert self.other_role.data.get(
"min_hours_per_workday") == new_min_hours_per_workday
# delete
self.role.delete()
refetch = self.location.get_role(self.role.get_id())
assert refetch.data.get("archived") is True
self.other_role.delete()
refetch = self.other_location.get_role(self.other_role.get_id())
assert refetch.data.get("archived") is True
def test_role_crud_manager(self):
self.update_permission_manager()
# post - only in managed locations
new_name = "Fisherman"
new_role = self.location.create_role(name=new_name)
assert new_role.data.get("name") == new_name
# fails in other location
with pytest.raises(Exception):
self.other_location.create_role(name=new_name)
# patch - only in managed locations
new_min_hours_per_workday = 2
self.role.patch(min_hours_per_workday=new_min_hours_per_workday)
assert self.role.data.get(
"min_hours_per_workday") == new_min_hours_per_workday
with pytest.raises(Exception):
self.other_role.patch(
min_hours_per_workday=new_min_hours_per_workday)
# delete
self.role.delete()
refetch = self.location.get_role(self.role.get_id())
assert refetch.data.get("archived") is True
with pytest.raises(Exception):
self.other_role.delete()
def test_role_crud_worker(self):
self.update_permission_worker()
# post
with pytest.raises(Exception):
self.location.create_role(name="Fisherman")
# patch
with pytest.raises(Exception):
self.role.patch(min_hours_per_workday=2)
# delete
with pytest.raises(Exception):
self.role.delete()
with pytest.raises(Exception):
self.other_role.delete()
```
#### File: users/timeclocks/base_timeclock.py
```python
from tests.smoke.organizations.locations.roles.users.base_user import BaseWorker
class BaseTimeclock(BaseWorker):
def setUp(self):
# (if you are copying and pasting, update class title below)
super(BaseTimeclock, self).setUp()
def tearDown(self):
# (if you are copying and pasting, update class title below)
super(BaseTimeclock, self).tearDown()
```
#### File: users/timeclocks/test_timeclocks.py
```python
from datetime import datetime, timedelta
import pytest
from tests.smoke.organizations.locations.roles.users.timeclocks.base_timeclock import BaseTimeclock
class TestTimeclocks(BaseTimeclock):
def setUp(self):
# (if you are copying and pasting, update class title below)
super(TestTimeclocks, self).setUp()
def tearDown(self):
# (if you are copying and pasting, update class title below)
super(TestTimeclocks, self).tearDown()
def test_timeclock_crud_sudo(self):
self.update_permission_sudo()
# posts
# clock the worker in
timeclock = self.worker.create_timeclock()
assert timeclock.data.get("start") is not None
assert timeclock.data.get("stop") is None
# cannot clock in twice
with pytest.raises(Exception):
self.worker.create_timeclock()
        # sudo can clock in workers at other locations
other_timeclock = self.other_worker.create_timeclock()
assert other_timeclock.data.get("start") is not None
assert other_timeclock.data.get("stop") is None
# manager can create a complete timeclock
start_dt = (datetime.utcnow() - timedelta(days=1)).replace(
microsecond=0)
start = start_dt.isoformat()
stop = (start_dt + timedelta(hours=6, minutes=15)).isoformat()
timeclock2 = self.worker.create_timeclock(start=start, stop=stop)
assert timeclock2.data.get("start") == start
assert timeclock2.data.get("stop") == stop
# cannot create a timeclock in the future
utcnow = datetime.utcnow()
start_future = (utcnow + timedelta(hours=1)).isoformat()
stop_future = (utcnow + timedelta(hours=9)).isoformat()
with pytest.raises(Exception):
self.worker.create_timeclock(start=start_future, stop=stop_future)
# get
active_timeclocks = self.worker.get_timeclocks(active=True)
assert len(active_timeclocks) == 1
# patch
timeclock.patch(close=True)
assert timeclock.data.get("stop") is not None
new_start = (
datetime.utcnow() - timedelta(hours=4, minutes=8)).replace(
microsecond=0).isoformat()
timeclock.patch(start=new_start)
assert timeclock.data.get("start") == new_start
# mixed parameters needs to fail
with pytest.raises(Exception):
timeclock.patch(start=new_start, close=True)
# cannot modify timeclock to be in the future
with pytest.raises(Exception):
timeclock.patch(start=start_future, stop=stop_future)
# delete
timeclock.delete()
with pytest.raises(Exception):
self.worker.get_timeclock(timeclock.get_id())
def test_timeclock_crud_admin(self):
self.update_permission_admin()
# posts
# clock the worker in
timeclock = self.worker.create_timeclock()
assert timeclock.data.get("start") is not None
assert timeclock.data.get("stop") is None
# cannot clock in twice
with pytest.raises(Exception):
self.worker.create_timeclock()
# admin can clock in workers at other locations
other_timeclock = self.other_worker.create_timeclock()
assert other_timeclock.data.get("start") is not None
assert other_timeclock.data.get("stop") is None
# admin can create a complete timeclock
start_dt = (datetime.utcnow() - timedelta(days=1)).replace(
microsecond=0)
start = start_dt.isoformat()
stop = (start_dt + timedelta(hours=6, minutes=15)).isoformat()
timeclock2 = self.worker.create_timeclock(start=start, stop=stop)
assert timeclock2.data.get("start") == start
assert timeclock2.data.get("stop") == stop
# cannot create a timeclock in the future
utcnow = datetime.utcnow()
start_future = (utcnow + timedelta(hours=1)).isoformat()
stop_future = (utcnow + timedelta(hours=9)).isoformat()
with pytest.raises(Exception):
self.worker.create_timeclock(start=start_future, stop=stop_future)
# get
active_timeclocks = self.worker.get_timeclocks(active=True)
assert len(active_timeclocks) == 1
# patch
timeclock.patch(close=True)
assert timeclock.data.get("stop") is not None
new_start = (
datetime.utcnow() - timedelta(hours=4, minutes=8)).replace(
microsecond=0).isoformat()
timeclock.patch(start=new_start)
assert timeclock.data.get("start") == new_start
# mixed parameters needs to fail
with pytest.raises(Exception):
timeclock.patch(start=new_start, close=True)
# cannot modify timeclock to be in the future
with pytest.raises(Exception):
timeclock.patch(start=start_future, stop=stop_future)
# delete
timeclock.delete()
with pytest.raises(Exception):
self.worker.get_timeclock(timeclock.get_id())
def test_timeclock_crud_manager(self):
self.update_permission_manager()
# posts
# clock the worker in
timeclock = self.worker.create_timeclock()
assert timeclock.data.get("start") is not None
assert timeclock.data.get("stop") is None
# cannot clock in twice
with pytest.raises(Exception):
self.worker.create_timeclock()
# manager cannot clock in workers at other locations
with pytest.raises(Exception):
self.other_worker.create_timeclock()
# manager can create a complete timeclock
start_dt = (datetime.utcnow() - timedelta(days=1)).replace(
microsecond=0)
start = start_dt.isoformat()
stop = (start_dt + timedelta(hours=6, minutes=15)).isoformat()
timeclock2 = self.worker.create_timeclock(start=start, stop=stop)
assert timeclock2.data.get("start") == start
assert timeclock2.data.get("stop") == stop
# cannot create a timeclock in the future
utcnow = datetime.utcnow()
start_future = (utcnow + timedelta(hours=1)).isoformat()
stop_future = (utcnow + timedelta(hours=9)).isoformat()
with pytest.raises(Exception):
self.worker.create_timeclock(start=start_future, stop=stop_future)
# get
active_timeclocks = self.worker.get_timeclocks(active=True)
assert len(active_timeclocks) == 1
# patch
timeclock.patch(close=True)
assert timeclock.data.get("stop") is not None
new_start = (
datetime.utcnow() - timedelta(hours=4, minutes=8)).replace(
microsecond=0).isoformat()
timeclock.patch(start=new_start)
assert timeclock.data.get("start") == new_start
# mixed parameters needs to fail
with pytest.raises(Exception):
timeclock.patch(start=new_start, close=True)
# cannot modify timeclock to be in the future
with pytest.raises(Exception):
timeclock.patch(start=start_future, stop=stop_future)
# delete
timeclock.delete()
with pytest.raises(Exception):
self.worker.get_timeclock(timeclock.get_id())
def test_timeclock_crud_worker(self):
self.update_permission_worker()
# posts
# worker clocks in
timeclock = self.worker.create_timeclock()
assert timeclock.data.get("start") is not None
assert timeclock.data.get("stop") is None
# cannot clock in twice
with pytest.raises(Exception):
self.worker.create_timeclock()
# worker cannot clock in a buddy
with pytest.raises(Exception):
self.other_worker.create_timeclock()
# worker cannot make a complete timeclock
start_dt = (datetime.utcnow() - timedelta(days=1)).replace(
microsecond=0)
start = start_dt.isoformat()
stop = (start_dt + timedelta(hours=6, minutes=15)).isoformat()
with pytest.raises(Exception):
self.worker.create_timeclock(start=start, stop=stop)
# get
active_timeclocks = self.worker.get_timeclocks(active=True)
assert len(active_timeclocks) == 1
# patch
# can only pass close=true param
timeclock.patch(close=True)
assert timeclock.data.get("stop") is not None
# cannot adjust start or stop
with pytest.raises(Exception):
timeclock.patch(start=start, stop=stop)
# mixed parameters needs to fail
with pytest.raises(Exception):
timeclock.patch(start=start, close=True)
# delete
with pytest.raises(Exception):
timeclock.delete()
```
#### File: smoke/organizations/test_organizations.py
```python
import pytest
from tests.smoke.organizations.base_organization import BaseOrganization
class TestOrganizations(BaseOrganization):
def setUp(self):
# (if you are copying and pasting, update class title below)
super(TestOrganizations, self).setUp()
def tearDown(self):
# (if you are copying and pasting, update class title below)
super(TestOrganizations, self).tearDown()
# Org Crud
def test_org_crud_sudo(self):
self.update_permission_sudo()
# an org was created in the setup
# patch
new_name = "Fred"
self.organization.patch(name=new_name)
assert self.organization.data.get("name") == new_name
# orgs cannot be deleted
with pytest.raises(Exception):
self.organization.delete()
def test_org_crud_admin(self):
self.update_permission_admin()
# post
with pytest.raises(Exception):
self.root_client.create_organization(name="Failure Test Org")
# patch
new_name = "Fred"
self.organization.patch(name=new_name)
assert self.organization.data.get("name") == new_name
# orgs cannot be deleted
with pytest.raises(Exception):
self.organization.delete()
def test_org_crud_manager(self):
self.update_permission_manager()
# post
with pytest.raises(Exception):
self.root_client.create_organization(name="Failure Test Org")
# patch
with pytest.raises(Exception):
self.organization.patch(name="fred")
# orgs cannot be deleted
with pytest.raises(Exception):
self.organization.delete()
def test_org_crud_worker(self):
self.update_permission_worker()
# post
with pytest.raises(Exception):
self.root_client.create_organization(name="Failure Test Org")
# patch
with pytest.raises(Exception):
self.organization.patch(name="fred")
# orgs cannot be deleted
with pytest.raises(Exception):
self.organization.delete()
``` |
{
"source": "JoeyWord/common_op",
"score": 2
} |
#### File: JoeyWord/common_op/baiduAi.py
```python
import requests
import json
from urllib.request import urlopen,Request
from urllib.parse import urlencode
import ssl
from flask import Flask,jsonify,g,request
import logging
import os
API_KEY = 'aQRWZp00xQ4aX56D03gzuo7Y'
SECRET_KEY = '<KEY>'
URL = 'https://aip.baidubce.com/oauth/2.0/token'
access_token = '24.<PASSWORD>'
app = Flask(__name__)
def get_log(log_path):
logger = logging.getLogger(log_path)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(log_path)
fh.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s -%(message)s')
fh.setFormatter(formatter)
sh.setFormatter(formatter)
logger.addHandler(sh)
logger.addHandler(fh)
return logger
class BaiduAi():
def __init__(self,app_key,app_secret,url,logger):
self.app_key = app_key
self.app_secret = app_secret
self.url = url
self.logger = logger
#self.access_token = self.getAuth(self.url,self.app_key,self.app_secret)
self.data = {}
self.headers = {"content-type": "application/json; charset=UTF-8"}
def setParams(self,data,key,value):
data[key] = value
return data
def urlGen(self,host,data):
url = host + '?'
param_ls = []
for key,value in data.items():
param = str(key) + '=' + str(value)
param_ls.append(param)
params = '&'.join(param_ls)
return url + params
def getAuth(self,url,app_key,app_secret):
params = {"grant_type":"client_credentials","client_id":app_key,"client_secret":app_secret}
headers = {"content-type": "application/json; charset=UTF-8"}
"""
r = requests.get(url,data=params,headers=headers)
print(r.text)
datas = json.loads(r.text)
access_token = datas.get("access_token",None)
"""
#pdb.set_trace()
data = urlencode(params).encode('utf-8')
req = Request(url,data=data,headers=headers,method='POST')
#url_req = url + '?grant_type=client_credentials' + '&client_id=' + app_key + '&client_secret=' + app_secret
#url_req = self.urlGen(url)
#print('url_req result: ',url_req)
#req = Request(url_req)
#req.add_header("content-type","application/json; charset=UTF-8")
content = urlopen(req).read().decode('utf-8')
if not type(content) == str:
self.logger.info('type change')
content = str(content,encoding='utf-8')
datas = json.loads(content)
access_token = datas.get("access_token")
self.logger.info("access_token result: {}".format(access_token))
return access_token
@staticmethod
def requestRes(access_token,host,data,headers):
data_js = json.dumps(data)
info = host.split('/')[-1]
if access_token:
url = host + '?' + "access_token=" + access_token
r = requests.post(url,data=data_js,headers=headers,timeout=15)
r.encoding = "gbk"
#print("\n{} request info:{}\n".format(info,r.text))
return json.loads(r.text)
else:
print("access_token can't get successfully")
return None
    # word meaning analysis and short-text similarity evaluation
def wordAnalysis(self,host,word1,word2):
if len(word1) > 32 or len(word2)> 32:
self.logger.debug("input info too long for word analysis")
return "invalid input for word analysis"
self.setParams(self.data,"word_1",word1)
self.setParams(self.data,"word_2",word2)
req = self.requestRes(access_token,host,self.data,self.headers)
#self.logger.info("{} request info:{}".format("info1",req))
self.data = {}
return req
def textSim(self,host,text1,text2,model='BOW'):
if len(text1) > 256 or len(text2) > 256:
self.logger.debug("input info too long for text sim")
return "invalid input for text sim"
self.setParams(self.data, "text_1", text1)
self.setParams(self.data, "text_2", text2)
self.setParams(self.data, "model", model)
req = self.requestRes(access_token, host, self.data, self.headers)
self.data = {}
return req
    # comment opinion extraction
def commentPoint(self,host,text,type=4):
if len(text) > 5120:
self.logger.debug("input info too long for comment point extract")
return "invalid input for comment point"
self.setParams(self.data,'text',text)
self.setParams(self.data,'type',type)
type_value = range(1,14)
req = self.requestRes(access_token, host, self.data, self.headers)
self.data = {}
return req
def emotionTrend(self,host,text):
if len(text) > 1024:
self.logger.debug("input info too long for emotion trend")
return "invalid input for emotion trend"
self.setParams(self.data,'text',text)
req = self.requestRes(access_token, host, self.data, self.headers)
self.data = {}
return req
def articleTags(self,host,title,content):
if len(title) <= 40 and len(content) <= 32767:
self.setParams(self.data,'title',title)
self.setParams(self.data,'content',content)
else:
self.logger.debug("input length overpass the limit")
return "invalid input info for article tags"
req = self.requestRes(access_token, host, self.data, self.headers)
self.data = {}
return req
def topicClassify(self,host,title,content):
if len(title) <= 40 and len(content) <= 32767:
self.setParams(self.data,'title',title)
self.setParams(self.data,'content',content)
else:
self.logger.debug("input length overpass the limit")
return "invalid input info for topic classify"
req = self.requestRes(access_token, host, self.data, self.headers)
self.data = {}
return req
def textCheck(self,host,text):
if len(text) <= 10000:
self.setParams(self.data,'content',text)
else:
self.logger.debug("input length overpass the limit")
return "invalid input info for text check"
        headers = {'content-type': 'application/x-www-form-urlencoded'}
info = host.split('/')[-1]
url = host + "?" + "access_token=" + access_token
req = requests.post(url,data=self.data,headers=headers,timeout=5)
if not req.encoding:
req.encoding = 'gbk'
self.data = {}
return req.text
def word2vec(self,host,word):
if len(word) <= 32:
self.setParams(self.data,'word',word)
else:
self.logger.debug("input length overpass the limit")
return "invalid input info for word2vec"
req = self.requestRes(access_token,host,self.data,self.headers)
self.data = {}
return req
def DNNsequence(self,host,text):
if len(text) <= 128:
self.setParams(self.data,'text',text)
else:
self.logger.debug("input length overpass the limit")
return "invalid input info for DNNseq"
req = self.requestRes(access_token,host,self.data,self.headers)
self.data = {}
return req
    # text error correction: requires extra privilege requested via a work order
def checkFault(self,host,text):
if len(text) <= 255:
self.setParams(self.data,'text',text)
else:
self.logger.debug("input length overpass the limit")
return "invalid input info for fault chek"
req = self.requestRes(access_token,host,self.data,self.headers)
self.data = {}
return req
def dependencyParsing(self,host,sentence,mode=0):
if len(sentence) <= 128:
self.setParams(self.data,'text',sentence)
self.setParams(self.data,'mode',mode)
else:
self.logger.debug("input length overpass the limit")
return "invalid input info for denpendecy parsing"
req = self.requestRes(access_token,host,self.data,self.headers)
self.data = {}
return req
def wordAnalysisCommon(self,host,sentence):
if len(sentence) <= 10000:
self.setParams(self.data,'text',sentence)
else:
self.logger.debug("input length overpass the limit")
return "invalid input info for word analysis common edition"
req = self.requestRes(access_token,host,self.data,self.headers)
self.data = {}
return req
def get_response(res,name):
response = {}
status = 0
if res and str(res).startswith("invalid"):
status = 1
elif not res:
status = 2
elif "error_code" in res:
status = 3
#print(type(res))
if type(res) == dict:
res.update({"status":status})
return res
response.update({name:res,'status':status})
return response
if not os.path.exists('log'):
os.makedirs('log')
logger = get_log(os.path.join('log','api_post.log'))
baiduAi = BaiduAi(API_KEY,SECRET_KEY,URL,logger)
@app.route('/nlp/<string:func_type>',methods=['POST'])
def somaoNLP(func_type):
params = request.get_json()
if func_type == "word_sim":
logger.info("begain analyse the word mean")
emb_sim_host = "https://aip.baidubce.com/rpc/2.0/nlp/v2/word_emb_sim"
word_ls = ["word_1","word_2"]
if all(word in params for word in word_ls):
word_emb = baiduAi.wordAnalysis(emb_sim_host,params.get(word_ls[0]),params.get(word_ls[1]))
response = get_response(word_emb,"word_emb_res")
#logger.info("response result:{}".format(response))
return jsonify(response),100
else:
return "invalid params input",101
elif func_type == "text_sim":
logger.info("begain text sim compute")
text_sim_host = "https://aip.baidubce.com/rpc/2.0/nlp/v2/simnet"
text_ls = ["text_1","text_2"]
if all(word in params for word in text_ls):
text_sim = baiduAi.textSim(text_sim_host,params.get(text_ls[0]),params.get(text_ls[1]))
response = get_response(text_sim,"text_sim_res")
return jsonify(response),200
else:
return "invalid params input", 201
elif func_type == "comment_point":
logger.info("begain comment point extract")
comment_point_host = "https://aip.baidubce.com/rpc/2.0/nlp/v2/comment_tag"
if "text" in params:
comment_point = baiduAi.commentPoint(comment_point_host,params.get("text"))
if "type" in params:
comment_point = baiduAi.commentPoint(comment_point_host,params.get("text"),params.get("type"))
response = get_response(comment_point,"comment_point_extract")
return jsonify(response),300
else:
return "invalid params input", 301
elif func_type == "sentiment_classify":
logger.info("begain emotion trend analysis")
emotion_trend_host = "https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify"
if "text" in params:
emotion_trend = baiduAi.emotionTrend(emotion_trend_host,params.get("text"))
response = get_response(emotion_trend,"emotion_trend_result")
return jsonify(response),400
else:
return "invalid params input", 401
elif func_type == "article_tag":
logger.info("begain tag extract")
info = ['title','content']
article_tag_host = "https://aip.baidubce.com/rpc/2.0/nlp/v1/keyword"
if all(key in params for key in info):
article_tag = baiduAi.articleTags(article_tag_host,params.get(info[0]),params.get(info[1]))
response = get_response(article_tag,"article_tag_extract")
return jsonify(response),500
else:
return "invalid params input", 501
elif func_type == "article_classify":
logger.info("begain article classify")
info = ['title','content']
article_classify_host = "https://aip.baidubce.com/rpc/2.0/nlp/v1/topic"
if all(key in params for key in info):
article_classify = baiduAi.articleTags(article_classify_host,params.get(info[0]),params.get(info[1]))
response = get_response(article_classify,"article_classify_res")
return jsonify(response),600
else:
return "invalid params input", 601
elif func_type == "text_check":
logger.info("begain text check")
text_check_host = 'https://aip.baidubce.com/rest/2.0/antispam/v2/spam'
if "text" in params:
text_check = baiduAi.textCheck(text_check_host,params.get('text'))
response = get_response(text_check,"text_check_res")
return jsonify(response),700
else:
return "invalid params input", 701
elif func_type == "word2vec":
logger.info("begain transform word to vector")
word_vector_host = 'https://aip.baidubce.com/rpc/2.0/nlp/v2/word_emb_vec'
if "word" in params:
word2vec = baiduAi.word2vec(word_vector_host,params.get("word"))
response = get_response(word2vec,"word2vec_res")
return jsonify(response),800
else:
return "invalid params input", 801
elif func_type == "wordAnalysisCommon":
logger.info("begain word analysis(contain word tokens and segment symbol) from sentence")
word_analysis_host = 'https://aip.baidubce.com/rpc/2.0/nlp/v1/lexer'
if "text" in params:
wordAna = baiduAi.wordAnalysisCommon(word_analysis_host,params.get("text"))
response = get_response(wordAna,"wordAnalysis_res")
return jsonify(response),900
else:
return "invalid params input", 901
elif func_type == "dependency_parsing":
logger.info("begain syntax dependency parse")
dependency_parsing_host = 'https://aip.baidubce.com/rpc/2.0/nlp/v1/depparser'
if "sentence" in params:
dependency_parse = baiduAi.dependencyParsing(dependency_parsing_host,params.get("sentence"))
if "mode" in params:
dependency_parse = baiduAi.dependencyParsing(dependency_parsing_host, params.get("sentence"),params.get("mode"))
response = get_response(dependency_parse,"dependency_parse_res")
logger.info("dependency parse: {}".format(response))
return jsonify(response),1000
else:
return "invalid params input", 1001
elif func_type == "check_fault":
logger.info("begain check the word appear place right or not")
check_fault_host = 'https://aip.baidubce.com/rpc/2.0/nlp/v1/ecnet'
if "text" in params:
check_faulting = baiduAi.checkFault(check_fault_host,params.get("text"))
response = get_response(check_faulting,"check_fault_res")
logger.info("check fault: {}".format(response))
return jsonify(response),1100
else:
return "invalid params input", 1101
elif func_type == "DNNsequence":
logger.info("begain analyse the word prob from sequnce text")
DNN_seq_host = 'https://aip.baidubce.com/rpc/2.0/nlp/v2/dnnlm_cn'
if "text" in params:
seq_prob = baiduAi.DNNsequence(DNN_seq_host,params.get("text"))
response = get_response(seq_prob,"seq_prob_res")
logger.info("DNN result:{}".format(response))
return jsonify(response),1200
else:
return "invalid params input", 1201
if __name__ == '__main__':
app.run(host='0.0.0.0',port=5000)
'''
baiduAi = BaiduAi(API_KEY,SECRET_KEY,URL,logger)
#word meaning recognition
word1 = "栋梁"
word2 = "北京"
host = "https://aip.baidubce.com/rpc/2.0/nlp/v2/word_emb_sim"
baiduAi.wordAnalysis(host,word1,word2)
#text similarity recognition
text1 = '海阔凭鱼跃'
text2 = '天高任鸟飞'
host = "https://aip.baidubce.com/rpc/2.0/nlp/v2/simnet"
baiduAi.textSim(host,text1,text2,model='BOW')
#comment opinion extraction
text = "三星电脑电池不给力"
host = "https://aip.baidubce.com/rpc/2.0/nlp/v2/comment_tag"
baiduAi.commentPoint(host,text)
#sentiment trend analysis
text = "这是一次伟大的尝试"
host = "https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify"
baiduAi.emotionTrend(host,text)
#tag extraction
host = "https://aip.baidubce.com/rpc/2.0/nlp/v1/keyword"
title = '隐形的翅膀'
comment = '每一次都在徘徊中坚强,每一次就算很受伤也会有泪光,我知道我一直有双隐形的翅膀,带我飞向更远的地方'
baiduAi.articleTags(host,title,comment)
#article classification
host = 'https://aip.baidubce.com/rpc/2.0/nlp/v1/topic'
baiduAi.topicClassify(host,title,comment)
#text moderation
text = '老吾老以及人之老,幼吾幼以及人之幼'
host = 'https://aip.baidubce.com/rest/2.0/antispam/v2/spam'
baiduAi.textCheck(host,text)
'''
```
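A minimal client-side sketch for exercising the Flask service above, assuming it is running locally on port 5000 and that the hard-coded access_token is still valid; the sample sentences are the ones from the commented-out demo block.

```python
import requests

# call the text-similarity endpoint exposed by the Flask app above
payload = {"text_1": "海阔凭鱼跃", "text_2": "天高任鸟飞"}
resp = requests.post("http://127.0.0.1:5000/nlp/text_sim", json=payload, timeout=10)
print(resp.status_code)  # the view returns 200 on success, 201 when parameters are missing
print(resp.json())       # Baidu's similarity result plus the added "status" field
```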
#### File: JoeyWord/common_op/docParse.py
```python
from bs4 import BeautifulSoup
import re
def unstandard_count(soup,tag_name,tag,standard_format):
subjects=soup.select(tag_name)
print("length subs info: ",len(subjects))
sum_all = 0
for sub in subjects:
tags=sub.find_all(tag)
style_tag=sub.find_all(tag,{"style":re.compile(standard_format)})
print("subs length:{} and length style_tag:{}".format(len(tags),len(style_tag)))
tag_standards=len(style_tag)
sum_all+= len(tags)-tag_standards
print("在查找到的标签范围内不匹配的值为:",sum_all)
#unstandard_count(html,"table","col",col_style)
#check levels title
def unstandard_title(soup,tag_name,child_tag,levels,standard_format_num,standard_format_char,standard_format_num2=None):
subjects=soup.select('%s[class="%d a DocDefaults "]' %(tag_name,levels))
print("{} level title select nums: {}".format(levels,len(subjects)))
total_items = 0
cur_level_num = 0
cur_level_char = 0
for sub in subjects:
sub_tags = sub.select(child_tag)
total_items += len(sub_tags)
        child_tag_nums = sub.find_all(child_tag, {"style": re.compile(standard_format_num)})
        # default to an empty list so the second loop below also works when levels == 1
        child_tag_nums2 = []
        if levels > 1:
            standard_format_num2 = highLevel_num_format
            child_tag_nums2 = sub.find_all(child_tag, {"style": re.compile(standard_format_num2)})
for child_tag_num in child_tag_nums:
if len(re.sub('\w','',child_tag_num.text))<=1:
cur_level_num += 1
for child_tag_num in child_tag_nums2:
if len(re.sub('\w','',child_tag_num.text))<len(child_tag_num.text):
cur_level_num += 1
child_tag_chars = sub.find_all(child_tag,{"style":standard_format_char})
for _ in child_tag_chars:
cur_level_char += 1
#print("match the length:{} and length style_tag:{}".format(len(tags),len(style_tag)))
#tag_standards=len(style_tag)
#sum_all+= len(tags)-tag_standards
non_match_items = total_items - cur_level_char - cur_level_num
print("当前标题级别{}--总的查找条目:{},在查找到的标签范围内不匹配的值为:{}".format(levels,total_items,non_match_items))
#return subjects
"""
#check table font
span_info=[];ss_info=[]
style_info = re.compile('color: #000000;font-size: 11.0pt;;font-family: "SimSun";')
pattern = re.compile(".*color.")
style_info = 'color'
count = 0;count_style=0
td_style = "background-color: #FFC000;border-bottom-style: \
solid;border-bottom-width: 1px;border-bottom-color: \
#000000;border-left-style: solid;border-left-width: \
1px;border-left-color: #000000;border-right-style: \
solid;border-right-width: 1px;border-right-color: \
#000000;border-top-style: solid;border-top-width: \
1px;border-top-color: #000000;vertical-align: bottom;"
col_style = "width: 13.85%;"
tr_style = "height: 0.19in;"
sum_all = 0
#check col style:width,#check tr standard
tables = html.select('table[id^="docx4j"]')
print("length table",len(tables))
for table in tables:
childs = table.colgroup.children
style_col = table.find_all("col",{"style":re.compile("width: 13.85%;")})
print("length style_col:",len(style_col))
col_standards = len(style_col)
#print("childs",childs)
col_nums = 0
for child in childs:
col_nums += 1
print("col_standard={} and col_nums={}".format(col_standard,col_nums))
sum_all += col_nums-col_standards
print("all tables non-standard col numbers: ",sum_all)
#check td font-size
for table in table_info:
table_style = table.select('[id^="docx4j"]')
table_style = table.find({"id":re.compile('^docx4j')})
if table_style:
count += 1
td_style = table_style.find({"style":td_style})
print("td_style",td_style)
col_style = table_style.find(style=col_style)
print("col_style",col_style)
tr_style = table_style.find(attrs={"style":tr_style})
print("tr_style",tr_style)
if td_style and col_style and tr_style:
count_style += 1
spans = table.find_all('span')
spans_standards = table.find_all('span',attrs={"style":re.compile('font-size: 11.0pt;;font-family: ')})
#print(spans[0])
for span in spans:
span_info.append(span.text)
for ss in spans_standards:
ss_info.append(ss.text)
print("count={},count_style={} and span_info length={},span_style length={}".format(count,count_style,len(span_info),len(ss_info)))
non_standards = count-count_style + len(span_info) - len(ss_info)
print("表格式不符合规范的记录数:",non_standards)
"""
if __name__ == "__main__":
#check title
loc_format = "text-align: center;margin-top: 5mm;margin-bottom: 0.43in;"
title_font = "font-weight: bold;font-size: 16.0pt;"
html = BeautifulSoup(open('data/doc2html.html','r',encoding='utf-8'),'lxml')
title_tag = html.find("p")
standard_title_loc = html.find(attrs={"style":loc_format})
count_title = False
if standard_title_loc:
standard_title = standard_title_loc.find("span",{"style":title_font})
if standard_title:
count_title = True
print("the title match the standard")
#levels title check
title_char_format = "font-size: 12.0pt;"
title_num_format = "font-size: 12.0pt;;font-family: 'Calibri';"
highLevel_num_format = "font-size: 12.0pt;;font-family: 'Cambria';white-space:pre-wrap;"
unstandard_title(html,"p","span",2,title_num_format,title_char_format)
```
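All of the checks above hinge on one BeautifulSoup pattern: `find_all` with a compiled regex matched against the inline style attribute. A small self-contained sketch of that pattern (the snippet markup is made up for illustration):

```python
import re
from bs4 import BeautifulSoup

snippet = (
    '<p class="2 a DocDefaults ">'
    '<span style="font-size: 12.0pt;">1.1</span>'
    '<span style="font-size: 14.0pt;">oversized heading</span>'
    '</p>'
)
soup = BeautifulSoup(snippet, 'html.parser')
# only spans whose style contains the standard font size count as compliant
compliant = soup.find_all("span", {"style": re.compile("font-size: 12.0pt;")})
print(len(compliant))  # 1
```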
#### File: JoeyWord/common_op/gen_data.py
```python
import argparse
def splitData(input_file, split_tr, split_va,split_te, train_file, valid_file, test_file):
with open(input_file, 'r') as fr:
lines = fr.readlines()
lines_num = len(lines)
split_train = int(lines_num*split_tr)
split_valid = int(lines_num*split_va)
split_test = int(lines_num * split_te)
count = 0
train = open(train_file,'w')
test = open(test_file,'w')
valid = open(valid_file,'w')
for line in lines:
if line.startswith('_') or line.startswith('36') or line.startswith('氪'):
continue
count += 1
if count < split_train:
train.write(line)
elif count >= split_train and count < split_valid:
valid.write(line)
elif count >= split_valid and count < split_test:
test.write(line)
else:
pass
train.close()
test.close()
valid.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input_file', type=str, help='input file to be split', required=True)
parser.add_argument('--split_tr', type=float, help='define the size of train set', required=True)
parser.add_argument('--split_va', type=float, help='define the size of valid set', required=True)
parser.add_argument('--split_te', type=float, help='define the size of test set', default=1.0)
parser.add_argument('--train_file', type=str, help='train set output', default='train_text.txt')
parser.add_argument('--valid_file', type=str, help='valid set output', default='valid_text.txt')
parser.add_argument('--test_file', type=str, help='test set output', default='test_text.txt')
args = parser.parse_args()
print("params info:--input_file={}\n\t--split_tr={}\n\t--split_va={}\n\t--split_te={}".format(args.input_file, args.split_tr, args.split_va, args.split_te))
splitData(args.input_file, args.split_tr, args.split_va, args.split_te, args.train_file, args.valid_file, args.test_file)
```
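Note that the three split arguments act as cumulative fractions of the input line count: lines below split_tr go to train, between split_tr and split_va to validation, and between split_va and split_te to test. A hypothetical invocation, assuming a corpus.txt in the working directory:

```python
# equivalent to: python gen_data.py --input_file corpus.txt --split_tr 0.8 --split_va 0.9
# roughly 80% train, 10% validation, 10% test (lines starting with '_', '36' or '氪' are skipped)
splitData("corpus.txt", 0.8, 0.9, 1.0,
          "train_text.txt", "valid_text.txt", "test_text.txt")
```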
#### File: JoeyWord/common_op/pachong_pic.py
```python
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
from urllib.parse import urljoin
from urllib.request import urlretrieve
import os
url="https://www.xmanhua.com/10xm/"
headers={"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163"}
# params = {"start":start_idx,"filter":""}
req = requests.get(url,headers=headers,timeout=10)
domain_url = "https://www.xmanhua.com"
if not req.encoding:
req.encoding = 'utf-8'
html = req.text
soup = BeautifulSoup(html, 'lxml')
cl = soup.select("div#chapterlistload")
lis = cl[0].select("a")
print(f'cl len: {len(cl)} and lis: {len(lis)}')
folds = []
for idx, li in tqdm(enumerate(lis), desc='chapter:'):
f_nm = "_".join(li.text.split())
path = li.attrs["href"]
folds.append(f_nm)
inner_url = urljoin(domain_url, path)
print(f"{f_nm} href:{path} and inner url is:{}")
# get_pic(f_nm,inner_url)
if idx > 1:
break
def get_pic(url,tag,restore):
"""
:param url:
:param tag:
:param restore:
:return:
"""
req=requests.get(url,headers=headers,timeout=5)
if req.status_code==200:
req.encoding='utf-8'
html = req.text
soup = BeautifulSoup(html, 'lxml')
pic_links=soup.select(tag)
print(f"pic link nums:{len(pic_links)}")
        for idx, pic in enumerate(pic_links):
            # the tag object itself is not a URL; download its src attribute instead
            # (assumes `restore` is an existing directory for the images)
            src = pic.attrs.get("src")
            if src:
                urlretrieve(urljoin(url, src), os.path.join(restore, f"{idx}.jpg"))
else:
print(f"not normal req with code:{req.status_code}")
raise RuntimeError()
```
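The call commented out inside the chapter loop does not match get_pic's (url, tag, restore) signature. A hedged sketch of how it could be wired up; the img selector, the chapter URL, and the output directory are assumptions, not values taken from the source:

```python
import os

chapter_url = "https://www.xmanhua.com/m1xm/"   # hypothetical chapter page
out_dir = os.path.join("data", "chapter_1")
os.makedirs(out_dir, exist_ok=True)
get_pic(chapter_url, "img", out_dir)            # download every <img> on the page into out_dir
```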
#### File: JoeyWord/common_op/test_flask.py
```python
from flask import Flask
app = Flask(__name__)
@app.route('/')
def test():
return "hello flask"
if __name__ == '__main__':
app.run('0.0.0.0',8080)
```
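A quick smoke test for the app above, assuming it has been started locally:

```python
import requests

# the app listens on 0.0.0.0:8080 and answers the root route with a fixed string
print(requests.get("http://127.0.0.1:8080/").text)  # "hello flask"
```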
#### File: JoeyWord/common_op/vetibe.py
```python
import numpy as np
def vertibeAlgth(obs,transform_prob,gen_prob,start_prob):
"""
compute the most suitable route to construct the obs
:param obs: generate the observation sequence
:param transform_prob: the prob which is from i state to j state,i,j belong the number of states number format is array with two same dimension
:param gen_prob: generate the obs prob.array with dim1 is same to state number and dim2 is same to obs value
:param start_prob: origin state prob with length same to the length of states
:return: list with best route
"""
n_states = transform_prob.shape[0]
n_obs = len(obs)
obs_res = {}
index = 0
for x in obs:
if x not in obs_res:
obs_res[x] = index
index += 1
cache_prob_state = {}
prob_0 = []
state_start = [0 for _ in range(n_states)]
best_route = [None]*n_obs
for i in range(n_states):
prob_0.append(gen_prob[i,obs_res[obs[0]]]*start_prob[i])
cache_prob_state[0] = (prob_0,state_start)
for t in range(1,n_obs-1):
index_obs = obs_res[obs[t]]
prob_t = [];prev_state_chooses = []
for cur_state in range(n_states):
prob_cur_t = []
for prev_state in range(n_states):
                tmp = cache_prob_state[t - 1][0][prev_state] * transform_prob[prev_state, cur_state]
prob_cur_t.append(tmp*gen_prob[cur_state,index_obs])
prob_cur = max(prob_cur_t)
prev_state_choose = np.argmax(prob_cur_t)
prob_t.append(prob_cur)
prev_state_chooses.append(prev_state_choose)
cache_prob_state[t] = (prob_t,prev_state_chooses)
prob_last = [];state_last = []
obs_last = obs_res[obs[n_obs - 1]]
for s in range(n_states):
        prob_last_s = max(np.array(cache_prob_state[n_obs-2][0]) * transform_prob[:, s] * gen_prob[s, obs_last])
        state_last_s = np.argmax(np.array(cache_prob_state[n_obs-2][0]) * transform_prob[:, s])
prob_last.append(prob_last_s)
state_last.append(state_last_s)
cache_prob_state[n_obs-1] = (prob_last,state_last)
p_start = max(prob_last)
last_state = np.argmax(prob_last)
best_route[n_obs-1] = last_state
i = n_obs-1
while(i >= 1):
prev_state = cache_prob_state[i][1][last_state]
#route = last_state
best_route[i-1] = prev_state
last_state = prev_state
i -= 1
return best_route
if __name__ == '__main__':
obs = ['红','白','红']
A = np.array([[0.5,0.2,0.3],[0.3,0.5,0.2],[0.2,0.3,0.5]])
B = np.array([[0.5,0.5],[0.4,0.6],[0.7,0.3]])
pi = [0.2,0.4,0.4]
best_route = vertibeAlgth(obs,A,B,pi)
print(best_route)
``` |
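With the indexing fixes applied above, the example in __main__ is the classic three-box HMM exercise; the expected decoding below is hand-computed from those matrices, not output captured from the original script:

```python
import numpy as np

obs = ['红', '白', '红']
A = np.array([[0.5, 0.2, 0.3], [0.3, 0.5, 0.2], [0.2, 0.3, 0.5]])
B = np.array([[0.5, 0.5], [0.4, 0.6], [0.7, 0.3]])
pi = [0.2, 0.4, 0.4]
# best path stays in state index 2 (the third state) at every step
print(vertibeAlgth(obs, A, B, pi))  # expected: [2, 2, 2]
```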
{
"source": "JoeyWord/mlp_encoder",
"score": 3
} |
#### File: JoeyWord/mlp_encoder/data_manager.py
```python
from tensorflow.examples.tutorials.mnist import input_data
from numpy.random import random
class DataManager():
def __init__(self,mnist_path,config={}):
self.image = config.get("image")
self.image_size = None
self.channel = None
self.mnist = input_data.read_data_sets(mnist_path,one_hot=True)
self.validation_labels = self.mnist.validation.labels
self.test_labels = self.mnist.test.labels
if not self.image:
self.validation_images = self.mnist.validation.images
self.test_images = self.mnist.test.images
else:
self.image_size = config['image_size']
self.channel = config['channel']
valid_size = self.mnist.validation.images.shape[0]
test_size = self.mnist.test.images.shape[0]
self.validation_images = self.mnist.validation.images.reshape([valid_size,self.image_size,self.image_size,self.channel])
self.test_images = self.mnist.test.images.reshape([test_size,self.image_size,self.image_size,self.channel])
def iterbatch(self,batch_size):
        batch_ls = [self.mnist.train.next_batch(batch_size) for _ in range(100)]
for images,labels in batch_ls:
if not self.image:
yield images,labels
else:
if self.image_size == 28:
images_rs = images.reshape([batch_size,self.image_size,self.image_size,self.channel])
yield images_rs,labels
else:
raise ValueError("no suitable image_size for train_data reshape")
```
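A usage sketch for the wrapper above, assuming the MNIST files can be downloaded to (or already live in) ./MNIST_data:

```python
# flat mode: batches come back as (batch, 784) float arrays with one-hot labels
dm = DataManager("MNIST_data", config={"image": False})
for images, labels in dm.iterbatch(64):
    print(images.shape, labels.shape)   # (64, 784) (64, 10)
    break

# image mode: the same data reshaped to (batch, 28, 28, 1)
dm_images = DataManager("MNIST_data", config={"image": True, "image_size": 28, "channel": 1})
```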
#### File: JoeyWord/mlp_encoder/tensor_tes.py
```python
import tensorflow as tf
from numpy.random import RandomState
def playGame(layers,lr=0.02,batch_size=100,loss_type=0):
x_ = tf.placeholder(dtype=tf.float32,shape=[None,2],name="input_x")
y_ = tf.placeholder(tf.float32,[None,1],name="input_y")
w_dict = {};b_dict = {}
input_shape = 2
output_shape = 1
for layer in layers:
dimension_shape = [input_shape,layers[layer]]
w_dict[layer],b_dict[layer] = get_var(dimension_shape,0.01)
#w_dict[layer] = tf.Variable(tf.random_normal([input_shape,layers[layer]]),name='w_' + layer)
#b_dict[layer] = tf.Variable(tf.zeros(layers[layer],name='b_' + layer))
input_shape = layers[layer]
hidden_out = hidden(x_,w_dict,b_dict)
w_dict['output'] = tf.Variable(tf.random_normal([input_shape,output_shape]),name='w_output')
b_dict['bias'] = tf.Variable(tf.zeros(output_shape),name='b_output')
output = tf.nn.sigmoid(tf.matmul(hidden_out,w_dict['output'])+b_dict['bias'])
    if loss_type == 0:  # 0: use the basic cross-entropy loss
loss = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(output,0.001,0.999)) + (1-y_) * tf.log(1-tf.clip_by_value(output,0.001,0.999)))
train_op = tf.train.AdamOptimizer(lr).minimize(loss)
else:
loss = tf.nn.softmax_cross_entropy_with_logits(labels=y_,logits=output)
global_step = tf.Variable(0.0,name='global_step')
        # decay the learning rate exponentially as global_step grows
learning_rate = tf.train.exponential_decay(lr,global_step,decay_steps=100,decay_rate=0.99)
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
tf.add_to_collection('losses',loss)
loss_all = tf.add_n(tf.get_collection('losses'))
rdm = RandomState(1)
X = rdm.randn(1000,2)
Y = [[float(int(x1 + x2 +rdm.randint(-10,10)/batch_size < 2))] for x1,x2 in X]
init = tf.global_variables_initializer()
with tf.Session(graph=tf.get_default_graph()) as sess:
sess.run(init)
for i in range(100):
batch_start = i*batch_size % X.shape[0]
batch_end = min(batch_start + batch_size,X.shape[0])
w_res,b_res,loss_value,_ = sess.run([w_dict,b_dict,loss_all,train_op],feed_dict={x_:X[batch_start:batch_end],y_:Y[batch_start:batch_end]})
print("iteration {} loss={}".format(i,loss_value))
for layer in w_res:
print("layer {} weight update:\n{}".format(layer,w_res[layer]))
def hidden(input_x,w,b):
input_layer = input_x
for layer in w:
input_layer = tf.nn.relu(tf.matmul(input_layer,w[layer])+b[layer])
return input_layer
def get_var(dimension_shape,lambda_param):
w = tf.Variable(tf.random_normal(shape=dimension_shape))
b = tf.Variable(tf.zeros(dimension_shape[-1]))
tf.add_to_collection("losses",tf.contrib.layers.l2_regularizer(lambda_param)(w))
return w,b
def move_averge(sess,decay,init,name='move_avg'):
var = tf.Variable(0,dtype=tf.int32,name=name)
step = tf.Variable(0,trainable=False)
exp_avg = tf.train.ExponentialMovingAverage(decay,num_updates=step)
move_average_op = exp_avg.apply([var])
sess.run(init)
sess.run(move_average_op)
step,v,exp_avg_val = sess.run([step,var,exp_avg.average(var)])
print("initial current step {} variable value={} and exp_avg value={}".format(step,v,exp_avg_val))
sess.run([tf.assign(step,100),tf.assign(var,5)])
sess.run(move_average_op)
step,v,exp_avg_val = sess.run([step,var,exp_avg.average(var)])
print("update current step {} variable value={} and exp_avg value={}".format(step,v,exp_avg_val))
if __name__ == '__main__':
hidden_layers = {"layer1":2,"layer2":3,"layer3":3}
playGame(hidden_layers)
``` |
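get_var registers an L2 penalty for every weight matrix in the 'losses' collection, and playGame later sums that collection with tf.add_n, so the optimiser minimises the data loss plus all regularisation terms. A stripped-down sketch of that TF1-era pattern:

```python
import tensorflow as tf

w = tf.Variable(tf.random_normal([2, 3]))
# weight-decay term goes into the shared collection ...
tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(0.01)(w))
# ... the data loss goes into the same collection ...
data_loss = tf.constant(1.0)
tf.add_to_collection('losses', data_loss)
# ... and the training objective is the sum of everything collected
total_loss = tf.add_n(tf.get_collection('losses'))
```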
{
"source": "joeyworld/algo",
"score": 4
} |
#### File: datastructure/queue/boj_10845.py
```python
import sys
queue = {
'front': 0,
'back': 0
}
def push(X):
queue[queue['back']] = X
queue['back'] += 1
def pop():
if size() == 0:
return -1
element = queue[queue['front']]
del queue[queue['front']]
queue['front'] += 1
return element
def size():
return queue['back'] - queue['front']
def empty():
return 1 if size() == 0 else 0
def front():
if size() == 0:
return - 1
return queue[queue['front']]
def back():
if size() == 0:
return - 1
return queue[queue['back'] - 1]
def operate(command):
action = command[0]
if action == 'push':
push(int(command[1]))
elif action == 'pop':
print(pop())
elif action == 'size':
print(size())
elif action == 'empty':
print(empty())
elif action == 'front':
print(front())
elif action == 'back':
print(back())
def main():
N = int(sys.stdin.readline())
for _ in range(N):
command = sys.stdin.readline().rstrip().split(' ')
operate(command)
if __name__ == '__main__':
main()
```
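The dict above emulates front/back indices by hand; for comparison, a sketch of the same queue operations with collections.deque from the standard library:

```python
from collections import deque

q = deque()
q.append(1)               # push 1
q.append(2)               # push 2
print(len(q))             # size  -> 2
print(q[0], q[-1])        # front, back -> 1 2
print(q.popleft())        # pop   -> 1
print(1 if not q else 0)  # empty -> 0
```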
#### File: datastructure/stack/boj_10828.py
```python
stack = []
top = -1
def push(element):
global stack
global top
stack.append(element)
top += 1
def pop():
global stack
global top
if top == -1:
return top
else:
element = stack[top]
del stack[top]
top -= 1
return element
def operate(command):
global stack
global top
action = command[0]
if action == 'push':
value = int(command[1])
push(value)
elif action == 'pop':
print(pop())
elif action == 'top':
print(-1 if top == -1 else stack[top])
elif action == 'size':
print(top + 1)
elif action == 'empty':
print(1 if top == -1 else 0)
def main():
N = int(input())
for _ in range(N):
command = input().split(' ')
operate(command)
if __name__ == '__main__':
main()
```
#### File: datastructure/stack/boj_2504.py
```python
def solve(sentence):
    # running-multiplier rewrite: `temp` holds the product of all currently
    # open brackets; whenever a pair closes directly ("()" or "[]") its value
    # is the current product, which is added to the answer
    stack = []
    answer = 0
    temp = 1
    for i, element in enumerate(sentence):
        if element == '(':
            stack.append(element)
            temp *= 2
        elif element == '[':
            stack.append(element)
            temp *= 3
        elif element == ')':
            if not stack or stack[-1] != '(':
                return 0
            if sentence[i - 1] == '(':
                answer += temp
            stack.pop()
            temp //= 2
        else:
            if not stack or stack[-1] != '[':
                return 0
            if sentence[i - 1] == '[':
                answer += temp
            stack.pop()
            temp //= 3
    return answer if not stack else 0
def main():
sentence = input()
print(solve(sentence))
if __name__ == '__main__':
main()
```
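A quick check of the rewritten solve against the sample from the problem statement ("(()[[]])([])" is worth 28) and an unbalanced input:

```python
assert solve("(()[[]])([])") == 28  # sample answer from the problem statement
assert solve("(()") == 0            # unbalanced input yields 0
```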
#### File: src/divide_conquer/boj_1074.py
```python
def solve(x, y):
# base case
x = int(x)
y = int(y)
if x < 2 and y < 2:
return 2 * x + y
e = 2
while True:
if x // e == 0 and y // e == 0:
e /= 2
break
else:
e *= 2
return int(2 * e * e * (x // e) + e * e * (y // e)) + (solve(x % e, y % e))
if __name__ == '__main__':
N, r, c = input().split(' ')
print(solve(r, c))
```
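A couple of sanity checks: the first is the problem's sample (N=2, r=3, c=1 gives 11), the second is hand-computed for the last cell of an 8x8 Z-order grid:

```python
assert solve(3, 1) == 11   # sample from the problem statement
assert solve(7, 7) == 63   # bottom-right cell of an 8x8 Z-order grid
```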
#### File: src/dynamic_programming/boj_10844.py
```python
previous_answers = [1, 2, 2, 2, 2, 2, 2, 2, 2, 1]
next_answers = [1] * 10
def solve(n):
if n == 1:
return
elif n == 2:
next_answers[:] = previous_answers
return
for i in range(3, n + 1):
next_answers[0] = (previous_answers[1]) % 1000000000
for j in range(1, 9):
next_answers[j] = (((previous_answers[j - 1]) % 1000000000) +
(previous_answers[j + 1] % 1000000000)) % 1000000000
next_answers[9] = (previous_answers[8]) % 1000000000
previous_answers[:] = next_answers
if __name__ == '__main__':
N = int(input())
solve(N)
print(sum(next_answers[1:]) % 1000000000)
```
#### File: src/dynamic_programming/boj_11053.py
```python
def find_ideal_pos(sorted_list, begin, end, element):
mid = (begin + end) // 2
if element <= sorted_list[begin]:
return begin
if element >= sorted_list[end]:
return end
if end - begin == 1:
return end
if sorted_list[mid - 1] < element <= sorted_list[mid]:
return mid
elif sorted_list[mid] < element:
return find_ideal_pos(sorted_list, mid, end, element)
else:
return find_ideal_pos(sorted_list, begin, mid, element)
def find_lis(given, length):
lis = [given[0]]
top = given[0]
lis_length = 1
for i in range(1, length):
if top < given[i]:
top = given[i]
lis.append(given[i])
lis_length += 1
else:
ideal_pos = find_ideal_pos(lis, 0, lis_length - 1, given[i])
lis[ideal_pos] = given[i]
top = lis[lis_length - 1]
return lis_length
def main():
N = int(input())
input_sequence = input().split(' ')
input_sequence = [int(element) for element in input_sequence]
print(find_lis(input_sequence, N))
if __name__ == '__main__':
main()
```
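The hand-rolled find_ideal_pos is doing a lower-bound search over the tail array; the same O(n log n) LIS-length idea can be sketched with bisect_left from the standard library:

```python
from bisect import bisect_left

def lis_length(seq):
    tails = []  # tails[k] = smallest tail of an increasing run of length k + 1
    for x in seq:
        pos = bisect_left(tails, x)
        if pos == len(tails):
            tails.append(x)
        else:
            tails[pos] = x
    return len(tails)

print(lis_length([10, 20, 10, 30, 20, 50]))  # 4, matching the problem's sample
```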
#### File: src/dynamic_programming/boj_2156.py
```python
previous = [[0 for _ in range(10000)] for _ in range(3)]
# 다음 두개다 / 앞에꺼만 / 뒤에꺼만
def solve(wine, n, total):
if n == total:
previous[0][n - 1] = wine[n - 1]
previous[1][n - 1] = wine[n - 1]
previous[2][n - 1] = 0
return
if n == total - 1:
previous[0][n - 1] = wine[n - 1] + wine[n]
previous[1][n - 1] = wine[n - 1]
previous[2][n - 1] = wine[n]
return
if n == total - 2:
previous[0][n - 1] = wine[n - 1] + wine[n]
previous[1][n - 1] = wine[n - 1] + wine[n + 1]
previous[2][n - 1] = wine[n] + wine[n + 1]
return
previous[0][n - 1] = wine[n - 1] + wine[n] + \
max(previous[0][n + 2], previous[1][n + 2], previous[2][n + 2])
previous[1][n - 1] = wine[n - 1] + \
max(previous[0][n + 1], previous[1][n + 1], previous[2][n + 1])
previous[2][n - 1] = max(previous[0][n],
previous[1][n], previous[2][n])
if __name__ == '__main__':
n = int(input())
wine = [int(input()) for _ in range(n)]
for i in range(n):
solve(wine, (n - i), n)
print(max(previous[0][0], previous[1][0], previous[2][0]))
```
#### File: src/dynamic_programming/boj_2579.py
```python
previous_scores = [0] * 301
def solve(num_stairs, points):
if num_stairs < 3:
print(previous_scores[num_stairs])
return
else:
return points[num_stairs - 1] + solve(num_stairs - 1)
if __name__ == '__main__':
num_stairs = int(input())
points = [int(input()) for _ in range(num_stairs)]
previous_scores[1] = points[0]
if num_stairs > 1:
previous_scores[2] = points[0] + points[1]
solve(num_stairs, points)
```
#### File: src/dynamic_programming/hkr_fibonacci_modified.py
```python
def solve(t1, t2, n):
dp = [0] * 21
dp[0] = t1
dp[1] = t2
for i in range(2, n):
dp[i] = dp[i - 1] ** 2 + dp[i - 2]
return dp[n - 1]
if __name__ == '__main__':
print(solve(*list(map(int, input().split()))))
```
#### File: src/implementation/boj_2448.py
```python
import sys
stars = [[0] * 6143 for _ in range(3072)]
def find_star(x, y, size):
if size <= 3:
stars[y][x] = 1
stars[y + 1][x - 1] = 1
stars[y + 1][x + 1] = 1
stars[y + 2][x + 2] = 1
stars[y + 2][x + 1] = 1
stars[y + 2][x] = 1
stars[y + 2][x - 1] = 1
stars[y + 2][x - 2] = 1
else:
size //= 2
find_star(x, y, size)
find_star(x - size, y + size, size)
find_star(x + size, y + size, size)
def print_star(n):
vertical = n
horizontal = 2 * n - 1
for i in range(vertical):
for j in range(horizontal):
if stars[i][j] == 1:
sys.stdout.write('*')
else:
sys.stdout.write(' ')
sys.stdout.write('\n')
if __name__ == '__main__':
N = int(input())
find_star(N - 1, 0, N)
print_star(N)
```
#### File: src/implementation/hkr_organize_containers.py
```python
def solve(rows, cols):
rows = sorted(rows)
cols = sorted(cols)
return 'Possible' if rows == cols else 'Impossible'
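# Illustrative check (added comment): a 2x2 matrix of all ones has row sums
# [2, 2] and column sums [2, 2], so solve([2, 2], [2, 2]) returns 'Possible'.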
if __name__ == '__main__':
q = int(input())
for _ in range(q):
n = int(input())
rows = [0] * n
cols = [0] * n
for i in range(n):
for j, k in enumerate(list(map(int, input().split()))):
cols[i] += k
rows[j] += k
print(solve(rows, cols))
```
#### File: src/pattern-searching/boj_10250.py
```python
def main():
H, W, N = input().split(' ')
H = int(H)
W = int(W)
N = int(N)
floor = H if N % H == 0 else N % H
room = (N - 1) // H + 1
print(floor * 100 + room)
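# Illustrative check (added comment): for H=6, W=12, N=10 the guest goes to
# floor 4, room 2, so the program prints 402.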
if __name__ == '__main__':
[main() for _ in range(int(input()))]
```
#### File: src/pattern-searching/boj_2775.py
```python
def solve(floor, room):
pass
if __name__ == '__main__':
num_tc = int(input())
for _ in range(num_tc):
k = int(input())
n = int(input())
print(solve(k, n))
```
#### File: src/searching/boj_1316.py
```python
def solve(num_words, words):
check_result = [word for word in words if check_group(word) is True]
return len(check_result)
def check_group(word):
n = len(word)
prev = [word[0]]
for i in range(n - 1):
if word[i] != word[i + 1]:
if word[i + 1] in prev:
return False
prev.append(word[i + 1])
return True
if __name__ == '__main__':
num_words = int(input())
words = [input() for _ in range(num_words)]
print(solve(num_words, words))
```
#### File: src/sorting/boj_10825.py
```python
def solve():
pass
if __name__ == '__main__':
num_students = int(input())
students_info = list()
for i in range(num_students):
pass
```
#### File: src/sorting/hkr_sock_merchant.py
```python
from collections import Counter
# Complete the sockMerchant function below.
def sockMerchant_without_counter(n, ar):
valid = 0
socks = dict.fromkeys(ar, 0)
for value in ar:
socks[value] += 1
for value in socks.values():
valid += value // 2
return valid
def sockMerchant(n, ar):
valid = 0
cnt = Counter(ar)
for value in cnt.values():
valid += value // 2
return valid
if __name__ == '__main__':
n = int(input())
ar = list(map(int, input().rstrip().split()))
result = sockMerchant_without_counter(n, ar)
print(result)
```
#### File: src/strings/hkr_sherlock_valid_string.py
```python
import os
from collections import Counter
def isValid(s):
c = Counter(s).most_common()
length = len(c)
diff_cnt = 0
for i in range(length - 1):
_, prev_cnt = c[i]
_, next_cnt = c[i + 1]
diff = prev_cnt - next_cnt
if prev_cnt - next_cnt != 0:
if diff == 1:
if i == 0 or i == (length - 2):
diff_cnt += 1
else:
return 'NO'
elif diff > 0:
if next_cnt == 1 and i == (length - 2):
diff_cnt += 1
else:
return 'NO'
else:
return 'NO'
return 'YES' if diff_cnt <= 1 else 'NO'
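# Illustrative checks (added comment): isValid('abcc') == 'YES' (drop one 'c'),
# isValid('aabbcd') == 'NO' (no single deletion equalises the letter counts).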
if __name__ == '__main__':
s = input()
result = isValid(s)
print(result)
```
#### File: src/strings/hkr_whats_next.py
```python
def solve(n, arr):
n1 = arr[n - 1]
if (n - 1) % 2 == 0:
# the sequence ends with ...1111 (trailing run of ones)
if n - 2 < 0:
arr = [1, 1, n1 - 1]
else:
n2 = arr[n - 2]
del arr[n - 1]
del arr[n - 2]
arr += [n2 - 1, 1, 1, n1 - 1]
else:
n2 = arr[n - 2]
# the sequence ends with ...0000 (trailing run of zeros)
if n - 3 < 0:
del arr[n - 1]
del arr[n - 2]
if n2 == 1:
arr = [n2, n1 + 1]
else:
arr = [1, n1 + 1, n2 - 1]
else:
n3 = arr[n - 3]
del arr[n - 1]
del arr[n - 2]
del arr[n - 3]
arr += [n3 - 1, 1, n1 + 1, n2 - 1]
idx = 0
n = len(arr)
while True:
if idx == n:
break
if arr[idx] == 0:
if idx == n - 1:
del arr[idx]
break
else:
arr[idx - 1] += arr[idx + 1]
del arr[idx + 1]
del arr[idx]
n -= 2
else:
idx += 1
return arr
if __name__ == '__main__':
T = int(input())
for _ in range(T):
n = int(input())
arr = list(map(int, input().split()))
res = solve(n, arr)
print(len(res))
print(*res)
``` |
{
"source": "joeyworld/patienceforest-user-api",
"score": 3
} |
#### File: tests/api/test_users_api.py
```python
import unittest
from django.test import TestCase
from rest_framework.test import APIRequestFactory, force_authenticate
from apps.users.models import User
from api.users.views import UserView
class LoginViewTest(TestCase):
def setUp(self):
self.email = '<EMAIL>'
self.username = 'John Doe'
self.password = '<PASSWORD>'
User.objects.create_user(
email=self.email,
password=<PASSWORD>,
username=self.username
)
self.request = APIRequestFactory()
self.view = UserView.as_view()
self.request_body = {
'email': self.email,
'username': self.username,
'password': <PASSWORD>
}
self.response = lambda: self.view(
self.request.post(
path='/api/users/login/',
data=self.request_body,
format='json'
)
)
@unittest.skip('Test not fully implemented')
def test_successful_login(self):
response = self.response()
self.assertEqual(response.status_code, 200)
self.assertEqual(response['username'], self.username)
self.assertIsNotNone(response['token'])
@unittest.skip('Test not fully implemented')
def test_login_failure_email_not_provided(self):
del self.request_body['email']
self.assertEqual(self.response().status_code, 400)
@unittest.skip('Test not fully implemented')
def test_login_failure_incorrect_email_type(self):
self.request_body['email'] = 'incorrect_email'
self.assertEqual(self.response().status_code, 400)
@unittest.skip('Test not fully implemented')
def test_login_failure_username_not_provided(self):
del self.request_body['username']
self.assertEqual(self.response().status_code, 400)
@unittest.skip('Test not fully implemented')
def test_login_failure_password_not_provided(self):
del self.request_body['password']
self.assertEqual(self.response().status_code, 400)
``` |
{
"source": "joeyyin123/wordcount-project",
"score": 2
} |
#### File: wordcount-project/wordcount/views.py
```python
from django.http import HttpResponse
from django.shortcuts import render
import operator
def homepage(request):
#return HttpResponse('hello')
return render(request, 'home.html',{'Hithere': 'this is me'})
def eggs(request):
#return HttpResponse('Eggs are great!')
return HttpResponse('<h1>eggs are great</h1>')
def count(request):
fulltext = request.GET['fulltext']
wordlist = fulltext.split()
#print(fulltext)
worddictionary = {}
for word in wordlist:
if word in worddictionary:
#increase
worddictionary[word] += 1
else:
#add to the worddictionary
worddictionary[word] = 1
sortedwords = sorted(worddictionary.items(), key=operator.itemgetter(1), reverse=True)
#return render(request, 'count.html',{'fulltext': fulltext,'count': len(wordlist),'worddictionary': worddictionary.items()})
return render(request, 'count.html',{'fulltext': fulltext,'count': len(wordlist),'sortedwords': sortedwords})
def about(request):
return render(request, 'about.html')
``` |
{
"source": "JoeyYoung/sound_localization",
"score": 3
} |
#### File: JoeyYoung/sound_localization/game.py
```python
import numpy as np
import collections
import math
import pickle
from walker import Walker
"""
env computing, reward computing
game play settings
"""
class Game:
def __init__(self):
self.n_features = 366
self.n_actions = 8
self.max_epoch = 100
self.max_steps = 100
# define sound source information
self.src_pos_x = -3.0
self.src_pos_y = 1.6
self.src_pos_z = -3.0
# sample as a grid map with 0.5m unit
self.unit = 0.5
self.room_grids = [i for i in np.arange(-3.5, 3.5 + self.unit, self.unit)]
self.walker = Walker(self.n_features, self.n_actions)
def detect_invalids(self, x, y, z):
invalids = []
if x == 3.5:
invalids.append(self.walker.action_labels.index('90'))
invalids.append(self.walker.action_labels.index('45'))
invalids.append(self.walker.action_labels.index('135'))
if x == -3.5:
invalids.append(self.walker.action_labels.index('270'))
invalids.append(self.walker.action_labels.index('225'))
invalids.append(self.walker.action_labels.index('315'))
if z == 3.5:
invalids.append(self.walker.action_labels.index('180'))
invalids.append(self.walker.action_labels.index('135'))
invalids.append(self.walker.action_labels.index('225'))
if z == -3.5:
invalids.append(self.walker.action_labels.index('0'))
invalids.append(self.walker.action_labels.index('315'))
invalids.append(self.walker.action_labels.index('45'))
obstacle_x = [-1, -0.5, 0, 0.5, 1]
obstacle_z = [-1, -0.5, 0, 0.5, 1]
if x == 1.5 and z == 1.5:
invalids.append(self.walker.action_labels.index('315'))
elif x == 1.5 and z == 1:
invalids.append(self.walker.action_labels.index('315'))
invalids.append(self.walker.action_labels.index('270'))
elif x == 1.5 and z in np.arange(-0.5, 1, 0.5):
invalids.append(self.walker.action_labels.index('315'))
invalids.append(self.walker.action_labels.index('270'))
invalids.append(self.walker.action_labels.index('225'))
elif x == 1.5 and z == -1:
invalids.append(self.walker.action_labels.index('225'))
invalids.append(self.walker.action_labels.index('270'))
elif x == 1.5 and z == -1.5:
invalids.append(self.walker.action_labels.index('225'))
elif x == 1 and z == -1.5:
invalids.append(self.walker.action_labels.index('225'))
invalids.append(self.walker.action_labels.index('180'))
elif x in np.arange(-0.5, 1, 0.5) and z == -1.5:
invalids.append(self.walker.action_labels.index('225'))
invalids.append(self.walker.action_labels.index('180'))
invalids.append(self.walker.action_labels.index('135'))
elif x == -1 and z == -1.5:
invalids.append(self.walker.action_labels.index('180'))
invalids.append(self.walker.action_labels.index('135'))
elif x == -1.5 and z == -1.5:
invalids.append(self.walker.action_labels.index('135'))
elif x == -1.5 and z == -1:
invalids.append(self.walker.action_labels.index('90'))
invalids.append(self.walker.action_labels.index('135'))
elif x == -1.5 and z in np.arange(-0.5, 1, 0.5):
invalids.append(self.walker.action_labels.index('90'))
invalids.append(self.walker.action_labels.index('135'))
invalids.append(self.walker.action_labels.index('45'))
elif x == -1.5 and z == 1:
invalids.append(self.walker.action_labels.index('45'))
invalids.append(self.walker.action_labels.index('90'))
elif x == -1.5 and z == 1.5:
invalids.append(self.walker.action_labels.index('45'))
elif x == -1 and z == 1.5:
invalids.append(self.walker.action_labels.index('0'))
invalids.append(self.walker.action_labels.index('45'))
elif x in np.arange(-0.5, 1, 0.5) and z == 1.5:
invalids.append(self.walker.action_labels.index('315'))
invalids.append(self.walker.action_labels.index('0'))
invalids.append(self.walker.action_labels.index('45'))
elif x == 1 and z == 1.5:
invalids.append(self.walker.action_labels.index('315'))
invalids.append(self.walker.action_labels.index('0'))
# todo, abstract an obstacle
return invalids
def play(self):
records_step = []
records_r = []
for epoch in range(self.max_epoch):
print("========== Epoch %d ======" % epoch)
memory = collections.defaultdict(dict)
visit = {}
for i in self.room_grids:
for j in self.room_grids:
visit[str(i) + "*" + str(j)] = 0
for k in self.walker.action_labels:
memory[str(i) + "*" + str(j)][k] = 0
# init walker position
# fixme, random choose
self.walker.reset_walker_pos(3.0, 1, 3.0)
DONE = False
sum_reward = 0.0
a_his = None
for step in range(self.max_steps):
s = self.walker.observe_gcc_vector(self.walker.pos_x, self.walker.pos_y, self.walker.pos_z)
s = np.array(s)[np.newaxis, :]
# fixme, use grids to detect
# fixme, cut action space
invalids = self.detect_invalids(self.walker.pos_x, self.walker.pos_y, self.walker.pos_z)
pos_key = str(self.walker.pos_x) + "*" + str(self.walker.pos_z)
for i in memory[pos_key].keys():
if memory[pos_key][i] >= 2:
invalids.append(self.walker.action_labels.index(i))
visit[pos_key] += 1
a, p = self.walker.choose_action(s, invalids)
# step next state
direction = self.walker.action_labels[a]
# fixme, for the first step, give more obs, argmax
if step == 0:
fe = open('first_obs.pkl', 'rb')
obs = pickle.load(fe)
s_r = obs['right']
s_r = np.array(s_r)[np.newaxis, :]
a_r, p_r = self.walker.choose_action(s_r, [])
p_rr = [p_r[len(p_r) - 2], p_r[len(p_r) - 1]]
p_rr = np.append(p_rr, p_r[:len(p_r) - 2])
s_l = obs['left']
s_l = np.array(s_l)[np.newaxis, :]
a_l, p_l = self.walker.choose_action(s_l, [])
p_ll = [p_l[0], p_l[1]]
p_ll = np.append(p_l[2:], p_ll)
s_d = obs['down']
s_d = np.array(s_d)[np.newaxis, :]
a_d, p_d = self.walker.choose_action(s_d, [])
p_dd = [p_d[len(p_d) - 4], p_d[len(p_d) - 3], p_d[len(p_d) - 2], p_d[len(p_d) - 1]]
p_dd = np.append(p_dd, p_d[:len(p_d) - 4])
# fixme, define first step based on obs, do argmax
p_mix = [0] * self.n_actions
for i in range(self.n_actions):
if i not in invalids:
p_mix[i] = p[i] + p_rr[i] + p_ll[i] + p_dd[i]
p_mix = np.array(p_mix)
p_mix /= p_mix.sum()
a_mix = np.argmax(p_mix)
fe.close()
a = a_mix
a_his = a
p = p_mix
direction = self.walker.action_labels[a]
# if epoch == 20:
# print(p)
# print(direction)
memory[pos_key][direction] += 1
if direction == '0':
self.walker.reset_walker_pos(self.walker.pos_x, self.walker.pos_y,
self.walker.pos_z - self.unit)
elif direction == '45':
self.walker.reset_walker_pos(self.walker.pos_x + self.unit, self.walker.pos_y,
self.walker.pos_z - self.unit)
elif direction == '90':
self.walker.reset_walker_pos(self.walker.pos_x + self.unit, self.walker.pos_y,
self.walker.pos_z)
elif direction == '135':
self.walker.reset_walker_pos(self.walker.pos_x + self.unit, self.walker.pos_y,
self.walker.pos_z + self.unit)
elif direction == '180':
self.walker.reset_walker_pos(self.walker.pos_x, self.walker.pos_y,
self.walker.pos_z + self.unit)
elif direction == '225':
self.walker.reset_walker_pos(self.walker.pos_x - self.unit, self.walker.pos_y,
self.walker.pos_z + self.unit)
elif direction == '270':
self.walker.reset_walker_pos(self.walker.pos_x - self.unit, self.walker.pos_y,
self.walker.pos_z)
elif direction == '315':
self.walker.reset_walker_pos(self.walker.pos_x - self.unit, self.walker.pos_y,
self.walker.pos_z - self.unit)
# fixme, don't have s_ when get source
if self.walker.pos_x == self.src_pos_x and self.walker.pos_z == self.src_pos_z:
print("get source")
DONE = True
r = 5
s_ = np.array([0 for u in range(self.n_features)])[np.newaxis, :]
else:
# fixme, rebuild reward function
# r = self.walker.observe_volume(self.walker.pos_x, self.walker.pos_y, self.walker.pos_z)
# r = 0
pos_key = str(self.walker.pos_x) + "*" + str(self.walker.pos_z)
# r /= (visit[pos_key] + 1)
max_angle = max(float(self.walker.action_labels[a]), float(self.walker.action_labels[a_his]))
min_angle = min(float(self.walker.action_labels[a]), float(self.walker.action_labels[a_his]))
diff = min(abs(max_angle - min_angle), 360 - max_angle + min_angle)
# r = 1 - abs((a + a_his) % self.n_actions - a_his) / (self.n_actions - 1)
r = 1 - diff / 180
r -= (visit[pos_key]) * 0.2
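# (Added comment) Example: consecutive headings 45 and 315 differ by 90 degrees,
# so the base reward is 1 - 90/180 = 0.5 before the revisit penalty above.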
# todo, think about punishment
pub = self.detect_invalids(self.walker.pos_x, self.walker.pos_y, self.walker.pos_z)
if len(pub) > 0:
r -= 0.5
s_ = self.walker.observe_gcc_vector(self.walker.pos_x, self.walker.pos_y, self.walker.pos_z)
s_ = np.array(s_)[np.newaxis, :]
sum_reward += r
a_his = a
self.walker.learn(s, a, s_, r)
if DONE:
break
# fixme, think about a new way to evaluate
print(step)
print(sum_reward / step)
records_step.append(step)
records_r.append(sum_reward / step)
# overload now
if epoch % 500 == 0 and epoch != 0:
with open('save/rl_8x3x8_src_-3_1.6_-3/records_step', 'w') as f:
f.write(str(records_step))
with open('save/rl_8x3x8_src_-3_1.6_-3/records_reward', 'w') as f:
f.write(str(records_r))
if __name__ == '__main__':
game = Game()
# game.detect_invalids(1, 1, 1)
game.play()
```
#### File: JoeyYoung/sound_localization/generateGcc.py
```python
import numpy as np
import math
import pickle
import wave
import collections
import os
import random
import copy
import sys
import matplotlib.pyplot as plt
'''
This function computes the offset between the signal sig and the reference signal refsig
using the Generalized Cross Correlation - Phase Transform (GCC-PHAT)method.
'''
def gcc_phat(sig, refsig, fs=1, max_tau=None, interp=1):
if isinstance(sig, list):
sig = np.array(sig)
if isinstance(refsig, list):
refsig = np.array(refsig)
# make sure the length for the FFT is larger or equal than len(sig) + len(refsig)
n = sig.shape[0] + refsig.shape[0]
# Generalized Cross Correlation Phase Transform
SIG = np.fft.rfft(sig, n=n)
REFSIG = np.fft.rfft(refsig, n=n)
R = SIG * np.conj(REFSIG)
cc = np.fft.irfft(R / np.abs(R), n=(interp * n))
max_shift = int(interp * n / 2)
if max_tau:
max_shift = np.minimum(int(interp * fs * max_tau), max_shift)
cc = np.concatenate((cc[-max_shift:], cc[:max_shift + 1]))
# find max cross correlation index
shift = np.argmax(np.abs(cc)) - max_shift
tau = shift # / float(interp * fs) * 340
return tau, cc
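# --- Illustrative usage sketch (added; not part of the original file) ---
# A signal delayed by 5 samples relative to the reference should give tau close to 5.
# The helper name _gcc_phat_selfcheck is hypothetical.
def _gcc_phat_selfcheck():
    rng = np.random.RandomState(0)
    ref = rng.randn(1024)
    sig = np.concatenate((np.zeros(5), ref))[:1024]  # ref delayed by 5 samples
    tau, _ = gcc_phat(sig, ref, fs=16000)
    print("estimated delay in samples:", tau)  # expected: 5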
"""
walk all files in input dir:
collections: pos -> [gcc features, label]
meet new walker pos: read 4 mics, process; read label, process (01 vector)
meet old walker pos: continue
store info to file in the format: [[features], [label]]
...
[[features], [label]]
note: output file has property (room, source pos)
"""
# the microphone position numbers have already been mapped to the corresponding channels
def generate_gcc_real(input_dir, output_dir, output_file):
res = collections.defaultdict(list)
gcc_width_half = 30
# the whole vector length is 61
files = os.listdir(input_dir)
for file in files:
# skip dir
name = str(file.title()).lower()
if os.path.isdir(file) or name[:2] != 'u_':
continue
file_names = name.split('_')
pos = file_names[2] + "_" + file_names[3] + "_" + file_names[5]
# meet old walker pos
if res.get(pos) is not None:
continue
else:
temp = int(file_names[5])
index_fill = int(temp / 45)
label = [0] * 8
label[index_fill] = 1
# read 4 mics, compute features
min_len = 999999
fs = 0
# i indicates the microphone position number
for i in range(1, 5):
if i == 1:
j = 2
elif i == 2:
j = 4
elif i == 3:
j = 3
elif i == 4:
j = 1
else:
j = 0
mic = name[:len(name) - 5] + str(j) + ".wav"
wav = wave.open(os.path.join(input_dir, mic), 'rb')
n_frame = wav.getnframes()
fs = wav.getframerate()
data = np.frombuffer(wav.readframes(n_frame), dtype=np.short)
if len(data) < min_len:
min_len = len(data)
locals()['data%d' % i] = data
gcc_vector = []
for i in range(1, 5):
locals()['data%d' % i] = locals()['data%d' % i][:min_len]
for i in range(1, 5):
for j in range(i + 1, 5):
tau, cc = gcc_phat(locals()['data%d' % i], locals()['data%d' % j], fs)
for k in range(min_len - gcc_width_half, min_len + gcc_width_half + 1):
gcc_vector.append(cc[k])
res[pos] = [gcc_vector, label]
print(len(res.keys())) # 1088
# write into file
with open(os.path.join(output_dir, output_file), 'w') as f:
for k in res.keys():
print(res[k], file=f)
# collect real data using walker
def generate_gcc_deploy(input_dir, output_dir, output_file):
res = collections.defaultdict(list)
gcc_width_half = 30
# the whole vector length is 61
files = os.listdir(input_dir)
for file in files:
# skip dir
name = str(file.title()).lower()
if os.path.isdir(file) or name[:4] != 'real':
continue
file_names = name.split('_')
pos = file_names[1] + "_" + file_names[2]
# meet old walker pos
if res.get(pos) is not None:
continue
else:
temp = int(file_names[2])
index_fill = int(temp / 45)
label = [0] * 8
label[index_fill] = 1
# read 4 mics, compute features
min_len = 999999
fs = 0
# i indicates the microphone position number
for i in range(1, 5):
mic = name[:len(name) - 5] + str(i) + ".wav"
wav = wave.open(os.path.join(input_dir, mic), 'rb')
n_frame = wav.getnframes()
fs = wav.getframerate()
data = np.frombuffer(wav.readframes(n_frame), dtype=np.short)
if len(data) < min_len:
min_len = len(data)
locals()['data%d' % i] = data
gcc_vector = []
for i in range(1, 5):
locals()['data%d' % i] = locals()['data%d' % i][:min_len]
for i in range(1, 5):
for j in range(i + 1, 5):
tau, cc = gcc_phat(locals()['data%d' % i], locals()['data%d' % j], fs)
for k in range(min_len - gcc_width_half, min_len + gcc_width_half + 1):
gcc_vector.append(cc[k])
res[pos] = [gcc_vector, label]
print(len(res.keys())) # 1088
# write into file
with open(os.path.join(output_dir, output_file), 'w') as f:
for k in res.keys():
print(res[k], file=f)
# collect real data using walker
def generate_gcc_bias(input_dir, output_dir, output_file):
res = collections.defaultdict(list)
gcc_width_half = 50
# the whole vector length is 61
files = os.listdir(input_dir)
for file in files:
# skip dir
name = str(file.title()).lower()
if os.path.isdir(file) or name[:4] != 'real':
continue
file_names = name.split('_')
pos = file_names[1] + "_" + file_names[2]
# meet old walker pos
if res.get(pos) is not None:
continue
else:
temp = int(file_names[2])
index_fill = int(temp / 45)
label = [0] * 8
label[index_fill] = 1
# read 4 mics, compute features
min_len = 999999
fs = 0
# i indicates the microphone position number
for i in range(1, 5):
mic = name[:len(name) - 5] + str(i) + ".wav"
wav = wave.open(os.path.join(input_dir, mic), 'rb')
n_frame = wav.getnframes()
fs = wav.getframerate()
data = np.frombuffer(wav.readframes(n_frame), dtype=np.short)
if len(data) < min_len:
min_len = len(data)
locals()['data%d' % i] = data
gcc_vector = []
for i in range(1, 5):
locals()['data%d' % i] = locals()['data%d' % i][:min_len]
for i in range(1, 5):
for j in range(i + 1, 5):
tau, cc = gcc_phat(locals()['data%d' % i], locals()['data%d' % j], fs)
temp_gcc = []
for k in range(min_len - gcc_width_half, min_len + gcc_width_half + 1):
temp_gcc.append(cc[k])
gcc_vector.append(temp_gcc)
# add bias
pair1 = gcc_vector[0]
pair2 = gcc_vector[1]
pair3 = gcc_vector[2]
pair4 = gcc_vector[3]
pair5 = gcc_vector[4]
pair6 = gcc_vector[5]
center = int(len(pair1) / 2)
p1 = pair1[center - gcc_width_half:center + gcc_width_half]
p2 = pair2[center - gcc_width_half:center + gcc_width_half]
p3 = pair3[center - gcc_width_half:center + gcc_width_half]
p4 = pair4[center - gcc_width_half:center + gcc_width_half]
p5 = pair5[center - gcc_width_half:center + gcc_width_half]
p6 = pair6[center - gcc_width_half:center + gcc_width_half]
bias1 = list(p1).index(np.max(p1)) - gcc_width_half
bias2 = list(p2).index(np.max(p2)) - gcc_width_half
bias3 = list(p3).index(np.max(p3)) - gcc_width_half
bias4 = list(p4).index(np.max(p4)) - gcc_width_half
bias5 = list(p5).index(np.max(p5)) - gcc_width_half
bias6 = list(p6).index(np.max(p6)) - gcc_width_half
bias = [bias1, bias2, bias3, bias4, bias5, bias6]
res[pos] = [bias, label]
print(len(res.keys())) # 1088
# write into file
with open(os.path.join(output_dir, output_file), 'w') as f:
for k in res.keys():
print(res[k], file=f)
# rsc back simulated env
# data[1] represents right
def generate_gcc_simu_rscback(input_dir, output_dir, output_file):
res = collections.defaultdict(list)
gcc_width_half = 30
# the whole vector length is 61
files = os.listdir(input_dir)
for file in files:
# skip dir
name = str(file.title()).lower()
if os.path.isdir(file) or name[:3] != 'src':
continue
file_names = name.split('_')
pos = file_names[1] + "_" + file_names[2] + "_" + file_names[4]
# meet old walker pos
if res.get(pos) is not None:
continue
else:
temp = int(file_names[5])
index_fill = int(temp / 45)
label = [0] * 8
label[index_fill] = 1
# read 4 mics, compute features
min_len = 999999
fs = 0
# i indicates the microphone position number; pick the specific channel from each position's wav file
for i in range(1, 5):
mic = name[:len(name) - 5] + str(i) + ".wav"
wav = wave.open(os.path.join(input_dir, mic), 'rb')
n_frame = wav.getnframes()
fs = wav.getframerate()
data = np.frombuffer(wav.readframes(n_frame), dtype=np.short)
if len(data) % 2 != 0:
data = np.append(data, 0)
data.shape = -1, 2
data = data.T
# data[0] represents right, data[1] represents left
data_pro = data[1]
# [(data[0][j] + data[1][j]) / 2 for j in range(len(data[0]))]
if len(data_pro) < min_len:
min_len = len(data_pro)
locals()['data%d' % i] = data_pro
gcc_vector = []
for i in range(1, 5):
locals()['data%d' % i] = locals()['data%d' % i][:min_len]
for i in range(1, 5):
for j in range(i + 1, 5):
tau, cc = gcc_phat(locals()['data%d' % i], locals()['data%d' % j], fs)
for k in range(min_len - gcc_width_half, min_len + gcc_width_half + 1):
gcc_vector.append(cc[k])
res[pos] = [gcc_vector, label]
print(len(res.keys())) # 1088
# write into file
with open(os.path.join(output_dir, output_file), 'w') as f:
for k in res.keys():
print(res[k], file=f)
def generate_gcc(input_dir, output_dir, output_file, average=True, vector=True, savepos=False):
res = collections.defaultdict(list)
gcc_width_half = 30
# the whole vector length is 61
files = os.listdir(input_dir)
for file in files:
# skip dir
name = str(file.title()).lower()
if os.path.isdir(file) or name[:6] != 'walker':
continue
file_names = name.split('_')
pos = file_names[1] + "_" + file_names[2] + "_" + file_names[3]
# meet old walker pos
if res.get(pos) is not None:
continue
else:
index_fill = int(int(file_names[4]) / 45)
label = [0] * 8
label[index_fill] = 1
# read 4 mics, compute features
min_len = 999999
fs = 0
for i in range(1, 5):
mic = name[:len(name) - 5] + str(i) + ".wav"
wav = wave.open(os.path.join(input_dir, mic), 'rb')
n_frame = wav.getnframes()
fs = wav.getframerate()
if average is True:
data = np.frombuffer(wav.readframes(n_frame), dtype=np.short)
if len(data) % 2 != 0:
data = np.append(data, 0)
data.shape = -1, 2
data = data.T
data_avg = [(data[0][j] + data[1][j]) / 2 for j in range(len(data[0]))]
if len(data_avg) < min_len:
min_len = len(data_avg)
locals()['data%d' % i] = data_avg
else:
data = np.frombuffer(wav.readframes(n_frame), dtype=np.short)
if len(data) < min_len:
min_len = len(data)
locals()['data%d' % i] = data
gcc_offset = []
gcc_vector = []
for i in range(1, 5):
locals()['data%d' % i] = locals()['data%d' % i][:min_len]
if vector is True:
for i in range(1, 5):
for j in range(i + 1, 5):
tau, cc = gcc_phat(locals()['data%d' % i], locals()['data%d' % j], fs)
for k in range(min_len - gcc_width_half, min_len + gcc_width_half + 1):
gcc_vector.append(cc[k])
res[pos] = [gcc_vector, label]
else:
for i in range(1, 5):
for j in range(i + 1, 5):
tau, cc = gcc_phat(locals()['data%d' % i], locals()['data%d' % j], fs)
gcc_offset.append(tau)
res[pos] = [gcc_offset, label]
print(len(res.keys())) # 1088
# write into file
with open(os.path.join(output_dir, output_file), 'w') as f:
for k in res.keys():
print(res[k], file=f)
# fixme, save variable res into disk (binary)
if savepos is True:
disk = open('simu_r2.pkl', 'wb')
pickle.dump(res, disk)
disk.close()
def generate_srp(input_dir, output_dir, output_file, average=True, vector=True, savepos=False):
res = collections.defaultdict(list)
gcc_width_half = 60
# the whole vector length is 61
files = os.listdir(input_dir)
for file in files:
# skip dir
name = str(file.title()).lower()
if os.path.isdir(file) or name[:6] != 'walker':
continue
file_names = name.split('_')
pos = file_names[1] + "_" + file_names[2] + "_" + file_names[3]
# meet old walker pos
if res.get(pos) is not None:
continue
else:
index_fill = int(int(file_names[4]) / 45)
label = [0] * 8
label[index_fill] = 1
# read 4 mics, compute features
min_len = 999999
fs = 0
for i in range(1, 5):
mic = name[:len(name) - 5] + str(i) + ".wav"
wav = wave.open(os.path.join(input_dir, mic), 'rb')
n_frame = wav.getnframes()
fs = wav.getframerate()
if average is True:
data = np.frombuffer(wav.readframes(n_frame), dtype=np.short)
if len(data) % 2 != 0:
data = np.append(data, 0)
data.shape = -1, 2
data = data.T
data_avg = [(data[0][j] + data[1][j]) / 2 for j in range(len(data[0]))]
if len(data_avg) < min_len:
min_len = len(data_avg)
locals()['data%d' % i] = data_avg
else:
data = np.frombuffer(wav.readframes(n_frame), dtype=np.short)
if len(data) < min_len:
min_len = len(data)
locals()['data%d' % i] = data
gcc_offset = []
gcc_vector = []
cc_srp = None
for i in range(1, 5):
locals()['data%d' % i] = locals()['data%d' % i][:min_len]
if vector is True:
for i in range(1, 5):
for j in range(1, 5):
tau, cc = gcc_phat(locals()['data%d' % i], locals()['data%d' % j], fs)
if cc_srp is None:
cc_srp = cc
else:
cc_srp += cc
for k in range(min_len - gcc_width_half, min_len + gcc_width_half + 1):
gcc_vector.append(cc_srp[k])
res[pos] = [gcc_vector, label]
else:
for i in range(1, 5):
for j in range(i + 1, 5):
tau, cc = gcc_phat(locals()['data%d' % i], locals()['data%d' % j], fs)
gcc_offset.append(tau)
res[pos] = [gcc_offset, label]
print(len(res.keys())) # 1088
# write into file
with open(os.path.join(output_dir, output_file), 'w') as f:
for k in res.keys():
print(res[k], file=f)
# fixme, save variable res into disk (binary)
if savepos is True:
disk = open('new.pkl', 'wb')
pickle.dump(res, disk)
disk.close()
def generate_gcc_binary(input_dir, output_dir, output_file):
room1_pos = [-2.0, 1.6, -1.0]
room2_pos = [2.0, 1.6, -1.0]
room3_pos = [-2.0, 1.6, 1.0]
room4_pos = [2.0, 1.6, 1.0]
room1_x = [i for i in np.arange(-3.5, 0, 0.5)]
room1_z = [i for i in np.arange(-4.5, -1, 0.5)]
room2_x = [i for i in np.arange(0.5, 4.0, 0.5)]
room2_z = [i for i in np.arange(-4.5, -1, 0.5)]
room3_x = [i for i in np.arange(-3.5, 0, 0.5)]
room3_z = [i for i in np.arange(1.5, 5.0, 0.5)]
room4_x = [i for i in np.arange(0.5, 4.0, 0.5)]
room4_z = [i for i in np.arange(1.5, 5.0, 0.5)]
hall_x = [i for i in np.arange(-3.5, 4.0, 0.5)]
hall_z = [i for i in np.arange(-0.5, 1.0, 0.5)]
res = collections.defaultdict(list)
gcc_width_half = 30
# the whole vector length is 61
files = os.listdir(input_dir)
for file in files:
# skip dir
name = str(file.title()).lower()
if os.path.isdir(file) or name[:6] != 'walker':
continue
file_names = name.split('_')
pos = file_names[1] + "_" + file_names[2] + "_" + file_names[3]
if res.get(pos) is not None:
continue
else:
min_len = 999999
fs = 0
label = [0] * 2
# fixme, add binary label based on in/out door, need indicate room type
# if (float(file_names[1]) in hall_x and float(file_names[3]) in hall_z) or (
# float(file_names[1]) == room1_pos[0] and float(file_names[3]) == room1_pos[2]) or (
# float(file_names[1]) == room2_pos[0] and float(file_names[3]) == room2_pos[2]) or (
# float(file_names[1]) == room3_pos[0] and float(file_names[3]) == room3_pos[2]) or (
# float(file_names[1]) == room4_pos[0] and float(file_names[3]) == room4_pos[2]):
# label = [1, 0]
# else:
# label = [0, 1]
label = [1, 0]
for i in range(1, 5):
mic = name[:len(name) - 5] + str(i) + ".wav"
wav = wave.open(os.path.join(input_dir, mic), 'rb')
n_frame = wav.getnframes()
fs = wav.getframerate()
data = np.frombuffer(wav.readframes(n_frame), dtype=np.short)
if len(data) % 2 != 0:
data = np.append(data, 0)
data.shape = -1, 2
data = data.T
data_avg = [(data[0][j] + data[1][j]) / 2 for j in range(len(data[0]))]
if len(data_avg) < min_len:
min_len = len(data_avg)
locals()['data%d' % i] = data_avg
gcc_vector = []
for i in range(1, 5):
locals()['data%d' % i] = locals()['data%d' % i][:min_len]
for i in range(1, 5):
for j in range(i + 1, 5):
tau, cc = gcc_phat(locals()['data%d' % i], locals()['data%d' % j], fs)
for k in range(min_len - gcc_width_half, min_len + gcc_width_half + 1):
gcc_vector.append(cc[k])
print("======")
print(pos)
print(label)
res[pos] = [gcc_vector, label]
with open(os.path.join(output_dir, output_file), 'w') as f:
for k in res.keys():
print(res[k], file=f)
def split_test_tain(dir, unionfile, testfire, trainfire, test_size, train_size):
temp = [i for i in range(1, test_size + train_size + 1)]
random.shuffle(temp)
index_test = temp[:test_size]
with open(os.path.join(dir, unionfile), 'r+') as f:
lines = f.readlines()
test_lines = []
train_lines = []
print(len(lines))
count = 0
for i in range(len(lines)):
index = i + 1
if index in index_test:
test_lines.append(lines[i])
else:
count += 1
train_lines.append(lines[i])
with open(os.path.join(dir, testfire), 'w+') as t:
t.writelines(test_lines)
with open(os.path.join(dir, trainfire), 'w+') as n:
n.writelines(train_lines)
print(count)
def cal_volume(waveData, frameSize, overLap):
wlen = len(waveData)
step = frameSize - overLap
frameNum = int(math.ceil(wlen * 1.0 / step))
volume = np.zeros((frameNum, 1))
for i in range(frameNum):
curFrame = waveData[np.arange(i * step, min(i * step + frameSize, wlen))]
curFrame = curFrame - np.median(curFrame) # zero-justified
volume[i] = np.sum(np.abs(curFrame))
return volume
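# (Added note) With frameSize=256 and overLap=128 the hop is 128 samples, so a
# 1-second clip at 16 kHz produces ceil(16000 / 128) = 125 volume frames.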
# fixme, generate strength features used for reward; store res[pos] = [gcc, label], can be look up
def generate_volume(input_dir, output_dir, output_file):
res = {}
files = os.listdir(input_dir)
done = 0
for file in files:
# skip dir
name = str(file.title()).lower()
if os.path.isdir(file) or name[:6] != 'walker':
continue
file_names = name.split('_')
pos = file_names[1] + "_" + file_names[2] + "_" + file_names[3]
# meet old walker pos
if res.get(pos) is not None:
continue
else:
# read 4 mics, compute volume
frameSize = 256
overLap = 128
min_len = 999999
for i in range(1, 5):
mic = name[:len(name) - 5] + str(i) + ".wav"
fw = wave.open(os.path.join(input_dir, mic), 'r')
params = fw.getparams()
nchannels, sampwidth, framerate, nframes = params[:4]
strData = fw.readframes(nframes)
waveData = np.frombuffer(strData, dtype=np.int16)
waveData = waveData * 1.0 / max(abs(waveData)) # normalization
fw.close()
if len(waveData) < min_len:
min_len = len(waveData)
locals()['data%d' % i] = waveData
vol = []
for i in range(1, 5):
locals()['data%d' % i] = locals()['data%d' % i][:min_len]
waveData = locals()['data%d' % i]
volumes = cal_volume(waveData, frameSize, overLap)
vol.append(np.average(volumes[300:3600]))
res[pos] = vol
done += 1
print(done)
with open(os.path.join(output_dir, output_file), 'w') as f:
for k in res.keys():
print(res[k], file=f)
# fixme, save variable res into disk (binary)
disk = open('env_hole_vol.pkl', 'wb')
pickle.dump(res, disk)
disk.close()
def generate_obs(input_dir, output):
res = collections.defaultdict(list)
gcc_width_half = 30
# the whole vector length is 61
files = os.listdir(input_dir)
for file in files:
# skip dir
name = str(file.title()).lower()
file_names = name.split('_')
pos = file_names[0]
# meet old walker pos
if res.get(pos) is not None:
continue
else:
# read 4 mics, compute features
min_len = 999999
fs = 0
for i in range(1, 5):
mic = name[:len(name) - 5] + str(i) + ".wav"
wav = wave.open(os.path.join(input_dir, mic), 'rb')
n_frame = wav.getnframes()
fs = wav.getframerate()
data = np.frombuffer(wav.readframes(n_frame), dtype=np.short)
if len(data) % 2 != 0:
data = np.append(data, 0)
data.shape = -1, 2
data = data.T
data_avg = [(data[0][j] + data[1][j]) / 2 for j in range(len(data[0]))]
if len(data_avg) < min_len:
min_len = len(data_avg)
locals()['data%d' % i] = data_avg
gcc_vector = []
for i in range(1, 5):
locals()['data%d' % i] = locals()['data%d' % i][:min_len]
for i in range(1, 5):
for j in range(i + 1, 5):
tau, cc = gcc_phat(locals()['data%d' % i], locals()['data%d' % j], fs)
for k in range(min_len - gcc_width_half, min_len + gcc_width_half + 1):
gcc_vector.append(cc[k])
res[pos] = gcc_vector
print(pos)
disk = open(output, 'wb')
pickle.dump(res, disk)
disk.close()
if __name__ == '__main__':
# fixme, change output file and gcc type here, rl env no need split
# wavedir = './wavdata/multiple/add/bin_src_1_1.6_2'
# gccdir = './gccdata/multiple/eight_classific/'
# gccdir = './gccdata/multiple/hole_eight'
# srpdir = './srpdata'
#
# split_test_tain(gccdir, unionfile='vector_hole', testfire='vector_test', trainfire='vector_train',
# test_size=4, train_size=3100)
# pickle dump res[pos] = [gcc, label] into env.pkl
# generate_gcc_binary(wavedir, gccdir, output_file='label_add')
# generate_gcc(wavedir, gccdir, output_file='label_add', average=True, vector=True, savepos=False)
# split_test_tain(gccdir, unionfile='vector_eight', testfire='vector_test', trainfire='vector_train',
# test_size=400, train_size=2033)
# generate_volume(wavedir, gccdir, output_file='volume')
# generate_obs('./wavdata/multiple/add/0_0', '0_0.pkl')
# a = './wavdata/8x3x8_src_-2_1.6_-2'
# b = './wavdata/8x3x8_src_-2_1.6_2'
# c = './wavdata/8x3x8_src_-3_1.6_0'
# d = './wavdata/8x3x8_src_0_1.6_-1.5'
# e = './wavdata/8x3x8_src_0_1.6_0'
# f = './wavdata/8x3x8_src_0_1.6_3'
# g = './wavdata/8x3x8_src_2_1.6_-2'
# h = './wavdata/8x3x8_src_2_1.6_2'
# i = './wavdata/8x3x8_src_3.5_1.6_-3.5'
#
# j = './wavdata/multiple/eight_classific/src_-2_1.6_-3'
# k = './wavdata/multiple/eight_classific/src_-2_1.6_3'
# l = './wavdata/multiple/eight_classific/src_0_1.6_0'
# m = './wavdata/multiple/eight_classific/src_2_1.6_-3'
# n = './wavdata/multiple/eight_classific/src_2_1.6_3'
#
# wavs = [a, b, c, d, e, f, g, h, i, j, k, l, m, n]
#
# srpdir = './srpdata'
#
# count = 0
# for filename in wavs:
# generate_srp(filename, srpdir, output_file='label_' + str(count), average=True, vector=True, savepos=False)
# count += 1
# print("========== %d" % count)
# audiodir = './audio'
# gccdir = './audio'
# generate_gcc_real(audiodir, gccdir, output_file='label_real', average=True, vector=True, savepos=False)
"""
Generate data for training
"""
# a = './wavdata/hole/src_-1_3'
# b = './wavdata/hole/src_1_4'
# c = './wavdata/hole/src_-2_0'
# d = './wavdata/hole/src_-2_-2'
# e = './wavdata/hole/src_-3_-4'
#
# wavs = [e]
#
# count = 4
# gccdir = './gccdata/multiple/hole_eight'
# for dirname in wavs:
# generate_gcc(dirname, gccdir, output_file='vector_' + str(count), average=True, vector=True, savepos=False)
# count += 1
# print("========== %d" % count)
"""
generate part for training and exp
"""
# voldir = './voldata/hole'
# wavdir3 = './wavdata/hole/src_-2_3_exp'
# wavdir4 = './wavdata/hole/src_2_3_exp'
# wavdir2 = './wavdata/hole/src_2_-3_exp'
# wavdir1 = './wavdata/hole/src_-2_-3_exp'
# wavdir5 = './wavdata/hole/src_-2_-4_rl'
#
# # todo, modify pickle file name - simu_r%d
# generate_gcc(wavdir2, gccdir, output_file='exp_' + '2', average=True, vector=True, savepos=True)
# generate_volume(wavdir5, voldir, output_file='rl_vol')
#
#
# """
# generate rl to pickle only
# """
# # todo, modify pickle file name, no use label file for training
# generate_gcc(wavdir5, gccdir, output_file='rl', average=True, vector=True, savepos=True)
# for i in range(11, 12):
# wavedir = './wav/rsc_back_wo_diff/src%d' % i
# gccdir = './gcc/rsc_back_wo_diff'
# generate_gcc_simu_rscback(wavedir, gccdir, 'src%d' % i)
wavedir = './wav/real_cyc4'
gccdir = './gcc/cyc4'
generate_gcc_bias(wavedir, gccdir, 'cyc4_bias')
# split_test_tain(gccdir, unionfile='cyc4', testfire='cyc4_test', trainfire='cyc4_train', test_size=30,
# train_size=169)
# split_test_tain(gccdir, unionfile='cyc4', testfire='cyc4_test', trainfire='cyc4_train', test_size=30,
# train_size=169)
```
#### File: sound_localization/main_ssl/ssl_turning.py
```python
import math
import time
def SSLturning(cd, angle):
time_sleep_value = 0.05
cd.speed = 0
cd.omega = 0
cd.radius = 0
# cd: an instance of class ControlandOdometryDriver, angle: angle to turn as in degree
# angle = 0, 45, 90, 135, 180, 225, 270, 315
if angle > 180:
rad = (360 - angle) / 180 * math.pi
else:
rad = -angle / 180 * math.pi
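# (Added comment) Example: angle=270 maps to rad = +pi/2 and angle=90 to -pi/2,
# so angles above 180 are turned the short way round.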
currentTHETA = cd.position[2] # read current THETA∈(-π,π]
expectedTHETA = currentTHETA + rad
if expectedTHETA > math.pi:
expectedTHETA -= 2 * math.pi
elif expectedTHETA <= -math.pi:
expectedTHETA += 2 * math.pi
# print('rad: ', rad, '; Current theta: ', currentTHETA, '; Expected theta: ', expectedTHETA)
if rad != 0:
if rad > 0:
cd.omega = math.pi / 6
else:
cd.omega = - math.pi / 6
cd.radius = 0
cd.speed = 0
time.sleep(time_sleep_value)
# print('start moving...')
while 1:
if (cd.position[2] * expectedTHETA) > 0:
break
if (cd.position[2] * expectedTHETA) >= 0 and rad > 0:
while 1:
if abs(cd.position[2] - expectedTHETA) <= 0.2:
cd.omega = 0
time.sleep(time_sleep_value)
# print('reached')
break
elif (cd.position[2] * expectedTHETA) >= 0 and rad < 0:
while 1:
if abs(expectedTHETA - cd.position[2]) <= 0.2:
cd.omega = 0
time.sleep(time_sleep_value)
# print('reached')
break
else:
print('false')
pass
else:
pass
cd.omega = 0
time.sleep(0.1)
# print('final position: ', cd.position[2])
```
#### File: JoeyYoung/sound_localization/record.py
```python
import pyaudio
import wave
import json
import signal
import sys
import os
import time
# from src.data.SSLDataPreprocessor import wavToGCCOffsets
from tensorflow import keras
import numpy as np
model_path = "../../models/ssl_model_CB313_10_10_3.h5"
RECORD_RATE = 16000
RECORD_CHANNELS_DEFAULT = 1
RECORD_CHANNELS = 4
RECORD_WIDTH = 2
CHUNK = 1024
RECORD_SECONDS = 1
OUTPUT_ROOT = "./"
RECORD_COORDINATES = "_test"
preprocess_folder = os.path.abspath("../../data/processed/CB412")
filename = "output" + RECORD_COORDINATES + ".wav"
WAVE_OUTPUT_FILENAME = os.path.join(OUTPUT_ROOT, filename)
WAVE_OUTPUT_FILENAME1 = OUTPUT_ROOT + "/output1.wav"
WAVE_OUTPUT_FILENAME2 = OUTPUT_ROOT + "/output2.wav"
WAVE_OUTPUT_FILENAME3 = OUTPUT_ROOT + "/output3.wav"
WAVE_OUTPUT_FILENAME4 = OUTPUT_ROOT + "/output4.wav"
# RECORD_DEVICE_NAME = "seeed-2mic-voicecard"
RECORD_DEVICE_NAME = "USB Camera-B4.09.24.1"
p = pyaudio.PyAudio()
stream = p.open(
rate=RECORD_RATE,
format=p.get_format_from_width(RECORD_WIDTH),
channels=RECORD_CHANNELS_DEFAULT,
input=True,
start=False)
wave_file = wave.open(WAVE_OUTPUT_FILENAME, "wb")
# wave_file1 = wave.open(WAVE_OUTPUT_FILENAME1, "wb")
# wave_file2 = wave.open(WAVE_OUTPUT_FILENAME2, "wb")
# wave_file3 = wave.open(WAVE_OUTPUT_FILENAME3, "wb")
# wave_file4 = wave.open(WAVE_OUTPUT_FILENAME4, "wb")
buffer1 = list(range(CHUNK))
buffer2 = list(range(CHUNK))
buffer3 = list(range(CHUNK))
buffer4 = list(range(CHUNK))
def open_files():
wave_file.setnchannels(RECORD_CHANNELS)
wave_file.setsampwidth(2)
wave_file.setframerate(RECORD_RATE)
# wave_file1.setnchannels(RECORD_CHANNELS)
# wave_file1.setsampwidth(2)
# wave_file1.setframerate(RECORD_RATE)
#
# wave_file2.setnchannels(RECORD_CHANNELS)
# wave_file2.setsampwidth(2)
# wave_file2.setframerate(RECORD_RATE)
#
# wave_file3.setnchannels(RECORD_CHANNELS)
# wave_file3.setsampwidth(2)
# wave_file3.setframerate(RECORD_RATE)
#
# wave_file4.setnchannels(RECORD_CHANNELS)
# wave_file4.setsampwidth(2)
# wave_file4.setframerate(RECORD_RATE)
def close_files():
wave_file.close()
# wave_file1.close()
# wave_file2.close()
# wave_file3.close()
# wave_file4.close()
def record():
open_files()
time.sleep(5)
stream.start_stream()
print("* recording")
for i in range(0, int(RECORD_RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
# print("length of data: %d" %(len(data)))
for j in range(CHUNK):
# assert((data[j*8] | (data[j*8 + 1] << 8)) == data[j*8]+data[j*8+1]*256)
# print("%x" %(data[j*8] | (data[j*8 + 1] << 8)),
# "\t%x %x" %(data[j*8 + 2], data[j*8 + 3]),
# "\t%x %x" % (data[j*8 + 4], data[j*8 + 5]),
# "\t%x %x" % (data[j*8 + 6], data[j*8 + 7])
# )
# bytes_buffer1 = bytes_buffer1 + data[j*8 + 0]
# bytes_buffer1[j*2 + 1] = data[j*8 + 1]
# bytes_buffer1[j*2 + 0] = data[j*8 + 2]
# bytes_buffer1[j*2 + 1] = data[j*8 + 3]
# bytes_buffer1[j*2 + 0] = data[j*8 + 4]
# bytes_buffer1[j*2 + 1] = data[j*8 + 5]
# bytes_buffer1[j*2 + 0] = data[j*8 + 6]
# bytes_buffer1[j*2 + 1] = data[j*8 + 7]
buffer1[j] = data[j * 8 + 0] | (data[j * 8 + 1] << 8)
buffer2[j] = data[j * 8 + 2] | (data[j * 8 + 3] << 8)
buffer3[j] = data[j * 8 + 4] | (data[j * 8 + 5] << 8)
buffer4[j] = data[j * 8 + 6] | (data[j * 8 + 7] << 8)
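# (Added comment) Each frame is 8 bytes: one little-endian 16-bit sample per
# channel. The shifts above de-interleave the four channels; a vectorised
# equivalent would be np.frombuffer(data, dtype='<i2').reshape(-1, 4).T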
if j == 0 and i == 0:
print("%x\t%x\t%x\t%x" % (buffer1[j], buffer2[j], buffer3[j], buffer4[j]))
wave_file.writeframes(data)
# wave_file1.writeframes(bytes_buffer1)
# wave_file2.writeframes(bytes_buffer2)
# wave_file3.writeframes(bytes_buffer3)
# wave_file4.writeframes(bytes_buffer4)
print("* done recording")
stream.stop_stream()
close_files()
# audio_data should be raw_data
return ("record end")
def sigint_handler(signum, frame):
stream.stop_stream()
stream.close()
p.terminate()
close_files()
print('catched interrupt signal!')
sys.exit(0)
if __name__ == "__main__":
# Register ctrl-c interruption
signal.signal(signal.SIGINT, sigint_handler)
print("Number of devices: ", p.get_device_count())
device_index = -1
for index in range(0, p.get_device_count()):
info = p.get_device_info_by_index(index)
device_name = info.get("name")
print("device_name: ", device_name)
if device_name.find(RECORD_DEVICE_NAME) != -1:
device_index = index
break
if device_index != -1:
print("find the device")
stream.close()
print(p.get_device_info_by_index(device_index))
stream = p.open(
rate=RECORD_RATE,
format=p.get_format_from_width(RECORD_WIDTH),
channels=RECORD_CHANNELS,
input=True,
input_device_index=device_index,
start=False)
else:
print("don't find the device")
record()
# full_input_filename = os.path.abspath(WAVE_OUTPUT_FILENAME)
# output_filename = filename.split(".wav")[0] + ".dat"
# full_output_filename = os.path.join(preprocess_folder, output_filename)
#
# offsets = wavToGCCOffsets(full_input_filename)
#
# # Save GCC offsets to files
# # Each position pair has one file
# with open(full_output_filename, "w") as f:
# s = ""
# for e in offsets:
# s = s + str(e) + " "
# s = s[0:-1] + "\n"
# f.write(s)
#
# # Read .dat
# with open(full_output_filename, "r") as f:
# # Remove '\n'
# content = f.readline()[:-1]
#
# # Read x
# x = [int(n) for n in content.split(" ")]
#
# test_inputs = np.array([[x]])
#
# # Scale x
# test_inputs_scaled = test_inputs / test_inputs.max()
#
# # Read model
# model = keras.models.load_model(model_path)
# model.summary()
#
# # Predict
# predictions = model.predict(test_inputs_scaled)
# for i in range(len(test_inputs_scaled)):
# print("input = ", test_inputs[i])
# # print(predictions[0])
# print("prediction = ", np.argmax(predictions[i]))
# print("")
```
#### File: JoeyYoung/sound_localization/serious_test.py
```python
import serial
import serial.tools.list_ports
def print_serial(port):
print("---------------[ %s ]---------------" % port.name)
print("Path: %s" % port.device)
print("Descript: %s" % port.description)
print("HWID: %s" % port.hwid)
if port.manufacturer is not None:
print("Manufacturer: %s" % port.manufacturer)
if port.product is not None:
print("Product: %s" % port.product)
if port.interface is not None:
print("Interface: %s" % port.interface)
print()
def detect_serials(description="target device", vid=0x10c4, pid=0xea60):
ports = serial.tools.list_ports.comports()
for port in ports:
print_serial(port)
if port.description.__contains__(description):
port_path = port.device
return port_path
else:
print("Cannot find the target device: %s" % description)
return None
if __name__ == '__main__':
detect_serials()
```
#### File: sound_localization/wakeup/kws_detector.py
```python
import pyaudio
import ctypes as ct
import numpy as np
import wave
import math
import matplotlib.pyplot as plt
import pyaudio
import os
import librosa
import librosa.display
import threading
import time
from numpy.linalg import norm
from kws_do_inference import KwsNNet
class KwsDetector:
def __init__(self, chunk, record_device_name, record_width, channels, rate, format, wav_path):
self.CHUNK = 1024
self.RECORD_DEVICE_NAME = "MacBook Pro 麦克风"
self.RECORD_WIDTH = 2
self.CHANNELS = 1
self.RATE = 16000
self.FORMAT = pyaudio.paInt16
self.WAV_PATH = "/Users/xyzhao/Desktop/sound_localization/wakeup/stream_tmp"
self.device_index = self.setup_device_index()
now = int(round(time.time()*1000))
self.RANDOM_PREFIX = time.strftime('%m-%d_%H:%M',time.localtime(now/1000))
"""
init NN model, and load graph
"""
# self.KwsNet = KwsNNet(os.path.join(self.WAV_PATH, self.RANDOM_PREFIX + "win.wav"), "Pretrained_models/DNN/DNN_M.pb", "Pretrained_models/labels.txt")
"""
Window settings
"""
# can be large enough
self.RECORD_SECONDS = 500
# need to contain the word
self.WINDOW_SECONDS = 1
# number of frames in a stream
self.frame_num_total = int(self.RATE / self.CHUNK * self.RECORD_SECONDS)
# number of frames in a window
self.frame_num_win = int(self.RATE / self.CHUNK * self.WINDOW_SECONDS)
# number of frames for one stride
self.frame_num_stride = 3 # 5
# after read how many windows flush the buffer, large enough since no delay
self.win_num_flush = 100 # 10
# frames buffer from stream, need flush after sometime
self.frames_buffer = []
# to avoid buffer conflict when do flush
self.buffer_lock = threading.Lock()
# trigger for flush, init start frame
self.flush_event = threading.Event()
self.end_event = threading.Event()
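# (Added note) With RATE=16000 and CHUNK=1024, frame_num_win = int(16000/1024*1) = 15
# frames (~0.96 s of audio) and frame_num_stride = 3 frames (~0.19 s between windows).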
def setup_device_index(self):
device_index = -1
p = pyaudio.PyAudio()
"""
Recognize Mic device, before loop
"""
# scan to get usb device
for index in range(0, p.get_device_count()):
info = p.get_device_info_by_index(index)
device_name = info.get("name")
print("device_name: ", device_name)
# find mic usb device
if device_name.find(self.RECORD_DEVICE_NAME) != -1:
device_index = index
# break
if device_index != -1:
print("find the device")
print(p.get_device_info_by_index(device_index))
else:
print("don't find the device")
return device_index
def store_frames_to_file(self, frames, name_id):
# set to only one temp wav file in real
wave_output_filename = self.RANDOM_PREFIX + "win.wav" # % (name_id)
wf = wave.open(os.path.join(self.WAV_PATH, wave_output_filename), 'wb')
wf.setnchannels(self.CHANNELS)
wf.setsampwidth(self.RECORD_WIDTH)
wf.setframerate(self.RATE)
wf.writeframes(b''.join(frames))
wf.close()
def read_from_stream(self):
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(self.RECORD_WIDTH),
channels=self.CHANNELS,
rate=self.RATE,
input=True,
input_device_index=self.device_index)
for i in range(0, self.frame_num_total):
# if self.end_event.is_set() is True:
# break
frame = stream.read(self.CHUNK)
self.frames_buffer.append(frame)
# if i % self.frame_num_win == 0 and i != 0:
# print("read in a window size")
# flush the buffer
# after a large time duration to avoid high memory usage
if i % (self.frame_num_win * self.win_num_flush) == 0 and i != 0:
print("===== p1: set the flush")
self.flush_event.set()
self.buffer_lock.acquire()
self.frames_buffer = []
self.buffer_lock.release()
stream.stop_stream()
stream.close()
p.terminate()
def process_from_buffer(self):
# KwsNet = KwsNNet(os.path.join(self.WAV_PATH, self.RANDOM_PREFIX + "win.wav"), "Pretrained_models/DNN/DNN_M.pb", "Pretrained_models/labels.txt")
KwsNet = KwsNNet(os.path.join(self.WAV_PATH, self.RANDOM_PREFIX + "win.wav"), "follow.pb", "tmp/speech_commands_train/follow_labels.txt")
# init setting
window_count = 0
start_frame = 0
continous_wakeups = 0
while True:
frames = []
if self.flush_event.is_set() is True:
print("===== p2: detect the flush")
start_frame = 0
self.flush_event.clear()
time.sleep(self.WINDOW_SECONDS)
if start_frame >= self.frame_num_total:
print("ERROR: start frame out of buffer. ")
exit()
self.buffer_lock.acquire()
for i in range(0, self.frame_num_win):
# detect index out of range, wait for p1 to fill the buffer
while (start_frame + i) >= len(self.frames_buffer):
continue
frames.append(self.frames_buffer[start_frame + i])
self.buffer_lock.release()
self.store_frames_to_file(frames, window_count)
# call DNN part to do inference for this file
this_frame_status = KwsNet.do_inference()
if this_frame_status == 1:
continous_wakeups += 1
print(continous_wakeups)
elif this_frame_status == 0:
continous_wakeups -= 0.3
if continous_wakeups < 0:
continous_wakeups = 0
# print(continous_wakeups)
if continous_wakeups >= 3:
print(" ====== wake up")
# self.end_event.set()
# break
# time.sleep(0.05)
window_count += 1
start_frame += self.frame_num_stride
# print("process a window")
def slide_win_loop(self):
p1 = threading.Thread(target=self.read_from_stream, args=())
p2 = threading.Thread(target=self.process_from_buffer, args=())
p1.start()
time.sleep(1)
time.sleep(self.WINDOW_SECONDS)
p2.start()
p1.join()
p2.join()
if __name__ == "__main__":
kws = KwsDetector(1, 2, 3, 4, 5, 6, 7)
kws.slide_win_loop()
``` |
{
"source": "JoeyyScott/Gamer-Supplies",
"score": 2
} |
#### File: Gamer-Supplies/checkout/signals.py
```python
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from .models import CrateItems
@receiver(post_save, sender=CrateItems)
def update_on_save(sender, instance, created, **kwargs):
# Update order total when items are added to or updated in the crate
instance.order.update_total()
@receiver(post_delete, sender=CrateItems)
def update_on_delete(sender, instance, **kwargs):
# Update order total when items are deleted from the crate
instance.order.update_total()
```
#### File: Gamer-Supplies/crate/contexts.py
```python
from django.shortcuts import get_object_or_404
from decimal import Decimal
from supplies.models import Supply
from .models import Coupon
# Populates the Crate contents
def crate_contents(request):
coupon_id = request.session.get('coupon_id', int())
crate_items = []
crate_total = 0
total = 0
savings = 0
coupon_amount = 0
supply_count = 0
crate = request.session.get('crate', {})
# Checks coupon code entered against Coupon model
try:
coupon = Coupon.objects.get(id=coupon_id)
except Coupon.DoesNotExist:
coupon = None
# Calculates the crate total and returns all relevant info in the context
for item_id, quantity in crate.items():
supply = get_object_or_404(Supply, pk=item_id)
crate_total += quantity * supply.price
# Applies discount if Coupon is found
if coupon is not None:
coupon_amount = coupon.amount
savings = crate_total*(coupon_amount/Decimal('100'))
total = crate_total - savings
else:
total = crate_total
supply_count += quantity
crate_items.append({
'item_id': item_id,
'quantity': quantity,
'supply': supply
})
context = {
'crate_items': crate_items,
'supply_count': supply_count,
'total': total,
'coupon': coupon,
'coupon_amount': coupon_amount,
'savings': savings,
}
return context
```
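As a quick illustration of the coupon arithmetic in `crate_contents` above (a standalone sketch with made-up values, not part of the project), a 10% coupon on a 25.00 crate total yields 2.50 in savings:
```python
from decimal import Decimal

crate_total = Decimal('25.00')
coupon_amount = Decimal('10')  # percentage discount, as used for coupon.amount above
savings = crate_total * (coupon_amount / Decimal('100'))
total = crate_total - savings
print(savings, total)  # 2.500 22.500
```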
#### File: Gamer-Supplies/home/forms.py
```python
from .models import Review
from django import forms
from django.utils.safestring import mark_safe
# Review form
class FormReview(forms.ModelForm):
# Credit for adapted choices on TypedChoiceField
# See README.md for more details
rating = forms.TypedChoiceField(
choices=(
(1, mark_safe('<span class="text-center altFont star">\
<i class="fa fa-star highlight"></i>\
<i class="fa fa-star"></i>\
<i class="fa fa-star"></i>\
<i class="fa fa-star"></i>\
<i class="fa fa-star"></i>\
Extremely Unsatisfied </span>')),
(2, mark_safe('<span class="text-center altFont star">\
<i class="fa fa-star highlight"></i>\
<i class="fa fa-star highlight"></i>\
<i class="fa fa-star"></i>\
<i class="fa fa-star"></i>\
<i class="fa fa-star"></i>\
Unsatisfied </span>')),
(3, mark_safe('<span class="text-center altFont star">\
<i class="fa fa-star highlight"></i>\
<i class="fa fa-star highlight"></i>\
<i class="fa fa-star highlight"></i>\
<i class="fa fa-star"></i>\
<i class="fa fa-star"></i>\
Neutral </span>')),
(4, mark_safe('<span class="text-center altFont star">\
<i class="fa fa-star highlight"></i>\
<i class="fa fa-star highlight"></i>\
<i class="fa fa-star highlight"></i>\
<i class="fa fa-star highlight"></i>\
<i class="fa fa-star"></i>\
Satisfied </span>')),
(5, mark_safe('<span class="text-center altFont star">\
<i class="fa fa-star highlight"></i>\
<i class="fa fa-star highlight"></i>\
<i class="fa fa-star highlight"></i>\
<i class="fa fa-star highlight"></i>\
<i class="fa fa-star highlight"></i>\
Extremely Satisfied </span>'))
),
widget=forms.RadioSelect,
)
class Meta:
model = Review
fields = ['review', 'rating']
exclude = ['added_by']
widgets = {
'review': forms.Textarea(attrs={
'placeholder': 'Tell us what you think...', 'class': 'altFont'})
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Set autofocus
self.fields['review'].widget.attrs['autofocus'] = True
# Apply form validation
for field in self.fields:
if field == 'review':
self.fields[field].widget.attrs[
"minlength"
] = '10'
# Remove labels auto generated by form
self.fields['review'].label = False
self.fields['rating'].label = False
```
#### File: Gamer-Supplies/profiles/views.py
```python
from django.shortcuts import render, get_object_or_404
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.utils.safestring import mark_safe
from .models import UserProfile
from .forms import FormUserProfile
from checkout.models import Order
@login_required
def profile(request):
# View to display a user's profile
profile = get_object_or_404(UserProfile, user=request.user)
# Check for form submission and display appropriate message
if request.method == 'POST':
form = FormUserProfile(request.POST, instance=profile)
if form.is_valid():
form.save()
messages.success(request, 'Profile successfully updated.')
else:
messages.error(request, 'Unable to update your profile, please \
check that your details are correct.')
else:
form = FormUserProfile(instance=profile)
orders = profile.orders.all()
template = 'profiles/profile.html'
context = {
'form': form,
'orders': orders,
}
return render(request, template, context)
@login_required
def order_history(request, order_number):
# View to display an order history
order = get_object_or_404(Order, order_number=order_number)
messages.info(request, mark_safe(
f'This is a past confirmation for order number beginning: \
<span class="highlight">{ order_number[:16] }.. </span><br> A \
confirmation email was sent to: <span class="highlight">\
{order.email}</span><br> Order date: <span class="highlight">\
{order.date}</span>.'))
template = 'checkout/checkout_success.html'
context = {
'order': order,
'from_profile': True,
}
return render(request, template, context)
``` |
{
"source": "JoeyZhaoJy/NSFC_conclusion_downloader",
"score": 3
} |
#### File: JoeyZhaoJy/NSFC_conclusion_downloader/nsfc_downloader.py
```python
import os
import re
import json
import argparse
import errno
import img2pdf
import requests
__VERSION__ = 'v0.2.1'
__AUTHOR__ = 'Rhilip'
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.66'
def arg_parser():
parser = argparse.ArgumentParser(
description='A tool to Download PDF format conclusion from http://output.nsfc.gov.cn/'
)
parser.add_argument('--version', '-v', action='version', version=__VERSION__)
parser.add_argument('--ratify', '-r', help='The conclusionProject link of the project you want to download', required=True)
parser.add_argument('--tmp_path', '-t', default='./tmp', help='The path you want to save tmp file')
parser.add_argument('--out_path', '-o', default='./output', help='The path you want to save output PDF file')
parser.add_argument('--no-debug', dest='debug', action='store_false', help='Disable The debug mode')
parser.set_defaults(debug=True)
return parser.parse_args()
def mkdir_p(path, mode=0o777):
"""
    Create the directory (and its parents) if it does not already exist
:param path:
:param mode:
:return:
"""
try:
path = os.path.abspath(path)
os.makedirs(path, mode)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def open_filepath(path):
try:
os.startfile(path)
except Exception:
pass
def clean_filename(string: str) -> str:
"""
    Strip characters that are illegal in filenames so the file can be saved safely
:param string:
:return:
"""
string = string.replace(':', '_').replace('/', '_').replace('\x00', '_')
string = re.sub('[\n\\\*><?\"|\t]', '', string)
return string.strip()
class NsfcDownloader:
i = 1
download_finish = False
download_stats = {}
debug = True
ratify_info = {}
def __init__(self, out_path, tmp_path):
self.out_path = out_path
self.tmp_path = tmp_path
        # Make sure the output and tmp directories exist
mkdir_p(out_path)
mkdir_p(tmp_path)
def clear_state(self):
self.i = 1
self.download_finish = False
self.download_stats = {}
@staticmethod
def get_hashed_ratify_from_uri(url) -> str:
        # If the input is already a 32-character hash, return it directly and let the later info lookup validate it
if len(url) == 32:
return url
pat = re.match(r'(?:https?://)?output\.nsfc\.gov\.cn/conclusionProject/([0-9a-f]{32})', url)
if pat:
return pat.group(1)
return ''
def get_ratify_info_from_nsfc(self, ratify) -> dict:
"""
        Fetch project information from the official NSFC site
        :param ratify: project id (32-character hash)
:return:
"""
r = requests.get('http://output.nsfc.gov.cn/baseQuery/data/conclusionProjectInfo/{}'.format(ratify))
try:
r.raise_for_status()
rj = r.json() # project_info
rj['success'] = True
except Exception as e:
rj = {'success': False, 'msg': str(e)}
            # Re-raise the exception in debug mode
if self.debug:
raise e
return rj
def get_ratify_info(self, ratify) -> dict:
        # Check the in-memory cache on the object first
if ratify not in self.ratify_info:
            # Then check the local tmp directory for a cached copy
project_info_file = os.path.join(self.tmp_path, '{}.json'.format(ratify))
if os.path.exists(project_info_file):
rj = json.load(open(project_info_file, 'r', encoding='utf-8'))
else:
rj = self.get_ratify_info_from_nsfc(ratify)
            # Store the result in the object cache
self.ratify_info[ratify] = rj
return self.ratify_info[ratify]
def download_loop(self, ratify) -> tuple:
"""
        Core download loop: fetch the report images page by page
:param ratify:
:return:
"""
img_files_list = []
img_bytes_list = []
self.i = 1
should_loop = True
while should_loop:
tmp_file = os.path.join(self.tmp_path, '{}_{}.png'.format(ratify, self.i))
if os.path.exists(tmp_file):
content = open(tmp_file, 'rb').read()
else:
                # Request the page metadata from the API
r = requests.post('http://output.nsfc.gov.cn/baseQuery/data/completeProjectReport', data={
'id': ratify,
'index': self.i
}, headers={
'User-Agent': USER_AGENT,
})
rj = r.json()
req_url = "http://output.nsfc.gov.cn{}".format(rj['data']['url'])
print('正在请求第{}页 {}'.format(self.i, req_url))
r = requests.get(req_url, timeout=10, headers={
'User-Agent': USER_AGENT,
})
content = r.content
if self.debug:
with open(tmp_file, 'wb') as tmp_f:
tmp_f.write(r.content)
should_loop = rj['data'].get('hasnext', False)
img_files_list.append(tmp_file)
img_bytes_list.append(content)
self.i += 1
self.download_finish = True
return img_files_list, img_bytes_list
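    # Orchestrates a single download: resolve the project id from the input,
    # fetch the project metadata, page through the report images and assemble
    # them into one PDF in the output directory.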
def download(self, url):
status = {'success': False, 'msg': ''}
        # Normalise the ratify parameter extracted from the input URL
ratify = self.get_hashed_ratify_from_uri(url)
if ratify:
print('开始获取项目信息,项目编号: {}'.format(ratify))
ratify_info = self.get_ratify_info(ratify)
if ratify_info.get('code') != 200 or 'data' not in ratify_info:
status['msg'] = '项目可能不存在,请重新检查项目编号是否正确'
else:
project_name = ratify_info['data'].get('projectName', '')
ratify_no = ratify_info['data'].get('ratifyNo', '')
status['path'] = os.path.join(self.out_path,
clean_filename('{} {}.pdf'.format(ratify_no, project_name)))
if os.path.exists(status['path']):
status['success'] = True
status['msg'] = 'PDF已存在,请打开 `{}`。'.format(status['path'])
else:
try:
print('开始下载 {}({}) {}'.format(ratify_no, ratify, project_name))
img_files_list, img_bytes_list = self.download_loop(ratify)
print('下载完成 {}({}) {}'.format(ratify_no, ratify, project_name))
if len(img_bytes_list) > 0:
print('正在组合PDF {}'.format(status['path']))
pdf = img2pdf.convert(img_bytes_list)
with open(status['path'], "wb") as file_:
file_.write(pdf)
status['success'] = True
if self.debug:
print('移除临时文件')
for f in img_files_list:
os.remove(f)
else:
status['msg'] = '下载过程出现问题,未获得有效图片。'
except Exception as e:
status['msg'] = '内部错误: {}'.format(e)
else:
status['msg'] = '输入链接或项目编号格式错误'
if status['msg']:
print(status['msg'])
self.download_stats = status
return self.download_stats
if __name__ == '__main__':
args = arg_parser()
downloader = NsfcDownloader(args.out_path, args.tmp_path)
downloader.download(args.ratify)
``` |
{
"source": "JoeyZhaoJy/TgDLF",
"score": 2
} |
#### File: JoeyZhaoJy/TgDLF/arima.py
```python
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.arima_model import ARMA
from statsmodels.graphics.api import qqplot
ave_window = 1
use_diff = 0
use_ratio = 0 # dimensionless ratio
data_origin = []
data_diff = []
load_mean = []
load_std = []
data_list = ['CY','HD','FT','SJS','PG','YZ','CP',
             'MTG','FS','DX','HR','MY','SY','YQ'] # net is missing; PG is missing — PG weather data only starts from June 2010 and has too many gaps
for i in range (13):
if use_ratio == True:
name = 'E:/CYQ/LSTM-ENN/Grid_ratio_correctdata_correctweather/data/data_day_'+ data_list[i] +'.csv'
else:
name = 'E:/CYQ/LSTM-ENN/Grid_ratio_correctdata_correctweather/data/real_data_day_'+ data_list[i] +'.csv'
#name = 'E:/Research CYT/grid/enlstm_code/ratio/data/data_day_'+ data_list[i] +'.csv'
script = 'df = pd.read_csv(\'{0}\')'.format(name)
script2 = 'data_origin.append(df.iloc[:,1])'
exec (script)
exec (script2)
load_mean.append(data_origin[i].mean())
load_std.append(data_origin[i].std())
new_load = (data_origin[i]-load_mean[i])/load_std[i]
temp1 = np.array(new_load).reshape(-1,ave_window)
new_load = temp1.mean(1)
data_origin[i] = pd.DataFrame(new_load)
if use_diff == True:
data_diff.append(data_origin[i].diff(int(24/ave_window))[int(24/ave_window):])
params_list = []
for i in range (13):
if use_diff == True:
script3 = 'arima_%d = ARIMA(data_diff[%d], order=(20, 0, 2)).fit()' % (i, i)
else:
script3 = 'arima_%d = ARIMA(data_origin[%d], order=(20, 0, 2)).fit()' % (i, i)
script4 = 'params_list.append(arima_%d.params)' % (i)
exec (script3)
exec (script4)
print(i)
from statsmodels.tsa.arima_model import _arma_predict_out_of_sample
def forecast_transfer (params, step, model_name, endog):
forecast_transfer = _arma_predict_out_of_sample(params, step,
model_name.resid, model_name.k_ar,
model_name.k_ma, model_name.k_trend,
model_name.k_exog, endog,
exog=None, method=model_name.model.method)
pred_test = model_name.forecast(steps=int(24/ave_window))
return forecast_transfer
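# Note: forecast_transfer builds out-of-sample forecasts with statsmodels'
# _arma_predict_out_of_sample while taking the coefficient vector `params`
# from outside the fitted model, which is what lets parameters averaged over
# the training districts be "transferred" onto another district's model.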
def forecast_transfer_multiple_output (params, step, start, data, ID):
N_sample = (len(data)-start)/step
# print('fitting model...')
# arima_temp = ARIMA(data, order=(4, 0, 4)).fit() # create the model with input data
# print(np.shape(data))
pred = None
for i in range (int(N_sample)):
data_temp = data[0:(start+i*step)]
endog = arima_temp.model.endog[0:(start+i*step)]
pred_temp = forecast_transfer (params, step, arima_temp, endog)
if pred is None:
pred = pred_temp
else:
pred = np.hstack((pred, pred_temp))
# if i%300 == 0:
# print('Finish the sample:', i)
return pred
print(i)
#####################################################################################################
a = [[5, 4, 1], [6, 8, 3], [12, 9, 7], [13, 2, 10]]
b = [[6, 8, 3, 12, 9, 7, 13, 2, 10],
[5, 4, 1, 12, 9, 7, 13, 2, 10],
[5, 4, 1, 6, 8, 3, 13, 2, 10],
[5, 4, 1, 6, 8, 3, 12, 9, 7]]
for test_set_ID in range(4):
test_ID = a[test_set_ID]
train_ID = b[test_set_ID]
script5 = 'arima_train_params = (params_list[%d] + params_list[%d] + params_list[%d] + \
params_list[%d] + params_list[%d] + params_list[%d] + \
params_list[%d] + params_list[%d] + params_list[%d])/9'\
%(train_ID[0]-1, train_ID[1]-1, train_ID[2]-1, train_ID[3]-1, train_ID[4]-1, train_ID[5]-1,\
train_ID[6]-1, train_ID[7]-1, train_ID[8]-1)
exec (script5)
# print('arima_train_params:',arima_train_params)
print(test_ID)
# for i in range(13):
# print(params_list[i])
pred_list = []
for j in range (len(test_ID)):
print('Predicting test_ID:', test_ID[j])
# arima_train_params
start = np.max((4,int(24/ave_window)))
script_temp = 'arima_temp = arima_%d' % ((test_ID[j]-1))
exec (script_temp)
print(arima_temp)
if use_diff == True:
pred = forecast_transfer_multiple_output (arima_train_params, int(24/ave_window), start, (data_diff[test_ID[j]-1]), (test_ID[j]-1))
else:
pred = forecast_transfer_multiple_output (arima_train_params, int(24/ave_window), start, (data_origin[test_ID[j]-1]), (test_ID[j]-1))
pred_list.append(pred)
for j in range (len(test_ID)):
pred_real = pred_list[j]
if use_diff == True:
data_real = np.array(data_diff[test_ID[j]-1][-len(pred_real):])
plt.figure(figsize=(100, 20))
# plt.plot(np.cumsum(np.array(pred_real)), label='prediction', color='red', alpha=0.4)
# plt.plot(np.cumsum(np.array(data_real)), label='target', color='black', alpha=0.4)
else:
data_real = np.array(data_origin[test_ID[j]-1][-len(pred_real):])
print('ID:',test_ID[j])
print(np.shape(pred_real))
print(np.shape(data_real))
plt.figure(figsize=(100, 20))
# plt.plot(pred_real, label='prediction', color='red', alpha=0.4)
# plt.plot(data_real, label='target', color='black', alpha=0.4)
error = []
for i in range(len(data_real)):
error.append(data_real[i] - pred_real[i])
squaredError = []
absError = []
for val in error:
            squaredError.append(val * val)  # squared difference between target and prediction
            absError.append(abs(val))  # absolute error
        print("MSE = ", sum(squaredError) / len(squaredError))  # mean squared error (MSE)
for i in range (13):
script_save = '''arima_%d.save('arima_20_0_2_id%d.pkl')''' % (i, i)
exec (script_save)
```
#### File: TgDLF/EnLSTM/data.py
```python
import numpy as np
from configuration import config
import torch.utils.data
import pandas as pd
from sklearn import preprocessing
import copy
# read file and change the head name
def read_file(path):
df = pd.read_csv(path)
df.columns = config.head
return df
# make dataset using moving window with the step of -> window_step
def make_dataset(data, window_size):
i = 0
while i + window_size - 1 < len(data):
yield data[i:i+window_size]
i += config.window_step
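# For example, with window_size=3 and window_step=1 (hypothetical values), a
# length-5 series yields the overlapping windows data[0:3], data[1:4], data[2:5].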
def normalize(x):
scaler = preprocessing.StandardScaler().fit(x)
return scaler.transform(x), scaler
class TextDataset(torch.utils.data.Dataset):
scaler = None
dataset_scaler = {}
test_data = {}
def __init__(self):
self.df_list = []
for i in range(config.well_num):
# the test and train data will be the unnormalized data
filename = config.data_prefix.format(i+1)
df = read_file(filename)
df['DEPT'] = np.arange(1, len(df)+1)
self.df_list.append(df)
if i+1 in config.test_ID:
self.test_data[i+1] = df[config.columns_target]
self.dataset = pd.concat(self.df_list, axis=0, ignore_index=True)
for feature in config.columns_target:
self.dataset_scaler[feature] = preprocessing.StandardScaler().fit(self.dataset[feature].values.reshape(-1, 1))
self.input_data, self.target_data = self.train_dataset()
self.line_num = len(self.input_data)
def reset_train_dataset(self):
self.input_data, self.target_data = self.train_dataset()
self.line_num = len(self.input_data)
def reset_test_dataset(self):
for items in config.test_ID:
self.df_list[items-1][config.columns_target] = self.test_data[items][config.columns_target].values
def train_dataset(self):
input_data = []
target_data = []
selected = config.columns[:config.input_dim+config.output_dim]
for items in config.train_ID:
data = copy.copy(self.df_list[items-1])
input_ = np.array(list(make_dataset(
normalize(data[selected[:config.input_dim]].values)[0], config.train_len)))
target_ = np.array(list(make_dataset(
self.dataset_scaler[selected[-1]].transform(data[selected[-1]].values.reshape(-1, 1)), config.train_len)))
input_data.append(input_)
target_data.append(target_)
return np.concatenate(input_data), np.concatenate(target_data)
def test_dataset(self, index):
selected = config.columns[:config.input_dim+1]
data = copy.copy(self.df_list[index-1])
input_ = normalize(data[selected[:config.input_dim]].values)[0]
target_ = self.dataset_scaler[selected[-1]].transform(data[selected[-1]].values.reshape(-1, 1))
self.scaler = self.dataset_scaler[selected[-1]]
return input_, target_
def inverse_normalize(self, x):
return self.scaler.inverse_transform(x)
def __getitem__(self, index):
return self.input_data[index], self.target_data[index]
def __len__(self):
return self.line_num
``` |
{
"source": "joeyzhong90595/Lane-Changing-for-Autonomous-Vehicles-in-CARLA-Simulator",
"score": 3
} |
#### File: agents/navigation/lange_change.py
```python
import numpy as np
from math import pi, sin, cos, radians, sqrt
from scipy.interpolate import splrep, splev
from agents.learning.GMM import GMM
from agents.navigation.local_planner import RoadOption
from agents.navigation.local_waypoint import LocalWaypoint
from agents.tools.misc import get_poly_y
# This class generates a lateral and longitudinal quintic polynomial
# trajectory s(t), d(t) and generates waypoints according to target_speed
class PolyLaneChange:
def __init__(self, world, param):
self._world_obj = world
self._map = self._world_obj.world.get_map()
self._lon_dis = param['lon_dis']
self._lat_dis = param['lat_dis']
self._lon_param = param['lon_param']
self._lat_param = param['lat_param']
self._dt = param["dt"]
self._npts = 20
# Return lane change waypoints
def get_waypoints(self, ref):
lane_change_plan = []
x_ref = ref[0]
y_ref = ref[1]
yaw = ref[2]
sy = sin(radians(yaw))
cy = cos(radians(yaw))
# Get points
t = np.linspace(0, self._dt, self._npts)
x = get_poly_y(t, self._lon_param)
y = get_poly_y(t, self._lat_param)
# Transform to world coordinate
R = np.array([[cy, -sy], [sy, cy]])
coord = np.matmul(R, np.stack((x, y))) + np.array([[x_ref], [y_ref]])
# Store waypoints
for i in range(self._npts):
waypoint = LocalWaypoint(coord[0][i], coord[1][i], 0)
lane_change_plan.append((waypoint, RoadOption.CHANGELANELEFT))
return lane_change_plan
# This class generates waypoints along a sinusoidal lane-change trajectory
class SinLaneChange:
def __init__(self, world, param, GMM_v=np.array([])):
self._world_obj = world
self._lon_vel = param['lon_vel']
self._lat_dis = param['lat_dis']
self._dt = param["dt"]
# Load GMM
if GMM_v.size > 0:
GMM_sin = GMM()
if GMM_sin.GMM_model is not None:
self._dt = GMM_sin.predict_value(GMM_v)[0][0]
if np.isnan(self._dt) or self._dt < 0:
self._dt = param["dt"]
print("GMM model failed, send dt = 4")
else:
print("Predict dt: %s from GMM" % (self._dt))
self._npts = int(20*self._dt)
# Return lane change waypoints
def get_waypoints(self, ref):
lane_change_plan = []
x_ref = ref[0]
y_ref = ref[1]
yaw = ref[2]
sy = sin(radians(yaw))
cy = cos(radians(yaw))
# Get points
t = np.linspace(0, self._dt, self._npts)
x = np.linspace(0, self._lon_vel*self._dt, self._npts)
# a_lat = (2*pi*self._lat_dis) / (self._dt*self._dt) * np.sin(2*pi * t_lat/self._dt)
# v_lat = -self._lat_dis/self._dt * np.sin(2*pi * t_lat/self._dt) + self._lat_dis/self._dt
y = -self._lat_dis/(2*pi) * np.sin(2*pi * t/self._dt) + self._lat_dis * t/self._dt
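        # This profile gives y(0) = 0, y(dt) = lat_dis and zero lateral velocity
        # at both ends (dy/dt = lat_dis/dt * (1 - cos(2*pi*t/dt))), i.e. a
        # smooth S-shaped transition into the target lane.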
# Transform to world coordinate
R = np.array([[cy, -sy], [sy, cy]])
coord = np.matmul(R, np.stack((x, y))) + np.array([[x_ref], [y_ref]])
# Store waypoints
for i in range(self._npts):
waypoint = LocalWaypoint(coord[0][i], coord[1][i], 0)
lane_change_plan.append((waypoint, RoadOption.CHANGELANELEFT))
return lane_change_plan
```
#### File: agents/navigation/pid_controller.py
```python
import carla
import math
import numpy as np
from collections import deque
from agents.tools.misc import get_speed
import time
class VehiclePIDController:
"""
VehiclePIDController is the combination of two PID controllers (lateral and longitudinal)
"""
def __init__(self, vehicle, args_lateral=None, args_longitudinal=None):
"""
:param vehicle: actor to apply to local planner logic onto
:param args_lateral: dictionary of arguments to set the lateral PID controller
:param args_longitudinal: dictionary of arguments to set the longitudinal PID controller
"""
if not args_lateral:
args_lateral = {'K_P': 0.4, 'K_I': 0.2, 'K_D': 0.4, 'dt': 0.05, 'control_type': 'PID'}
if not args_longitudinal:
args_longitudinal = {'K_P': 1.0, 'K_I': 0.2, 'K_D': 0.6, 'dt': 0.05}
self._vehicle = vehicle
self._world = self._vehicle.get_world()
self._lon_controller = PIDLongitudinalController(self._vehicle, **args_longitudinal)
self._lat_controller = PIDLateralController(self._vehicle, **args_lateral)
def run_step(self, target_speed, waypoints, target_waypoint, current_waypoint):
"""
Execute one step of control invoking both lateral and longitudinal PID controllers to reach a target waypoint
at a given target_speed.
:param target_speed: desired vehicle speed
        :param waypoints: list of upcoming waypoints
        :param target_waypoint: waypoint to steer towards
        :param current_waypoint: waypoint closest to the vehicle
        :return: carla.VehicleControl() instance
"""
throttle = self._lon_controller.run_step(target_speed)
steering = self._lat_controller.run_step(waypoints, target_waypoint, current_waypoint)
# throttle, steering = self._mpc.run_step(target_speed, waypoints)
control = carla.VehicleControl()
control.steer = steering
control.throttle = throttle
control.brake = 0.0
control.hand_brake = False
control.manual_gear_shift = False
return control
class PIDLongitudinalController:
"""
PIDLongitudinalController implements longitudinal control using a PID.
Speed longitudinal controller (Position longitudinal controller preferred)
"""
def __init__(self, vehicle, K_P=1.0, K_D=0.5, K_I=0.5, dt=0.05):
"""
:param vehicle: actor to apply to local planner logic onto
:param K_P: Proportional term
:param K_D: Differential term
:param K_I: Integral term
:param dt: time differential in seconds
"""
self._vehicle = vehicle
self._K_P = K_P
self._K_D = K_D
self._K_I = K_I
self._dt = dt
self._e_buffer = deque(maxlen=30)
def run_step(self, target_speed, debug=False):
"""
Execute one step of longitudinal control to reach a given target speed.
:param target_speed: target speed in Km/h
:return: throttle control in the range [0, 1]
"""
current_speed = get_speed(self._vehicle)
if debug:
print('Current speed = {}'.format(current_speed))
return self._pid_control(target_speed, current_speed)
def _pid_control(self, target_speed, current_speed):
"""
Estimate the throttle of the vehicle based on the PID equations
:param target_speed: target speed in Km/h
:param current_speed: current speed of the vehicle in Km/h
:return: throttle control in the range [0, 1]
when it is [-1, 0], it becomes brake control
"""
# speed error
_e = (target_speed - current_speed)
self._e_buffer.append(_e)
# d, i term of error
if len(self._e_buffer) >= 2:
_de = (self._e_buffer[-1] - self._e_buffer[-2]) / self._dt
_ie = sum(self._e_buffer) * self._dt
else:
_de = 0.0
_ie = 0.0
# control signal
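        # Discrete PID: Kp*e + Kd*de + Ki*ie, clipped to [0, 1]. Note that _de
        # and _ie above already include dt, so the extra /dt and *dt below scale
        # the derivative and integral terms by 1/dt**2 and dt**2 respectively.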
return np.clip((self._K_P * _e) + (self._K_D * _de / self._dt) + (self._K_I * _ie * self._dt), 0.0, 1.0)
class PIDLateralController:
"""
PIDLateralController implements lateral control using a PID.
Heading lateral controller (Stanley lateral controller preferred)
"""
def __init__(self, vehicle, K_P=0.5, K_D=0.5, K_I=0.2, dt=0.05, control_type='PID'):
"""
:param vehicle: actor to apply to local planner logic onto
:param K_P: Proportional term
:param K_D: Differential term
:param K_I: Integral term
:param dt: time differential in seconds
"""
self._vehicle = vehicle
self._K_P = K_P
self._K_D = K_D
self._K_I = K_I
self._dt = dt
self._e_buffer = deque(maxlen=10)
self._control_type = control_type
def run_step(self, waypoints, target_waypoint, current_waypoint):
"""
        Execute one step of lateral control to steer the vehicle towards a certain waypoint.
:param waypoint: target waypoint
:return: steering control in the range [-1, 1] where:
-1 represent maximum steering to left
+1 maximum steering to right
"""
if self._control_type=='PID':
return self._pid_control(target_waypoint, self._vehicle.get_transform())
else:
return self._stanley_control(target_waypoint, current_waypoint, self._vehicle.get_transform())
def _pid_control(self, waypoint, vehicle_transform):
"""
Estimate the steering angle of the vehicle based on the PID equations
:param waypoint: target waypoint
:param vehicle_transform: current transform of the vehicle
:return: steering control in the range [-1, 1]
"""
# print(" ")
# print("================= PID Control ======================")
v_begin = vehicle_transform.location
v_end = v_begin + carla.Location(x=math.cos(math.radians(vehicle_transform.rotation.yaw)),
y=math.sin(math.radians(vehicle_transform.rotation.yaw)))
v_vec = np.array([v_end.x - v_begin.x, v_end.y - v_begin.y, 0.0])
w_vec = np.array([waypoint.transform.location.x -
v_begin.x, waypoint.transform.location.y -
v_begin.y, 0.0])
_dot = math.acos(np.clip(np.dot(w_vec, v_vec) /
(np.linalg.norm(w_vec) * np.linalg.norm(v_vec)), -1.0, 1.0))
_cross = np.cross(v_vec, w_vec)
if _cross[2] < 0:
_dot *= -1.0
# _dot should range from -pi to pi
if _dot > 1.5708:
_dot = -(math.pi - _dot)
elif _dot < -1.5708:
_dot = math.pi + _dot
self._e_buffer.append(_dot)
if len(self._e_buffer) >= 2:
_de = (self._e_buffer[-1] - self._e_buffer[-2]) / self._dt
_ie = sum(self._e_buffer) * self._dt
else:
_de = 0.0
_ie = 0.0
return np.clip((self._K_P * _dot) + (self._K_D * _de /
self._dt) + (self._K_I * _ie * self._dt), -1.0, 1.0)
def _stanley_control(self, target_waypoint, current_waypoint, vehicle_transform):
"""
Estimate the steering angle of the vehicle based on the PID equations
:param waypoint: target waypoint
:param vehicle_transform: current transform of the vehicle
:return: steering control in the range [-1, 1]
"""
# heading error
# print(" ")
# print("================= Stanley ======================")
yaw_path = np.arctan2(target_waypoint.transform.location.y-current_waypoint.transform.location.y, target_waypoint.transform.location.x - current_waypoint.transform.location.x)
v_begin = vehicle_transform.location
v_end = v_begin + carla.Location(x=math.cos(math.radians(vehicle_transform.rotation.yaw)),
y=math.sin(math.radians(vehicle_transform.rotation.yaw)))
# vehicle heading vector
v_vec = np.array([v_end.x - v_begin.x, v_end.y - v_begin.y, 0.0])
yaw_vehicle = np.arctan2(v_vec[1], v_vec[0])
yaw_diff = yaw_path - yaw_vehicle
# Wrapping the yaw_diff
if yaw_diff > np.pi:
yaw_diff -= 2 * np.pi
if yaw_diff < - np.pi:
yaw_diff += 2 * np.pi
# Calculate cross-track error
cross_err_current = (v_begin.x - current_waypoint.transform.location.x)**2 + (v_begin.y - current_waypoint.transform.location.y)**2
cross_err_target = (v_begin.x - target_waypoint.transform.location.x)**2 + (v_begin.y - target_waypoint.transform.location.y)**2
crosstrack_error = np.min([cross_err_current, cross_err_target])
yaw_cross_track = np.arctan2(v_begin.y-target_waypoint.transform.location.y, v_begin.x-target_waypoint.transform.location.x)
yaw_path2ct = yaw_path - yaw_cross_track
if yaw_path2ct > np.pi:
yaw_path2ct -= 2 * np.pi
if yaw_path2ct < - np.pi:
yaw_path2ct += 2 * np.pi
if yaw_path2ct > 0:
crosstrack_error = abs(crosstrack_error)
else:
crosstrack_error = -abs(crosstrack_error)
v = get_speed(self._vehicle)
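        # Stanley control law: steering = heading error
        #   + arctan(k_e * cross-track term / (k_v + v))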
k_e = 3
k_v = 1
#print("crosstrack_error: ", crosstrack_error)
yaw_diff_crosstrack = np.arctan(k_e * crosstrack_error / (k_v + v))
steer_expect = yaw_diff + yaw_diff_crosstrack
steer_expect = min(2, steer_expect)
steer_expect = max(-2, steer_expect)
if steer_expect > np.pi:
steer_expect -= 2 * np.pi
if steer_expect < - np.pi:
steer_expect += 2 * np.pi
#print("steer expect: ", steer_expect)
return steer_expect
```
#### File: Lane-Changing-for-Autonomous-Vehicles-in-CARLA-Simulator/environment/load_actors.py
```python
import glob
import os
import sys
try:
sys.path.append(glob.glob('../../CARLA_Simulator/PythonAPI/carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
import pygame
import random
def spawn_surrounding_vehicles(world, scene):
# Scenes
transform_list = []
if scene == "1":
transform_curr_front = carla.Transform(carla.Location(x=65, y=7.8, z=0.1))
transform_list = [transform_curr_front]
if scene == "2":
transform_curr_front = carla.Transform(carla.Location(x=65, y=7.8, z=0.1))
transform_curr_back = carla.Transform(carla.Location(x=35, y=7.3, z=0.1))
transform_side_front = carla.Transform(carla.Location(x=65, y=3.8, z=0.1))
transform_side_back = carla.Transform(carla.Location(x=35, y=3.5, z=0.1))
transform_list = [transform_curr_front, transform_curr_back, transform_side_front, transform_side_back]
# Set vehicles selection
blueprint_library = world.get_blueprint_library()
bp_lib = []
bp_lib.extend(blueprint_library.filter('vehicle.nissan.*'))
bp_lib.extend(blueprint_library.filter('vehicle.audi.*'))
bp_lib.extend(blueprint_library.filter('vehicle.tesla.model3'))
# Spawn vehicles
vehicle_list = []
for vehicle_i in range(len(transform_list)):
# Vehicle attribute
bp = random.choice(bp_lib)
if bp.has_attribute('color'):
color = random.choice(bp.get_attribute('color').recommended_values)
bp.set_attribute('color', color)
# Vehicle location
transform = transform_list[vehicle_i]
# Spawn the vehicle
vehicle = world.try_spawn_actor(bp, transform)
if vehicle is not None:
vehicle_list.append(vehicle)
print('created %s' % vehicle.type_id)
else:
print('location for %s occupied' % vehicle.type_id)
pygame.time.wait(700)
for vehicle in vehicle_list:
vehicle.set_autopilot(True)
print("%s set to Autopilot mode." % vehicle.type_id)
return vehicle_list
```
#### File: Lane-Changing-for-Autonomous-Vehicles-in-CARLA-Simulator/environment/manual_control.py
```python
import sys
import glob
import os
try:
sys.path.append(glob.glob('../../CARLA_Simulator/PythonAPI/carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
import carla
try:
import pygame
from pygame.locals import KMOD_CTRL, KMOD_SHIFT
from pygame.locals import K_ESCAPE
from pygame.locals import K_BACKSPACE
from pygame.locals import K_F1
from pygame.locals import K_h
from pygame.locals import K_TAB
from pygame.locals import K_BACKQUOTE
from pygame.locals import K_c
from pygame.locals import K_p
from pygame.locals import K_l
from pygame.locals import K_t
from pygame.locals import K_UP, K_DOWN, K_LEFT, K_RIGHT
from pygame.locals import K_w, K_a, K_s, K_d
from pygame.locals import K_q
from pygame.locals import K_m
from pygame.locals import K_COMMA, K_PERIOD
from pygame.locals import K_SPACE
from pygame.locals import K_r
from pygame.locals import K_MINUS
from pygame.locals import K_EQUALS
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
"""
F1 : toggle HUD
H : toggle help
ESC : quit
C : change weather (Shift+C reverse)
TAB : change sensor position
` : change sensor
W/Up : throttle
S/Down : brake
AD/LeftRight : steer
Q : toggle reverse
Space : hand-brake
M : toggle manual transmission
,/. : gear up/down
P : toggle autopilot
L : toggle learning mode
T : train the model with existing data
Backspace : reborn
"""
# Configparser for Logitech Steering Wheel
if sys.version_info >= (3, 0):
from configparser import ConfigParser
else:
from ConfigParser import RawConfigParser as ConfigParser
import math
class KeyboardControl(object):
def __init__(self, world, start_in_autopilot=False):
self._autopilot_enabled = start_in_autopilot
self._learning_enabled = False
if isinstance(world.player, carla.Vehicle):
self._control = carla.VehicleControl()
world.enable_agent(start_in_autopilot)
else:
raise NotImplementedError("Actor type not supported")
'''
elif isinstance(world.player, carla.Walker):
self._control = carla.WalkerControl()
self._autopilot_enabled = False
self._rotation = world.player.get_transform().rotation
'''
self._steer_cache = 0.0
world.hud.notification("Press 'h' for help.", seconds=4.0)
def parse_events(self, client, world, clock):
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True
elif event.type == pygame.KEYDOWN:
# World setting
if self._is_quit_shortcut(event.key):
return True
elif event.key == K_BACKSPACE:
self._learning_enabled = False
world.restart()
elif event.key == K_TAB:
world.main_rgb_camera.toggle_camera()
elif event.key == K_BACKQUOTE:
world.main_rgb_camera.next_sensor()
elif event.key == K_c and pygame.key.get_mods() & KMOD_SHIFT:
world.next_weather(reverse=True)
elif event.key == K_c:
world.next_weather()
# Hud setting
elif event.key == K_F1:
world.hud.toggle_info()
elif event.key == K_h:
world.hud.help.toggle()
'''
# Record setting
elif event.key == K_r and not (pygame.key.get_mods() & KMOD_CTRL):
world.main_rgb_camera.toggle_recording()
elif event.key == K_r and (pygame.key.get_mods() & KMOD_CTRL):
if world.recording_enabled:
client.stop_recorder()
world.recording_enabled = False
world.hud.notification("Recorder is OFF")
else:
client.start_recorder("manual_recording.rec")
world.recording_enabled = True
world.hud.notification("Recorder is ON")
elif event.key == K_p and (pygame.key.get_mods() & KMOD_CTRL):
# stop recorder
client.stop_recorder()
world.recording_enabled = False
# work around to fix camera at start of replaying
currentIndex = world.main_rgb_camera.index
world.destroy_sensors()
# disable autopilot
self._autopilot_enabled = False
world.enable_agent(self._autopilot_enabled)
world.hud.notification("Replaying file 'manual_recording.rec'")
# replayer
client.replay_file("manual_recording.rec", world.recording_start, 0, 0)
world.main_rgb_camera.set_sensor(currentIndex)
elif event.key == K_MINUS and (pygame.key.get_mods() & KMOD_CTRL):
if pygame.key.get_mods() & KMOD_SHIFT:
world.recording_start -= 10
else:
world.recording_start -= 1
world.hud.notification("Recording start time is %d" % world.recording_start)
elif event.key == K_EQUALS and (pygame.key.get_mods() & KMOD_CTRL):
if pygame.key.get_mods() & KMOD_SHIFT:
world.recording_start += 10
else:
world.recording_start += 1
world.hud.notification("Recording start time is %d" % world.recording_start)
'''
# Control setting
if isinstance(self._control, carla.VehicleControl):
# gear
if event.key == K_q:
self._control.gear = 1 if self._control.reverse else -1
elif event.key == K_m:
self._control.manual_gear_shift = not self._control.manual_gear_shift
self._control.gear = world.player.get_control().gear
world.hud.notification('%s Transmission' %
('Manual' if self._control.manual_gear_shift else 'Automatic'))
elif self._control.manual_gear_shift and event.key == K_COMMA:
self._control.gear = max(-1, self._control.gear - 1)
elif self._control.manual_gear_shift and event.key == K_PERIOD:
self._control.gear = self._control.gear + 1
# autopilot mode
elif event.key == K_p and not (pygame.key.get_mods() & KMOD_CTRL):
self._autopilot_enabled = not self._autopilot_enabled
world.enable_agent(self._autopilot_enabled)
world.hud.notification('Autopilot %s' % ('On' if self._autopilot_enabled else 'Off'))
# learning mode
elif event.key == K_l and not self._autopilot_enabled:
self._learning_enabled = not self._learning_enabled
world.enable_learning(self._learning_enabled)
world.hud.notification('Learning %s' % ('On' if self._learning_enabled else 'Off'))
# train model
elif event.key == K_t:
world.agent.train_model()
world.hud.notification('Training Model...')
# send control signal
if not self._autopilot_enabled:
if isinstance(self._control, carla.VehicleControl):
self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())
self._control.reverse = self._control.gear < 0
'''
elif isinstance(self._control, carla.WalkerControl):
self._parse_walker_keys(pygame.key.get_pressed(), clock.get_time(), world)
'''
world.player.apply_control(self._control)
# Vehicle Control
def _parse_vehicle_keys(self, keys, milliseconds):
throttle_increment = 5e-4 * milliseconds
if keys[K_UP] or keys[K_w]:
self._control.throttle = min(abs(self._control.throttle + throttle_increment), 0.7)
else:
self._control.throttle = max(self._control.throttle - throttle_increment, 0.2)
steer_increment = 5e-4 * milliseconds
if keys[K_LEFT] or keys[K_a]:
if self._steer_cache > 0:
self._steer_cache = 0
else:
self._steer_cache -= steer_increment
elif keys[K_RIGHT] or keys[K_d]:
if self._steer_cache < 0:
self._steer_cache = 0
else:
self._steer_cache += steer_increment
else:
self._steer_cache = 0.0
self._steer_cache = min(0.7, max(-0.7, self._steer_cache))
self._control.steer = round(self._steer_cache, 1)
if keys[K_DOWN] or keys[K_s]:
self._control.brake = 0.8
self._control.throttle = 0.0
else:
self._control.brake = 0.0
self._control.hand_brake = keys[K_SPACE]
'''
def _parse_walker_keys(self, keys, milliseconds, world):
self._control.speed = 0.0
if keys[K_DOWN] or keys[K_s]:
self._control.speed = 0.0
if keys[K_LEFT] or keys[K_a]:
self._control.speed = .01
self._rotation.yaw -= 0.08 * milliseconds
if keys[K_RIGHT] or keys[K_d]:
self._control.speed = .01
self._rotation.yaw += 0.08 * milliseconds
if keys[K_UP] or keys[K_w]:
self._control.speed = world.player_max_speed_fast if pygame.key.get_mods() & KMOD_SHIFT else world.player_max_speed
self._control.jump = keys[K_SPACE]
self._rotation.yaw = round(self._rotation.yaw, 1)
self._control.direction = self._rotation.get_forward_vector()
'''
@staticmethod
def _is_quit_shortcut(key):
return key == K_ESCAPE
# Controller with Joystick
class DualControl(object):
def __init__(self, world, start_in_autopilot=False):
self._autopilot_enabled = start_in_autopilot
self._learning_enabled = False
if isinstance(world.player, carla.Vehicle):
self._control = carla.VehicleControl()
world.enable_agent(start_in_autopilot)
else:
raise NotImplementedError("Actor type not supported")
'''
elif isinstance(world.player, carla.Walker):
self._control = carla.WalkerControl()
self._autopilot_enabled = False
self._rotation = world.player.get_transform().rotation
'''
self._steer_cache = 0.0
world.hud.notification("Press 'h' for help.", seconds=4.0)
# initialize steering wheel
pygame.joystick.init()
joystick_count = pygame.joystick.get_count()
if joystick_count > 1:
raise ValueError("Please Connect Just One Joystick")
self._joystick = pygame.joystick.Joystick(0)
self._joystick.init()
self._parser = ConfigParser()
self._parser.read('wheel_config.ini')
self._steer_idx = 0
self._throttle_idx = 1
self._brake_idx = 2
self._reverse_idx = 9
self._handbrake_idx = 8
def parse_events(self, client, world, clock):
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True
elif event.type == pygame.JOYBUTTONDOWN:
if event.button == 0:
world.restart()
elif event.button == 1:
world.hud.toggle_info()
elif event.button == 2:
world.camera_manager.toggle_camera()
elif event.button == 3:
world.next_weather()
elif event.button == self._reverse_idx:
self._control.gear = 1 if self._control.reverse else -1
elif event.button == 23:
world.camera_manager.next_sensor()
elif event.type == pygame.KEYDOWN:
# World setting
if self._is_quit_shortcut(event.key):
return True
elif event.key == K_BACKSPACE:
self._learning_enabled = False
world.restart()
elif event.key == K_TAB:
world.main_rgb_camera.toggle_camera()
elif event.key == K_BACKQUOTE:
world.main_rgb_camera.next_sensor()
elif event.key == K_c and pygame.key.get_mods() & KMOD_SHIFT:
world.next_weather(reverse=True)
elif event.key == K_c:
world.next_weather()
# Hud setting
elif event.key == K_F1:
world.hud.toggle_info()
elif event.key == K_h:
world.hud.help.toggle()
'''
# Record setting
elif event.key == K_r and not (pygame.key.get_mods() & KMOD_CTRL):
world.main_rgb_camera.toggle_recording()
elif event.key == K_r and (pygame.key.get_mods() & KMOD_CTRL):
if world.recording_enabled:
client.stop_recorder()
world.recording_enabled = False
world.hud.notification("Recorder is OFF")
else:
client.start_recorder("manual_recording.rec")
world.recording_enabled = True
world.hud.notification("Recorder is ON")
elif event.key == K_p and (pygame.key.get_mods() & KMOD_CTRL):
# stop recorder
client.stop_recorder()
world.recording_enabled = False
# work around to fix camera at start of replaying
currentIndex = world.main_rgb_camera.index
world.destroy_sensors()
# disable autopilot
self._autopilot_enabled = False
world.enable_agent(self._autopilot_enabled)
world.hud.notification("Replaying file 'manual_recording.rec'")
# replayer
client.replay_file("manual_recording.rec", world.recording_start, 0, 0)
                    world.main_rgb_camera.set_sensor(currentIndex)
elif event.key == K_MINUS and (pygame.key.get_mods() & KMOD_CTRL):
if pygame.key.get_mods() & KMOD_SHIFT:
world.recording_start -= 10
else:
world.recording_start -= 1
world.hud.notification("Recording start time is %d" % world.recording_start)
elif event.key == K_EQUALS and (pygame.key.get_mods() & KMOD_CTRL):
if pygame.key.get_mods() & KMOD_SHIFT:
world.recording_start += 10
else:
world.recording_start += 1
world.hud.notification("Recording start time is %d" % world.recording_start)
'''
# Control setting
if isinstance(self._control, carla.VehicleControl):
# gear
if event.key == K_q:
self._control.gear = 1 if self._control.reverse else -1
elif event.key == K_m:
self._control.manual_gear_shift = not self._control.manual_gear_shift
self._control.gear = world.player.get_control().gear
world.hud.notification('%s Transmission' %
('Manual' if self._control.manual_gear_shift else 'Automatic'))
elif self._control.manual_gear_shift and event.key == K_COMMA:
self._control.gear = max(-1, self._control.gear - 1)
elif self._control.manual_gear_shift and event.key == K_PERIOD:
self._control.gear = self._control.gear + 1
# autopilot mode
elif event.key == K_p and not (pygame.key.get_mods() & KMOD_CTRL):
self._autopilot_enabled = not self._autopilot_enabled
world.enable_agent(self._autopilot_enabled)
world.hud.notification('Autopilot %s' % ('On' if self._autopilot_enabled else 'Off'))
# learning mode
elif event.key == K_l and not self._autopilot_enabled:
self._learning_enabled = not self._learning_enabled
world.enable_learning(self._learning_enabled)
world.hud.notification('Learning %s' % ('On' if self._learning_enabled else 'Off'))
# train model
elif event.key == K_t:
world.agent.train_model()
world.hud.notification('Training Model...')
# send control signal
if not self._autopilot_enabled:
if isinstance(self._control, carla.VehicleControl):
self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())
self._parse_vehicle_wheel()
self._control.reverse = self._control.gear < 0
'''
elif isinstance(self._control, carla.WalkerControl):
self._parse_walker_keys(pygame.key.get_pressed(), clock.get_time(), world)
'''
world.player.apply_control(self._control)
# Keyboard Control
def _parse_vehicle_keys(self, keys, milliseconds):
throttle_increment = 5e-4 * milliseconds
if keys[K_UP] or keys[K_w]:
self._control.throttle = min(abs(self._control.throttle + throttle_increment), 0.7)
else:
self._control.throttle = max(self._control.throttle - throttle_increment, 0.2)
steer_increment = 5e-4 * milliseconds
if keys[K_LEFT] or keys[K_a]:
if self._steer_cache > 0:
self._steer_cache = 0
else:
self._steer_cache -= steer_increment
elif keys[K_RIGHT] or keys[K_d]:
if self._steer_cache < 0:
self._steer_cache = 0
else:
self._steer_cache += steer_increment
else:
self._steer_cache = 0.0
self._steer_cache = min(0.7, max(-0.7, self._steer_cache))
self._control.steer = round(self._steer_cache, 1)
if keys[K_DOWN] or keys[K_s]:
self._control.brake = 0.8
self._control.throttle = 0.0
else:
self._control.brake = 0.0
self._control.hand_brake = keys[K_SPACE]
'''
def _parse_walker_keys(self, keys, milliseconds, world):
self._control.speed = 0.0
if keys[K_DOWN] or keys[K_s]:
self._control.speed = 0.0
if keys[K_LEFT] or keys[K_a]:
self._control.speed = .01
self._rotation.yaw -= 0.08 * milliseconds
if keys[K_RIGHT] or keys[K_d]:
self._control.speed = .01
self._rotation.yaw += 0.08 * milliseconds
if keys[K_UP] or keys[K_w]:
self._control.speed = world.player_max_speed_fast if pygame.key.get_mods() & KMOD_SHIFT else world.player_max_speed
self._control.jump = keys[K_SPACE]
self._rotation.yaw = round(self._rotation.yaw, 1)
self._control.direction = self._rotation.get_forward_vector()
'''
# Joystick control
def _parse_vehicle_wheel(self):
numAxes = self._joystick.get_numaxes()
jsInputs = [float(self._joystick.get_axis(i)) for i in range(numAxes)]
# print (jsInputs)
jsButtons = [float(self._joystick.get_button(i)) for i in
range(self._joystick.get_numbuttons())]
# Custom function to map range of inputs [1, -1] to outputs [0, 1] i.e 1 from inputs means nothing is pressed
# For the steering, it seems fine as it is
K1 = 0.2 # 0.55
steerCmd = K1 * math.tan(1.1 * jsInputs[self._steer_idx])
K2 = 1.6 # 1.6
throttleCmd = K2 + (2.05 * math.log10(
-0.7 * jsInputs[self._throttle_idx] + 1.4) - 1.2) / 0.92
if throttleCmd <= 0:
throttleCmd = 0
elif throttleCmd > 1:
throttleCmd = 1
brakeCmd = 1.6 + (2.05 * math.log10(
-0.7 * jsInputs[self._brake_idx] + 1.4) - 1.2) / 0.92
if brakeCmd <= 0:
brakeCmd = 0
elif brakeCmd > 1:
brakeCmd = 1
self._control.steer = steerCmd
self._control.brake = brakeCmd
self._control.throttle = throttleCmd
#toggle = jsButtons[self._reverse_idx]
self._control.hand_brake = bool(jsButtons[self._handbrake_idx])
@staticmethod
def _is_quit_shortcut(key):
return key == K_ESCAPE
```
#### File: simplemotion-drive/carla_utils/CameraManager.py
```python
import pygame
import numpy as np
import weakref
from ._utils import get_actor_display_name
import carla
from carla import ColorConverter as cc
# ==============================================================================
# -- CameraManager -------------------------------------------------------------
# ==============================================================================
class CameraManager(object):
def __init__(self, parent_actor, hud):
self.sensor = None
self.surface = None
self._parent = parent_actor
self.hud = hud
self.recording = False
self._camera_transforms = [
carla.Transform(carla.Location(x=-5.5, z=2.8),
carla.Rotation(pitch=-15)),
carla.Transform(carla.Location(x=1.6, z=1.7))]
self.transform_index = 1
self.sensors = [
['sensor.camera.rgb', cc.Raw, 'Camera RGB'],
['sensor.camera.depth', cc.Raw, 'Camera Depth (Raw)'],
['sensor.camera.depth', cc.Depth, 'Camera Depth (Gray Scale)'],
['sensor.camera.depth', cc.LogarithmicDepth,
'Camera Depth (Logarithmic Gray Scale)'],
['sensor.camera.semantic_segmentation', cc.Raw,
'Camera Semantic Segmentation (Raw)'],
['sensor.camera.semantic_segmentation', cc.CityScapesPalette,
'Camera Semantic Segmentation (CityScapes Palette)'],
['sensor.lidar.ray_cast', None, 'Lidar (Ray-Cast)']]
world = self._parent.get_world()
bp_library = world.get_blueprint_library()
for item in self.sensors:
bp = bp_library.find(item[0])
if item[0].startswith('sensor.camera'):
bp.set_attribute('image_size_x', str(hud.dim[0]))
bp.set_attribute('image_size_y', str(hud.dim[1]))
elif item[0].startswith('sensor.lidar'):
bp.set_attribute('range', '50')
item.append(bp)
self.index = None
def toggle_camera(self):
self.transform_index = (self.transform_index +
1) % len(self._camera_transforms)
self.sensor.set_transform(
self._camera_transforms[self.transform_index])
def set_sensor(self, index, notify=True):
index = index % len(self.sensors)
needs_respawn = True if self.index is None \
else self.sensors[index][0] != self.sensors[self.index][0]
if needs_respawn:
if self.sensor is not None:
self.sensor.destroy()
self.surface = None
self.sensor = self._parent.get_world().spawn_actor(
self.sensors[index][-1],
self._camera_transforms[self.transform_index],
attach_to=self._parent)
# We need to pass the lambda a weak reference to self to avoid
# circular reference.
weak_self = weakref.ref(self)
self.sensor.listen(
lambda image: CameraManager._parse_image(weak_self, image))
if notify:
self.hud.notification(self.sensors[index][2])
self.index = index
def next_sensor(self):
self.set_sensor(self.index + 1)
def toggle_recording(self):
self.recording = not self.recording
self.hud.notification('Recording %s' %
('On' if self.recording else 'Off'))
def render(self, display):
if self.surface is not None:
display.blit(self.surface, (0, 0))
@staticmethod
def _parse_image(weak_self, image):
self = weak_self()
if not self:
return
if self.sensors[self.index][0].startswith('sensor.lidar'):
points = np.frombuffer(image.raw_data, dtype=np.dtype('f4'))
points = np.reshape(points, (int(points.shape[0] / 3), 3))
lidar_data = np.array(points[:, :2])
lidar_data *= min(self.hud.dim) / 100.0
lidar_data += (0.5 * self.hud.dim[0], 0.5 * self.hud.dim[1])
lidar_data = np.fabs(lidar_data) # pylint: disable=E1111
lidar_data = lidar_data.astype(np.int32)
lidar_data = np.reshape(lidar_data, (-1, 2))
lidar_img_size = (self.hud.dim[0], self.hud.dim[1], 3)
lidar_img = np.zeros(lidar_img_size)
lidar_img[tuple(lidar_data.T)] = (255, 255, 255)
self.surface = pygame.surfarray.make_surface(lidar_img)
else:
image.convert(self.sensors[self.index][1])
array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
array = np.reshape(array, (image.height, image.width, 4))
array = array[:, :, :3]
array = array[:, :, ::-1]
self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
if self.recording:
image.save_to_disk('_out/%08d' % image.frame)
``` |
{
"source": "joeyzhong90595/Robotic_Poker_Dealer",
"score": 3
} |
#### File: Robotic_Poker_Dealer/src/test_main.py
```python
from s_dealer.srv import StartGameFlag, StartGameFlagResponse, GetInstruction
from sensor_msgs.msg import Image, PointCloud2
from cv_bridge import CvBridge
import sensor_msgs.point_cloud2 as pc2
from player_recognition import face_rec
from areas_determination import det_areas
import pcl
import numpy as np
import time
import rospy
def get_signal():
rospy.wait_for_service("get_instruction")
try:
signal = rospy.ServiceProxy('get_instruction', GetInstruction)
result = signal()
except rospy.ServiceException, e:
print "Service call failed: %s"%e
return result
# Game Procedure
def handle_game(req):
print "Waiting for instruction"
start_signal = 0
while start_signal != 2:
start_signal = get_signal().result
print "Game Starts"
# Determine number of players
# Get rgb image from kinect
bridge = CvBridge()
image_message = rospy.wait_for_message("/camera/rgb/image_color", Image)
rbg_image = bridge.imgmsg_to_cv2(image_message, desired_encoding="passthrough")
# Get pointclouds from kinect
data =rospy.wait_for_message("/camera/depth_registered/points", PointCloud2)
pc = pc2.read_points(data, skip_nans=True, field_names=("x", "y", "z"))
pc_list = []
for point in pc:
pc_list.append( [point[0],point[1],point[2]] )
p = pcl.PointCloud()
p.from_list(pc_list)
# Perform Facial Recognition
print "Start Facial Recognition"
loc = face_rec(rbg_image)
'''
if len(loc) < 2:
print "There is only", len(loc), "player."
return StartGameFlagResponse("Not enough players")'''
# Determine players area
print "Start Players Area Detection"
players_info = det_areas(rbg_image, p, loc)
print(players_info)
# Deal the first two facing-down cards
print "First deal"
time.sleep(2)
print "Waiting for instruction"
# Acquire instruction
game_continue = 0
while game_continue == 0:
game_continue = get_signal().result
if game_continue == 1:
return StartGameFlagResponse("Game Ends after the first deal")
# Deal the first three facing-up cards
print "Second deal"
time.sleep(2)
print "Waiting for instruction"
# Acquire instruction
game_continue = 0
while game_continue == 0:
game_continue = get_signal().result
if game_continue == 1:
return StartGameFlagResponse("Game Ends after second deal")
# Deal the forth facing-up cards
print "Third deal"
time.sleep(2)
print "Waiting for instruction"
# Acquire instruction
game_continue = 0
while game_continue == 0:
game_continue = get_signal().result
if game_continue == 1:
return StartGameFlagResponse("Game Ends after third deal")
# Deal the last facing-up cards
print "Final deal"
return StartGameFlagResponse("Game Ends after all deals")
# Connector services
def main():
rospy.init_node('s_dealer')
s = rospy.Service('start_game', StartGameFlag, handle_game)
print "Ready to start"
rospy.spin()
if __name__ == "__main__":
main()
``` |
{
"source": "joeyzhou85/python",
"score": 3
} |
#### File: python/ciphers/onepad_cipher.py
```python
from __future__ import print_function
import random
class Onepad:
def encrypt(self, text):
        '''Function to encrypt text using pseudo-random numbers'''
plain = [ord(i) for i in text]
key = []
cipher = []
for i in plain:
k = random.randint(1, 300)
c = (i+k)*k
cipher.append(c)
key.append(k)
return cipher, key
def decrypt(self, cipher, key):
        '''Function to decrypt text using pseudo-random numbers.'''
plain = []
for i in range(len(key)):
p = int((cipher[i]-(key[i])**2)/key[i])
plain.append(chr(p))
plain = ''.join([i for i in plain])
return plain
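# Decryption inverts the encryption step algebraically: since
# c = (p + k) * k = p*k + k**2, the code point is recovered as p = (c - k**2) / k.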
if __name__ == '__main__':
c, k = Onepad().encrypt('Hello')
print(c, k)
print(Onepad().decrypt(c, k))
```
#### File: data_structures/stacks/next_greater_element.py
```python
from __future__ import print_function
# Function to print element and NGE pair for all elements of list
def printNGE(arr):
for i in range(0, len(arr), 1):
next = -1
for j in range(i+1, len(arr), 1):
if arr[i] < arr[j]:
next = arr[j]
break
print(str(arr[i]) + " -- " + str(next))
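# Brute force, O(n^2) comparisons. For arr = [11, 13, 21, 3] the output is:
# 11 -- 13, 13 -- 21, 21 -- -1, 3 -- -1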
# Driver program to test above function
arr = [11,13,21,3]
printNGE(arr)
```
#### File: python/dynamic_programming/longest_common_subsequence.py
```python
from __future__ import print_function
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def lcs_dp(x, y):
# find the length of strings
m = len(x)
n = len(y)
# declaring the array for storing the dp values
L = [[None] * (n + 1) for i in xrange(m + 1)]
seq = []
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
L[i][j] = 0
elif x[i - 1] == y[ j - 1]:
L[i][j] = L[i - 1][j - 1] + 1
seq.append(x[i -1])
else:
L[i][j] = max(L[i - 1][j], L[i][j - 1])
# L[m][n] contains the length of LCS of X[0..n-1] & Y[0..m-1]
return L[m][n], seq
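# For x = 'AGGTAB' and y = 'GXTXAYB' the LCS length L[m][n] is 4 ('GTAB').
# Note that `seq` collects x[i-1] for every matching DP cell rather than only
# the cells on the optimal path, so it should not be read as the LCS itself.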
if __name__=='__main__':
x = 'AGGTAB'
y = 'GXTXAYB'
print(lcs_dp(x, y))
```
#### File: python/dynamic_programming/longest_increasing_subsequence_o(nlogn).py
```python
from __future__ import print_function
#############################
# Author: <NAME>
# File: lis.py
# comments: This programme outputs the Longest Strictly Increasing Subsequence in O(NLogN)
# Where N is the Number of elements in the list
#############################
def CeilIndex(v,l,r,key):
while r-l > 1:
        m = (l + r) // 2  # integer midpoint so indexing also works on Python 3
if v[m] >= key:
r = m
else:
l = m
return r
def LongestIncreasingSubsequenceLength(v):
if(len(v) == 0):
return 0
tail = [0]*len(v)
length = 1
tail[0] = v[0]
for i in range(1,len(v)):
if v[i] < tail[0]:
tail[0] = v[i]
elif v[i] > tail[length-1]:
tail[length] = v[i]
length += 1
else:
tail[CeilIndex(tail,-1,length-1,v[i])] = v[i]
return length
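# tail[i] holds the smallest possible tail value of an increasing subsequence
# of length i+1, so only the length is recovered. For the example list below
# the answer is 6 (e.g. 2, 3, 7, 8, 10, 13).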
if __name__ == "__main__":
v = [2, 5, 3, 7, 11, 8, 10, 13, 6]
print(LongestIncreasingSubsequenceLength(v))
```
#### File: project_euler/problem_07/sol3.py
```python
from __future__ import print_function
import math
import itertools
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
def primeCheck(number):
if number % 2 == 0 and number > 2:
return False
return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2))
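# primeCheck trial-divides by odd numbers up to sqrt(number); it would wrongly
# accept 1, but prime_generator below starts counting from 2, so the result of
# solution() is unaffected.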
def prime_generator():
num = 2
while True:
if primeCheck(num):
yield num
num += 1
def solution(n):
"""Returns the n-th prime number.
>>> solution(6)
13
>>> solution(1)
2
>>> solution(3)
5
>>> solution(20)
71
>>> solution(50)
229
>>> solution(100)
541
"""
return next(itertools.islice(prime_generator(), n - 1, n))
if __name__ == "__main__":
print(solution(int(raw_input().strip())))
```
#### File: project_euler/problem_11/sol2.py
```python
from __future__ import print_function
import os
try:
xrange # Python 2
except NameError:
    xrange = range # Python 3
def solution():
"""Returns the sum of all the multiples of 3 or 5 below n.
>>> solution()
70600674
"""
with open(os.path.dirname(__file__) + "/grid.txt") as f:
l = []
for i in xrange(20):
l.append([int(x) for x in f.readline().split()])
maximum = 0
# right
for i in xrange(20):
for j in xrange(17):
temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
maximum = temp
# down
for i in xrange(17):
for j in xrange(20):
temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
maximum = temp
# diagonal 1
for i in xrange(17):
for j in xrange(17):
temp = (
l[i][j]
* l[i + 1][j + 1]
* l[i + 2][j + 2]
* l[i + 3][j + 3]
)
if temp > maximum:
maximum = temp
# diagonal 2
for i in xrange(17):
for j in xrange(3, 20):
temp = (
l[i][j]
* l[i + 1][j - 1]
* l[i + 2][j - 2]
* l[i + 3][j - 3]
)
if temp > maximum:
maximum = temp
return maximum
if __name__ == "__main__":
print(solution())
```
#### File: python/searches/sentinel_linear_search.py
```python
def sentinel_linear_search(sequence, target):
"""Pure implementation of sentinel linear search algorithm in Python
:param sequence: some sequence with comparable items
:param target: item value to search
:return: index of found item or None if item is not found
Examples:
>>> sentinel_linear_search([0, 5, 7, 10, 15], 0)
0
>>> sentinel_linear_search([0, 5, 7, 10, 15], 15)
4
>>> sentinel_linear_search([0, 5, 7, 10, 15], 5)
1
>>> sentinel_linear_search([0, 5, 7, 10, 15], 6)
"""
sequence.append(target)
index = 0
while sequence[index] != target:
index += 1
sequence.pop()
if index == len(sequence):
return None
return index
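# The appended target acts as a sentinel: the scan always terminates without a
# bounds check on each step, and after popping the sentinel an index equal to
# len(sequence) means only the sentinel matched, i.e. the target was not found.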
if __name__ == '__main__':
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
user_input = raw_input('Enter numbers separated by comma:\n').strip()
sequence = [int(item) for item in user_input.split(',')]
target_input = raw_input('Enter a single number to be found in the list:\n')
target = int(target_input)
result = sentinel_linear_search(sequence, target)
if result is not None:
print('{} found at positions: {}'.format(target, result))
else:
print('Not found')
```
#### File: python/sorts/comb_sort.py
```python
def comb_sort(data):
"""Pure implementation of comb sort algorithm in Python
    :param data: some mutable ordered collection with comparable items inside
:return: the same collection ordered by ascending
Examples:
>>> comb_sort([0, 5, 3, 2, 2])
[0, 2, 2, 3, 5]
>>> comb_sort([])
[]
>>> comb_sort([-2, -5, -45])
[-45, -5, -2]
"""
shrink_factor = 1.3
gap = len(data)
swapped = True
i = 0
while gap > 1 or swapped:
        # Update the gap value for the next comb, never letting it drop below 1
        gap = int(float(gap) / shrink_factor)
        if gap < 1:
            gap = 1
swapped = False
i = 0
while gap + i < len(data):
if data[i] > data[i+gap]:
# Swap values
data[i], data[i+gap] = data[i+gap], data[i]
swapped = True
i += 1
return data
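# The gap shrinks by a factor of roughly 1.3 each pass; once it reaches 1 the
# remaining work is a bubble-sort phase that ends after a pass with no swaps.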
if __name__ == '__main__':
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
user_input = raw_input('Enter numbers separated by a comma:\n').strip()
unsorted = [int(item) for item in user_input.split(',')]
print(comb_sort(unsorted))
```
#### File: python/sorts/cycle_sort.py
```python
from __future__ import print_function
def cycle_sort(array):
ans = 0
# Pass through the array to find cycles to rotate.
for cycleStart in range(0, len(array) - 1):
item = array[cycleStart]
# finding the position for putting the item.
pos = cycleStart
for i in range(cycleStart + 1, len(array)):
if array[i] < item:
pos += 1
# If the item is already present-not a cycle.
if pos == cycleStart:
continue
# Otherwise, put the item there or right after any duplicates.
while item == array[pos]:
pos += 1
array[pos], item = item, array[pos]
ans += 1
# Rotate the rest of the cycle.
while pos != cycleStart:
# Find where to put the item.
pos = cycleStart
for i in range(cycleStart + 1, len(array)):
if array[i] < item:
pos += 1
# Put the item there or right after any duplicates.
while item == array[pos]:
pos += 1
array[pos], item = item, array[pos]
ans += 1
return ans
# Main Code starts here
if __name__ == '__main__':
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
user_input = raw_input('Enter numbers separated by a comma:\n')
unsorted = [int(item) for item in user_input.split(',')]
n = len(unsorted)
cycle_sort(unsorted)
print("After sort : ")
for i in range(0, n):
print(unsorted[i], end=' ')
```
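Cycle sort is usually chosen because it minimizes writes into the array, and the function above returns that write count; the `__main__` block ignores it. A short usage sketch (assuming `cycle_sort` from the file above is in scope):
```python
if __name__ == "__main__":
    data = [5, 2, 4, 1, 3]
    writes = cycle_sort(data)
    print(data)    # [1, 2, 3, 4, 5] - sorted in place
    print(writes)  # number of writes performed into the array
```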
#### File: python/sorts/gnome_sort.py
```python
from __future__ import print_function
def gnome_sort(unsorted):
"""Pure implementation of the gnome sort algorithm in Python."""
if len(unsorted) <= 1:
return unsorted
i = 1
while i < len(unsorted):
if unsorted[i - 1] <= unsorted[i]:
i += 1
else:
unsorted[i - 1], unsorted[i] = unsorted[i], unsorted[i - 1]
i -= 1
            if i == 0:
                i = 1
    return unsorted
if __name__ == '__main__':
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
user_input = raw_input('Enter numbers separated by a comma:\n').strip()
unsorted = [int(item) for item in user_input.split(',')]
gnome_sort(unsorted)
print(unsorted)
``` |
{
"source": "joeyzhou98/boutiques",
"score": 2
} |
#### File: boutiques/tests/test_example_flag.py
```python
import os
import subprocess
import pytest
from unittest import TestCase
from boutiques import __file__ as bfile
import boutiques as bosh
class TestExampleFlag(TestCase):
def test_example_flag_1(self):
ex_dir = os.path.join(os.path.dirname(bfile),
"schema", "examples", "example-flag")
ret = bosh.execute("simulate",
os.path.join(ex_dir, "example-flag.json"),
"-i",
os.path.join(ex_dir, "i1.json"))
self.assertEqual(ret.shell_command.strip(), "/bin/true -a -b")
def test_example_flag_2(self):
ex_dir = os.path.join(os.path.dirname(bfile),
"schema", "examples", "example-flag")
ret = bosh.execute("simulate",
os.path.join(ex_dir, "example-flag.json"),
"-i",
os.path.join(ex_dir, "i2.json"))
self.assertEqual(ret.shell_command.strip(), "/bin/true")
def test_example_flag_3(self):
self.maxDiff = None
ex_dir = os.path.join(os.path.dirname(bfile),
"schema", "examples", "example-flag")
ret = bosh.execute("simulate",
os.path.join(ex_dir, "example-flag.json"),
"-i",
os.path.join(ex_dir, "i3.json"))
        self.assertEqual(ret.shell_command.replace("  ", " ").strip(),
"/bin/true -b")
def test_example_flag_4(self):
self.maxDiff = None
test_desc = os.path.join(
os.path.split(bfile)[0],
'schema/examples/example-flag/example-flag.json')
test_invocation = os.path.join(
os.path.split(bfile)[0],
'schema/examples/example-flag/i4.json')
command = ("bosh exec simulate " +
test_desc + " -i " + test_invocation)
process = subprocess.Popen(command, shell=True,
stderr=subprocess.PIPE)
stderr = process.stderr.read()[-60:].decode("utf-8").strip()
self.assertTrue(
"dash_b (False) flag is set to true or otherwise omitted" in
stderr)
``` |
{
"source": "joeyzhou98/ShopTheLook",
"score": 3
} |
#### File: app/api/vision_script.py
```python
from google.cloud import vision
import os
# set path to api key here
""" INPUT PATH TO JSON API KEY"""
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="g_credentials.json"
uri = "https://media.endclothing.com/media/catalog/product/0/6/06-12-2017_adidas_ultraboost_coreblack_bb6166_mg_1.jpg"
uri2 = "https://static.nike.com/a/images/t_PDP_864_v1/f_auto,b_rgb:f5f5f5/gsuin11ptg5qgktmzoat/air-force-1-07-shoe-KyTDGepj.jpg"
# detect_web_uri(uri)
def best_match_uri(uri):
""" find best matching product from the given uri and returns best match and additional searches """
client = vision.ImageAnnotatorClient()
image = vision.Image()
image.source.image_uri = uri
response = client.web_detection(image=image)
annotations = response.web_detection
best_web_entity = annotations.web_entities[0]
best_matching_pages = annotations.pages_with_matching_images[0]
result = {}
result['best_web_entity'] = best_web_entity
result['best_matching_pages'] = annotations.pages_with_matching_images
return result
def best_match_uploaded_img(img):
"""Detects web annotations given an image."""
from google.cloud import vision
import io
client = vision.ImageAnnotatorClient()
image = vision.Image(content=img)
response = client.web_detection(image=image)
# print(response)
annotations = response.web_detection
best_web_entity = annotations.web_entities[0]
best_matching_pages = annotations.pages_with_matching_images[0]
result = {}
result['best_web_entity'] = best_web_entity
result['best_matching_pages'] = annotations.pages_with_matching_images
return response
# best_web_entity = best_match_uri(uri)['best_web_entity']
# best_matching_pages = best_match_uri(uri)['best_matching_pages']
# print("The product is " , best_web_entity.description , " with a score of " , best_web_entity.score)
# print("You can find it here ", best_matching_pages[0].url)
# print("\nIf that was not the exact product, here are" , str(len(best_matching_pages)) ," additional searches:\n")
# for item in best_matching_pages:
# print('\n' , item.page_title, ':' , item.url, '\n')
# detect_web('images/woman_fashion.jpg')
``` |
{
"source": "joeyzhou98/TheStars354",
"score": 3
} |
#### File: app/api/resources.py
```python
from datetime import datetime
import os
import tempfile
import json
from app import config, mail
from flask import request, jsonify, abort, render_template
from flask_restplus import Resource, fields
from flask_mail import Message
import boto3
import boto3.s3
from flask_jwt_extended import (jwt_required, create_access_token,
jwt_refresh_token_required, create_refresh_token,
get_jwt_identity, set_access_cookies,
set_refresh_cookies, get_raw_jwt, unset_access_cookies,
unset_refresh_cookies, get_jwt_claims)
from app import jwt
from .security import generate_encoded_token, decode_token, admin_required
from . import api_rest
from .models import *
resource = api_rest.namespace('resource', description='Resource namespace')
authentication = api_rest.namespace('authentication', description='Authentication namespace')
# class SecureResource(Resource):
# """ Calls require_auth decorator on all requests """
# method_decorators = [require_auth]
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return RevokedTokenModel.is_in_blacklist(jti)
@authentication.route('/registration', doc={
"description": "Registration route, will store an access token and a refresh token in cookies if registration is successful"})
@resource.doc(params={'username': "user name for the new user, will return 400 it already exists in database.",
'email': "email for the new user, will return 400 if it already exists in database.",
'password': "<PASSWORD>."})
class UserRegistration(Resource):
def post(self):
username = request.args['username']
email = request.args['email']
password = request.args['password']
if username is None or email is None or password is None:
abort(400, "Invalid username or email.")
if UserAuthModel.find_by_username(username):
abort(400, "User name {} is already taken.".format(username))
if UserAuthModel.find_by_useremail(email):
abort(400, "Email {} is already used for another user".format(email))
new_user = UserAuthModel(username=username, useremail=email, password=password)
try:
new_user.save_to_db()
access_token = create_access_token(identity=username, user_claims={'role': new_user.role.value})
refresh_token = create_refresh_token(identity=username)
resp = jsonify({
"success": True,
"id": new_user.uid,
"username": new_user.username,
"email": new_user.useremail,
"role": new_user.role.value})
set_access_cookies(resp, access_token)
set_refresh_cookies(resp, refresh_token)
msg = Message("Your account is created! - 354TheStars.com",
recipients=[email])
msg.html = render_template('AccountCreation.html', username=username)
mail.send(msg)
return resp
except:
return jsonify(success=False), 500
@authentication.route('/login', doc={
"description": "Login route, will store an access token and a refresh token in the cookies if login is successful"})
@resource.doc(params={'username': "user name of the user, will return 404 if it doesn't exist in database.",
'password': "<PASSWORD>, will return 404 if it doesn't match with the password stored in db."})
class UserLogin(Resource):
def post(self):
username = request.args['username']
password = request.args['password']
current_user = UserAuthModel.find_by_username(username)
if not current_user:
abort(404, "User with username {} not found".format(username))
if current_user.password == password:
access_token = create_access_token(identity=username, user_claims={'role': current_user.role.value})
refresh_token = create_refresh_token(identity=username)
resp = jsonify({
"success": True,
"id": current_user.uid,
"username": current_user.username,
"email": current_user.useremail,
"role": current_user.role.value})
set_access_cookies(resp, access_token)
set_refresh_cookies(resp, refresh_token)
return resp
abort(404, "Password for user {} is not correct".format(username))
@authentication.route('/logout/access', doc={
"description": "access token logout route, will put access token into token blacklist if successfully executed, access token needed."})
class LogoutAccess(Resource):
@jwt_required
def post(self):
jti = get_raw_jwt()['jti']
try:
revoked_token = RevokedTokenModel(jti=jti)
revoked_token.add()
resp = jsonify(success=True)
unset_access_cookies(resp)
return resp
except:
return jsonify(success=False), 500
@authentication.route('/logout/refresh', doc={
"description": "refresh token logout route, will put refresh token into token blacklist if successfully executed, refresh token needed."})
class LogoutRefresh(Resource):
@jwt_refresh_token_required
def post(self):
jti = get_raw_jwt()['jti']
try:
revoked_token = RevokedTokenModel(jti=jti)
revoked_token.add()
resp = jsonify(success=True)
unset_refresh_cookies(resp)
return resp
except:
return jsonify(success=False), 500
@authentication.route('/token/refresh', doc={
"description": "access token refresh route, will generate a new access token for the user, refresh token needed."})
class TokenRefresh(Resource):
@jwt_refresh_token_required
def post(self):
current_user = get_jwt_identity()
user = UserAuthModel.find_by_username(current_user)
access_token = create_access_token(identity=current_user, user_claims={'role': user.role.value})
resp = jsonify(success=True)
set_access_cookies(resp, access_token)
return resp
@authentication.route('/password/forget', doc={"description": "This route will send an email to the validated email address with a link to rest password, part of the link is jwt token"})
class ForgetPassword(Resource):
@resource.doc(params={'email': "email for the new user, will return 404 if it doesn't exist in database."})
def post(self):
email = request.args['email']
user = UserAuthModel.find_by_useremail(email)
if user is None:
abort(404, "We weren't able to identify you given the email provided.")
payload = {"username": user.username, "useremail": user.useremail}
encoded_token = generate_encoded_token(payload, 'secret', algorithm='HS256')
# print(encoded_token)
password_reset_url = 'https://thestars354.herokuapp.com/#/changePassword/'+encoded_token.decode("utf-8")+'/'+user.username
msg = Message("Reset password - <PASSWORD>",
recipients=[email])
msg.html = render_template('ResetPasswordEmail.html', username=user.username, link=password_reset_url)
mail.send(msg)
return jsonify(success=True)
@authentication.route('/changePassword/<string:token>', doc={"description": "This route will check the token in url, return success if validated."})
class CheckTokenInEmail(Resource):
def get(self, token):
decoded_payload = decode_token(token, 'secret', algorithm=['HS256'])
username = decoded_payload['username']
email = decoded_payload['useremail']
current_user = UserAuthModel.find_by_username(username)
if not current_user:
abort(404, "User with username {} not found".format(username))
if current_user.useremail != email:
abort(404, "Invalid token.")
return jsonify(success=True)
@authentication.route('/changePassword', doc={"description": "This route will update the password for a user"})
class ResetPassword(Resource):
@resource.doc(params={'username': "username for the account, will return 404 if it doesn't exist in database.",
'password': "<PASSWORD>"})
def put(self):
username = request.args['username']
password = request.args['password']
current_user = UserAuthModel.find_by_username(username)
if not current_user:
abort(404, "User with username {} not found".format(username))
current_user.password = password
db.session.commit()
return jsonify(success=True)
@authentication.route('/allUser', doc={"description": "Get the all users info in db, needs admin status"})
class AllUserInfo(Resource):
@admin_required
def get(self):
users = UserAuthModel.query.all()
return [i.serialize for i in users]
@authentication.route('/deleteUser/<string:username>', doc={"description": "This route will delete a user from the datebase, admin status needed"})
class DeleteUser(Resource):
@admin_required
def delete(self, username):
user = UserAuthModel.find_by_username(username)
if user is None:
abort(404, "User with username {} not found".format(username))
uid = user.uid
buyer_info = BuyerModel.find_by_uid(uid)
seller_info = SellerModel.find_by_uid(uid)
if buyer_info is None or seller_info is None:
abort(404, "Seller info or buyer info with uid {} not found".format(uid))
orders = Order.find_by_buyer_id(buyer_info.uid)
        # clean up the database records related to the user's buyer status
db.session.query(wishListItem).\
filter_by(buyer_id=buyer_info.uid).delete(synchronize_session=False)
db.session.query(shoppingListItem). \
filter_by(buyer_id=buyer_info.uid).delete(synchronize_session=False)
for order in orders:
db.session.query(orderItem).\
filter_by(order_id=order.order_id).delete(synchronize_session=False)
db.session.query(orderSeller).\
filter_by(order_id=order.order_id).delete(synchronize_session=False)
db.session.query(Review).\
filter_by(buyer_id=buyer_info.uid).update({"buyer_id": None}, synchronize_session=False)
        # clean up the database records related to the user's seller status
db.session.query(orderSeller). \
filter_by(seller_id=seller_info.uid).update({"seller_id": None}, synchronize_session=False)
db.session.query(Item).\
filter_by(seller_id=seller_info.uid).update({"seller_id": None}, synchronize_session=False)
db.session.query(Order). \
filter_by(buyer_id=buyer_info.uid).delete(synchronize_session=False)
db.session.query(BuyerModel). \
filter_by(uid=uid).delete(synchronize_session=False)
db.session.query(SellerModel). \
filter_by(uid=uid).delete(synchronize_session=False)
db.session.delete(user)
db.session.commit()
return jsonify(success=True)
@resource.route('/user/<int:uid>', doc={"description": "Search and return user name and email"})
class UserInfo(Resource):
@jwt_required
def get(self, uid):
current_user = UserAuthModel.find_by_uid(uid)
if not current_user:
return jsonify(success=False)
result = {'uid': uid, 'username': current_user.username, 'email': current_user.useremail}
return result
@resource.route('/user/<int:uid>/ordered/<int:item_id>', doc={"description": "Determines if user has ordered a particular item"})
class UserInfo(Resource):
def get(self, uid, item_id):
        user = UserAuthModel.find_by_uid(uid)
        if not user:
abort(404, "User for uid {} not found".format(uid))
if not Item.item_exists(item_id):
abort(404, "Item for item id {} not found".format(item_id))
orders = Order.query.filter(Order.buyer_id == uid).all()
item_ids = []
for order in orders:
for item in order.serialize["items"]:
item_ids.append(item["item"]["item_id"])
return jsonify(True) if item_id in item_ids else jsonify(False)
@resource.route('/user/<int:uid>/reviewed/<int:item_id>', doc={"description": "Determines if user has reviewed a particular item"})
class UserInfo(Resource):
def get(self, uid, item_id):
if not UserAuthModel.find_by_uid(uid):
abort(404, "User for uid {} not found".format(uid))
if not Item.item_exists(item_id):
abort(404, "Item for item id {} not found".format(item_id))
count = Review.query.filter(Review.buyer_id == uid).filter(Review.item_id == item_id).count()
return jsonify(False) if count == 0 else jsonify(True)
@resource.route('/buyerInfo', doc={
"description": "Search and return buyer data that match the queried user name, access token needed"})
@resource.doc(params={'uid': "uid of the user"})
class BuyerInfo(Resource):
@jwt_required
def get(self):
uid = request.args.get('uid')
buyerInfo = BuyerModel.find_by_uid(uid)
if buyerInfo is None:
abort(404, "Buyer info for uid {} not found".format(uid))
return jsonify(buyerInfo.serialize)
@resource.route('/updateAddress/<int:uid>/<int:address_index>', doc={"description": "Update the i-th address of the user."})
@resource.doc(params={'newAddress': "new address for the i-th address"})
class UpdateAddress(Resource):
@jwt_required
def put(self, uid, address_index):
address = 'address'+str(address_index)
db.session.query(BuyerModel) \
.filter(BuyerModel.uid == uid). \
update({address: request.args.get('newAddress')})
db.session.commit()
return jsonify(success=True)
@resource.route('/updatePaypal/<int:uid>', doc={"description": "Update user's paypal account"})
@resource.doc(params={'paypal': "new paypal account"})
class UpdatePaypal(Resource):
@jwt_required
def put(self, uid):
db.session.query(BuyerModel) \
.filter(BuyerModel.uid == uid). \
update({"paypal": request.args.get('paypal')})
db.session.commit()
return jsonify(success=True)
@resource.route('/sellerInfo', doc={
"description": "Search and return seller data that match the queried user uid"})
@resource.doc(params={'uid': "uid of the seller"})
class SellerInfo(Resource):
def get(self):
uid = request.args.get('uid')
sellerName = UserAuthModel.find_by_uid(uid).serialize["username"]
sellerInfo = SellerModel.find_by_uid(uid)
if sellerInfo is None:
abort(404, "Seller info for id {} not found".format(uid))
sellerInfo = sellerInfo.serialize
sellerInfo.update({"seller_name": sellerName})
return jsonify(sellerInfo)
@resource.route('/search', doc={
"description": "Search and return items that match the search query string, returns all items if search query is empty"})
@resource.doc(params={'query': 'Search query'})
class Search(Resource):
def get(self):
query = request.args.get('query')
if query is not None:
query = "%{}%".format(query)
data = Item.query.filter((
Item.description.like(query) |
Item.item_name.like(query) |
Item.brand.like(query)
)).all()
else:
data = Item.query.all()
payload = add_avg_rating(data)
return jsonify(payload)
@resource.route('/category', doc={"description": "Get all items in a certain category"})
@resource.doc(params={
'category': "Category query, one of {'Health & Beauty', 'Jewellery & Watches', 'Automotives & Electronics', 'Clothing, Shoes & Accessories', 'Books', 'Home Supplies'}"})
class Category(Resource):
def get(self):
query = request.args.get('category')
data = Item.query.filter(Item.category == query).all()
payload = add_avg_rating(data)
return jsonify(payload)
@resource.route('/subcategory', doc={"description": "Get all items in a certain subcategory"})
@resource.doc(params={
'subcategory': "Subcategory query, one of {'Men's Clothing', 'Children's Clothing', 'Pet Supplies', 'Women's Clothing', 'Cameras & Video Games', 'Women's Jewellery & Watches', 'Appliances', 'Creams', 'Garden Supplies', 'Shoes', 'Furniture & Accessories', 'Motos & Car Supplies', 'Men's Jewellery & Watches', 'Makeup', 'Books', 'Bags & Accessories', 'Sports', 'Cellphones, Computers & Tablets'}"})
class Subcategory(Resource):
def get(self):
query = request.args.get('subcategory').replace("’", "'")
data = Item.query.filter(Item.subcategory == query).all()
payload = add_avg_rating(data)
return jsonify(payload)
create_item_payload = api_rest.model('ItemModel', {
'item_name': fields.String(description='Item name', required=True),
'price': fields.Float(description='Item price', min=0.05, required=True),
'category': fields.String(description='Item category', required=True),
'subcategory': fields.String(description='Item subcategory', required=True),
'brand': fields.String(description='Item brand', required=True),
'description': fields.String(description='Item description, maximum 1000 characters', required=True),
'quantity': fields.Integer(description='Number of items in stock', min=1, required=True),
'discount': fields.Float(description='Discount on the price', min=0.0, max=1.0, required=True),
'images': fields.String(description='Comma separated item image urls', required=True)
})
item_keys = ("item_name", "price", "category", "subcategory", "brand", "description", "quantity", "discount", "images")
@resource.route('/item/<int:item_id>', doc={"description": "Manipulate (get, update or delete) a specific item"})
class ItemRoutes(Resource):
def get(self, item_id):
item = Item.query.filter(Item.item_id == item_id).first()
if item is None:
abort(404, "Item with id {} not found".format(item_id))
seller_auth_info = UserAuthModel.query.filter_by(uid=item.seller_id).first()
reviews = [i.serialize for i in Review.query.filter(Review.item_id == item_id).all()]
ratings = list(map(lambda x: x["rating"], reviews))
ratings_avg = sum(ratings) / len(ratings) if len(ratings) != 0 else None
item = item.serialize
item.update({"rating": ratings_avg})
result = {"seller_name": seller_auth_info.username if seller_auth_info is not None else None,
"item_info": item,
"reviews": reviews}
return result
@resource.expect(create_item_payload)
@jwt_required
def put(self, item_id):
item = Item.query.filter(Item.item_id == item_id).first()
if item is None:
abort(404, "Item with id {} not found".format(item_id))
payload = json.loads(request.form.get('item'))
image = request.files.get('file')
image_url = ""
image_prefix = "https://comp354.s3.us-east-2.amazonaws.com/itemPic/"
bucket_name = "comp354"
s3 = boto3.client('s3',
aws_access_key_id=config.Config.AWS_ACCESS_KEY_ID,
aws_secret_access_key=config.Config.AWS_SECRET_ACCESS_KEY)
with tempfile.TemporaryDirectory() as tempdir:
if image is not None:
# create a temporary folder to save the review images
image_path = os.path.join(tempdir, image.filename)
image.save(image_path)
s3.upload_file(image_path, bucket_name, 'itemPic/{}'.format(image.filename), ExtraArgs={'ACL': 'public-read'})
image_url += image_prefix + image.filename
item.item_name = payload["item_name"]
item.price = payload["price"]
item.category = payload["category"]
item.subcategory = payload["subcategory"]
item.brand = payload["brand"]
item.description = payload["description"]
item.quantity = payload["quantity"]
item.images = image_url
try:
db.session.commit()
except Exception as e:
abort(400, str(e))
return jsonify(success=True)
@jwt_required
def delete(self, item_id):
item = Item.query.filter(Item.item_id == item_id)
if item is not None:
item.delete()
db.session.commit()
return jsonify(success=True)
else:
abort(404, "Sorry, item with id {} not found".format(item_id))
@resource.route('/item', doc={"description": "Create new item to be inserted into database"})
@resource.expect(create_item_payload)
class CreateItem(Resource):
@jwt_required
def post(self):
current_user_id = UserAuthModel.find_by_username(get_jwt_identity()).uid
payload = json.loads(request.form.get('item'))
image = request.files.get('file')
image_url = ""
image_prefix = "https://comp354.s3.us-east-2.amazonaws.com/itemPic/"
bucket_name = "comp354"
s3 = boto3.client('s3',
aws_access_key_id=config.Config.AWS_ACCESS_KEY_ID,
aws_secret_access_key=config.Config.AWS_SECRET_ACCESS_KEY)
with tempfile.TemporaryDirectory() as tempdir:
if image is not None:
# create a temporary folder to save the review images
image_path = os.path.join(tempdir, image.filename)
image.save(image_path)
s3.upload_file(image_path, bucket_name, 'itemPic/{}'.format(image.filename), ExtraArgs={'ACL': 'public-read'})
image_url += image_prefix + image.filename
new_item = Item(item_name=payload["item_name"],
price=payload["price"],
category=payload["category"],
subcategory=payload["subcategory"],
brand=payload["brand"],
description=payload["description"],
quantity=payload["quantity"],
quantity_sold=0,
discount=payload["discount"],
seller_id=current_user_id,
images=image_url)
try:
new_item.save_to_db()
return jsonify(success=True)
except KeyError as e:
abort(400, "Missing attribute " + str(e))
@resource.route('/item/best', doc={"description": "Return top 20 most sold items"})
class BestSellers(Resource):
def get(self):
items = Item.query.order_by(Item.quantity_sold.desc()).limit(20).all()
payload = add_avg_rating(items)
return jsonify(payload)
@resource.route('/item/deals', doc={"description": "Return top 20 most discounted items"})
class Deals(Resource):
def get(self):
items = Item.query.order_by(Item.discount.desc()).limit(20).all()
payload = add_avg_rating(items)
return jsonify(payload)
@resource.route('/review/<int:item_id>', doc={"description": "1. post/update a new review for an item. 2. Delete all reviews for an item."})
class CreateAndDeleteReview(Resource):
@resource.doc(params={'content': "content of the review", 'rating': "rating"},)
@jwt_required
def post(self, item_id):
item = Item.find_by_id(item_id)
if item is None:
abort(404, "Item with id {} not found".format(item_id))
orders = Order.query.join(orderItem.join(Item, Item.item_id == item_id))
if orders.count() == 0:
abort(404, "No order record for item with id {} ".format(item_id))
current_user_id = UserAuthModel.find_by_username(get_jwt_identity()).uid
current_user_is_buyer = False
for order in orders:
if order.buyer_id == current_user_id:
current_user_is_buyer = True
break
if not current_user_is_buyer:
abort(400, "Current user {} is not a buyer of this item".format(current_user_id))
image_keys = ['image1', 'image2', 'image3', 'image4', 'image5']
images = []
for image_key in image_keys:
if request.files.get(image_key, False):
images.append(request.files[image_key])
images_list = []
image_prefix = "https://comp354.s3.us-east-2.amazonaws.com/reviewPic/"
bucket_name = "comp354"
s3 = boto3.client('s3',
aws_access_key_id=config.Config.AWS_ACCESS_KEY_ID,
aws_secret_access_key=config.Config.AWS_SECRET_ACCESS_KEY)
with tempfile.TemporaryDirectory() as tempdir:
for image in images:
if image is not None:
# create a temporary folder to save the review images
image_path = os.path.join(tempdir, image.filename)
image.save(image_path)
s3.upload_file(image_path, bucket_name, 'reviewPic/{}'.format(image.filename), ExtraArgs={'ACL': 'public-read'})
images_list.append(image_prefix+image.filename)
images = "&".join(images_list)
content = request.args.get('content')
rating = request.args.get('rating')
# Check for existing review with same uid and item id
review = Review.query.filter(Review.buyer_id == current_user_id).filter(Review.item_id == item_id).first()
if review:
# If existing review, update content, rating and images
review.content = content
review.rating = rating
review.images = images
else:
# If not existing, create a new review
new_review = Review(buyer_id=current_user_id, item_id=item_id, content=content, rating=rating, images=images)
item.reviews.append(new_review)
db.session.commit()
return jsonify(success=True)
@jwt_required
def delete(self, item_id):
item = Item.query.filter(Item.item_id == item_id).first()
if item is None:
abort(404, "Item with id {} not found".format(item_id))
reviews = db.session.query(Review).filter_by(item_id=item_id)
reviews.delete(synchronize_session=False)
db.session.commit()
return jsonify(success=True)
@resource.route('/review/<int:item_id>/<int:review_id>', doc={"description": "Manipulate (put, delete) a review for an item."})
class PutAndDeleteReview(Resource):
@resource.doc(params={'response': "seller's response for the review"})
@jwt_required
def put(self, item_id, review_id):
db.session.query(Review)\
            .filter(Review.review_id == review_id, Review.item_id == item_id).\
update({"reply": request.args.get('response')})
db.session.commit()
return jsonify(success=True)
@jwt_required
def delete(self, item_id, review_id):
review = db.session.query(Review) \
            .filter(Review.review_id == review_id, Review.item_id == item_id)
if review is None:
abort(404, "Review with id {} not found".format(review_id) + "for item {}".format(item_id))
review.delete(synchronize_session=False)
db.session.commit()
return jsonify(success=True)
@resource.route('/shopping-cart/<int:user_id>', doc={"description": "Get and empty items in the shopping cart"})
class ShoppingCart(Resource):
@jwt_required
def get(self, user_id):
shoppingListItems = db.session.query(shoppingListItem).filter_by(buyer_id=user_id).all()
shopping_list_items = []
for i in shoppingListItems:
item = Item.query.filter_by(item_id=i.item_id).first()
shopping_list_items.append({"item": item.serialize,
"qty": i.quantity})
return shopping_list_items
@jwt_required
def delete(self, user_id):
db.engine.execute(db.delete(shoppingListItem)
.where(shoppingListItem.c.buyer_id == user_id))
return jsonify(success=True)
@resource.route('/shopping-cart/<int:user_id>/<int:item_id>',
doc={"description": "Add and remove items in the shopping cart"})
class ShoppingCart(Resource):
@resource.doc(params={'newQuantity': "new quantity"},)
@jwt_required
def post(self, user_id, item_id):
buyer = BuyerModel.find_by_uid(user_id)
item = Item.query.filter_by(item_id=item_id).first()
qty = int(request.args.get('newQuantity'))
if item is None:
abort(404, "Item with id {} not found".format(item_id))
if buyer is None:
abort(404, "Buyer with id {} not found".format(user_id))
if not buyer.add_to_shopping_list(item, qty):
abort(404, "Trying to remove more items than the quantity in cart.")
return jsonify(success=True)
@jwt_required
def delete(self, user_id, item_id):
items = db.session.query(shoppingListItem).filter_by(buyer_id=user_id, item_id=item_id)
if items.count() == 0:
abort(404, "Item with id {} not in the shopping cart".format(item_id))
items.delete(synchronize_session=False)
db.session.commit()
return jsonify(success=True)
def add_avg_rating(items):
items = [i.serialize for i in items]
reviews = [i.serialize for i in Review.query.all()]
for item in items:
rating = 0
count = 0
for review in reviews:
if review["item_id"] == item["item_id"]:
rating += review["rating"]
count += 1
item["rating"] = None if count == 0 else rating / count
return items
@resource.route('/place-order/<int:user_id>/<int:item_id>', doc={"description": "Place order for a single item"})
class PlaceOrder(Resource):
@jwt_required
def post(self, user_id, item_id):
buyer = BuyerModel.query.filter_by(uid=user_id).first()
item = Item.query.filter_by(item_id=item_id).first()
if item is None:
abort(404, "Item with id {} not found".format(item_id))
elif item.quantity - item.quantity_sold <= 0:
abort(403, "Not enough stock for item {}".format(item_id))
elif buyer is None:
abort(404, "Buyer with id {} not found".format(user_id))
seller = SellerModel.query.filter_by(uid=item.seller_id).first()
if seller is not None:
seller.add_commission(item)
order = Order(buyer_id=buyer.uid, purchase_date=db.func.current_date())
order.save_to_db()
order.add_item(item)
item.quantity_sold += 1
db.session.commit()
return jsonify(success=True)
@resource.route('/place-order-in-shopping-cart/<int:user_id>/<int:buyer_address_index>/<string:shipping_method>/<float:coupon_discount>',
doc={"description": "Place order for entire shopping cart"})
class PlaceOrderInShoppingCart(Resource):
@jwt_required
def post(self, user_id, buyer_address_index, shipping_method, coupon_discount):
buyer = BuyerModel.query.filter_by(uid=user_id).first()
order = Order(buyer_id=buyer.uid, purchase_date=db.func.current_date(), buyer_address_index=buyer_address_index, shipping_method=shipping_method, coupon_discount=coupon_discount)
shoppingListItems = db.session.query(shoppingListItem).filter_by(buyer_id=user_id).all()
seller_emails = []
if buyer is None:
abort(404, "Buyer with id {} not found".format(user_id))
order.save_to_db()
subtotal = 0
for shopping_list_item in shoppingListItems:
item = Item.query.filter_by(item_id=shopping_list_item.item_id).first()
if item is None:
abort(404, "Item with id {} not found".format(item.item_id))
elif item.quantity - item.quantity_sold <= 0:
abort(403, "Not enough stock for item {} (1)".format(item.item_id))
seller = SellerModel.query.filter_by(uid=item.seller_id).first()
if seller is not None:
seller.add_commission(item)
order.add_item(item)
new_quantity_sold = item.quantity_sold + shopping_list_item.quantity
if item.quantity - new_quantity_sold < 0:
abort(403, "Not enough stock for item {} (2)".format(item.item_id))
item.quantity_sold += shopping_list_item.quantity
db.session.query(orderItem). \
filter_by(order_id=order.order_id, item_id=item.item_id).update({"order_item_quantity": shopping_list_item.quantity}, synchronize_session=False)
list_item = db.session.query(shoppingListItem).filter_by(buyer_id=user_id, item_id=item.item_id)
list_item.delete(synchronize_session=False)
db.session.commit()
subtotal += (item.price - (item.price * item.discount)) * shopping_list_item.quantity
user = UserAuthModel.find_by_uid(user_id)
msg = Message("We've received your order! - 354TheStars.com",
recipients=[user.useremail])
address = buyer.get_address_from_index(buyer_address_index)
msg.html = render_template('SendReceipt.html', \
order_id= order.order_id, purchase_date=order.purchase_date, address=address, \
shipping_method=order.shipping_method, items=order.serialize['items'], subtotal=subtotal, discount=order.coupon_discount)
mail.send(msg)
for shopping_list_item in shoppingListItems:
item = Item.query.filter_by(item_id=shopping_list_item.item_id).first()
user = UserAuthModel.find_by_uid(item.seller_id)
seller_emails.append(user.useremail)
if len(seller_emails) != 0:
msg2 = Message("You got a new order! - 354TheStars.com", recipients=seller_emails)
msg2.html = render_template('NotificationToSeller.html')
mail.send(msg2)
return jsonify(order.serialize)
@resource.route('/wish-list/<int:user_id>/<int:item_id>',doc={"description": "Add and remove wish list"})
class WishList(Resource):
@jwt_required
def post(self, user_id, item_id):
buyer = BuyerModel.query.filter_by(uid=user_id).first()
item = Item.query.filter_by(item_id=item_id).first()
if item is None:
abort(404, "Item with id {} not found".format(item_id))
elif buyer is None:
abort(404, "Buyer with id {} not found".format(user_id))
buyer.add_to_wish_list(item)
return jsonify(success=True)
@jwt_required
def delete(self, user_id, item_id):
item = db.session.query(wishListItem).filter_by(buyer_id=user_id, item_id=item_id)
if item.count() == 0:
abort(404, "Item with id {} not in the shopping cart".format(item_id))
item.delete(synchronize_session=False)
db.session.commit()
return jsonify(success=True)
@jwt_required
def get(self, user_id, item_id):
item = Item.find_by_id(item_id)
buyer = BuyerModel.find_by_uid(user_id)
if item is None:
abort(404, "Item with id {} not found".format(item_id))
elif buyer is None:
abort(404, "Buyer with id {} not found".format(user_id))
wish_list_item = db.session.query(wishListItem).filter_by(buyer_id=user_id, item_id=item_id)
if wish_list_item.count() == 0:
return jsonify(False)
return jsonify(True)
@resource.route('/wish-list/<int:user_id>',doc={"description": "Get wish list"})
class WishList(Resource):
@jwt_required
def get(self, user_id):
items = db.session.query(wishListItem).filter_by(buyer_id=user_id).all()
return jsonify([Item.query.filter_by(item_id=i.item_id).first().serialize for i in items])
@resource.route('/orders/<start_date>/<end_date>',
doc={"description": "Return all orders during a given period of time"})
class AllOrders(Resource):
@admin_required
def get(self, start_date, end_date):
try:
start = datetime.strptime(start_date, "%Y-%m-%d").date()
end = datetime.strptime(end_date, "%Y-%m-%d").date()
if start > end:
abort(400, "Invalid Date")
except:
abort(400, "Invalid Date")
orders = Order.query.filter(start <= Order.purchase_date, end >= Order.purchase_date).all()
return jsonify([i.serialize for i in orders])
@resource.route('/commission/<start_date>/<end_date>',
doc={"description": "Return total commission during a given period of time"})
class TotalCommission(Resource):
@admin_required
def get(self, start_date, end_date):
try:
start = datetime.strptime(start_date, "%Y-%m-%d").date()
end = datetime.strptime(end_date, "%Y-%m-%d").date()
if start > end:
abort(400, "Invalid Date")
except:
abort(400, "Invalid Date")
commission = 0
for seller in SellerModel.query.all():
seller_orders = db.session.query(orderSeller).filter_by(seller_id=seller.uid).all()
orders = [Order.query.filter_by(order_id=i.order_id).order_by(Order.purchase_date.asc()).first() for i in seller_orders]
counter = 0
for order in orders:
order_items = db.session.query(orderItem).filter_by(order_id=order.order_id).all()
items = [Item.query.filter_by(item_id=i.item_id, seller_id=seller.uid).first() for i in order_items]
for item in items:
if item is not None:
counter += 1
if order.purchase_date <= end and order.purchase_date >= start:
if counter < 10:
commission += item.price * (1 - item.discount) * 0.03
else:
commission += item.price * (1 - item.discount) * 0.08
return jsonify(commission)
@resource.route('/coupon/<string:code>', doc={"description": "return the discount associated with the code, return 0 if the coupon is not found"})
class GetCoupon(Resource):
@jwt_required
def get(self, code):
coupon = Coupon.find_by_code(code)
if coupon is None:
return 0.0
return coupon.discount
```
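For orientation, the authentication routes above read credentials from the query string of a POST and return the JWTs as cookies; a minimal client-side sketch using `requests` (the base URL is a placeholder, not from the original repo, and CSRF headers may additionally be required depending on the app's JWT cookie settings):
```python
import requests

BASE = "http://localhost:5000/api/authentication"  # placeholder host and prefix

session = requests.Session()

# /login takes `username` and `password` as query parameters and, on success,
# sets the access/refresh token cookies on the session.
resp = session.post(BASE + "/login",
                    params={"username": "alice", "password": "example-password"})
print(resp.status_code, resp.json())

# Routes guarded by @jwt_required are then authorized via those cookies.
resp = session.post(BASE + "/logout/access")
print(resp.status_code)
```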
#### File: app/api/security.py
```python
from functools import wraps
from flask import request
from flask_restplus import abort
from flask_jwt_extended import verify_jwt_in_request, get_jwt_claims
import jwt
def require_auth(func):
""" Secure method decorator """
@wraps(func)
def wrapper(*args, **kwargs):
# Verify if User is Authenticated
# Authentication logic goes here
if request.headers.get('authorization'):
return func(*args, **kwargs)
else:
return abort(401)
return wrapper
def admin_required(func):
""" admin feature decorator """
@wraps(func)
def wrapper(*args, **kwargs):
verify_jwt_in_request()
claims = get_jwt_claims()
if claims['role'] != 'admin':
abort(403, "Admin only")
else:
return func(*args, **kwargs)
return wrapper
def generate_encoded_token(payload, secret, algorithm):
return jwt.encode(payload, secret, algorithm)
def decode_token(encodedToken, secret, algorithm):
return jwt.decode(encodedToken, secret, algorithm)
``` |
{
"source": "joeyzhu00/FusionAD",
"score": 3
} |
#### File: geodesy/src/geodesy_conversion_UTM.py
```python
from __future__ import print_function
from __future__ import division
import math
import utm
from geodesy import Geodesy
class GeodesyConverterUTM(Geodesy):
def __init__(self, latitudesData, longitudesData):
self.latitudesData = latitudesData
self.longitudesData = longitudesData
def geodetic_data_to_UTM_data(self):
eastings = []
northings = []
zoneNumbers = []
zoneLetters = []
for i in range(len(self.latitudesData)):
easting, northing, zoneNumber, zoneLetter = utm.from_latlon(self.latitudesData[i], self.longitudesData[i])
eastings.append(easting)
northings.append(northing)
            zoneNumbers.append(zoneNumber)
zoneLetters.append(zoneLetter)
return eastings, northings, zoneNumbers, zoneLetters
def global_to_relative_UTM(self, eastings, northings):
"""Convert global UTM coordinates to relative coordinates at a given index"""
globalEastingInitial = eastings[0]
globalNorthingInitial = northings[0]
relativeEastings = []
relativeNorthings = []
for i in range(len(eastings)):
relativeEastings.append(eastings[i] - globalEastingInitial)
relativeNorthings.append(northings[i] - globalNorthingInitial)
return relativeEastings, relativeNorthings
###############################
##### Self Implementation #####
###############################
# """Reference: https://en.wikipedia.org/wiki/Universal_Transverse_Mercator_coordinate_system#From_latitude,_longitude_(%CF%86,_%CE%BB)_to_UTM_coordinates_(E,_N)
# NOTE: Lengths are in kilometers
# """
# TODO: Self-implemented fast geodetic to UTM conversion
# # Flattening coefficient
# flatteningCoeff = Geodesy.f
# N_0 = 0.0 # for Northern Hemisphere, in kilometers
# k_0 = 0.9996
# E_0 = 500
# def geodetic_to_UTM(lat, lng):
# """Converts input geodetic latitude/longitude to UTM"""
# # Calculate preliminary values
# n = flatteningCoeff / (2-flatteningCoeff)
# A = ( a / (1 + n) ) * (1 + ((n**2)/4) + ((n**4)/64)) )
# alpha1 = (1/2)*n - (2/3)*(n**2) + (5/16)*(n**3)
# alpha2 = (13/48)*(n**2) - (3/5)*(n**3)
# alpha3 = (61/240)*(n**3)
# alpha1 = (1/2)*n - (2/3)*(n**2) + (37/96)*(n**3)
# alpha2 = (1/48)*(n**2) + (1/15)*(n**3)
# alpha3 = (61/240)*(n**3)
# return
```
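A short usage sketch tying the two methods above together (the coordinates are arbitrary illustrative values, and this assumes the `utm` package and the class above are importable):
```python
if __name__ == "__main__":
    lats = [37.7749, 37.7755, 37.7760]
    lons = [-122.4194, -122.4180, -122.4170]
    converter = GeodesyConverterUTM(lats, lons)
    eastings, northings, zone_numbers, zone_letters = converter.geodetic_data_to_UTM_data()
    rel_eastings, rel_northings = converter.global_to_relative_UTM(eastings, northings)
    # Relative values are metre offsets from the first point, so index 0 is (0.0, 0.0).
    print(rel_eastings[0], rel_northings[0])
    print(rel_eastings[-1], rel_northings[-1])
```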
#### File: modules/perception/node_camera.py
```python
import cv2
import rospy
import roslib
import numpy as np
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
def main():
#Initialize node
rospy.init_node('camera')
#Create bridge object
bridge = CvBridge()
#Create publisher and to publish raw image data
pub = rospy.Publisher("/raw_USBcamera_images", Image, queue_size=1000)
rate = rospy.Rate(30)
#initialize camera
cap = cv2.VideoCapture(0)
print "Camera Initialized"
while not rospy.is_shutdown():
frameReadCorrectly, frame = cap.read()
#If frame is empty, don't send anything (Stuff crashes)
if frameReadCorrectly:
pub.publish(bridge.cv2_to_imgmsg(frame, "bgr8"))
rate.sleep()
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
``` |
{
"source": "JoeZajac/bhp3_class",
"score": 3
} |
#### File: areas_for_class/demo_plus/demo1.py
```python
from pprint import pprint
import multiprocessing
import os
import pdb
import sys
import threading
def fib(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return (fib(n-1) + fib(n-2))
def demo(name='fib_thread'):
if name == 'fib_thread':
# http://www.dabeaz.com/GIL/gilvis/fourthread.html
for i in range(10):
n = 35
print(f'Starting Job {i}')
t = threading.Thread(target=fib, args=(n,))
t.start()
elif name == 'fib_mp':
for i in range(10):
n = 35
print(f'Starting Job {i}')
t = multiprocessing.Process(target=fib, args=(n,))
t.start()
elif name == 'walk':
dirname = '/Users/jtimarnold/code/plastex/unittests'
for root, dirs, files in os.walk(dirname):
print(f'\nroot is {root}')
pprint(f' containing these dirs {dirs}')
pprint(f' and these files {files}')
elif name == 'debug':
j = 0
k = 0
pdb.set_trace()
for i in range(10):
j = i**2
k = i**3
print(f'j is {j}, k is {k}')
if __name__ == '__main__':
if len(sys.argv) == 2:
if sys.argv[1] == 'fib_thread':
demo('fib_thread')
elif sys.argv[1] == 'fib_mp':
demo('fib_mp')
elif sys.argv[1] == 'walk':
demo('walk')
elif sys.argv[1] == 'debug':
demo('debug')
else:
for i in range(10):
print(fib(i))
```
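To make the GIL point of the `fib_thread` / `fib_mp` demos visible, a small timing harness can be bolted on (illustrative only; it assumes the `fib` function above is in scope, and the job counts are arbitrary):
```python
import multiprocessing
import threading
import time


def timed_fib_workers(kind, n=32, jobs=4):
    cls = threading.Thread if kind == "thread" else multiprocessing.Process
    workers = [cls(target=fib, args=(n,)) for _ in range(jobs)]
    start = time.time()
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    return time.time() - start


if __name__ == "__main__":
    # CPU-bound work: threads are serialized by the GIL, processes run in parallel.
    print("threads:  ", timed_fib_workers("thread"))
    print("processes:", timed_fib_workers("process"))
```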
#### File: areas_for_class/packets/struct_icmp.py
```python
import struct
class ICMP:
def __init__(self, buff):
header = struct.unpack('<BBHHH', buff)
self.type = header[0]
self.code = header[1]
self.sum = header[2]
self.id = header[3]
self.seq = header[4]
```
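A minimal round-trip sketch for the header class above (the packed bytes are made up for illustration; in a sniffer the buffer would be sliced out of a raw socket read, and this assumes the `ICMP` class is in scope):
```python
import struct

# Pack a fake echo-request header: type=8, code=0, checksum=0, id=0x1234, seq=1,
# using the same '<BBHHH' layout the ICMP class unpacks.
sample = struct.pack('<BBHHH', 8, 0, 0, 0x1234, 1)

icmp = ICMP(sample)
print(icmp.type, icmp.code, icmp.id, icmp.seq)  # 8 0 4660 1
```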
#### File: bhp3_class/areas_for_class/scratch.py
```python
from collections import deque
import queue
EXTENSIONS = ['.php', '.bak', '.orig', '.inc']
WORDLIST = "/Users/jtimarnold/Downloads/SVNDigger/all.txt"
print(f'my name is {__name__}')
def get_words():
def extend_words(word):
if "." in word:
words.put(f'/{word}')
else:
words.put(f'/{word}/')
for extension in EXTENSIONS:
words.put(f'/{word}{extension}')
with open(WORDLIST) as f:
raw_words = f.read()
words = queue.Queue()
for word in raw_words.split():
extend_words(word)
return words
def main():
print('see how that\'s different?')
if __name__ == '__main__':
main()
try:
do_something()
except (IOError):
print('something IO happened')
except AttributeError:
print('something attr happened')
else:
print('good')
finally:
print('cleanup')
``` |
{
"source": "Joezc/AutoLB",
"score": 2
} |
#### File: Server/LB/views.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
import json
from .models.project import Project
from .models.subnet import Subnet
from .models.vm import VM
import chardet
# Create your views here.
@csrf_exempt
def project(request):
input = json.loads(request.body.decode(chardet.detect(request.body)["encoding"]))
status = "failure"
action = input["action"]
type = input["type"]
res = {
"status": status,
"action": action,
"type": type,
"info": {}
}
if(input["action"] == "create"):
print("project create...")
project = Project.create(input['user'], input['info']['name'])
res["status"] = "successful"
res["info"] = project.info()
if(input["action"] == "info"):
print("project info...")
project = Project.getby(input['user'], input['info']['name'])
if project is None:
res["info"] = "can not fint project"
else:
res["status"] = "successful"
res["info"] = project.info()
if(input["action"] == "update"):
print("project update...")
status = "successful"
if(input["action"] == "delete"):
print("project delete...")
project = Project.getby(input['user'], input['info']['name'])
project.removeproj()
status = "successful"
if(input["action"] == "list"):
res["info"] = Project.listall()
status = "successful"
return HttpResponse(json.dumps(res))
@csrf_exempt
def instance(request):
input = json.loads(request.body.decode(chardet.detect(request.body)["encoding"]))
status = "successful"
action = input["action"]
type = input["type"]
res = {
"status": status,
"action": action,
"type": type,
"info": {}
}
if(input["action"] == "create"):
print("instance create...")
# user, proj_name, subnet_ip, backends, healthcheck
instance = VM.create(input["user"], input["project"], input["info"]["subnet"],
input["info"]["backend"]["entities"],
input["info"]["backend"]["health-check"]["up"],
input["info"]["backend"]["health-check"]["interval"],
input["info"]["backend"]["health-check"]["timeout"],
input["info"]["backend"]["health-check"]["threshold"]
)
res["info"] = instance.info()
if (input["action"] == "info"):
print("instance info...")
try:
ins = VM.objects.get(pk=input["id"])
res["info"] = ins.info()
except VM.DoesNotExist:
res["type"] = "failure"
res["info"] = "can not find instance"
# if(input["action"] == "update"):
# print("instance update...")
if(input["action"] == "delete"):
print("instance delete...")
try:
ins = VM.objects.get(pk=input["id"])
ins.removeins()
except VM.DoesNotExist:
res["type"]="failure"
res["info"]= "can not find instance"
return HttpResponse(json.dumps(res))
``` |
{
"source": "joezeng/pdc2014",
"score": 3
} |
#### File: joezeng/pdc2014/test.py
```python
from subprocess import call
import textwrap
import random
import os
###### config parameters ######
# The directory where contestant executables reside.
# Don't change this unless you've also changed the directory in compile.py.
contestant_dir = "exe"
# Displays a log of each player's decisions if set to True.
log_player_outputs = False
# Displays a log of all the players' decisions at the end of the game if set to True.
show_game_log = True
# Prompts for an Enter to continue if set to True.
pause_between_rounds = False
# Shuffles the contestants' IDs in a "random" order if set to True.
# The randomness depends on the given seed, so as to be replicable.
random_order = True
# Shuffles the contestants' IDs in a completely random order if set to True and random_order is set to True.
# This will produce a different result every time.
totally_random_order = True
# The execution limit for programs.
time_limit = 2
###############################
# run code
filenames = os.listdir(contestant_dir)
filenames.sort()
if random_order:
if not totally_random_order:
random.seed(4164398143650650814)
random.shuffle(filenames)
n_players = len(filenames)
states = [""] * n_players
scores = [0] * n_players
n_rounds = n_players * 25
rounds_played = 0
game_log = ""
randseeds = [int(i) for i in open("pd_rand").readlines()]
def play_round(player):
global filenames, n_players, states, rounds_played, game_log
if log_player_outputs:
print "Player %d:" % (player + 1),
text_input = "%d %d %d\n%s%d\n%s" % (n_players, player + 1, rounds_played, game_log, randseeds[rounds_played], states[player])
fout = open("gamestate", "w")
fout.write(text_input)
fout.close()
p = call(["timelimit -t %d %s/%s" % (time_limit, contestant_dir, filenames[player])], stdin=open("gamestate", 'r'), stdout=open("output", 'w'), shell=True)
fin = open("output", "rb")
take_output = fin.readline()
if log_player_outputs:
print take_output.strip()
takes = sorted(set(map(int, take_output.split())))
state = fin.readline()
fin.close()
return (takes, state)
for n_round in xrange(n_rounds):
print textwrap.dedent("""\
=================
= Round %s =
=================""") % repr(rounds_played + 1).ljust(7)
log_entry = ""
for x in range(n_players):
output = play_round(x)
for victim in output[0]:
log_entry += "(%s, %s) " % (x + 1, victim)
scores[victim - 1] -= 2
scores[x] += 1
states[x] = output[1]
game_log += log_entry + "\n"
rounds_played += 1
print "\nScores:",
for n in xrange(len(scores)):
print scores[n],
print
print
if pause_between_rounds:
raw_input("Press Enter to continue...")
if show_game_log:
print "Game log: \n" + game_log
print "Final scores:"
for n in xrange(len(scores)):
print filenames[n], scores[n]
os.remove("gamestate")
os.remove("output")
``` |
{
"source": "joez/letspy",
"score": 4
} |
#### File: letspy/fun/add-binary.py
```python
def add_binary(a, b):
if len(a) < len(b):
a, b = b, a
result, carry = [], 0
for i in range(len(a)):
va = int(a[-(i+1)])
vb = int(b[-(i+1)]) if i < len(b) else 0
carry, v = divmod(va + vb + carry, 2)
result.append(v)
if carry:
result.append(carry)
return ''.join(map(str, reversed(result)))
a, b = input().split()
print(add_binary(a, b))
print(format(int(a, 2) + int(b, 2), 'b'))
```
#### File: letspy/fun/guess.py
```python
import random
def guess(words, health=8):
secret = random.choice(words)
done, todo = set(), set(secret)
while health > 0:
guess = input("Guess a letter: ")
right = guess in todo
if right:
done.add(guess)
else:
health -= 1
is_in = 'in' if right else 'not in'
print('"{}" is {} the word.'.format(guess, is_in))
print(f'Health points = {health:d}')
if done == todo:
print(f'Congratulations, you did it! The word is {secret}')
break
if __name__ == '__main__':
words = input("Candidate words to guess: ").split()
health = int(input("Maximum health points: "))
print("Now let's guess!")
guess(words, health)
```
#### File: letspy/fun/longest-prefix.py
```python
def longest_prefix(strs):
prefix = min(strs, key=len)
for i, c in enumerate(prefix):
for s in strs:
if s[i] != c:
return prefix[:i]
return prefix
def another_solution(strs):
prefix = ''
for l in zip(*strs):
if len(set(l)) == 1:
prefix += l[0]
else:
break
return prefix
strs = [input() for _ in range(int(input()))]
for f in (longest_prefix, another_solution):
print(f(strs))
```
#### File: letspy/fun/min-stack.py
```python
class MinStack:
def __init__(self):
self.data = []
self.mini = []
def push(self, d):
self.data.append(d)
if len(self.mini) == 0 or d <= self.mini[-1]:
self.mini.append(d)
def pop(self):
d = self.data.pop()
if d == self.mini[-1]:
self.mini.pop()
return d
@property
def min(self):
return self.mini[-1]
def __len__(self):
return len(self.data)
def __getitem__(self, key):
return self.data[key]
if __name__ == '__main__':
stack = MinStack()
for d in map(int, input().split()):
stack.push(d)
for _ in range(len(stack)):
print(f'top: {stack[-1]}, min: {stack.min}')
stack.pop()
```
#### File: letspy/fun/min-sum.py
```python
def min_sum(nums, target):
'''
find the minimal length of a contiguous subarray
of which the sum >= target, return 0 if can't find
>>> min_sum([1,2,2,4,2,2,2], 6)
2
'''
size = 0
total, s = 0, 0
for i, v in enumerate(nums):
total += v
if total >= target:
while total >= target:
total -= nums[s]
s += 1
            # the while loop moved s one position past the smallest valid start,
            # so the window length is i - (s - 1) + 1
            l = i - s + 1 + 1
if size == 0 or l < size:
size = l
return size
if __name__ == "__main__":
target = int(input())
nums = [int(n) for n in input().split()]
print(min_sum(nums, target))
```
#### File: letspy/fun/pascal2.py
```python
import sys
def pascal_triangle(index):
row = [1]
for _ in range(index):
row = [x + y for x, y in zip([0] + row, row + [0])]
return row
if __name__ == '__main__':
index = int(sys.argv[1]) if len(sys.argv) > 1 else 5
print(pascal_triangle(index))
```
#### File: letspy/fun/rotate.py
```python
def rotate(nums, k):
'''
rotate the array to the right by k steps, k is non-negative
'''
n = len(nums)
k = k % n
for i, j in [(0, n-k-1), (n-k, n-1), (0, n-1)]:
while i < j:
nums[i], nums[j] = nums[j], nums[i]
i, j = i+1, j-1
if __name__ == "__main__":
k = int(input())
nums = [int(n) for n in input().split()]
rotate(nums, k)
print(nums)
```
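The three index pairs in the loop above are the classic three-reversal rotation trick; a worked trace (assuming the `rotate` function above is in scope):
```python
# For nums = [1, 2, 3, 4, 5] and k = 2:
#   reverse the first n - k elements: [3, 2, 1, 4, 5]
#   reverse the last k elements:      [3, 2, 1, 5, 4]
#   reverse the whole list:           [4, 5, 1, 2, 3]
nums = [1, 2, 3, 4, 5]
rotate(nums, 2)
print(nums)  # [4, 5, 1, 2, 3]
```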
#### File: lang/module/fraction.py
```python
from fractions import Fraction
from functools import reduce
import operator
def product(fracs):
t = reduce(operator.mul, fracs, 1)
return t.numerator, t.denominator
if __name__ == '__main__':
n = int(input())
l = [Fraction(*map(int, input().split())) for _ in range(n)]
print(*product(l))
```
#### File: lang/string/reverse-words.py
```python
def reverse_words(s):
return ' '.join(w[::-1] for w in s.split(' '))
def reverse_words_ext(s):
# support other whitespaces
strs, word = [], ''
for c in s:
if c.isspace():
if word:
strs.append(word[::-1])
word = ''
strs.append(c)
else:
word += c
if word:
strs.append(word[::-1])
return ''.join(strs)
if __name__ == '__main__':
s = input()
for f in (reverse_words, reverse_words_ext):
print(f(s))
``` |
{
"source": "Joe-zsc/NetworkAttackSimulator",
"score": 3
} |
#### File: nasim/agents/NDSPI.py
```python
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import random
import math
import numpy as np
from gym import error
from pprint import pprint
from SumTree2 import SumTree
import nasim
from others import save_data
from torch.autograd import Variable
try:
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
except ImportError as e:
raise error.DependencyNotInstalled(
f"{e}. (HINT: you can install dqn_agent dependencies by running "
"'pip install nasim[dqn]'.)"
)
class ReplayMemoryPER:
e = 0.01
a = 0.6
beta = 0.4
beta_increment_per_sampling = 0.001
def __init__(self, capacity,device="cuda"):
self.tree = SumTree(capacity)
self.capacity = capacity
self.device = device
def _get_priority(self, error):
return (abs(error) + self.e) ** self.a
def add(self, error, sample):
p = self._get_priority(error)
self.tree.add(p, sample)
def sample(self, n):
batch = []
idxs = []
segment = self.tree.total() / n
priorities = []
self.beta = np.min([1., self.beta + self.beta_increment_per_sampling])
for i in range(n):
a = segment * i
b = segment * (i + 1)
s = random.uniform(a, b)
(idx, p, data) = self.tree.get(s)
priorities.append(p)
batch.append(data)
idxs.append(idx)
sampling_probabilities = priorities / self.tree.total()
is_weight = np.power(self.tree.n_entries * sampling_probabilities, -self.beta)
is_weight /= is_weight.max()
batchs = np.array(batch).transpose()
s=torch.from_numpy(np.vstack(batchs[0])).to(self.device)
a=torch.from_numpy(np.vstack(list(batchs[1])).astype(np.int64)).to(self.device)
r=torch.from_numpy(np.array(list(batchs[2]))).to(self.device)
s_=torch.from_numpy(np.vstack(batchs[3])).to(self.device)
d=torch.from_numpy(np.array(list(batchs[4])).astype(np.int32)).to(self.device)
return s,a,s_,r,d, idxs, is_weight
def update(self, idx, error):
p = self._get_priority(error)
self.tree.update(idx, p)
class NoisyLinear(nn.Module):
def __init__(self, in_features, out_features, std_init=0.4):
super(NoisyLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.std_init = std_init
self.weight_mu = nn.Parameter(torch.FloatTensor(out_features, in_features))
self.weight_sigma = nn.Parameter(torch.FloatTensor(out_features, in_features))
self.register_buffer('weight_epsilon', torch.FloatTensor(out_features, in_features))
self.bias_mu = nn.Parameter(torch.FloatTensor(out_features))
self.bias_sigma = nn.Parameter(torch.FloatTensor(out_features))
self.register_buffer('bias_epsilon', torch.FloatTensor(out_features))
self.reset_parameters()
self.reset_noise()
def forward(self, x):
if self.training:
weight = self.weight_mu + self.weight_sigma.mul(Variable(self.weight_epsilon))
bias = self.bias_mu + self.bias_sigma.mul(Variable(self.bias_epsilon))
else:
weight = self.weight_mu
bias = self.bias_mu
return F.linear(x, weight, bias)
def reset_parameters(self):
mu_range = 1 / math.sqrt(self.weight_mu.size(1))
self.weight_mu.data.uniform_(-mu_range, mu_range)
self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.weight_sigma.size(1)))
self.bias_mu.data.uniform_(-mu_range, mu_range)
self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.bias_sigma.size(0)))
def reset_noise(self):
epsilon_in = self._scale_noise(self.in_features)
epsilon_out = self._scale_noise(self.out_features)
self.weight_epsilon.copy_(epsilon_out.ger(epsilon_in))
self.bias_epsilon.copy_(self._scale_noise(self.out_features))
def _scale_noise(self, size):
x = torch.randn(size)
x = x.sign().mul(x.abs().sqrt())
return x
class ICM(nn.Module):
def __init__(self, input_dim, num_actions):
"""
Initialize a deep Q-learning network as described in
https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
Arguments:
in_channels: number of channel of input.
i.e The number of most recent frames stacked together as describe in the paper
num_actions: number of action-value to output, one-to-one correspondence to action in game.
"""
super(ICM, self).__init__()
# self.conv1 = nn.Conv2d(in_channels, 32, kernel_size=8, stride=4)
# self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
# self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
# self.fc4 = nn.Linear(7 * 7 * 64, 512)
dd=128
self.linear1=nn.Linear(input_dim[0],dd)
self.linear2=nn.Linear(dd,dd)
self.linear3=nn.Linear(dd,dd)
self.pred_module1 = nn.Linear(dd + num_actions, dd)
self.pred_module2 = nn.Linear(dd, dd)
self.invpred_module1 = nn.Linear(dd + dd, dd)
self.invpred_module2 = nn.Linear(dd, num_actions)
def get_feature(self, x):
x = F.relu(self.linear1(x))
x = F.relu(self.linear2(x))
x = self.linear3(x)
#x = F.relu(self.linear3(x))
return x
def forward(self, x):
# get feature
feature_x = self.get_feature(x)
return feature_x
def get_full(self, x, x_next, a_vec):
# get feature
feature_x = self.get_feature(x)
feature_x_next = self.get_feature(x_next)
pred_s_next = self.pred(feature_x, a_vec) # predict next state feature
pred_a_vec = self.invpred(feature_x, feature_x_next) # (inverse) predict action
return pred_s_next, pred_a_vec, feature_x_next
def pred(self, feature_x, a_vec):
# Forward prediction: predict next state feature, given current state feature and action (one-hot)
aaa=torch.cat([feature_x, a_vec.float()], dim = -1)
bbb=aaa.detach()
pred_s_next = F.relu(self.pred_module1( bbb))
pred_s_next = self.pred_module2(pred_s_next)
return pred_s_next
def invpred(self,feature_x, feature_x_next):
# Inverse prediction: predict action (one-hot), given current and next state features
pred_a_vec = F.relu(self.invpred_module1(torch.cat([feature_x, feature_x_next], dim = -1)))
pred_a_vec = self.invpred_module2(pred_a_vec)
return F.softmax(pred_a_vec, dim = -1)
class NoisyDoubleDuelingDQN(nn.Module):
"""A simple Deep Q-Network """
def __init__(self, input_dim, layers, num_actions):
super().__init__()
'''
self.layers = nn.ModuleList([nn.Linear(input_dim[0], layers[0])])
for l in range(1, len(layers)):
self.layers.append(NoisyLinear(layers[l-1], layers[l]))
#self.out = NoisyLinear(layers[-1], num_actions)
self.value_stream=NoisyLinear(layers[-1],1)
self.advantage_stream=NoisyLinear(layers[-1], num_actions)
'''
n=256
self.linear = nn.Linear(input_dim[0], n)
self.linear2 = NoisyLinear(n, n)
self.advantage_hidden_layer = NoisyLinear(n, n)
self.advantage_layer = NoisyLinear(n, num_actions)
self.value_hidden_layer = NoisyLinear(n, n)
self.value_layer = NoisyLinear(n, 1)
def forward(self, x):
x = F.relu(self.linear(x))
x = F.relu(self.linear2(x))
value=F.relu(self.value_hidden_layer(x))
values = self.value_layer(value)
advantages = F.relu(self.advantage_hidden_layer(x))
advantages = self.advantage_layer(advantages)
qvals = values + (advantages - advantages.mean())
return qvals
def save_DQN(self, file_path):
torch.save(self.state_dict(), file_path)
def load_DQN(self, file_path):
self.load_state_dict(torch.load(file_path))
def get_action(self, x):
with torch.no_grad():
if len(x.shape) == 1:
x = x.view(1, -1)
return self.forward(x).max(1)[1]
def reset_noise(self):
self.linear2.reset_noise()
self.advantage_hidden_layer.reset_noise()
self.advantage_layer.reset_noise()
self.value_hidden_layer.reset_noise()
self.value_layer.reset_noise()
class NoisyDoubleDuelingDQN_PERAgent:
"""A simple Deep Q-Network Agent """
def __init__(self,
env,
seed=None,
lr=0.001,
training_steps=10000,
episode_number=10000,
batch_size=32,
replay_size=5000,
final_epsilon=0.05,
exploration_steps=10000,
gamma=0.99,
hidden_sizes=[64, 64],
target_update_freq=1000,
verbose=True,
forward_scale = 0.8,
inverse_scale = 0.2,
Qloss_scale = 1,
intrinsic_scale= 1,
use_extrinsic = True,
use_episode = True,
**kwargs):
self.use_episode=use_episode
# This DQN implementation only works for flat actions
assert env.flat_actions
self.verbose = verbose
if self.verbose:
print(f"\nRunning DQN with config:")
pprint(locals())
# set seeds
self.seed = seed
if self.seed is not None:
np.random.seed(self.seed)
        # environment setup
self.env = env
##
self.compromised_host=[]
self.uncompromised_host=[]
self.hosts=self.env.network.hosts_addresses
##
self.num_actions = self.env.action_space.n
self.obs_dim = self.env.observation_space.shape
self.alpha=0.5/self.num_actions
# logger setup
self.logger = SummaryWriter()
# Training related attributes
self.lr = lr
self.exploration_steps = exploration_steps
self.final_epsilon = final_epsilon
self.epsilon_schedule = np.linspace(1.0,
self.final_epsilon,
self.exploration_steps)
self.batch_size = batch_size
self.discount = gamma
self.training_steps = training_steps
self.episode_number=episode_number
self.steps_done = 0
self.train_start=replay_size
self.best_return=0
self.best_action_set=[]
self.rewards_episode=[]
self.rewards_step=[]
self.num_episodes = 0
# Neural Network related attributes
self.device = torch.device("cuda"
if torch.cuda.is_available()
else "cpu")
self.dqn = NoisyDoubleDuelingDQN(self.obs_dim,
hidden_sizes,
self.num_actions).to(self.device)
# param for ICM
self.forward_scale = forward_scale # scale for loss function of forward prediction model, 0.8
self.inverse_scale = inverse_scale # scale for loss function of inverse prediction model, 0.2
self.Qloss_scale = Qloss_scale # scale for loss function of Q value, 1
self.intrinsic_scale = intrinsic_scale # scale for intrinsic reward, 1
self.use_extrinsic = use_extrinsic # whether use extrinsic rewards, if False, only intrinsic reward generated from ICM is used
self.ICM=ICM(self.obs_dim,self.num_actions).to(self.device)
self.target_dqn = NoisyDoubleDuelingDQN(self.obs_dim,
hidden_sizes,
self.num_actions).to(self.device)
self.target_update_freq = target_update_freq
if self.verbose:
print(f"\nUsing Neural Network running on device={self.device}:")
print(self.device)
print(torch.cuda.get_device_name(0))
print(self.dqn)
print(self.ICM)
self.optimizer = optim.Adam(list(self.dqn.parameters())+list(self.ICM.parameters()), lr=self.lr)
#self.loss_fn = nn.SmoothL1Loss()
        self.loss_fn = nn.MSELoss(reduction='none')
# PER replay
self.replayPER = ReplayMemoryPER(replay_size,self.device)
# replay setup
# save sample (error,<s,a,r,s'>) to the replay memory
def append_sample(self, state, action, reward, next_state, done):
q_vals_raw = self.dqn(torch.from_numpy(state).to(self.device))
q_vals = q_vals_raw[action]
target_q_val_raw = self.target_dqn(torch.from_numpy(next_state).to(self.device))
target_q_val = target_q_val_raw.max()
if done:
target=reward
else:
target = reward + self.discount*target_q_val
error = abs(q_vals - target)
self.replayPER.add(error, (state, action, reward, next_state, done))
def save(self, save_path):
self.dqn.save_DQN(save_path)
def load(self, load_path):
self.dqn.load_DQN(load_path)
def get_epsilon(self):
if self.use_episode:
if self.num_episodes < self.exploration_steps:
return self.epsilon_schedule[self.num_episodes]
return self.final_epsilon
else:
if self.steps_done < self.exploration_steps:
return self.epsilon_schedule[self.steps_done]
return self.final_epsilon
def get_epsilon2(self):
if self.steps_done < self.exploration_steps:
a=self.exploration_steps
f=self.final_epsilon
w=math.pow(a,2)
x=self.steps_done
y=(w-(1-f)*x*x)/w
return y
return self.final_epsilon
def get_action_set(self):
        # Build the list of candidate actions, excluding hosts that are already compromised
state=self.env.current_state.copy()
actionset=[]
test=[]
s=[]
self.uncompromised_host=[i for i in self.hosts if i not in self.compromised_host]
        # Remove targets on which we already have access
for addr in self.uncompromised_host:
if state.host_has_access(addr, 2):
self.compromised_host.append(addr)
#for addr in self.uncompromised_host:
for a in range(self.env.action_space.n):
t=self.env.action_space.get_action(a)
            # Only consider targets that have not yet been compromised
#if t.target not in s:
if t.target in self.uncompromised_host:
if t.target not in s:
s.append(t.target)
b=state.get_host(t.target)
test.append(b)
                # Keep the action only if the target host is reachable or already discovered
if state.host_reachable(t.target) or state.host_discovered(t.target):
#if (b.access ==0 and t.name.find('scan')!= -1) \
#or (t.name.find('scan')== -1 and b.access>=t.req_access):
#if t.name.find('pe') != -1 :
#a=1
actionset.append(a)
#for a in actionset:
#print(env.action_space.get_action(a))
#print(a)
if len(actionset)==0:
self.env.render_network_graph(show=True)
x=1
test=[]
s=[]
return actionset
def get_egreedy_action(self, o, epsilon):
if random.random() > epsilon:
o = torch.from_numpy(o).float().to(self.device)
return self.dqn.get_action(o).cpu().item()
return random.randint(0, self.num_actions-1)
def get_egreedy_action2(self, o, epsilon):
if random.random() > epsilon:
o = torch.from_numpy(o).float().to(self.device)
return self.dqn.get_action(o).cpu().item()
actions=self.get_action_set()
x=random.randint(0, len(actions)-1)
return actions[x]
def getV(self, q_value):
#v = self.alpha * torch.log((1 / self.alpha * q_value).exp().sum(dim=-1, keepdim=True))
        vv = self.alpha * torch.logsumexp(q_value / self.alpha, dim=-1)  # soft value: V(s) = alpha * logsumexp(Q(s, .) / alpha)
return vv
def optimize(self):
s_batch, a_batch, next_s_batch, r_batch, d_batch, idxs, is_weight = self.replayPER.sample(self.batch_size)
#batch = self.replay.sample_batch(self.batch_size)
#s_batch, a_batch, next_s_batch, r_batch, d_batch = batch
# get q_vals for each state and the action performed in that state
q_vals_raw = self.dqn(s_batch)
q_vals = q_vals_raw.gather(1, a_batch).squeeze()
#ICM
a_vec=F.one_hot(a_batch.squeeze(), num_classes = self.num_actions).to(self.device)# convert action from int to one-hot format
pred_s_next, pred_a_vec, feature_x_next = self.ICM.get_full(s_batch, next_s_batch, a_vec)
        forward_loss = F.mse_loss(pred_s_next, feature_x_next.detach(), reduction='none')
        inverse_pred_loss = F.cross_entropy(pred_a_vec, a_batch.squeeze().detach(), reduction='none')
# calculate rewards
intrinsic_rewards = self.intrinsic_scale * forward_loss.mean(-1)
total_rewards = intrinsic_rewards.clone()
if self.use_extrinsic:
total_rewards += r_batch
r_batch=total_rewards
# get target q val = max val of next state
with torch.no_grad():
# argmax_Q = self.dqn(next_s_batch).max(1)[1].unsqueeze(1)
# DDQNtarget_q_val = self.target_dqn(next_s_batch).gather(1,argmax_Q).squeeze(1)
#target_q_val_raw = self.target_dqn(next_s_batch)
#target_q_val = target_q_val_raw.max(1)[0]
target_q_val_raw = self.target_dqn(next_s_batch)
#target_q_val = target_q_val_raw.max(1)[0]
target_q_val = self.getV(target_q_val_raw)
target = r_batch + self.discount*(1-(d_batch))*target_q_val
target=target.float()
# calculate error
        # use the TD error against the full target (consistent with append_sample)
        error = torch.abs(q_vals - target).detach().cpu().numpy()
# update priority
for i in range(self.batch_size):
idx = idxs[i]
self.replayPER.update(idx, error[i])
# calculate loss
        Q_loss = (torch.as_tensor(is_weight, dtype=torch.float32, device=q_vals.device) * F.mse_loss(q_vals, target, reduction='none')).mean()
loss = self.Qloss_scale*Q_loss + self.forward_scale*forward_loss.mean() + self.inverse_scale* inverse_pred_loss.mean()
#loss = (torch.cuda.FloatTensor(is_weight) * self.loss_fn(q_vals, target)).mean()
# optimize the model
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if self.steps_done % self.target_update_freq == 0:
self.target_dqn.load_state_dict(self.dqn.state_dict())
self.dqn.reset_noise()
self.target_dqn.reset_noise()
q_vals_max = q_vals_raw.max(1)[0]
mean_v = q_vals_max.mean().item()
return loss.item(), mean_v
def train(self):
if self.verbose:
print("\nStarting training")
self.num_episodes = 1
training_steps_remaining = self.training_steps
while self.num_episodes < self.episode_number:
#while self.steps_done < self.training_steps:
ep_results = self.run_train_episode(training_steps_remaining)
#ep_return, ep_steps, goal, honeypot_state = ep_results
ep_return, ep_steps, goal = ep_results
training_steps_remaining -= ep_steps
#self.rewards_step.append(ep_return)
if self.replayPER.tree.n_entries>=self.train_start:
self.logger.add_scalar("episode", self.num_episodes, self.steps_done)
self.logger.add_scalar(
"return-steps", ep_return, self.steps_done
)
self.logger.add_scalar(
"return-episode", ep_return, self.num_episodes
)
self.logger.add_scalar(
"episode_steps", ep_steps, self.steps_done
)
self.logger.add_scalar(
"episode_goal_reached", int(goal), self.steps_done
)
self.logger.add_scalar(
"episode-steps-episode", ep_steps, self.num_episodes
)
# self.logger.add_scalar(
# "honeypot_reached", int(self.env.honeypot_reached()), self.steps_done
# )
# self.logger.add_scalar(
# "honeypot_reached-episodes", int(self.env.honeypot_reached()), self.num_episodes
# )
self.num_episodes += 1
else:
print(f"\treplay memory: = {self.replayPER.tree.n_entries} / "
f"{self.train_start}")
if self.num_episodes % 5 == 0 and self.verbose:
print(f"\nEpisode {self.num_episodes}:")
print(f"\tsteps done = {self.steps_done} / "
f"{self.training_steps}")
print(f"\tepisode steps = {ep_steps}")
print(f"\tepsilon = {self.get_epsilon()}")
print(f"\treturn = {ep_return}")
print(f"\tbest return = {self.best_return}")
print(f"\tgoal = {goal}")
self.logger.close()
if self.verbose:
print("Training complete")
print(f"\nEpisode {self.num_episodes}:")
print(f"\tsteps done = {self.steps_done} / {self.training_steps}")
print(f"\treturn = {ep_return}")
print(f"\tgoal = {goal}")
print("最佳分数为:")
print(self.best_return)
for a in self.best_action_set:
print(env.action_space.get_action(a))
def run_train_episode(self, step_limit):
o = self.env.reset()
done = False
steps = 0
episode_return = 0
action_set=[]
while not done and steps < step_limit:
eee=self.get_epsilon()
a = self.get_egreedy_action(o, eee)
action_set.append(a)
next_o, r, done, _ = self.env.step(a)
#self.replay.store(o, a, next_o, r, done) #
self.append_sample(o,a,r,next_o,done)
if self.replayPER.tree.n_entries>=self.train_start:
loss, mean_v = self.optimize()
self.steps_done += 1
steps += 1
self.logger.add_scalar(
"epsilon", eee, self.num_episodes)
self.logger.add_scalar("loss", loss, self.num_episodes)
self.logger.add_scalar("mean_v", mean_v, self.num_episodes)
#else :
#print( self.replayPER.tree.n_entries)
o = next_o
episode_return += r
if self.replayPER.tree.n_entries>=self.train_start:
self.rewards_episode.append(episode_return)
if episode_return >= self.best_return :
self.best_return=episode_return
self.best_action_set=action_set
#steps += 1
self.compromised_host=[]
self.uncompromised_host=[]
#return episode_return, steps, self.env.goal_reached(), self.env.honeypot_reached()
return episode_return, steps, self.env.goal_reached()
def run_eval_episode(self,
env=None,
render=False,
eval_epsilon=0.05,
render_mode="readable"):
if env is None:
env = self.env
o = env.reset()
done = False
steps = 0
episode_return = 0
line_break = "="*60
if render:
print("\n" + line_break)
print(f"Running EVALUATION using epsilon = {eval_epsilon:.4f}")
print(line_break)
env.render(render_mode)
input("Initial state. Press enter to continue..")
while not done:
a = self.get_egreedy_action(o, eval_epsilon)
next_o, r, done, _ = env.step(a)
o = next_o
episode_return += r
steps += 1
if render:
print("\n" + line_break)
print(f"Step {steps}")
print(line_break)
print(f"Action Performed = {env.action_space.get_action(a)}")
env.render(render_mode)
print(f"Reward = {r}")
print(f"Done = {done}")
input("Press enter to continue..")
if done:
print("\n" + line_break)
print("EPISODE FINISHED")
print(line_break)
print(f"Goal reached = {env.goal_reached()}")
print(f"Total steps = {steps}")
print(f"Total reward = {episode_return}")
return episode_return, steps, env.goal_reached()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("env_name", type=str, help="benchmark scenario name")
parser.add_argument("--render_eval", action="store_true",
help="Renders final policy")
parser.add_argument("-o", "--partially_obs", default=False, action="store_true",
help="Partially Observable Mode")
parser.add_argument("--hidden_sizes", type=int, nargs="*",
default=[128,128],
help="(default=[64. 64])")
parser.add_argument("--lr", type=float, default=0.0001,
help="Learning rate (default=0.001)")
parser.add_argument("-t", "--training_steps", type=int, default=1500000,
help="training steps (default=20000)")
parser.add_argument("-e", "--episode_number", type=int, default=10000,
help="training steps (default=20000)")
parser.add_argument("--batch_size", type=int, default=128,
help="(default=32)")
parser.add_argument("--target_update_freq", type=int, default=1000,
help="(default=1000)")
parser.add_argument("--seed", type=int, default=0,
help="(default=0)")
parser.add_argument("--replay_size", type=int, default=500000,
help="(default=100000)")
parser.add_argument("--final_epsilon", type=float, default=0.05,
help="(default=0.05)")
parser.add_argument("--init_epsilon", type=float, default=1.0,
help="(default=1.0)")
parser.add_argument("--exploration_steps", type=int, default=800000,
help="(default=10000)")
parser.add_argument("--gamma", type=float, default=0.9,
help="(default=0.99)")
parser.add_argument("--quite", action="store_false",
help="Run in Quite mode")
args = parser.parse_args()
env = nasim.make_benchmark(args.env_name,
args.seed,
fully_obs=not args.partially_obs,
flat_actions=True,
flat_obs=True)
#env.render_network_graph(show=True)
dqn_agent = NoisyDoubleDuelingDQN_PERAgent(env, verbose=args.quite, **vars(args))
num_actions = env.action_space.n
#for a in range(num_actions):
# print(env.action_space.get_action(a))
dqn_agent.train()
dqn_agent.save("D:\\Experiments\\Experiment_Record\\E0812\\Saved_model\\NDSPI-medium-multi-site-0820.pkl")
#dqn_agent.save("D:\\Experiments\\NetworkAttackSimulator\\medium-multi-site-honeypot\\NoisyDoubleDueling_PER_Mar15.pkl")
#save_data(dqn_agent.rewards_episode,'D:\\Experiments\\NetworkAttackSimulator\\medium-multi-site\\ NoisyDoubleDueling_PER_rewards_episode_Mar1.csv')
#save_data(dqn_agent.rewards_step,'D:\\Experiments\\NetworkAttackSimulator\\medium-multi-site\\ NoisyDoubleDueling_PER_rewards_step_Mar1.csv')
#dqn_agent.run_eval_episode(render=args.render_eval)
```
#### File: nasim/agents/others.py
```python
import pandas as pd
# Write the collected reward values out to a CSV file
def save_data(datas, name):
    name_attribute = ['reward']
    # Build a single-column DataFrame holding the rewards
    test = pd.DataFrame(columns=name_attribute, data=datas)
    # print(test)
    test.to_csv(name, encoding='utf-8')
``` |
{
"source": "joezuntz/CosmoCov",
"score": 2
} |
#### File: cosmolike_core/theory/covNG_resp.py
```python
# ======================================================== #
# Be brute and load all stuff from libraries, and other functions
# ======================================================== #
from numpy import *
from scipy import interpolate, integrate
epsilon_treelevel = 1.0e-9  # tiny margin to avoid cosine angles being exactly equal to +/-1; this trick works well enough in practice
lpath = "../cosmolike_core/theory/"
def Spline3(xx,yy,Order,X): ## Spline function (Order is the order of the derivative)
ff=interpolate.splrep(xx,yy,k=3)
ss=interpolate.splev(X,ff,Order)
return ss
# The integrand of the standard SPT tree level trispectrum
def alpha(q,k,mu):
return 1. + k*mu/q
def beta(q,k,mu):
return (mu/2.)*(k/q + q/k) + mu**2.
def F2(q1,q2,mu12):
return (1./7)*(5.*alpha(q1,q2,mu12) + 2.*beta(q1,q2,mu12))
def G2(q1,q2,mu12):
return (1./7)*(3.*alpha(q1,q2,mu12) + 4.*beta(q1,q2,mu12))
def F2s(q1,q2,mu12):
return (1./2)*(F2(q1,q2,mu12) + F2(q2,q1,mu12))
def G2s(q1,q2,mu12):
return (1./2)*(G2(q1,q2,mu12) + G2(q2,q1,mu12))
def F3(q1,q2,q3,mu12,mu13,mu23):
term1 = 2.* beta(q1,sqrt(q2**2.+q3**2.+2.*q2*q3*mu23),(q2*mu12+q3*mu13)/sqrt(q2**2.+q3**2.+2.*q2*q3*mu23)) * G2(q2,q3,mu23)
term2 = 7.*alpha(q1,sqrt(q2**2.+q3**2.+2.*q2*q3*mu23),(q2*mu12+q3*mu13)/sqrt(q2**2.+q3**2.+2.*q2*q3*mu23)) * F2(q2,q3,mu23)
term3 = 2.* beta(sqrt(q1**2.+q2**2.+2.*q1*q2*mu12),q3,(q1*mu13+q2*mu23)/sqrt(q1**2.+q2**2.+2.*q1*q2*mu12)) * G2(q1,q2,mu12)
term4 = 7.*alpha(sqrt(q1**2.+q2**2.+2.*q1*q2*mu12),q3,(q1*mu13+q2*mu23)/sqrt(q1**2.+q2**2.+2.*q1*q2*mu12)) * G2(q1,q2,mu12)
return (1./18)*(term1 + term2 + term3 + term4)
def F3s(q1,q2,q3,mu12,mu13,mu23):
out = F3(q1,q2,q3,mu12,mu13,mu23) + F3(q1,q3,q2,mu13,mu12,mu23) + F3(q2,q1,q3,mu12,mu23,mu13) + F3(q2,q3,q1,mu23,mu12,mu13) + F3(q3,q1,q2,mu13,mu23,mu12) + F3(q3,q2,q1,mu23,mu13,mu12)
return out/6.
def cov_stdtree_angle(theta, k1, k2, z):
mu12 = cos(theta)
# Shorthand notation of vector diferences and angles
k1mk2 = sqrt(k1**2. + k2**2. - 2.*k1*k2*mu12) # |\vec{k1}-\vec{k2}|
k1pk2 = sqrt(k1**2. + k2**2. + 2.*k1*k2*mu12) # |\vec{k1}+\vec{k2}|
ang_k1mk2andk2 = (k1*mu12-k2)/k1mk2 # Cosine angle between \vec{k1}-\vec{k2} and \vec{k2}
ang_k1pk2andk2 = (k1*mu12+k2)/k1pk2 # Cosine angle between \vec{k1}+\vec{k2} and \vec{k2}
ang_k2mk1andk1 = (k2*mu12-k1)/k1mk2 # Cosine angle between \vec{k2}-\vec{k1} and \vec{k1}
ang_k2pk1andk1 = (k2*mu12+k1)/k1pk2 # Cosine angle between \vec{k2}+\vec{k1} and \vec{k1}
# Interpolate power spectra
P1 = Plin_int(z, k1)[0]
P2 = Plin_int(z, k2)[0]
P1m2 = flipud(Plin_int(z, flipud(k1mk2))[:,0])
P1p2 = Plin_int(z, k1pk2)[:,0]
# Define terms
term1 = 12.*F3s(k1,k1,k2,-1.+epsilon_treelevel,mu12,-mu12) * P1**2. * P2
term2 = 12.*F3s(k2,k2,k1,-1.+epsilon_treelevel,mu12,-mu12) * P2**2. * P1
term3 = 4.*F2s(k1mk2,k2, ang_k1mk2andk2)**2. * P2**2. * P1m2
term4 = 4.*F2s(k1pk2,k2,-ang_k1pk2andk2)**2. * P2**2. * P1p2
term5 = 4.*F2s(k1mk2,k1, ang_k2mk1andk1)**2. * P1**2. * P1m2
term6 = 4.*F2s(k1pk2,k1,-ang_k2pk1andk1)**2. * P1**2. * P1p2
term7 = 8.*F2s(k1mk2,k2, ang_k1mk2andk2)*F2s(k1mk2,k1, ang_k2mk1andk1) * P1 * P2 * P1m2
term8 = 8.*F2s(k1pk2,k2,-ang_k1pk2andk2)*F2s(k1pk2,k1,-ang_k2pk1andk1) * P1 * P2 * P1p2
return term1 + term2 + term3 + term4 + term5 + term6 + term7 + term8
# ================================================================================ #
# Load spectra and growth-only response tables and create 2D interpolators
# ================================================================================ #
# Create the interpolators that are needed; they use the trick of setting the value at k = 0 to zero (or to the linear-theory result for the responses) to prevent crashes/problems when k < k_min
# This needs to be adapted if the format of the files changes
def make_interpolator_forP(path1, path2, value_at_k0):
filetmp = loadtxt(path1, skiprows = 1)
ztmp = loadtxt(path2)
ktmp = append(0., filetmp[:,0])
Ptmp = vstack([value_at_k0*ones(len(ztmp)), filetmp[:,1:]])
return interpolate.interp2d(ztmp, ktmp, Ptmp, kind='cubic')
# This interpolator adds a correction for k > kmax (though not the same one as used in the file R1RK_SSC.py)
def make_interpolator_forG(path1, path2, value_at_k0):
filetmp = loadtxt(path1, skiprows = 1)
ztmp = loadtxt(path2)
# Set to linear theory for low-k
ktmp = append(0., filetmp[:,0])
Gtmp = vstack([value_at_k0*ones(len(ztmp)), filetmp[:,1:]])
# Set correction for high-k
khig = 10.**linspace(log10(max(ktmp+0.00001)), 3, 500)
kout = append(ktmp, khig)
Ghig = vstack([ Gtmp , outer(ones(len(khig)), Gtmp[-1,:]) * outer((max(ktmp)/khig)**1., ones(len(ztmp))) ])
return interpolate.interp2d(ztmp, kout, Ghig, kind='cubic')
# P_L(z, k)
Plin_int = make_interpolator_forP(lpath+'lookup_tables/P_L_class.dat', lpath+'lookup_tables/P_L_zvalues.dat', 0.0)
# dP_L/dk/P(z, k)
dPlin_int = make_interpolator_forP(lpath+'lookup_tables/dP_L_postclass.dat', lpath+'lookup_tables/P_L_zvalues.dat', 0.0)
# d^2P_L/dk^2/P(z, k)
ddPlin_int = make_interpolator_forP(lpath+'lookup_tables/ddP_L_postclass.dat', lpath+'lookup_tables/P_L_zvalues.dat', 0.0)
# P_m(z, k)
Pnl_int = make_interpolator_forP(lpath+'lookup_tables/P_m_runmode.dat', lpath+'lookup_tables/P_m_zvalues.dat', 0.0)
# dP_m/dk/P(z, k)
dPnl_int = make_interpolator_forP(lpath+'lookup_tables/dP_m_postrunmode.dat', lpath+'lookup_tables/P_m_zvalues.dat', 0.0)
# d^2P_m/dk^2/P(z, k)
ddPnl_int = make_interpolator_forP(lpath+'lookup_tables/ddP_m_postrunmode.dat', lpath+'lookup_tables/P_m_zvalues.dat', 0.0)
# G_1(z, k)
G1_int = make_interpolator_forG(lpath+'lookup_tables/Resp_G1_fromsims.dat', lpath+'lookup_tables/Resp_zvalues_fromsims.dat', 26./21)
# G_2(z, k)
G2_int = make_interpolator_forG(lpath+'lookup_tables/Resp_G2_fromsims.dat', lpath+'lookup_tables/Resp_zvalues_fromsims.dat', 3002./1323)
# ======================================================================================= #
# Define the covNG_resp(k1, k2, z) function
# The interpolators above are treated as globals, but they can be passed in as arguments if need be
# ======================================================================================= #
def covNG_resp(k1, k2, z):
fsq = 0.5
k_hard = max([k1, k2])
k_soft = min([k1, k2])
G1_hard = G1_int(z, k_hard)
G1_soft = G1_int(z, k_soft)
G2_hard = G2_int(z, k_hard)
G2_soft = G2_int(z, k_soft)
Plin_soft = Plin_int(z, k_soft)
Pnl_soft = Pnl_int(z, k_soft)
Pnl_hard = Pnl_int(z, k_hard)
# k*P'/P and k^2*P''/P
fk1_hard = k_hard * dPnl_int(z, k_hard)/Pnl_int(z, k_hard)
fk1_soft = k_soft * dPnl_int(z, k_soft)/Pnl_int(z, k_soft)
fk2_hard = k_hard**2. * ddPnl_int(z, k_hard)/Pnl_int(z, k_hard)
fk2_soft = k_soft**2. * ddPnl_int(z, k_soft)/Pnl_int(z, k_soft)
# kG1'
fractk = 0.02
kG1prime_hard = k_hard * ( G1_int(z, (1.+fractk)*k_hard) - G1_int(z, (1.-fractk)*k_hard) ) / (2.*fractk*k_hard)
kG1prime_soft = k_soft * ( G1_int(z, (1.+fractk)*k_soft) - G1_int(z, (1.-fractk)*k_soft) ) / (2.*fractk*k_soft)
# Eulerian responses that are needed: R_2, R_Kdelta, R_K^2, R_K.K, R_KK
R3e_hard = (8./21)*G1_hard + G2_hard + (-(2./9) - (2./3)*G1_hard)*fk1_hard + (1./9)*fk2_hard - (2./3)*kG1prime_hard
R3e_soft = (8./21)*G1_soft + G2_soft + (-(2./9) - (2./3)*G1_soft)*fk1_soft + (1./9)*fk2_soft - (2./3)*kG1prime_soft
R4e_hard = (1518./1813)*((8./21)*G1_hard + G2_hard) + (41./22)*(-2./9 - (2./3)*G1_hard)*fk1_hard + (1./3)*fk2_hard
R4e_soft = (1518./1813)*((8./21)*G1_soft + G2_soft) + (41./22)*(-2./9 - (2./3)*G1_soft)*fk1_soft + (1./3)*fk2_soft
R5e_hard = (1./21)*G1_hard - (1./6)*fk1_hard
R5e_soft = (1./21)*G1_soft - (1./6)*fk1_soft
R6e_hard = -(22./13)*G1_hard + (3./2)*fk1_hard
R6e_soft = -(22./13)*G1_soft + (3./2)*fk1_soft
R7e_hard = (1476./1813)*((8./21)*G1_hard + G2_hard) + (69./44)*(-2./9 - (2./3)*G1_hard)*fk1_hard + (1./2)*fk2_hard
R7e_soft = (1476./1813)*((8./21)*G1_soft + G2_soft) + (69./44)*(-2./9 - (2./3)*G1_soft)*fk1_soft + (1./2)*fk2_soft
# \mathcal{A, B, C} functions in Eq.(2.4) of arXiv:1705.01092
A_hard = (1./2)*R3e_hard + (2./3)*R5e_hard + (2./9)*R6e_hard
A_soft = (1./2)*R3e_soft + (2./3)*R5e_soft + (2./9)*R6e_soft
B_hard = (2./3)*R4e_hard + (2./9)*R6e_hard
B_soft = (2./3)*R4e_soft + (2./9)*R6e_soft
C_hard = (4./9)*R7e_hard
C_soft = (4./9)*R7e_soft
# Compute stitched tree level piece
theta_range = linspace(pi, 2.*pi, 1000)+1.0e-6 #Small correction avoids singularities; half the range suffices by symmetry (choice pi...2pi is to ensure current use of interpolators)
if (k_soft/k_hard <= fsq): # Squeezed, so response
covNG_stitched_tree = (2. * ( (A_hard + B_hard/4. + (11./32)*C_hard ) * Pnl_hard * Plin_soft**2.))[0]
else: # Non-squeezed, so standard
integrand_stdtree = cov_stdtree_angle(theta_range, k1, k2, z)
covNG_stitched_tree = integrate.trapz(integrand_stdtree, theta_range) / (pi)
# Compute 1-loop response piece: the p-integral
# Get knl
p_array = 10.**linspace(-3, 2, 1000)
knl = p_array[where(abs(p_array**3.*Plin_int(z, p_array)[:,0]/2./pi**2.-1.) == min(abs(p_array**3.*Plin_int(z, p_array)[:,0]/2./pi**2.-1.)))][0]
p_array = 10.**linspace(log10(0.01*min([fsq*k_soft, knl])), log10(min([fsq*k_soft, knl])), 1000)
logp_array = log(p_array)
p_integral = integrate.trapz(p_array**3.*Plin_int(z, p_array)[:,0]**2., logp_array)
# Compute 1-loop response piece: include the angular part with responses
covNG_resp1loop = (2.*A_hard*A_soft + B_hard*B_soft/10. + (2./5)*(A_hard*C_soft + A_soft*C_hard) + (B_hard*C_soft + B_soft*C_hard)/35. + (27./280)*C_hard*C_soft)
covNG_resp1loop *= (2. * (Pnl_hard*Pnl_soft/(2.*pi)**2.) * p_integral)[0]
# Return the total result
return covNG_stitched_tree + covNG_resp1loop
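# Illustrative usage (not part of the original module, just a hedged sketch): the
# interpolators above are module-level globals built from the lookup tables, so once
# those files are present covNG_resp can be evaluated on a grid directly. The k values
# and redshift below are arbitrary example inputs, not values used anywhere in CosmoCov:
#     k_values = 10.**linspace(-2, 0, 10)   # example wavenumbers
#     z_example = 0.5
#     cNG = [[covNG_resp(k1, k2, z_example) for k2 in k_values] for k1 in k_values]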
``` |
{
"source": "joezuntz/cosmosis",
"score": 2
} |
#### File: cosmosis/cosmosis/main.py
```python
import sys
import configparser
import argparse
import os
import pdb
from .runtime.config import Inifile, CosmosisConfigurationError
from .runtime.pipeline import LikelihoodPipeline
from .runtime import mpi_pool
from .runtime import process_pool
from .runtime.utils import ParseExtraParameters, stdout_redirected, import_by_path
from .samplers.sampler import Sampler, ParallelSampler, Hints
from . import output as output_module
from .runtime.handler import activate_segfault_handling
RUNTIME_INI_SECTION = "runtime"
def demo_1_special (args):
if "demo1.ini" in args.inifile:
print()
print("Congratulations: you have just run cosmosis demo one!")
if os.path.exists("./conda"):
print()
print("You can make plots of the outputs of this using this command:")
print(" postprocess demos/demo1.ini -o plots -p demo1")
print()
print("If you get a message about 'Abort Trap 6' then see the FAQ:")
print("https://bitbucket.org/joezuntz/cosmosis/wiki/FAQ")
print()
print("Then you can try out the other demos...")
print("... and read the information about plotting their output and what they are doing online.")
print("Please get in touch with any problems, ideally by filing an Issue. Thanks!")
else:
print("You can make plots of the outputs of this using the command:")
print()
print("postprocess demos/demo1.ini -o plots -p demo1")
print()
print("Then you can try out the other demos...")
print("... and read the information about plotting their output and what they are doing online.")
print("Please get in touch with any problems, ideally by filing an Issue. Thanks!")
print()
def demo_10_special (args):
if "demo10.ini" in args.inifile and not os.getenv ("HALOFIT", ""):
print()
print("Welcome to demo10!")
print()
print("**PLEASE NOTE**:")
print()
print("There are two flavours of this demo, selected through an ")
print("environment variable called `HALOFIT'; this variable is not ")
print("currently set, so we are giving it the value `halofit'.")
print("Please see the wiki for more information: ")
print("https://bitbucket.org/joezuntz/cosmosis/wiki/Demo10.")
os.environ ["HALOFIT"] = "halofit"
def demo_20a_special (args):
if "demo20a.ini" in args.inifile:
print ()
print ("You have completed demo20a, now run demo20b and compare")
print ("results with demo5!")
def demo_20b_special (args):
if "demo20b.ini" in args.inifile and not os.path.isfile ("./demo20a.txt"):
print ()
print ("********************************************************")
print ("*** YOU MUST RUN demo20a BEFORE YOU CAN RUN demo20b. ***")
print ("********************************************************")
def sampler_main_loop(sampler, output, pool, is_root):
# Run the sampler until convergence
# which really means "finished" here -
# a sampler can "converge" just by reaching the
# limit of the number of samples it is allowed.
if is_root:
while not sampler.is_converged():
sampler.execute()
#Flush any output. This is to stop
#a problem in some MPI cases where loads
#of output is built up before being written.
if output:
output.flush()
# If we are in parallel tell the other processors to end the
# loop and prepare for the next sampler
if pool and sampler.is_parallel_sampler:
pool.close()
else:
if sampler.is_parallel_sampler:
sampler.worker()
def write_header_output(output, params, values, pipeline):
# If there is an output file, save the ini information to
# it as well. We do it here since it's nicer to have it
# after the sampler options that get written in sampler.config.
# Create a buffer to store the output:
output.comment("START_OF_PARAMS_INI")
comment_wrapper = output.comment_file_wrapper()
params.write(comment_wrapper)
output.comment("END_OF_PARAMS_INI")
# Do the same with the values file.
# Unfortunately that means reading it in again;
# if we ever refactor this bit we could eliminate that.
if isinstance(values, Inifile):
values_ini = values
elif values is None:
values_ini=Inifile(pipeline.values_filename)
else:
values_ini=values
output.comment("START_OF_VALUES_INI")
values_ini.write(comment_wrapper)
output.comment("END_OF_VALUES_INI")
# And the same with the priors
output.comment("START_OF_PRIORS_INI")
for priors_file in pipeline.priors_files:
if isinstance(priors_file, Inifile):
prior_ini = priors_file
else:
prior_ini=Inifile(priors_file)
prior_ini.write(comment_wrapper)
output.comment("END_OF_PRIORS_INI")
def setup_output(sampler_class, sampler_number, ini, pool, number_samplers, sample_method, resume):
needs_output = sampler_class.needs_output and \
(pool is None or pool.is_master() or sampler_class.parallel_output)
if not needs_output:
return None
#create the output files and methods.
try:
output_options = dict(ini.items('output'))
except configparser.NoSectionError:
raise ValueError("ERROR:\nFor the sampler (%s) you chose in the [runtime] section of the ini file I also need an [output] section describing how to save results\n\n"%sample_method)
#Additionally we tell the output here if
#we are parallel or not.
if (pool is not None) and (sampler_class.parallel_output):
output_options['rank'] = pool.rank
output_options['parallel'] = pool.size
#Give different output filenames to the different sampling steps
#Only change if this is not the last sampling step - the final
#one retains the name in the output file.
# Change, e.g. demo17.txt to demo17.fisher.txt
if ("filename" in output_options) and (sampler_number<number_samplers-1):
filename = output_options['filename']
filename, ext = os.path.splitext(filename)
        filename += '.' + sample_method
filename += ext
output_options['filename'] = filename
#Generate the output from a factory
output = output_module.output_from_options(output_options, resume)
output.metadata("sampler", sample_method)
if ("filename" in output_options):
print("* Saving output -> {}".format(output_options['filename']))
return output
def run_cosmosis(args, pool=None, ini=None, pipeline=None, values=None):
# In case we need to hand-hold a naive demo-10 user.
# Load configuration.
is_root = (pool is None) or pool.is_master()
if ini is None:
ini = Inifile(args.inifile, override=args.params, print_include_messages=is_root)
pre_script = ini.get(RUNTIME_INI_SECTION, "pre_script", fallback="")
post_script = ini.get(RUNTIME_INI_SECTION, "post_script", fallback="")
if is_root:
        # This decodes the exit status
status = os.WEXITSTATUS(os.system(pre_script))
if status:
raise RuntimeError("The pre-run script {} retuned non-zero status {}".format(
pre_script, status))
if is_root and args.mem:
from cosmosis.runtime.memmon import MemoryMonitor
# This launches a memory monitor that prints out (from a new thread)
# the memory usage every args.mem seconds
mem = MemoryMonitor.start_in_thread(interval=args.mem)
# Create pipeline.
if pipeline is None:
cleanup_pipeline = True
pool_stdout = ini.getboolean(RUNTIME_INI_SECTION, "pool_stdout", fallback=False)
if is_root or pool_stdout:
pipeline = LikelihoodPipeline(ini, override=args.variables, values=values, only=args.only)
        else:
            # Suppress output on everything except the master process
            with stdout_redirected():
                pipeline = LikelihoodPipeline(ini, override=args.variables, only=args.only)
if pipeline.do_fast_slow:
pipeline.setup_fast_subspaces()
else:
# We should not cleanup a pipeline which we didn't make
cleanup_pipeline = False
# This feature lets us import additional samplers at runtime
sampler_files = ini.get(RUNTIME_INI_SECTION, "import_samplers", fallback="").split()
for i, sampler_file in enumerate(sampler_files):
# give the module a new name to avoid name clashes if people
# just call their thing by the same name
import_by_path('additional_samplers_{}'.format(i), sampler_file)
# determine the type(s) of sampling we want.
sample_methods = ini.get(RUNTIME_INI_SECTION, "sampler", fallback="test").split()
for sample_method in sample_methods:
if sample_method not in Sampler.registry:
raise ValueError("Unknown sampler method %s" % (sample_method,))
#Get that sampler from the system.
sampler_classes = [Sampler.registry[sample_method] for sample_method in sample_methods]
if pool:
if not any(issubclass(sampler_class,ParallelSampler) for sampler_class in sampler_classes):
if len(sampler_classes)>1:
raise ValueError("None of the samplers you chose support parallel execution!")
else:
raise ValueError("The sampler you chose does not support parallel execution!")
for sampler_class in sampler_classes:
if isinstance(pool, process_pool.Pool) and issubclass(sampler_class,ParallelSampler) and not sampler_class.supports_smp:
name = sampler_class.__name__[:-len("Sampler")].lower()
raise ValueError("Sorry, the {} sampler does not support the --smp flag.".format(name))
number_samplers = len(sampler_classes)
#To start with we do not have any estimates of
#anything the samplers might give us like centers
#or covariances.
distribution_hints = Hints()
#Now that we have a sampler we know whether we will need an
#output file or not. By default new samplers do need one.
for sampler_number, (sampler_class, sample_method) in enumerate(
zip(sampler_classes, sample_methods)):
sampler_name = sampler_class.__name__[:-len("Sampler")].lower()
# The resume feature lets us restart from an existing file.
# It's not fully rolled out to all the suitable samplers yet though.
resume = ini.getboolean(RUNTIME_INI_SECTION, "resume", fallback=False)
# Not all samplers can be resumed.
if resume and not sampler_class.supports_resume:
print("NOTE: You set resume=T in the [runtime] section but the sampler {} does not support resuming yet. I will ignore this option.".format(sampler_name))
resume=False
if is_root:
print("****************************")
print("* Running sampler {}/{}: {}".format(sampler_number+1,number_samplers, sampler_name))
output = setup_output(sampler_class, sampler_number, ini, pool, number_samplers, sample_method, resume)
print("****************************")
#Initialize our sampler, with the class we got above.
#It needs an extra pool argument if it is a ParallelSampler.
#All the parallel samplers can also act serially too.
if pool and sampler_class.is_parallel_sampler:
sampler = sampler_class(ini, pipeline, output, pool)
else:
sampler = sampler_class(ini, pipeline, output)
#Set up the sampler - for example loading
#any resources it needs or checking the ini file
#for additional parameters.
sampler.distribution_hints.update(distribution_hints)
sampler.config()
# Potentially resume
if resume and sampler_class.needs_output and \
sampler_class.supports_resume and \
(is_root or sampler_class.parallel_output):
sampler.resume()
if output:
write_header_output(output, ini, values, pipeline)
sampler_main_loop(sampler, output, pool, is_root)
distribution_hints.update(sampler.distribution_hints)
if output:
output.close()
if cleanup_pipeline:
pipeline.cleanup()
if is_root and args.mem:
mem.stop()
# Extra-special actions we take to mollycoddle a brand-new user!
demo_1_special (args)
demo_20a_special (args)
# User can specify in the runtime section a post-run script to launch.
# In general this may be less useful than the pre-run script, because
# often chains time-out instead of actually completing.
# But we still offer it
if post_script and is_root:
        # This decodes the exit status
        status = os.WEXITSTATUS(os.system(post_script))
        if status:
            sys.stdout.write("WARNING: The post-run script {} failed with error {}\n".format(
                post_script, status))
return 0
def make_graph(inifile, dotfile, params=None, variables=None):
"""
Make a graphviz "dot" format file, describing the pipeline
and how data is passed from section to section.
Requires pygraphviz.
Parameters
----------
inifile: str
A path to a pipeline file or an Inifile object
dotfile: str
Path to the new graph output file
params: dict or None
Dictionary of parameter overrides
variables: dict or None
Dictionary of value overrides
"""
ini = Inifile(inifile, override=params)
pipeline = LikelihoodPipeline(ini, override=variables)
data = pipeline.run_parameters(pipeline.start_vector())
pipeline.make_graph(data, dotfile)
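# Illustrative usage of make_graph (not part of the original file); the ini path and
# output name below are placeholders rather than files shipped with this excerpt:
#     from cosmosis.main import make_graph
#     make_graph("demos/demo1.ini", "pipeline.dot")
#     # render the dot file with graphviz, e.g.:  dot -Tpng pipeline.dot -o pipeline.png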
def main():
try:
parser = argparse.ArgumentParser(description="Run a pipeline with a single set of parameters", add_help=True)
parser.add_argument("inifile", help="Input ini file of parameters")
parser.add_argument("--mpi",action='store_true',help="Run in MPI mode.")
parser.add_argument("--smp",type=int,default=0,help="Run with the given number of processes in shared memory multiprocessing (this is experimental and does not work for multinest).")
parser.add_argument("--pdb",action='store_true',help="Start the python debugger on an uncaught error. Only in serial mode.")
parser.add_argument("--segfaults", "--experimental-fault-handling", action='store_true',help="Activate a mode that gives more info on segfault")
parser.add_argument("--mem", type=int, default=0, help="Print out memory usage every this many seconds from root process")
parser.add_argument("-p", "--params", nargs="*", action=ParseExtraParameters, help="Override parameters in inifile, with format section.name1=value1 section.name2=value2...")
parser.add_argument("-v", "--variables", nargs="*", action=ParseExtraParameters, help="Override variables in values file, with format section.name1=value1 section.name2=value2...")
parser.add_argument("--only", nargs="*", help="Fix all parameters except the ones listed")
parser.add_argument("--graph", type=str, default='', help="Do not run a sampler; instead make a graphviz dot file of the pipeline")
args = parser.parse_args(sys.argv[1:])
if args.graph:
make_graph(args.inifile, args.graph, args.params, args.variables)
return
demo_10_special (args)
demo_20b_special (args)
if args.segfaults:
activate_segfault_handling()
# initialize parallel workers
if args.mpi:
with mpi_pool.MPIPool() as pool:
return run_cosmosis(args,pool)
elif args.smp:
with process_pool.Pool(args.smp) as pool:
return run_cosmosis(args,pool)
else:
try:
return run_cosmosis(args)
except Exception as error:
if args.pdb:
print("There was an exception - starting python debugger because you ran with --pdb")
print(error)
pdb.post_mortem()
else:
raise
except CosmosisConfigurationError as e:
print(e)
return 1
if __name__=="__main__":
status = main()
sys.exit(status)
``` |
{
"source": "joezuntz/MockMPI",
"score": 2
} |
#### File: MockMPI/mockmpi/comm.py
```python
import numpy as np
# This constant seems to have the same value in MPICH and OpenMPI
# so we reproduce it here since it can be quite important.
IN_PLACE = 1
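# Illustrative usage (not part of the original file): as in mpi4py, IN_PLACE can be
# passed as the send buffer of the buffer-style reductions implemented below, e.g.
#     comm.Allreduce(IN_PLACE, local_array)   # local_array ends up summed over all ranks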
class MockComm(object):
"""A class to mock up the MPI Comm API using a multiprocessing Pipe.
"""
def __init__(self, rank, size, pipes, barrier):
self.rank = rank
self.size = size
self.pipes = pipes
self.barrier = barrier
def __bool__(self):
return self.size > 0
def Get_rank(self):
return self.rank
def Get_size(self):
return self.size
def send(self, msg, dest):
if dest != self.rank:
self.pipes[dest].send(msg)
else:
self.msg = msg
def Send(self, msg, dest):
if not isinstance(msg, np.ndarray):
raise ValueError(
"Can only use Send with numpy arrays "
"(Mocking code does not handle general buffers)"
)
self.send(msg, dest)
def recv(self, source):
if source != self.rank:
msg = self.pipes[source].recv()
else:
msg = self.msg
return msg
def Recv(self, buffer, source):
msg = self.recv(source)
buffer[:] = msg
def Barrier(self):
self.barrier.wait()
def bcast(self, msg, root=0):
if root == self.rank:
for p in range(self.size):
self.send(msg, p)
msg = self.recv(root)
return msg
def Bcast(self, msg, root=0):
if root == self.rank:
for p in range(self.size):
self.Send(msg, p)
self.Recv(msg, root)
def scatter(self, data, root=0):
if root == self.rank:
for p in range(self.size):
self.send(data[p], p)
data = self.recv(root)
return data
def gather(self, data, root=0):
self.send(data, root)
if root == self.rank:
new_data = []
for p in range(self.size):
new_data.append(self.recv(p))
return new_data
else:
return None
def alltoall(self, data=0):
for p in range(self.size):
self.send(data[p], p)
new_data = []
for p in range(self.size):
new_data.append(self.recv(p))
return new_data
def reduce(self, sendobj, op=None, root=0):
if op is not None:
raise NotImplementedError("Not implemented non-sum reductions in mock MPI")
new_data = self.gather(sendobj, root)
if root == self.rank:
d = new_data[0]
for d2 in new_data[1:]:
d = d + d2
return d
else:
return None
def allreduce(self, sendobj, op=None):
d = self.reduce(sendobj, op)
d = self.bcast(d)
return d
def Reduce(self, sendbuf, recvbuf, op=None, root=0):
if sendbuf is IN_PLACE:
sendbuf = recvbuf.copy()
if not isinstance(sendbuf, np.ndarray):
raise ValueError(
"Cannot use Reduce with non-arrays. "
"(Mocking code does not handle general buffers)"
)
r = self.reduce(sendbuf, op=op, root=root)
if self.rank == root:
recvbuf[:] = r
def Allreduce(self, sendbuf, recvbuf, op=None):
self.Reduce(sendbuf, recvbuf, op)
self.Bcast(recvbuf)
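    # Illustrative end-to-end usage (not part of the original file). MockComm instances
    # are normally created by mock_mpiexec in mockmpi/exec.py (shown below); we assume
    # here that the target function receives the MockComm as its first argument and that
    # mock_mpiexec is importable from the package top level -- both are assumptions for
    # this sketch rather than facts shown in this excerpt:
    #     from mockmpi import mock_mpiexec
    #     def work(comm):
    #         rank, size = comm.Get_rank(), comm.Get_size()
    #         total = comm.allreduce(rank)            # sum of ranks on every process
    #         msg = comm.bcast("hello" if rank == 0 else None)
    #     mock_mpiexec(4, work)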
# Instance methods not implemented
def Abort(self, *args, **kwargs):
raise NotImplementedError("The method 'Abort' is not implemented in mockmpi")
def Accept(self, *args, **kwargs):
raise NotImplementedError("The method 'Accept' is not implemented in mockmpi")
def Allgather(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Allgather' is not implemented in mockmpi"
)
def Allgatherv(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Allgatherv' is not implemented in mockmpi"
)
def Alltoall(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Alltoall' is not implemented in mockmpi"
)
def Alltoallv(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Alltoallv' is not implemented in mockmpi"
)
def Alltoallw(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Alltoallw' is not implemented in mockmpi"
)
def Bsend(self, *args, **kwargs):
raise NotImplementedError("The method 'Bsend' is not implemented in mockmpi")
def Bsend_init(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Bsend_init' is not implemented in mockmpi"
)
def Call_errhandler(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Call_errhandler' is not implemented in mockmpi"
)
def Cart_map(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Cart_map' is not implemented in mockmpi"
)
def Clone(self, *args, **kwargs):
raise NotImplementedError("The method 'Clone' is not implemented in mockmpi")
def Connect(self, *args, **kwargs):
raise NotImplementedError("The method 'Connect' is not implemented in mockmpi")
def Create(self, *args, **kwargs):
raise NotImplementedError("The method 'Create' is not implemented in mockmpi")
def Create_cart(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Create_cart' is not implemented in mockmpi"
)
def Create_dist_graph(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Create_dist_graph' is not implemented in mockmpi"
)
def Create_dist_graph_adjacent(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Create_dist_graph_adjacent' is not implemented in mockmpi"
)
def Create_graph(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Create_graph' is not implemented in mockmpi"
)
def Create_group(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Create_group' is not implemented in mockmpi"
)
def Create_intercomm(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Create_intercomm' is not implemented in mockmpi"
)
def Delete_attr(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Delete_attr' is not implemented in mockmpi"
)
def Disconnect(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Disconnect' is not implemented in mockmpi"
)
def Dup(self, *args, **kwargs):
raise NotImplementedError("The method 'Dup' is not implemented in mockmpi")
def Dup_with_info(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Dup_with_info' is not implemented in mockmpi"
)
def Exscan(self, *args, **kwargs):
raise NotImplementedError("The method 'Exscan' is not implemented in mockmpi")
def Free(self, *args, **kwargs):
raise NotImplementedError("The method 'Free' is not implemented in mockmpi")
def Gather(self, *args, **kwargs):
raise NotImplementedError("The method 'Gather' is not implemented in mockmpi")
def Gatherv(self, *args, **kwargs):
raise NotImplementedError("The method 'Gatherv' is not implemented in mockmpi")
def Get_attr(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Get_attr' is not implemented in mockmpi"
)
def Get_errhandler(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Get_errhandler' is not implemented in mockmpi"
)
def Get_group(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Get_group' is not implemented in mockmpi"
)
def Get_info(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Get_info' is not implemented in mockmpi"
)
def Get_name(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Get_name' is not implemented in mockmpi"
)
def Get_topology(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Get_topology' is not implemented in mockmpi"
)
def Graph_map(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Graph_map' is not implemented in mockmpi"
)
def Iallgather(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Iallgather' is not implemented in mockmpi"
)
def Iallgatherv(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Iallgatherv' is not implemented in mockmpi"
)
def Iallreduce(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Iallreduce' is not implemented in mockmpi"
)
def Ialltoall(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Ialltoall' is not implemented in mockmpi"
)
def Ialltoallv(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Ialltoallv' is not implemented in mockmpi"
)
def Ialltoallw(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Ialltoallw' is not implemented in mockmpi"
)
def Ibarrier(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Ibarrier' is not implemented in mockmpi"
)
def Ibcast(self, *args, **kwargs):
raise NotImplementedError("The method 'Ibcast' is not implemented in mockmpi")
def Ibsend(self, *args, **kwargs):
raise NotImplementedError("The method 'Ibsend' is not implemented in mockmpi")
def Idup(self, *args, **kwargs):
raise NotImplementedError("The method 'Idup' is not implemented in mockmpi")
def Iexscan(self, *args, **kwargs):
raise NotImplementedError("The method 'Iexscan' is not implemented in mockmpi")
def Igather(self, *args, **kwargs):
raise NotImplementedError("The method 'Igather' is not implemented in mockmpi")
def Igatherv(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Igatherv' is not implemented in mockmpi"
)
def Improbe(self, *args, **kwargs):
raise NotImplementedError("The method 'Improbe' is not implemented in mockmpi")
def Iprobe(self, *args, **kwargs):
raise NotImplementedError("The method 'Iprobe' is not implemented in mockmpi")
def Irecv(self, *args, **kwargs):
raise NotImplementedError("The method 'Irecv' is not implemented in mockmpi")
def Ireduce(self, *args, **kwargs):
raise NotImplementedError("The method 'Ireduce' is not implemented in mockmpi")
def Ireduce_scatter(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Ireduce_scatter' is not implemented in mockmpi"
)
def Ireduce_scatter_block(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Ireduce_scatter_block' is not implemented in mockmpi"
)
def Irsend(self, *args, **kwargs):
raise NotImplementedError("The method 'Irsend' is not implemented in mockmpi")
def Is_inter(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Is_inter' is not implemented in mockmpi"
)
def Is_intra(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Is_intra' is not implemented in mockmpi"
)
def Iscan(self, *args, **kwargs):
raise NotImplementedError("The method 'Iscan' is not implemented in mockmpi")
def Iscatter(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Iscatter' is not implemented in mockmpi"
)
def Iscatterv(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Iscatterv' is not implemented in mockmpi"
)
def Isend(self, *args, **kwargs):
raise NotImplementedError("The method 'Isend' is not implemented in mockmpi")
def Issend(self, *args, **kwargs):
raise NotImplementedError("The method 'Issend' is not implemented in mockmpi")
def Mprobe(self, *args, **kwargs):
raise NotImplementedError("The method 'Mprobe' is not implemented in mockmpi")
def Probe(self, *args, **kwargs):
raise NotImplementedError("The method 'Probe' is not implemented in mockmpi")
def Recv_init(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Recv_init' is not implemented in mockmpi"
)
def Reduce_scatter(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Reduce_scatter' is not implemented in mockmpi"
)
def Reduce_scatter_block(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Reduce_scatter_block' is not implemented in mockmpi"
)
def Rsend(self, *args, **kwargs):
raise NotImplementedError("The method 'Rsend' is not implemented in mockmpi")
def Rsend_init(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Rsend_init' is not implemented in mockmpi"
)
def Scan(self, *args, **kwargs):
raise NotImplementedError("The method 'Scan' is not implemented in mockmpi")
def Scatter(self, *args, **kwargs):
raise NotImplementedError("The method 'Scatter' is not implemented in mockmpi")
def Scatterv(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Scatterv' is not implemented in mockmpi"
)
def Send_init(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Send_init' is not implemented in mockmpi"
)
def Sendrecv(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Sendrecv' is not implemented in mockmpi"
)
def Sendrecv_replace(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Sendrecv_replace' is not implemented in mockmpi"
)
def Set_attr(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Set_attr' is not implemented in mockmpi"
)
def Set_errhandler(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Set_errhandler' is not implemented in mockmpi"
)
def Set_info(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Set_info' is not implemented in mockmpi"
)
def Set_name(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Set_name' is not implemented in mockmpi"
)
def Spawn(self, *args, **kwargs):
raise NotImplementedError("The method 'Spawn' is not implemented in mockmpi")
def Spawn_multiple(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Spawn_multiple' is not implemented in mockmpi"
)
def Split(self, *args, **kwargs):
raise NotImplementedError("The method 'Split' is not implemented in mockmpi")
def Split_type(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Split_type' is not implemented in mockmpi"
)
def Ssend(self, *args, **kwargs):
raise NotImplementedError("The method 'Ssend' is not implemented in mockmpi")
def Ssend_init(self, *args, **kwargs):
raise NotImplementedError(
"The method 'Ssend_init' is not implemented in mockmpi"
)
def allgather(self, *args, **kwargs):
raise NotImplementedError(
"The method 'allgather' is not implemented in mockmpi"
)
def barrier(self, *args, **kwargs):
raise NotImplementedError("The method 'barrier' is not implemented in mockmpi")
def bsend(self, *args, **kwargs):
raise NotImplementedError("The method 'bsend' is not implemented in mockmpi")
def exscan(self, *args, **kwargs):
raise NotImplementedError("The method 'exscan' is not implemented in mockmpi")
def ibsend(self, *args, **kwargs):
raise NotImplementedError("The method 'ibsend' is not implemented in mockmpi")
def improbe(self, *args, **kwargs):
raise NotImplementedError("The method 'improbe' is not implemented in mockmpi")
def iprobe(self, *args, **kwargs):
raise NotImplementedError("The method 'iprobe' is not implemented in mockmpi")
def irecv(self, *args, **kwargs):
raise NotImplementedError("The method 'irecv' is not implemented in mockmpi")
def isend(self, *args, **kwargs):
raise NotImplementedError("The method 'isend' is not implemented in mockmpi")
def issend(self, *args, **kwargs):
raise NotImplementedError("The method 'issend' is not implemented in mockmpi")
def mprobe(self, *args, **kwargs):
raise NotImplementedError("The method 'mprobe' is not implemented in mockmpi")
def probe(self, *args, **kwargs):
raise NotImplementedError("The method 'probe' is not implemented in mockmpi")
def py2f(self, *args, **kwargs):
raise NotImplementedError("The method 'py2f' is not implemented in mockmpi")
def scan(self, *args, **kwargs):
raise NotImplementedError("The method 'scan' is not implemented in mockmpi")
def sendrecv(self, *args, **kwargs):
raise NotImplementedError(
"The method 'sendrecv' is not implemented in mockmpi"
)
def ssend(self, *args, **kwargs):
raise NotImplementedError("The method 'ssend' is not implemented in mockmpi")
# Properties not implemented
@property
def topology(self):
raise NotImplementedError(
"The property 'topology' is not implemented in mockmpi"
)
@property
def group(self):
raise NotImplementedError("The property 'group' is not implemented in mockmpi")
@property
def name(self):
raise NotImplementedError("The property 'name' is not implemented in mockmpi")
@property
def is_inter(self):
raise NotImplementedError(
"The property 'is_inter' is not implemented in mockmpi"
)
@property
def is_intra(self):
raise NotImplementedError(
"The property 'is_intra' is not implemented in mockmpi"
)
@property
def is_topo(self):
raise NotImplementedError(
"The property 'is_topo' is not implemented in mockmpi"
)
# 'Info' is the only writeable property
@property
def info(self):
raise NotImplementedError("The property 'info' is not implemented in mockmpi")
@info.setter
def info(self, *args, **kwargs):
raise NotImplementedError("The property 'info' is not implemented in mockmpi")
# Class methods not yet implemented
@classmethod
def Compare(cls, *args, **kwargs):
raise NotImplementedError(
"The class method 'Compare' is not implemented in mockmpi"
)
@classmethod
def Get_parent(cls, *args, **kwargs):
raise NotImplementedError(
"The class method 'Get_parent' is not implemented in mockmpi"
)
@classmethod
def Join(cls, *args, **kwargs):
raise NotImplementedError(
"The class method 'Join' is not implemented in mockmpi"
)
@classmethod
def Create_keyval(cls, *args, **kwargs):
raise NotImplementedError(
"The class method 'Create_keyval' is not implemented in mockmpi"
)
@classmethod
def Free_keyval(cls, *args, **kwargs):
raise NotImplementedError(
"The class method 'Free_keyval' is not implemented in mockmpi"
)
@classmethod
def f2py(cls, *args, **kwargs):
raise NotImplementedError(
"The class method 'f2py' is not implemented in mockmpi"
)
```
#### File: MockMPI/mockmpi/exec.py
```python
import numpy as np
import multiprocessing as mp
from .comm import MockComm
# We use this subclass to help with exception
# handling, as described here:
# https://stackoverflow.com/a/33599967/989692
class Process(mp.Process):
def __init__(self, *args, **kwargs):
mp.Process.__init__(self, *args, **kwargs)
self._pconn, self._cconn = mp.Pipe()
self._exception = None
def run(self):
try:
mp.Process.run(self)
self._cconn.send(None)
except Exception as e:
self._cconn.send(e)
raise e
@property
def exception(self):
if self._pconn.poll():
self._exception = self._pconn.recv()
return self._exception
def mock_mpiexec(nproc, target, *args, **kwargs):
"""Run a function, given as target, as though it were an MPI session using mpiexec -n nproc
but using multiprocessing instead of mpi.
"""
mp.set_start_method("spawn", force=True)
# Make the message passing pipes
all_pipes = [{} for p in range(nproc)]
for i in range(nproc):
for j in range(i + 1, nproc):
p1, p2 = mp.Pipe()
all_pipes[i][j] = p1
all_pipes[j][i] = p2
# Make a barrier
barrier = mp.Barrier(nproc)
# Make fake MPI-like comm object
comms = [
MockComm(rank, nproc, pipes, barrier) for rank, pipes in enumerate(all_pipes)
]
# Make processes
procs = [Process(target=target, args=(comm,) + args, kwargs=kwargs) for comm in comms]
for p in procs:
p.start()
for p in procs:
p.join()
if p.exception:
raise p.exception.__class__ from p.exception
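# --- Illustrative usage sketch (editorial addition, not part of mockmpi) ---
# The target function receives the MockComm as its first positional argument,
# mimicking an MPI rank; extra args/kwargs are forwarded unchanged.  This assumes
# MockComm exposes `rank` and `size` attributes, as mpi4py communicators do.
#
#   def work(comm, scale):
#       print("rank %d of %d computed %d" % (comm.rank, comm.size, comm.rank * scale))
#
#   if __name__ == '__main__':
#       mock_mpiexec(4, work, 10)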
``` |
{
"source": "joezuntz/qp",
"score": 3
} |
#### File: qp/qp/interp_pdf.py
```python
import numpy as np
from scipy.stats import rv_continuous
from qp.pdf_gen import Pdf_rows_gen
from qp.conversion_funcs import extract_vals_at_x, extract_xy_vals, extract_xy_sparse
from qp.plotting import get_axes_and_xlims, plot_pdf_on_axes
from qp.utils import normalize_interp1d,\
interpolate_unfactored_multi_x_multi_y, interpolate_unfactored_multi_x_y, interpolate_unfactored_x_multi_y,\
interpolate_multi_x_multi_y, interpolate_multi_x_y, interpolate_x_multi_y, reshape_to_pdf_size
from qp.test_data import XBINS, XARRAY, YARRAY, TEST_XVALS
from qp.factory import add_class
class interp_gen(Pdf_rows_gen):
"""Interpolator based distribution
Notes
-----
This implements a PDF using a set of interpolated values.
It simply takes a set of x and y values and uses `scipy.interpolate.interp1d` to
build the PDF.
"""
# pylint: disable=protected-access
name = 'interp'
version = 0
_support_mask = rv_continuous._support_mask
def __init__(self, xvals, yvals, *args, **kwargs):
"""
Create a new distribution by interpolating the given values
Parameters
----------
xvals : array_like
The x-values used to do the interpolation
yvals : array_like
The y-values used to do the interpolation
"""
if xvals.size != np.sum(yvals.shape[1:]): # pragma: no cover
raise ValueError("Shape of xbins in xvals (%s) != shape of xbins in yvals (%s)" % (xvals.size, np.sum(yvals.shape[1:])))
self._xvals = xvals
# Set support
kwargs['a'] = self.a = np.min(self._xvals)
kwargs['b'] = self.b = np.max(self._xvals)
kwargs['shape'] = yvals.shape[:-1]
#self._yvals = normalize_interp1d(xvals, yvals)
self._yvals = reshape_to_pdf_size(yvals, -1)
check_input = kwargs.pop('check_input', True)
if check_input:
self._compute_ycumul()
self._yvals = (self._yvals.T / self._ycumul[:,-1]).T
self._ycumul = (self._ycumul.T / self._ycumul[:,-1]).T
else: # pragma: no cover
self._ycumul = None
super(interp_gen, self).__init__(*args, **kwargs)
self._addmetadata('xvals', self._xvals)
self._addobjdata('yvals', self._yvals)
def _compute_ycumul(self):
copy_shape = np.array(self._yvals.shape)
self._ycumul = np.ndarray(copy_shape)
self._ycumul[:, 0] = 0.5 * self._yvals[:, 0] * (self._xvals[1] - self._xvals[0])
self._ycumul[:, 1:] = np.cumsum((self._xvals[1:] - self._xvals[:-1]) *
0.5 * np.add(self._yvals[:,1:],
self._yvals[:,:-1]), axis=1)
@property
def xvals(self):
"""Return the x-values used to do the interpolation"""
return self._xvals
@property
def yvals(self):
"""Return the y-valus used to do the interpolation"""
return self._yvals
def _pdf(self, x, row):
# pylint: disable=arguments-differ
factored, xr, rr, _ = self._sliceargs(x, row)
if factored:
return interpolate_x_multi_y(xr, self._xvals, self._yvals[rr], bounds_error=False,
fill_value=0.).reshape(x.shape)
return interpolate_unfactored_x_multi_y(xr, rr, self._xvals, self._yvals,
bounds_error=False, fill_value=0.)
def _cdf(self, x, row):
# pylint: disable=arguments-differ
if self._ycumul is None: # pragma: no cover
self._compute_ycumul()
factored, xr, rr, _ = self._sliceargs(x, row)
if factored:
return interpolate_x_multi_y(xr, self._xvals, self._ycumul[rr],
bounds_error=False, fill_value=(0.,1.)).reshape(x.shape)
return interpolate_unfactored_x_multi_y(xr, rr, self._xvals, self._ycumul,
bounds_error=False, fill_value=(0.,1.))
def _ppf(self, x, row):
# pylint: disable=arguments-differ
factored, xr, rr, _ = self._sliceargs(x, row)
if self._ycumul is None: # pragma: no cover
self._compute_ycumul()
if factored:
return interpolate_multi_x_y(xr, self._ycumul[rr], self._xvals, bounds_error=False,
fill_value=(0.,1.)).reshape(x.shape)
return interpolate_unfactored_multi_x_y(xr, rr, self._ycumul, self._xvals,
bounds_error=False, fill_value=(0.,1.))
def _updated_ctor_param(self):
"""
Set the bins as additional constructor argument
"""
dct = super(interp_gen, self)._updated_ctor_param()
dct['xvals'] = self._xvals
dct['yvals'] = self._yvals
return dct
@classmethod
def plot_native(cls, pdf, **kwargs):
"""Plot the PDF in a way that is particular to this type of distibution
For a interpolated PDF this uses the interpolation points
"""
axes, _, kw = get_axes_and_xlims(**kwargs)
return plot_pdf_on_axes(axes, pdf, pdf.dist.xvals, **kw)
@classmethod
def add_mappings(cls):
"""
Add this class's mappings to the conversion dictionary
"""
cls._add_creation_method(cls.create, None)
cls._add_extraction_method(extract_vals_at_x, None)
interp = interp_gen.create
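# --- Illustrative sketch (editorial addition, hypothetical data) of the normalization
# used by interp_gen above: a trapezoid-rule cumulative integral on the shared x grid,
# whose last column normalizes both the PDF and CDF rows.
#
#   xvals = np.linspace(0., 1., 11)
#   yvals = np.random.uniform(size=(5, 11))                      # 5 un-normalized PDFs
#   dx = np.diff(xvals)
#   ycumul = np.zeros_like(yvals)
#   ycumul[:, 0] = 0.5 * yvals[:, 0] * dx[0]
#   ycumul[:, 1:] = np.cumsum(0.5 * dx * (yvals[:, 1:] + yvals[:, :-1]), axis=1)
#   yvals = (yvals.T / ycumul[:, -1]).T                          # each row now integrates to ~1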
class interp_irregular_gen(Pdf_rows_gen):
"""Interpolator based distribution
Notes
-----
This implements a PDF using a set of interpolated values.
It simply takes a set of x and y values and uses `scipy.interpolate.interp1d` to
build the PDF.
"""
# pylint: disable=protected-access
name = 'interp_irregular'
version = 0
_support_mask = rv_continuous._support_mask
def __init__(self, xvals, yvals, *args, **kwargs):
"""
Create a new distribution by interpolating the given values
Parameters
----------
xvals : array_like
The x-values used to do the interpolation
yvals : array_like
The y-values used to do the interpolation
"""
if xvals.shape != yvals.shape: # pragma: no cover
raise ValueError("Shape of xvals (%s) != shape of yvals (%s)" % (xvals.shape, yvals.shape))
self._xvals = reshape_to_pdf_size(xvals, -1)
# Set support
kwargs['a'] = self.a = np.min(self._xvals)
kwargs['b'] = self.b = np.max(self._xvals)
kwargs['shape'] = xvals.shape[:-1]
check_input = kwargs.pop('check_input', True)
self._yvals = reshape_to_pdf_size(yvals, -1)
if check_input:
self._yvals = normalize_interp1d(self._xvals, self._yvals)
self._ycumul = None
super(interp_irregular_gen, self).__init__(*args, **kwargs)
self._addobjdata('xvals', self._xvals)
self._addobjdata('yvals', self._yvals)
def _compute_ycumul(self):
copy_shape = np.array(self._yvals.shape)
self._ycumul = np.ndarray(copy_shape)
self._ycumul[:,0] = 0.
self._ycumul[:,1:] = np.cumsum(self._xvals[:,1:]*self._yvals[:,1:] - self._xvals[:,:-1]*self._yvals[:,1:], axis=1)
@property
def xvals(self):
"""Return the x-values used to do the interpolation"""
return self._xvals
@property
def yvals(self):
"""Return the y-valus used to do the interpolation"""
return self._yvals
def _pdf(self, x, row):
# pylint: disable=arguments-differ
factored, xr, rr, _ = self._sliceargs(x, row)
if factored:
return interpolate_multi_x_multi_y(xr, self._xvals[rr], self._yvals[rr], bounds_error=False, fill_value=0.).reshape(x.shape)
return interpolate_unfactored_multi_x_multi_y(xr, rr, self._xvals, self._yvals, bounds_error=False, fill_value=0.)
def _cdf(self, x, row):
# pylint: disable=arguments-differ
if self._ycumul is None: # pragma: no cover
self._compute_ycumul()
factored, xr, rr, _ = self._sliceargs(x, row)
if factored:
return interpolate_multi_x_multi_y(xr, self._xvals[rr], self._ycumul[rr], bounds_error=False, fill_value=(0., 1.)).reshape(x.shape)
return interpolate_unfactored_multi_x_multi_y(xr, rr, self._xvals, self._ycumul, bounds_error=False, fill_value=(0., 1.))
def _ppf(self, x, row):
# pylint: disable=arguments-differ
if self._ycumul is None: # pragma: no cover
self._compute_ycumul()
factored, xr, rr, _ = self._sliceargs(x, row)
if factored:
return interpolate_multi_x_multi_y(xr, self._ycumul[rr], self._xvals[rr], bounds_error=False,
fill_value=(self.a, self.b)).reshape(x.shape)
return interpolate_unfactored_multi_x_multi_y(xr, rr, self._ycumul, self._xvals, bounds_error=False,
fill_value=(self.a, self.b))
def _updated_ctor_param(self):
"""
Set the bins as additional constructor argument
"""
dct = super(interp_irregular_gen, self)._updated_ctor_param()
dct['xvals'] = self._xvals
dct['yvals'] = self._yvals
return dct
@classmethod
def plot_native(cls, pdf, **kwargs):
"""Plot the PDF in a way that is particular to this type of distibution
For a interpolated PDF this uses the interpolation points
"""
axes, _, kw = get_axes_and_xlims(**kwargs)
xvals_row = pdf.dist.xvals
return plot_pdf_on_axes(axes, pdf, xvals_row, **kw)
@classmethod
def add_mappings(cls):
"""
Add this class's mappings to the conversion dictionary
"""
cls._add_creation_method(cls.create, None)
cls._add_extraction_method(extract_xy_vals, None)
cls._add_extraction_method(extract_xy_sparse, None)
interp_irregular = interp_irregular_gen.create
interp_irregular_gen.test_data = dict(interp_irregular=dict(gen_func=interp_irregular, ctor_data=dict(xvals=XARRAY, yvals=YARRAY),\
convert_data=dict(xvals=XBINS), test_xvals=TEST_XVALS))
interp_gen.test_data = dict(interp=dict(gen_func=interp, ctor_data=dict(xvals=XBINS, yvals=YARRAY),\
convert_data=dict(xvals=XBINS), test_xvals=TEST_XVALS))
add_class(interp_gen)
add_class(interp_irregular_gen)
```
#### File: qp/qp/plotting.py
```python
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
def init_matplotlib():
"""Initialize matplotlib parameters"""
mpl.rcParams['text.usetex'] = True
mpl.rcParams['mathtext.rm'] = 'serif'
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.serif'] = 'Times New Roman'
mpl.rcParams['axes.titlesize'] = 16
mpl.rcParams['axes.labelsize'] = 16
mpl.rcParams['savefig.dpi'] = 250
mpl.rcParams['savefig.format'] = 'pdf'
mpl.rcParams['savefig.bbox'] = 'tight'
init_matplotlib()
COLORS = {}
COLORS['truth'] = 'k'
COLORS['mix_mod'] = 'k'
COLORS['gridded'] = 'k'
COLORS['quantiles'] = 'blueviolet'
COLORS['histogram'] = 'darkorange'
COLORS['samples'] = 'forestgreen'
STYLES = {}
STYLES['truth'] = '-'
STYLES['mix_mod'] = ':'
STYLES['gridded'] = '--'
STYLES['quantiles'] = '--'#(0,(5,10))
STYLES['histogram'] = ':'#(0,(3,6))
STYLES['samples'] = '-.'#(0,(1,2))
def make_figure_axes(xlim, **kwargs):
"""
Build a figure and a set of figure axes to plot data on
Parameters
----------
xlim : (float, float)
The x-axis limits of the plot
Keywords
--------
**kwargs : passed directly to the `matplotlib` plot function
Return
------
fig, axes : The figure and axes
"""
xlabel = kwargs.pop('xlabel', r'$z$')
ylabel = kwargs.pop('ylabel', r'$p(z)$')
fig = plt.figure()
axes = fig.add_subplot(111)
axes.set_xlim(xlim[0], xlim[-1])
axes.set_xlabel(xlabel, fontsize=16)
axes.set_ylabel(ylabel, fontsize=16)
return (fig, axes)
def get_axes_and_xlims(**kwargs):
"""Get and return the axes and xlims from the kwargs"""
axes = kwargs.pop('axes', None)
xlim = kwargs.pop('xlim', None)
if axes is None:
if xlim is None: #pragma: no cover
raise ValueError("Either xlim or axes must be provided")
_, axes = make_figure_axes(xlim, **kwargs)
else:
if xlim is not None: #pragma: no cover
raise ValueError("Only one of xlim and axes should be provided")
xlim = axes.get_xlim()
return axes, xlim, kwargs
def plot_pdf_on_axes(axes, pdf, xvals, **kwargs):
"""
Plot a PDF on a set of axes, by evaluating it a set of points
Parameters
----------
axes : `matplotlib.axes` or `None`
The axes we want to plot the data on
pdf : `scipy.stats.rv_frozen`
The distribution we want to plot
xvals : `np.array`
The locations we evaluate the PDF at for plotting
Keywords
--------
Keywords are passed to matplotlib
Return
------
axes : The axes the data are plotted on
"""
yvals = pdf.pdf(xvals)
axes.plot(np.squeeze(xvals), np.squeeze(yvals), **kwargs)
return axes
def plot_dist_pdf(pdf, **kwargs):
"""
Plot a PDF on a set of axes, using the axes limits
Parameters
----------
pdf : `scipy.stats.rv_frozen`
The distribution we want to plot
Keywords
--------
axes : `matplotlib.axes`
The axes to plot on
xlim : (float, float)
The x-axis limits
npts : int
The number of x-axis points
remaining kwargs : passed directly to the `plot_pdf_on_axes` plot function
Return
------
axes : The axes the data are plotted on
"""
axes, xlim, kw = get_axes_and_xlims(**kwargs)
npoints = kw.pop('npts', 101)
xvals = np.linspace(xlim[0], xlim[1], npoints)
return plot_pdf_on_axes(axes, pdf, xvals, **kw)
def plot_pdf_quantiles_on_axes(axes, xvals, yvals, quantiles, **kwargs):
"""
Plot a PDF on a set of axes, by evaluating at the quantiles provided
Parameters
----------
axes : The axes we want to plot the data on
xvals : array_like
Pdf xvalues
yvals : array_like
Pdf yvalues
quantiles : (`np.array`, `np.array`)
The quantiles that define the distribution pdf
Keywords
--------
npoints : `int`
Number of points to use in the plotting. Evenly spaced along the axis provided.
**kwargs : passed directly to the `matplotlib` plot function
Return
------
axes : The axes the data are plotted on
"""
kwargs.setdefault('label', 'Quantiles')
axes.scatter(quantiles[1], np.zeros(np.shape(quantiles[1])), color=COLORS['quantiles'], marker='|', s=100, alpha=0.75, **kwargs)
kwargs.setdefault('label', 'Quantile Interpolated PDF')
axes.plot(xvals, yvals, color=COLORS['quantiles'], lw=2.0, alpha=1.0, linestyle=STYLES['quantiles'], **kwargs)
return axes
def plot_pdf_histogram_on_axes(axes, hist, **kwargs):
"""
Plot a PDF on a set of axes, by plotting the histogrammed data
Parameters
----------
axes : The axes we want to plot the data on
Keywords
--------
npoints : `int`
Number of points to use in the plotting. Evenly spaced along the axis provided.
**kwargs : passed directly to the `matplotlib` plot function
Return
------
axes : The axes the data are plotted on
"""
axes.scatter(hist[0], np.zeros(np.shape(hist[0])), color=COLORS['histogram'], marker='|', s=100, label='Histogram Bin Ends', alpha=0.75)
bin_centers = (hist[0][0:-1] + hist[0][1:])/2.
kwargs.setdefault('label', 'Histogram Interpolated PDF')
axes.hist(bin_centers, bins=hist[0], weights=np.squeeze(hist[1]), color=COLORS['histogram'], lw=None, alpha=1.0, **kwargs)
return axes
def plot_pdf_samples_on_axes(axes, pdf, samples, **kwargs):
"""
Plot a PDF on a set of axes, by displaying a set of samples from the PDF
Parameters
----------
axes : The axes we want to plot the data on
pdf : `scipy.stats.rv_frozen`
The distribution we want to plot
samples : `np.array`
Points sampled from the PDF
Keywords
--------
**kwargs : passed directly to the `matplotlib` plot function
Return
------
axes : The axes the data are plotted on
"""
kwargs.setdefault('label', 'Samples')
axes.scatter(samples, np.zeros(np.shape(samples)), color=COLORS['samples'], marker='|', s=100, alpha=0.75, **kwargs)
npoints = kwargs.pop('npoints', 101)
xlim = axes.get_xlim()
xvals = np.linspace(xlim[0], xlim[1], npoints)
yvals = np.squeeze(pdf.pdf(xvals))
kwargs.setdefault('label', 'Samples Interpolated PDF')
axes.plot(xvals, yvals, color=COLORS['samples'], lw=2.0, alpha=1.0, linestyle=STYLES['samples'], **kwargs)
return axes
def plot_native(pdf, **kwargs):
"""Utility function to plot a pdf in a format that is specific to that type of pdf"""
if hasattr(pdf, 'plot_native'):
axes = pdf.plot_native(**kwargs)
else:
axes = pdf.dist.plot_native(pdf, **kwargs)
return axes.figure, axes
def plot(pdf, **kwargs):
"""Utility function to plot a pdf in a format that is specific to that type of pdf"""
if hasattr(pdf, 'plot_native'):
axes = pdf.plot(**kwargs)
else:
axes = pdf.dist.plot(pdf, **kwargs)
return axes.figure, axes
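# --- Illustrative usage sketch (editorial addition, hypothetical distribution) ---
# Any frozen scipy.stats-style distribution with a .pdf(x) method can be plotted:
#
#   from scipy import stats
#   pdf = stats.norm(loc=0.5, scale=0.1)
#   fig, axes = make_figure_axes((0., 1.))
#   plot_pdf_on_axes(axes, pdf, np.linspace(0., 1., 101), label='normal')
#   fig.savefig('norm_pdf')   # output format follows the rcParams set in init_matplotlib()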
```
#### File: qp/qp/sparse_rep.py
```python
__author__ = '<NAME>'
import numpy as np
from scipy.special import voigt_profile
from scipy import linalg as sla
from scipy import integrate as sciint
def shapes2pdf(wa, ma, sa, ga, meta, cut=1.e-5):
"""return a pdf evaluated at the meta['xvals'] values for the
given set of Voigt parameters"""
#input : list of shape parameters for a single object
x = meta['xvals']
pdf = np.zeros_like(x)
for w, m, s, g in zip(wa, ma, sa, ga):
pdft = voigt_profile(x - m, s, g)
pdft = np.where(pdft >= cut, pdft, 0.)
pdft = w * pdft / sla.norm(pdft)
pdf += pdft
pdf = np.where(pdf >= cut, pdf, 0.)
return pdf / sciint.trapz(pdf, x)
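# --- Illustrative call (editorial addition, hypothetical shape parameters) ---
#   meta = {'xvals': np.linspace(0., 2., 201)}
#   p = shapes2pdf([1.0, 0.5], [0.8, 1.2], [0.05, 0.1], [0.0, 0.02], meta)
#   sciint.trapz(p, meta['xvals'])   # ~1 by construction, since shapes2pdf normalizes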
def create_basis(metadata, cut=1.e-5):
"""create the Voigt basis matrix out of a metadata dictionary"""
mu = metadata['mu']
Nmu = metadata['dims'][0]
sigma = metadata['sig']
Nsigma = metadata['dims'][1]
Nv = metadata['dims'][2]
xvals = metadata['xvals']
return create_voigt_basis(xvals, mu, Nmu, sigma, Nsigma, Nv, cut=cut)
def create_voigt_basis(xvals, mu, Nmu, sigma, Nsigma, Nv, cut=1.e-5):
"""
Creates a gaussian-voigt dictionary at the same resolution as the original PDF
:param float xvals: the x-axis point values for the PDF
:param float mu: [min_mu, max_mu], range of mean for gaussian
:param int Nmu: Number of values between min_mu and max_mu
:param float sigma: [min_sigma, max_sigma], range of variance for gaussian
:param int Nsigma: Number of values between min_sigma and max_sigma
:param Nv: Number of Voigt profiles per gaussian at given position mu and sigma
:param float cut: Lower cut for gaussians
:return: Dictionary as numpy array with shape (len(xvals), Nmu*Nsigma*Nv)
:rtype: float
"""
means = np.linspace(mu[0], mu[1], Nmu)
sig = np.linspace(sigma[0], sigma[1], Nsigma)
gamma = np.linspace(0, 0.5, Nv)
NA = Nmu * Nsigma * Nv
Npdf = len(xvals)
A = np.zeros((Npdf, NA))
kk = 0
for i in range(Nmu):
for j in range(Nsigma):
for k in range(Nv):
pdft = voigt_profile(xvals - means[i], sig[j], gamma[k])
pdft = np.where(pdft >= cut, pdft, 0.)
A[:, kk] = pdft / sla.norm(pdft)
kk += 1
return A
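# --- Illustrative shape check (editorial addition, hypothetical grid) ---
#   A = create_voigt_basis(np.linspace(0., 1., 200), [0., 1.], 10, [0.01, 0.1], 5, 3)
#   A.shape   # (200, 150): len(xvals) rows by Nmu*Nsigma*Nv = 10*5*3 columns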
def sparse_basis(dictionary, query_vec, n_basis, tolerance=None):
"""
Compute sparse representation of a vector given Dictionary (basis)
for a given tolerance or number of basis functions. It uses a Cholesky decomposition to speed up the process and to
solve the linear systems, adapted from <NAME>., <NAME>. and <NAME>., Technical Report - CS
Technion, April 2008
:param float dictionary: Array with all basis functions as columns; must have shape (len(vector), total basis) and each column must have euclidean l-2 norm equal to 1
:param float query_vec: vector of which a sparse representation is desired
:param int n_basis: number of desired basis functions
:param float tolerance: tolerance desired if n_basis does not need to be fixed; pass a large n_basis to ensure the tolerance is reached
:return: indices, values (2 arrays one with the position and the second with the coefficients)
"""
a_n = np.zeros(dictionary.shape[1])
machine_eps = np.finfo(dictionary.dtype).eps
alpha = np.dot(dictionary.T, query_vec)
res = query_vec
idxs = np.arange(dictionary.shape[1]) # keeping track of swapping
L = np.zeros((n_basis, n_basis), dtype=dictionary.dtype)
L[0, 0] = 1.
for n_active in range(n_basis):
lam = np.argmax(abs(np.dot(dictionary.T, res)))
if lam < n_active or alpha[lam] ** 2 < machine_eps: #pragma: no cover
n_active -= 1
break
if n_active > 0: #pragma: no cover
# Updates the Cholesky decomposition of dictionary
L[n_active, :n_active] = np.dot(dictionary[:, :n_active].T, dictionary[:, lam])
sla.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], lower=True, overwrite_b=True)
v = sla.norm(L[n_active, :n_active]) ** 2
if 1 - v <= machine_eps:
print("Selected basis are dependent or normed are not unity")
break
L[n_active, n_active] = np.sqrt(1 - v)
dictionary[:, [n_active, lam]] = dictionary[:, [lam, n_active]]
alpha[[n_active, lam]] = alpha[[lam, n_active]]
idxs[[n_active, lam]] = idxs[[lam, n_active]]
# solves LL'x = query_vec as a composition of two triangular systems
gamma = sla.cho_solve((L[:n_active + 1, :n_active + 1], True), alpha[:n_active + 1], overwrite_b=False)
res = query_vec - np.dot(dictionary[:, :n_active + 1], gamma)
if tolerance is not None and sla.norm(res) ** 2 <= tolerance:
break
a_n[idxs[:n_active + 1]] = gamma
del dictionary
#return a_n
return idxs[:n_active + 1], gamma
def combine_int(Ncoef, Nbase):
"""
combine index of base (up to 62500 bases) and value (16 bits integer with sign) in a 32 bit integer
First half of word is for the value and second half for the index
:param int Ncoef: Integer with sign to represent the value associated with a base, this is a sign 16 bits integer
:param int Nbase: Integer representing the base, unsigned 16 bits integer
:return: 32 bits integer
"""
return (Ncoef << 16) | Nbase
def get_N(longN):
"""
Extract the coefficient and basis index from a 32-bit integer,
i.e. Ncoef = longN >> 16 and Nbase = longN & 0xffff
:param int longN: input 32 bits integer
:return: Ncoef, Nbase both 16 bits integer
"""
return (longN >> 16), (longN & (2 ** 16 - 1))
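# --- Worked example of the 16/16-bit packing (editorial addition) ---
#   combine_int(5, 3)   # (5 << 16) | 3 == 327683
#   get_N(327683)       # (5, 3)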
def decode_sparse_indices(indices):
"""decode sparse indices into basis indices and weigth array
"""
Ncoef = 32001
sp_ind = np.array(list(map(get_N, indices)))
spi = sp_ind[:, 0, :]
dVals = 1./(Ncoef - 1)
vals = spi * dVals
vals[:, 0] = 1.
return sp_ind[:, 1, :], vals
def indices2shapes(sparse_indices, meta):
"""compute the Voigt shape parameters from the sparse index
Parameters
----------
sparse_indices: `np.array`
1D Array of indices for each object in the ensemble
meta: `dict`
Dictionary of metadata to decode the sparse indices
"""
Nmu = meta['dims'][0]
Nsigma = meta['dims'][1]
Nv = meta['dims'][2]
Ncoef = meta['dims'][3]
mu = meta['mu']
sigma = meta['sig']
means_array = np.linspace(mu[0], mu[1], Nmu)
sig_array = np.linspace(sigma[0], sigma[1], Nsigma)
gam_array = np.linspace(0, 0.5, Nv)
#split the sparse indices into pairs (weight, basis_index)
#for each sparse index corresponding to one of the basis function
sp_ind = np.array(list(map(get_N, sparse_indices)))
spi = sp_ind[:, 0, :]
dVals = 1./(Ncoef - 1)
vals = spi * dVals
vals[:, 0] = 1.
Dind2 = sp_ind[:, 1, :]
means = means_array[np.array(Dind2 / (Nsigma * Nv), int)]
sigmas = sig_array[np.array((Dind2 % (Nsigma * Nv)) / Nv, int)]
gammas = gam_array[np.array((Dind2 % (Nsigma * Nv)) % Nv, int)]
return vals, means, sigmas, gammas
def build_sparse_representation(x, P, mu=None, Nmu=None, sig=None, Nsig=None, Nv=3, Nsparse=20, tol=1.e-10, verbose=True):
"""compute the sparse representation of a set of pdfs evaluated on a common x array
"""
#Note : the range for gamma is fixed to [0, 0.5] in create_voigt_basis
Ntot = len(P)
if verbose:
print("Total Galaxies = ", Ntot)
dx = x[1] - x[0]
if mu is None:
mu = [min(x), max(x)]
if Nmu is None:
Nmu = len(x)
if sig is None:
max_sig = (max(x) - min(x)) / 12.
min_sig = dx / 6.
sig = [min_sig, max_sig]
if Nsig is None:
Nsig = int(np.ceil(2. * (max_sig - min_sig) / dx))
if verbose:
print('dx = ', dx)
print('Nmu, Nsig, Nv = ', '[', Nmu, ',', Nsig, ',', Nv, ']')
print('Total bases in dictionary', Nmu * Nsig * Nv)
print('Nsparse (number of bases) = ', Nsparse)
#Create dictionary
print('Creating Dictionary...')
A = create_voigt_basis(x, mu, Nmu, sig, Nsig, Nv)
bigD = {}
Ncoef = 32001
AA = np.linspace(0, 1, Ncoef)
Da = AA[1] - AA[0]
bigD['xvals'] = x
bigD['mu'] = mu
bigD['sig'] = sig
bigD['dims'] = [Nmu, Nsig, Nv, Ncoef, Nsparse]
bigD['Ntot'] = Ntot
if verbose:
print('Creating Sparse representation...')
Sparse_Array = np.zeros((Ntot, Nsparse), dtype='int')
for k in range(Ntot):
pdf0 = P[k]
Dind, Dval = sparse_basis(A, pdf0, Nsparse, tolerance=tol)
if len(Dind) < 1:#pragma: no cover
continue
#bigD[k]['sparse'] = [Dind, Dval]
if max(Dval) > 0:
dval0 = Dval[0]
Dvalm = Dval / np.max(Dval)
index = np.array(list(map(round, (Dvalm / Da))), dtype='int')
index0 = int(round(dval0/Da))
index[0] = index0
else:
index = np.zeros(len(Dind), dtype='int') #pragma: no cover
sparse_ind = np.array(list(map(combine_int, index, Dind)))
Sparse_Array[k, 0:len(sparse_ind)] = sparse_ind
#swap back columns
A[:, [Dind]] = A[:, [np.arange(len(Dind))]]
if verbose:
print('done')
return Sparse_Array, bigD, A
def pdf_from_sparse(sparse_indices, A, xvals, cut=1.e-5):
"""return the array of evaluations at xvals from the sparse indices
"""
indices, vals = decode_sparse_indices(sparse_indices)
pdf_y = (A[:, indices]*vals).sum(axis=-1)
pdf_y = np.where(pdf_y >= cut, pdf_y, 0.)
pdf_x = xvals
norms = sciint.trapz(pdf_y.T, pdf_x)
pdf_y /= norms
return pdf_y
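# --- Illustrative round-trip sketch (editorial addition, hypothetical Gaussian PDFs) ---
#   x = np.linspace(0., 2., 301)
#   P = np.array([np.exp(-0.5 * ((x - m) / 0.1)**2) for m in (0.8, 1.0, 1.2)])
#   sparse, meta, A = build_sparse_representation(x, P, verbose=False)
#   P_back = pdf_from_sparse(sparse, A, x).T   # rows approximate the (normalized) inputs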
``` |
{
"source": "joezuntz/TreeCorr",
"score": 2
} |
#### File: TreeCorr/devel/mpi_example.py
```python
import numpy as np
import time
import os
import sys
import shutil
import socket
import fitsio
import treecorr
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
# Some parameters you can play with here that will affect both serial (not really "serial", since
# it still uses OpenMP -- just running on 1 node) and parallel runs.
bin_size = 0.01
min_sep = 1 # arcmin
max_sep = 600  # arcmin
bin_slop = 1 # Can dial down to 0 to take longer
low_mem = False # Set to True to use less memory during processing.
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nproc = comm.Get_size()
if 1:
# DES Y1: 80 GB
file_name = "mcal-y1a1.fits"
patch_file = 'y1_patches.fits'
url = "http://desdr-server.ncsa.illinois.edu/despublic/y1a1_files/shear_catalogs/mcal-y1a1-combined-riz-unblind-v4-matched.fits"
ra_col='ra'
dec_col='dec'
ra_units='deg'
dec_units='deg'
g1_col='e1'
g2_col='e2'
flag_col='flags_select'
else:
# Aardvark: 15 MB
file_name= "Aardvark.fit"
patch_file = 'aardvark_patches.fits'
url = "https://github.com/rmjarvis/TreeCorr/wiki/Aardvark.fit"
ra_col='RA'
dec_col='DEC'
ra_units='deg'
dec_units='deg'
g1_col='GAMMA1'
g2_col='GAMMA2'
flag_col=None
def download_file():
if not os.path.exists(file_name):
u = urlopen(url)
print('urlinfo: ')
print(u.info())
file_size = int(u.info().get("Content-Length"))
print("file_size = %d MBytes"%(file_size/1024**2))
with open('/proc/sys/net/core/rmem_default', 'r') as f:
block_sz = int(f.read())
print("block size = %d KBytes"%(block_sz/1024))
with open(file_name, 'wb') as f:
file_size_dl = 0
dot_step = file_size / 400.
next_dot = dot_step
while True:
buffer = u.read(block_sz)
if not buffer: break
file_size_dl += len(buffer)
f.write(buffer)
# Easy status bar
if file_size_dl > next_dot:
sys.stdout.write('.')
sys.stdout.flush()
next_dot += dot_step
print('Done downloading',file_name)
else:
print('Using catalog file %s'%file_name)
# It's helpful to have a separate file for each process. Otherwise they all end up
# fighting over the read and the I/O becomes much slower.
# It's also very helpful to save a version with only the relevant columns, so fitsio
# doesn't have to scan past all the useless extra information.
fname_0 = file_name.replace('.fits','_0.fits')
if not os.path.exists(fname_0):
all_cols = [ra_col, dec_col, g1_col, g2_col, flag_col]
all_cols = [c for c in all_cols if c is not None]
with fitsio.FITS(file_name, 'r') as fits:
data = fits[1][all_cols][:]
fitsio.write(fname_0, data)
print('wrote',fname_0)
for p in range(nproc):
fname_p = file_name.replace('.fits','%d.fits'%p)
if not os.path.exists(fname_p):
shutil.copyfile(fname_0, fname_p)
print('copied',fname_0,'to',fname_p)
def make_patches():
# First make the patches. Do this on one process.
# For a real-life example, this might be made once and saved.
# Or it might be made from a smaller version of the catalog:
# either with the every_nth option, or maybe on a redmagic catalog or similar,
# which would be smaller than the full source catalog, etc.
# Here, we use every_nth to reduce the catalog size.
if not os.path.exists(patch_file):
print('Making patches')
fname = file_name.replace('.fits','_0.fits')
part_cat = treecorr.Catalog(fname,
ra_col=ra_col, dec_col=dec_col,
ra_units=ra_units, dec_units=dec_units,
g1_col=g1_col, g2_col=g2_col, flag_col=flag_col,
npatch=32, verbose=2)
print('Done loading file: nobj = ',part_cat.nobj,part_cat.ntot)
part_cat.get_patches()
print('Made patches: ',part_cat.patch_centers)
part_cat.write_patch_centers(patch_file)
print('Wrote patch file ',patch_file)
del part_cat
print('Done making patches')
else:
print('Using existing patch file')
def run_serial():
from test_helper import profile
t0 = time.time()
fname = file_name.replace('.fits','_0.fits')
log_file = 'serial.log'
cat = treecorr.Catalog(fname,
ra_col=ra_col, dec_col=dec_col,
ra_units=ra_units, dec_units=dec_units,
g1_col=g1_col, g2_col=g2_col, flag_col=flag_col,
verbose=1, log_file=log_file,
patch_centers=patch_file)
t1 = time.time()
print('Made cat', t1-t0)
gg = treecorr.GGCorrelation(bin_size=bin_size, min_sep=min_sep, max_sep=max_sep,
sep_units='arcmin', bin_slop=bin_slop,
verbose=1, log_file=log_file)
# These next two steps don't need to be done separately. They will automatically
# happen when calling process. But separating them out makes it easier to profile.
with profile():
cat.load()
t2 = time.time()
print('Loaded', t2-t1)
with profile():
cat.get_patches()
t3 = time.time()
print('Made patches', t3-t2)
with profile():
gg.process(cat, low_mem=low_mem)
t4 = time.time()
print('Processed', t4-t3)
print('Done with non-parallel computation',t4-t0)
print('xip = ',gg.xip, flush=True)
def run_parallel():
t0 = time.time()
print(rank,socket.gethostname(),flush=True)
fname = file_name.replace('.fits','%d.fits'%rank)[:]
log_file = 'parallel_%d.log'%rank
# All processes make the full cat with these patches.
# Note: this doesn't actually read anything from disk yet.
cat = treecorr.Catalog(fname,
ra_col=ra_col, dec_col=dec_col,
ra_units=ra_units, dec_units=dec_units,
g1_col=g1_col, g2_col=g2_col, flag_col=flag_col,
verbose=1, log_file=log_file,
patch_centers=patch_file)
t1 = time.time()
print('Made cat', t1-t0, flush=True)
# Everyone needs to make their own Correlation object.
gg = treecorr.GGCorrelation(bin_size=bin_size, min_sep=min_sep, max_sep=max_sep,
sep_units='arcmin', bin_slop=bin_slop,
verbose=1, log_file=log_file)
cat.load()
t2 = time.time()
print(rank,'Loaded', t2-t1, flush=True)
cat.get_patches()
t3 = time.time()
print(rank,'Made patches', t3-t2, flush=True)
# To use multiple processes, just pass comm to the process command.
gg.process(cat, comm=comm, low_mem=low_mem)
t4 = time.time()
print(rank,'Processed', t4-t3, flush=True)
comm.Barrier()
t5 = time.time()
print(rank,'Barrier', t5-t4, flush=True)
print(rank,'Done with parallel computation',t5-t0,flush=True)
# rank 0 has the completed result.
if rank == 0:
print('xip = ',gg.xip, flush=True)
if __name__ == '__main__':
if rank == 0:
download_file()
make_patches()
run_serial()
comm.Barrier()
run_parallel()
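# Typical invocation (assumes a working MPI installation that mpi4py was built against):
#   mpiexec -n 4 python mpi_example.py
# Rank 0 first downloads the catalog, builds the patch file, and runs the serial timing;
# after the barrier, all ranks join the patch-parallel computation together.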
```
#### File: TreeCorr/tests/test_gg.py
```python
from __future__ import print_function
import numpy as np
import os
import coord
import time
import treecorr
from test_helper import get_from_wiki, get_script_name, do_pickle, CaptureLog
from test_helper import assert_raises, timer, assert_warns
from numpy import sin, cos, tan, arcsin, arccos, arctan, arctan2, pi
@timer
def test_direct():
# If the catalogs are small enough, we can do a direct calculation to see if it comes out right.
# This should exactly match the treecorr result if brute_force=True
ngal = 200
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
w1 = rng.random_sample(ngal)
g11 = rng.normal(0,0.2, (ngal,) )
g21 = rng.normal(0,0.2, (ngal,) )
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
w2 = rng.random_sample(ngal)
g12 = rng.normal(0,0.2, (ngal,) )
g22 = rng.normal(0,0.2, (ngal,) )
cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, g1=g11, g2=g21)
cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, g1=g12, g2=g22)
min_sep = 1.
max_sep = 50.
nbins = 50
bin_size = np.log(max_sep/min_sep) / nbins
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, brute=True)
gg.process(cat1, cat2)
true_npairs = np.zeros(nbins, dtype=int)
true_weight = np.zeros(nbins, dtype=float)
true_xip = np.zeros(nbins, dtype=complex)
true_xim = np.zeros(nbins, dtype=complex)
for i in range(ngal):
# It's hard to do all the pairs at once with numpy operations (although maybe possible).
# But we can at least do all the pairs for each entry in cat1 at once with arrays.
rsq = (x1[i]-x2)**2 + (y1[i]-y2)**2
r = np.sqrt(rsq)
logr = np.log(r)
expmialpha = ((x1[i]-x2) - 1j*(y1[i]-y2)) / r
ww = w1[i] * w2
xip = ww * (g11[i] + 1j*g21[i]) * (g12 - 1j*g22)
xim = ww * (g11[i] + 1j*g21[i]) * (g12 + 1j*g22) * expmialpha**4
index = np.floor(np.log(r/min_sep) / bin_size).astype(int)
mask = (index >= 0) & (index < nbins)
np.add.at(true_npairs, index[mask], 1)
np.add.at(true_weight, index[mask], ww[mask])
np.add.at(true_xip, index[mask], xip[mask])
np.add.at(true_xim, index[mask], xim[mask])
true_xip /= true_weight
true_xim /= true_weight
print('true_npairs = ',true_npairs)
print('diff = ',gg.npairs - true_npairs)
np.testing.assert_array_equal(gg.npairs, true_npairs)
print('true_weight = ',true_weight)
print('diff = ',gg.weight - true_weight)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
print('true_xip = ',true_xip)
print('gg.xip = ',gg.xip)
print('gg.xip_im = ',gg.xip_im)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-4, atol=1.e-8)
print('true_xim = ',true_xim)
print('gg.xim = ',gg.xim)
print('gg.xim_im = ',gg.xim_im)
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, rtol=1.e-4, atol=1.e-8)
try:
import fitsio
except ImportError:
print('Skipping FITS tests, since fitsio is not installed')
return
# Check that running via the corr2 script works correctly.
config = treecorr.config.read_config('configs/gg_direct.yaml')
cat1.write(config['file_name'])
cat2.write(config['file_name2'])
treecorr.corr2(config)
data = fitsio.read(config['gg_file_name'])
np.testing.assert_allclose(data['r_nom'], gg.rnom)
np.testing.assert_allclose(data['npairs'], gg.npairs)
np.testing.assert_allclose(data['weight'], gg.weight)
np.testing.assert_allclose(data['xip'], gg.xip, rtol=1.e-3)
np.testing.assert_allclose(data['xip_im'], gg.xip_im, rtol=1.e-3)
np.testing.assert_allclose(data['xim'], gg.xim, rtol=1.e-3)
np.testing.assert_allclose(data['xim_im'], gg.xim_im, rtol=1.e-3)
# Repeat with binslop = 0.
# And don't do any top-level recursion so we actually test not going to the leaves.
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_slop=0,
max_top=0)
gg.process(cat1, cat2)
np.testing.assert_array_equal(gg.npairs, true_npairs)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-4, atol=1.e-8)
print('true_xim = ',true_xim)
print('gg.xim = ',gg.xim)
print('gg.xim_im = ',gg.xim_im)
print('diff = ',gg.xim - true_xim.real)
print('max diff = ',np.max(np.abs(gg.xim - true_xim.real)))
print('rel diff = ',(gg.xim - true_xim.real)/true_xim.real)
# This is the one that is highly affected by the approximation from averaging the shears
# before projecting, rather than averaging each shear projected to its own connecting line.
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-3, atol=3.e-4)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, atol=1.e-3)
# Check a few basic operations with a GGCorrelation object.
do_pickle(gg)
gg2 = gg.copy()
gg2 += gg
np.testing.assert_allclose(gg2.npairs, 2*gg.npairs)
np.testing.assert_allclose(gg2.weight, 2*gg.weight)
np.testing.assert_allclose(gg2.meanr, 2*gg.meanr)
np.testing.assert_allclose(gg2.meanlogr, 2*gg.meanlogr)
np.testing.assert_allclose(gg2.xip, 2*gg.xip)
np.testing.assert_allclose(gg2.xip_im, 2*gg.xip_im)
np.testing.assert_allclose(gg2.xim, 2*gg.xim)
np.testing.assert_allclose(gg2.xim_im, 2*gg.xim_im)
gg2.clear()
gg2 += gg
np.testing.assert_allclose(gg2.npairs, gg.npairs)
np.testing.assert_allclose(gg2.weight, gg.weight)
np.testing.assert_allclose(gg2.meanr, gg.meanr)
np.testing.assert_allclose(gg2.meanlogr, gg.meanlogr)
np.testing.assert_allclose(gg2.xip, gg.xip)
np.testing.assert_allclose(gg2.xip_im, gg.xip_im)
np.testing.assert_allclose(gg2.xim, gg.xim)
np.testing.assert_allclose(gg2.xim_im, gg.xim_im)
ascii_name = 'output/gg_ascii.txt'
gg.write(ascii_name, precision=16)
gg3 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
gg3.read(ascii_name)
np.testing.assert_allclose(gg3.npairs, gg.npairs)
np.testing.assert_allclose(gg3.weight, gg.weight)
np.testing.assert_allclose(gg3.meanr, gg.meanr)
np.testing.assert_allclose(gg3.meanlogr, gg.meanlogr)
np.testing.assert_allclose(gg3.xip, gg.xip)
np.testing.assert_allclose(gg3.xip_im, gg.xip_im)
np.testing.assert_allclose(gg3.xim, gg.xim)
np.testing.assert_allclose(gg3.xim_im, gg.xim_im)
fits_name = 'output/gg_fits.fits'
gg.write(fits_name)
gg4 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
gg4.read(fits_name)
np.testing.assert_allclose(gg4.npairs, gg.npairs)
np.testing.assert_allclose(gg4.weight, gg.weight)
np.testing.assert_allclose(gg4.meanr, gg.meanr)
np.testing.assert_allclose(gg4.meanlogr, gg.meanlogr)
np.testing.assert_allclose(gg4.xip, gg.xip)
np.testing.assert_allclose(gg4.xip_im, gg.xip_im)
np.testing.assert_allclose(gg4.xim, gg.xim)
np.testing.assert_allclose(gg4.xim_im, gg.xim_im)
with assert_raises(TypeError):
gg2 += config
gg4 = treecorr.GGCorrelation(min_sep=min_sep/2, max_sep=max_sep, nbins=nbins)
with assert_raises(ValueError):
gg2 += gg4
gg5 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep*2, nbins=nbins)
with assert_raises(ValueError):
gg2 += gg5
gg6 = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins*2)
with assert_raises(ValueError):
gg2 += gg6
@timer
def test_direct_spherical():
# Repeat in spherical coords
ngal = 100
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) ) + 200 # Put everything at large y, so small angle on sky
z1 = rng.normal(0,s, (ngal,) )
w1 = rng.random_sample(ngal)
g11 = rng.normal(0,0.2, (ngal,) )
g21 = rng.normal(0,0.2, (ngal,) )
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) ) + 200
z2 = rng.normal(0,s, (ngal,) )
w2 = rng.random_sample(ngal)
g12 = rng.normal(0,0.2, (ngal,) )
g22 = rng.normal(0,0.2, (ngal,) )
ra1, dec1 = coord.CelestialCoord.xyz_to_radec(x1,y1,z1)
ra2, dec2 = coord.CelestialCoord.xyz_to_radec(x2,y2,z2)
cat1 = treecorr.Catalog(ra=ra1, dec=dec1, ra_units='rad', dec_units='rad', w=w1, g1=g11, g2=g21)
cat2 = treecorr.Catalog(ra=ra2, dec=dec2, ra_units='rad', dec_units='rad', w=w2, g1=g12, g2=g22)
min_sep = 1.
max_sep = 10.
nbins = 50
bin_size = np.log(max_sep/min_sep) / nbins
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
sep_units='deg', brute=True)
gg.process(cat1, cat2)
r1 = np.sqrt(x1**2 + y1**2 + z1**2)
r2 = np.sqrt(x2**2 + y2**2 + z2**2)
x1 /= r1; y1 /= r1; z1 /= r1
x2 /= r2; y2 /= r2; z2 /= r2
north_pole = coord.CelestialCoord(0*coord.radians, 90*coord.degrees)
true_npairs = np.zeros(nbins, dtype=int)
true_weight = np.zeros(nbins, dtype=float)
true_xip = np.zeros(nbins, dtype=complex)
true_xim = np.zeros(nbins, dtype=complex)
rad_min_sep = min_sep * coord.degrees / coord.radians
c1 = [coord.CelestialCoord(r*coord.radians, d*coord.radians) for (r,d) in zip(ra1, dec1)]
c2 = [coord.CelestialCoord(r*coord.radians, d*coord.radians) for (r,d) in zip(ra2, dec2)]
for i in range(ngal):
for j in range(ngal):
rsq = (x1[i]-x2[j])**2 + (y1[i]-y2[j])**2 + (z1[i]-z2[j])**2
r = np.sqrt(rsq)
logr = np.log(r)
index = np.floor(np.log(r/rad_min_sep) / bin_size).astype(int)
if index < 0 or index >= nbins:
continue
# Rotate shears to coordinates where the line connecting the two points is horizontal.
# Original orientation is where north is up.
theta1 = 90*coord.degrees - c1[i].angleBetween(north_pole, c2[j])
theta2 = 90*coord.degrees - c2[j].angleBetween(north_pole, c1[i])
exp2theta1 = np.cos(2*theta1) + 1j * np.sin(2*theta1)
exp2theta2 = np.cos(2*theta2) + 1j * np.sin(2*theta2)
g1 = g11[i] + 1j * g21[i]
g2 = g12[j] + 1j * g22[j]
g1 *= exp2theta1
g2 *= exp2theta2
ww = w1[i] * w2[j]
xip = ww * g1 * np.conjugate(g2)
xim = ww * g1 * g2
true_npairs[index] += 1
true_weight[index] += ww
true_xip[index] += xip
true_xim[index] += xim
true_xip /= true_weight
true_xim /= true_weight
print('true_npairs = ',true_npairs)
print('diff = ',gg.npairs - true_npairs)
np.testing.assert_array_equal(gg.npairs, true_npairs)
print('true_weight = ',true_weight)
print('diff = ',gg.weight - true_weight)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
print('true_xip = ',true_xip)
print('gg.xip = ',gg.xip)
print('gg.xip_im = ',gg.xip_im)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-4, atol=1.e-8)
print('true_xim = ',true_xim)
print('gg.xim = ',gg.xim)
print('gg.xim_im = ',gg.xim_im)
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, rtol=1.e-4, atol=1.e-8)
try:
import fitsio
except ImportError:
print('Skipping FITS tests, since fitsio is not installed')
return
# Check that running via the corr2 script works correctly.
config = treecorr.config.read_config('configs/gg_direct_spherical.yaml')
cat1.write(config['file_name'])
cat2.write(config['file_name2'])
treecorr.corr2(config)
data = fitsio.read(config['gg_file_name'])
np.testing.assert_allclose(data['r_nom'], gg.rnom)
np.testing.assert_allclose(data['npairs'], gg.npairs)
np.testing.assert_allclose(data['weight'], gg.weight)
np.testing.assert_allclose(data['xip'], gg.xip, rtol=1.e-3)
np.testing.assert_allclose(data['xip_im'], gg.xip_im, rtol=1.e-3)
np.testing.assert_allclose(data['xim'], gg.xim, rtol=1.e-3)
np.testing.assert_allclose(data['xim_im'], gg.xim_im, rtol=1.e-3)
# Repeat with binslop = 0
# And don't do any top-level recursion so we actually test not going to the leaves.
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
sep_units='deg', bin_slop=0, max_top=0)
gg.process(cat1, cat2)
np.testing.assert_array_equal(gg.npairs, true_npairs)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-3, atol=1.e-6)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-3, atol=1.e-6)
diff = np.abs(gg.xim - true_xim.real)
reldiff = diff / true_xim.real
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-3, atol=2.e-4)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, rtol=1.e-3, atol=2.e-4)
@timer
def test_pairwise():
# Test the pairwise option.
ngal = 1000
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
w1 = rng.random_sample(ngal)
g11 = rng.normal(0,0.2, (ngal,) )
g21 = rng.normal(0,0.2, (ngal,) )
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
w2 = rng.random_sample(ngal)
g12 = rng.normal(0,0.2, (ngal,) )
g22 = rng.normal(0,0.2, (ngal,) )
w1 = np.ones_like(w1)
w2 = np.ones_like(w2)
cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, g1=g11, g2=g21)
cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, g1=g12, g2=g22)
min_sep = 5.
max_sep = 50.
nbins = 10
bin_size = np.log(max_sep/min_sep) / nbins
gg = treecorr.GGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
with assert_warns(FutureWarning):
gg.process_pairwise(cat1, cat2)
gg.finalize(cat1.varg, cat2.varg)
true_npairs = np.zeros(nbins, dtype=int)
true_weight = np.zeros(nbins, dtype=float)
true_xip = np.zeros(nbins, dtype=complex)
true_xim = np.zeros(nbins, dtype=complex)
rsq = (x1-x2)**2 + (y1-y2)**2
r = np.sqrt(rsq)
logr = np.log(r)
expmialpha = ((x1-x2) - 1j*(y1-y2)) / r
ww = w1 * w2
xip = ww * (g11 + 1j*g21) * (g12 - 1j*g22)
xim = ww * (g11 + 1j*g21) * (g12 + 1j*g22) * expmialpha**4
index = np.floor(np.log(r/min_sep) / bin_size).astype(int)
mask = (index >= 0) & (index < nbins)
np.add.at(true_npairs, index[mask], 1)
np.add.at(true_weight, index[mask], ww[mask])
np.add.at(true_xip, index[mask], xip[mask])
np.add.at(true_xim, index[mask], xim[mask])
true_xip /= true_weight
true_xim /= true_weight
np.testing.assert_array_equal(gg.npairs, true_npairs)
np.testing.assert_allclose(gg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
np.testing.assert_allclose(gg.xip, true_xip.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xip_im, true_xip.imag, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xim, true_xim.real, rtol=1.e-4, atol=1.e-8)
np.testing.assert_allclose(gg.xim_im, true_xim.imag, rtol=1.e-4, atol=1.e-8)
# If cats have names, then the logger will mention them.
# Also, test running with optional args.
cat1.name = "first"
cat2.name = "second"
with CaptureLog() as cl:
gg.logger = cl.logger
with assert_warns(FutureWarning):
gg.process_pairwise(cat1, cat2, metric='Euclidean', num_threads=2)
assert "for cats first, second" in cl.output
@timer
def test_gg():
# cf. http://adsabs.harvard.edu/abs/2002A%26A...389..729S for the basic formulae I use here.
#
# Use gamma_t(r) = gamma0 r^2/r0^2 exp(-r^2/2r0^2)
# i.e. gamma(r) = -gamma0 exp(-r^2/2r0^2) (x+iy)^2 / r0^2
#
# The Fourier transform is: gamma~(k) = -2 pi gamma0 r0^4 k^2 exp(-r0^2 k^2/2) / L^2
# P(k) = (1/2pi) <|gamma~(k)|^2> = 2 pi gamma0^2 r0^8 k^4 / L^4 exp(-r0^2 k^2)
# xi+(r) = (1/2pi) int( dk k P(k) J0(kr) )
# = pi/16 gamma0^2 (r0/L)^2 exp(-r^2/4r0^2) (r^4 - 16r^2r0^2 + 32r0^4)/r0^4
# xi-(r) = (1/2pi) int( dk k P(k) J4(kr) )
# = pi/16 gamma0^2 (r0/L)^2 exp(-r^2/4r0^2) r^4/r0^4
# Note: I'm not sure I handled the L factors correctly, but the units at the end need
# to be gamma^2, so it needs to be (r0/L)^2.
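# (Editorial sanity check of these expressions: at r = 0 the xi+ formula reduces to
# pi/16 * gamma0^2 * (r0/L)^2 * 32 = 2*pi*gamma0^2*(r0/L)^2, while xi- vanishes,
# consistent with xi- ~ r^4 at small separations.)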
gamma0 = 0.05
r0 = 10.
if __name__ == "__main__":
ngal = 1000000
L = 50.*r0 # Not infinity, so this introduces some error. Our integrals were to infinity.
tol_factor = 1
else:
ngal = 100000
L = 50.*r0
# Rather than have a single set tolerance, we tune the tolerances for the above
# __main__ setup, but scale up by a factor of 5 for the quicker run.
tol_factor = 5
rng = np.random.RandomState(8675309)
x = (rng.random_sample(ngal)-0.5) * L
y = (rng.random_sample(ngal)-0.5) * L
r2 = (x**2 + y**2)/r0**2
g1 = -gamma0 * np.exp(-r2/2.) * (x**2-y**2)/r0**2
g2 = -gamma0 * np.exp(-r2/2.) * (2.*x*y)/r0**2
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, x_units='arcmin', y_units='arcmin')
gg = treecorr.GGCorrelation(bin_size=0.1, min_sep=1., max_sep=100., sep_units='arcmin',
verbose=1)
gg.process(cat)
# log(<R>) != <logR>, but it should be close:
print('meanlogr - log(meanr) = ',gg.meanlogr - np.log(gg.meanr))
np.testing.assert_allclose(gg.meanlogr, np.log(gg.meanr), atol=1.e-3)
r = gg.meanr
temp = np.pi/16. * gamma0**2 * (r0/L)**2 * np.exp(-0.25*r**2/r0**2)
true_xip = temp * (r**4 - 16.*r**2*r0**2 + 32.*r0**4)/r0**4
true_xim = temp * r**4/r0**4
print('gg.xip = ',gg.xip)
print('true_xip = ',true_xip)
print('ratio = ',gg.xip / true_xip)
print('diff = ',gg.xip - true_xip)
print('max diff = ',max(abs(gg.xip - true_xip)))
# It's within 10% everywhere except at the zero crossings.
np.testing.assert_allclose(gg.xip, true_xip, rtol=0.1 * tol_factor, atol=1.e-7 * tol_factor)
print('xip_im = ',gg.xip_im)
np.testing.assert_allclose(gg.xip_im, 0, atol=2.e-7 * tol_factor)
print('gg.xim = ',gg.xim)
print('true_xim = ',true_xim)
print('ratio = ',gg.xim / true_xim)
print('diff = ',gg.xim - true_xim)
print('max diff = ',max(abs(gg.xim - true_xim)))
np.testing.assert_allclose(gg.xim, true_xim, rtol=0.1 * tol_factor, atol=2.e-7 * tol_factor)
print('xim_im = ',gg.xim_im)
np.testing.assert_allclose(gg.xim_im, 0, atol=1.e-7 * tol_factor)
# Should also work as a cross-correlation with itself
gg.process(cat,cat)
np.testing.assert_allclose(gg.meanlogr, np.log(gg.meanr), atol=1.e-3)
assert max(abs(gg.xip - true_xip)) < 3.e-7 * tol_factor
assert max(abs(gg.xip_im)) < 2.e-7 * tol_factor
assert max(abs(gg.xim - true_xim)) < 3.e-7 * tol_factor
assert max(abs(gg.xim_im)) < 1.e-7 * tol_factor
# We check the accuracy of the MapSq calculation below in test_mapsq.
# Here we just check that it runs, round trips correctly through an output file,
# and gives the same answer when run through corr2.
mapsq, mapsq_im, mxsq, mxsq_im, varmapsq = gg.calculateMapSq()
print('mapsq = ',mapsq)
print('mxsq = ',mxsq)
mapsq_file = 'output/gg_m2.txt'
gg.writeMapSq(mapsq_file, precision=16)
data = np.genfromtxt(os.path.join('output','gg_m2.txt'), names=True)
np.testing.assert_allclose(data['Mapsq'], mapsq)
np.testing.assert_allclose(data['Mxsq'], mxsq)
# Check that we get the same result using the corr2 function:
cat.write(os.path.join('data','gg.dat'))
config = treecorr.read_config('configs/gg.yaml')
config['verbose'] = 0
config['precision'] = 8
treecorr.corr2(config)
corr2_output = np.genfromtxt(os.path.join('output','gg.out'), names=True, skip_header=1)
print('gg.xip = ',gg.xip)
print('from corr2 output = ',corr2_output['xip'])
print('ratio = ',corr2_output['xip']/gg.xip)
print('diff = ',corr2_output['xip']-gg.xip)
np.testing.assert_allclose(corr2_output['xip'], gg.xip, rtol=1.e-4)
print('gg.xim = ',gg.xim)
print('from corr2 output = ',corr2_output['xim'])
print('ratio = ',corr2_output['xim']/gg.xim)
print('diff = ',corr2_output['xim']-gg.xim)
np.testing.assert_allclose(corr2_output['xim'], gg.xim, rtol=1.e-4)
print('xip_im from corr2 output = ',corr2_output['xip_im'])
print('max err = ',max(abs(corr2_output['xip_im'])))
np.testing.assert_allclose(corr2_output['xip_im'], 0, atol=2.e-7 * tol_factor)
print('xim_im from corr2 output = ',corr2_output['xim_im'])
print('max err = ',max(abs(corr2_output['xim_im'])))
np.testing.assert_allclose(corr2_output['xim_im'], 0, atol=2.e-7 * tol_factor)
# Check m2 output
corr2_output2 = np.genfromtxt(os.path.join('output','gg_m2.out'), names=True)
print('mapsq = ',mapsq)
print('from corr2 output = ',corr2_output2['Mapsq'])
print('ratio = ',corr2_output2['Mapsq']/mapsq)
print('diff = ',corr2_output2['Mapsq']-mapsq)
np.testing.assert_allclose(corr2_output2['Mapsq'], mapsq, rtol=1.e-4)
print('mxsq = ',mxsq)
print('from corr2 output = ',corr2_output2['Mxsq'])
print('ratio = ',corr2_output2['Mxsq']/mxsq)
print('diff = ',corr2_output2['Mxsq']-mxsq)
np.testing.assert_allclose(corr2_output2['Mxsq'], mxsq, rtol=1.e-4)
# OK to have m2 output, but not gg
del config['gg_file_name']
treecorr.corr2(config)
corr2_output2 = np.genfromtxt(os.path.join('output','gg_m2.out'), names=True)
np.testing.assert_allclose(corr2_output2['Mapsq'], mapsq, rtol=1.e-4)
np.testing.assert_allclose(corr2_output2['Mxsq'], mxsq, rtol=1.e-4)
try:
import fitsio
except ImportError:
print('Skipping FITS tests, since fitsio is not installed')
return
# Check the fits write option
out_file_name = os.path.join('output','gg_out.fits')
gg.write(out_file_name)
data = fitsio.read(out_file_name)
np.testing.assert_allclose(data['r_nom'], np.exp(gg.logr))
np.testing.assert_allclose(data['meanr'], gg.meanr)
np.testing.assert_allclose(data['meanlogr'], gg.meanlogr)
np.testing.assert_allclose(data['xip'], gg.xip)
np.testing.assert_allclose(data['xim'], gg.xim)
np.testing.assert_allclose(data['xip_im'], gg.xip_im)
np.testing.assert_allclose(data['xim_im'], gg.xim_im)
np.testing.assert_allclose(data['sigma_xip'], np.sqrt(gg.varxip))
np.testing.assert_allclose(data['sigma_xim'], np.sqrt(gg.varxim))
np.testing.assert_allclose(data['weight'], gg.weight)
np.testing.assert_allclose(data['npairs'], gg.npairs)
# Check the read function
gg2 = treecorr.GGCorrelation(bin_size=0.1, min_sep=1., max_sep=100., sep_units='arcmin')
gg2.read(out_file_name)
np.testing.assert_allclose(gg2.logr, gg.logr)
np.testing.assert_allclose(gg2.meanr, gg.meanr)
np.testing.assert_allclose(gg2.meanlogr, gg.meanlogr)
np.testing.assert_allclose(gg2.xip, gg.xip)
np.testing.assert_allclose(gg2.xim, gg.xim)
np.testing.assert_allclose(gg2.xip_im, gg.xip_im)
np.testing.assert_allclose(gg2.xim_im, gg.xim_im)
np.testing.assert_allclose(gg2.varxip, gg.varxip)
np.testing.assert_allclose(gg2.varxim, gg.varxim)
np.testing.assert_allclose(gg2.weight, gg.weight)
np.testing.assert_allclose(gg2.npairs, gg.npairs)
assert gg2.coords == gg.coords
assert gg2.metric == gg.metric
assert gg2.sep_units == gg.sep_units
assert gg2.bin_type == gg.bin_type
# Also check the Schneider version.
mapsq, mapsq_im, mxsq, mxsq_im, varmapsq = gg.calculateMapSq(m2_uform='Schneider')
print('Schneider mapsq = ',mapsq)
print('mxsq = ',mxsq)
print('max = ',max(abs(mxsq)))
# And GamSq.
gamsq, vargamsq = gg.calculateGamSq()
print('gamsq = ',gamsq)
gamsq, vargamsq, gamsq_e, gamsq_b, vargamsq_eb = gg.calculateGamSq(eb=True)
print('gamsq_e = ',gamsq_e)
print('gamsq_b = ',gamsq_b)
# The Gamsq columns were already output in the above m2_output run of corr2.
np.testing.assert_allclose(corr2_output2['Gamsq'], gamsq, rtol=1.e-4)
@timer
def test_mapsq():
# Use the same gamma(r) as in test_gg.
# This time, rather than use a smaller catalog in the nosetests run, we skip the run
# in that case and just read in the output file. This way we can test the Map^2 formulae
# on the more precise output.
# When running from the command line, the output file is made from scratch.
gamma0 = 0.05
r0 = 10.
L = 50.*r0
cat_name = os.path.join('data','gg_map.dat')
out_name = os.path.join('data','gg_map.out')
gg = treecorr.GGCorrelation(bin_size=0.1, min_sep=1, nbins=47, sep_units='arcmin',
verbose=1)
if __name__ == "__main__":
ngal = 1000000
rng = np.random.RandomState(8675309)
x = (rng.random_sample(ngal)-0.5) * L
y = (rng.random_sample(ngal)-0.5) * L
r2 = (x**2 + y**2)/r0**2
g1 = -gamma0 * np.exp(-r2/2.) * (x**2-y**2)/r0**2
g2 = -gamma0 * np.exp(-r2/2.) * (2.*x*y)/r0**2
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, x_units='arcmin', y_units='arcmin')
cat.write(cat_name)
gg.process(cat)
gg.write(out_name, precision=16)
else:
gg.read(out_name)
# Check MapSq calculation:
# cf. http://adsabs.harvard.edu/abs/2004MNRAS.352..338J
# Use Crittenden formulation, since the analytic result is simpler:
# Map^2(R) = int 1/2 r/R^2 (T+(r/R) xi+(r) + T-(r/R) xi-(r)) dr
# = 6 pi gamma0^2 r0^8 R^4 / (L^2 (r0^2+R^2)^5)
# Mx^2(R) = int 1/2 r/R^2 (T+(r/R) xi+(r) - T-(r/R) xi-(r)) dr
# = 0
# where T+(s) = (s^4-16s^2+32)/128 exp(-s^2/4)
# T-(s) = s^4/128 exp(-s^2/4)
#
# Note: Another way to calculate this, which will turn out to be helpful when we do the
# Map^3 calculation in test_ggg.py is as follows:
# Map(u,v) = int( g(x,y) * ((u-x) -I(v-y))^2 / ((u-x)^2 + (v-y)^2) * Q(u-x, v-y) )
# = 1/2 gamma0 r0^4 R^2 / (R^2+r0^2)^5 x
# ((u^2+v^2)^2 - 8 (u^2+v^2) (R^2+r0^2) + 8 (R^2+r0^2)^2) x
# exp(-1/2 (u^2+v^2) / (R^2+r0^2))
# Then, you can directly compute <Map^2>:
# <Map^2> = int(Map(u,v)^2, u=-inf..inf, v=-inf..inf) / L^2
# = 6 pi gamma0^2 r0^8 R^4 / (r0^2+R^2)^5 / L^2 (i.e. the same answer as above.)
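# (A small standalone numerical cross-check of this integral is sketched right after this
# file's listing below.)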
r = gg.meanr
true_mapsq = 6.*np.pi * gamma0**2 * r0**8 * r**4 / (L**2 * (r**2+r0**2)**5)
mapsq, mapsq_im, mxsq, mxsq_im, varmapsq = gg.calculateMapSq()
print('mapsq = ',mapsq)
print('true_mapsq = ',true_mapsq)
print('ratio = ',mapsq/true_mapsq)
print('diff = ',mapsq-true_mapsq)
print('max diff = ',max(abs(mapsq - true_mapsq)))
print('max diff[16:] = ',max(abs(mapsq[16:] - true_mapsq[16:])))
# It's pretty ratty near the start where the integral is poorly evaluated, but the
# agreement is pretty good if we skip the first 16 elements.
# Well, it gets bad again at the end, but those values are small enough that they still
# pass this test.
np.testing.assert_allclose(mapsq[16:], true_mapsq[16:], rtol=0.1, atol=1.e-9)
print('mxsq = ',mxsq)
print('max = ',max(abs(mxsq)))
print('max[16:] = ',max(abs(mxsq[16:])))
np.testing.assert_allclose(mxsq[16:], 0., atol=3.e-8)
mapsq_file = 'output/gg_m2.txt'
gg.writeMapSq(mapsq_file, precision=16)
data = np.genfromtxt(os.path.join('output','gg_m2.txt'), names=True)
np.testing.assert_allclose(data['Mapsq'], mapsq)
np.testing.assert_allclose(data['Mxsq'], mxsq)
# Check providing a specific range of R values
# (We provide the range where the results worked out well above.)
R = gg.rnom[16::2]
print('R = ',R)
mapsq, mapsq_im, mxsq, mxsq_im, varmapsq = gg.calculateMapSq(R)
true_mapsq = true_mapsq[16::2]
print('mapsq = ',mapsq)
print('true_mapsq = ',true_mapsq)
print('ratio = ',mapsq/true_mapsq)
print('diff = ',mapsq-true_mapsq)
print('max diff = ',max(abs(mapsq - true_mapsq)))
np.testing.assert_allclose(mapsq, true_mapsq, rtol=0.1, atol=1.e-9)
print('mxsq = ',mxsq)
print('max = ',max(abs(mxsq)))
np.testing.assert_allclose(mxsq, 0., atol=3.e-8)
mapsq_file = 'output/gg_m2b.txt'
gg.writeMapSq(mapsq_file, R=R, precision=16)
data = np.genfromtxt(mapsq_file, names=True)
np.testing.assert_allclose(data['Mapsq'], mapsq)
np.testing.assert_allclose(data['Mxsq'], mxsq)
# Also check the Schneider version. The math isn't quite as nice here, but it is tractable
# using a different formula than I used above:
# Map^2(R) = int k P(k) W(kR) dk
# = 576 pi gamma0^2 r0^6/(L^2 R^4) exp(-R^2/2r0^2) I4(R^2/2r0^2)
# where I4 is the modified Bessel function with nu=4.
try:
from scipy.special import iv, jv
except ImportError:
# Don't require scipy if the user doesn't have it.
print('Skipping tests of Schneider aperture mass, since scipy.special not available.')
return
x = 0.5*r**2/r0**2
true_mapsq = 144.*np.pi * gamma0**2 * r0**2 / (L**2 * x**2) * np.exp(-x) * iv(4,x)
mapsq, mapsq_im, mxsq, mxsq_im, varmapsq = gg.calculateMapSq(m2_uform='Schneider')
print('Schneider mapsq = ',mapsq)
print('true_mapsq = ',true_mapsq)
print('ratio = ',mapsq/true_mapsq)
print('diff = ',mapsq-true_mapsq)
print('max diff = ',max(abs(mapsq - true_mapsq)))
print('max diff[26:] = ',max(abs(mapsq[26:] - true_mapsq[26:])))
# This one stays ratty longer, so we need to skip the first 26.
np.testing.assert_allclose(mapsq[26:], true_mapsq[26:], rtol=0.1, atol=1.e-9)
print('mxsq = ',mxsq)
print('max = ',max(abs(mxsq)))
print('max[26:] = ',max(abs(mxsq[26:])))
np.testing.assert_allclose(mxsq[26:], 0, atol=3.e-8)
# Finally, check the <gamma^2>(R) calculation.
# Gam^2(R) = int k P(k) Wth(kR) dk
# = 2pi gamma0^2 (r0/L)^2 exp(-r^2/2r0^2) *
# (BesselI(0, r^2/2r0^2) - BesselI(1, r^2/2r0^2))
x = 0.5*r**2/r0**2
true_gamsq = 2.*np.pi*gamma0**2 * r0**2 / L**2 * np.exp(-x) * (iv(0,x) - iv(1,x))
gamsq, vargamsq = gg.calculateGamSq()
print('gamsq = ',gamsq)
print('true_gamsq = ',true_gamsq)
print('ratio = ',gamsq/true_gamsq)
print('diff = ',gamsq-true_gamsq)
print('max diff = ',max(abs(gamsq - true_gamsq)))
print('max rel diff[12:33] = ',max(abs((gamsq[12:33] - true_gamsq[12:33])/true_gamsq[12:33])))
# This is only close in a narrow range of scales
np.testing.assert_allclose(gamsq[12:33], true_gamsq[12:33], rtol=0.1)
# Everywhere else it is less (since integral misses unmeasured power at both ends).
np.testing.assert_array_less(gamsq, true_gamsq)
# With E/B decomposition, it's ok over a larger range of scales.
gamsq, vargamsq, gamsq_e, gamsq_b, vargamsq_eb = gg.calculateGamSq(eb=True)
print('gamsq_e = ',gamsq_e)
print('true_gamsq = ',true_gamsq)
print('ratio = ',gamsq_e/true_gamsq)
print('diff = ',gamsq_e-true_gamsq)
print('max diff = ',max(abs(gamsq_e - true_gamsq)))
print('rel diff[6:41] = ',(gamsq_e[6:41] - true_gamsq[6:41])/true_gamsq[6:41])
print('max rel diff[6:41] = ',max(abs((gamsq_e[6:41] - true_gamsq[6:41])/true_gamsq[6:41])))
# This is only close in a narrow range of scales
np.testing.assert_allclose(gamsq_e[6:41], true_gamsq[6:41], rtol=0.1)
print('gamsq_b = ',gamsq_b)
np.testing.assert_allclose(gamsq_b[6:41], 0, atol=1.e-6)
# Check providing a specific range of R values
# (We provide the range where the results worked out well above.)
R = gg.rnom[6:40:4]
print('R = ',R)
gamsq, vargamsq, gamsq_e, gamsq_b, vargamsq_eb = gg.calculateGamSq(R, eb=True)
true_gamsq = true_gamsq[6:40:4]
print('gamsq_e = ',gamsq_e)
print('true_gamsq = ',true_gamsq)
print('ratio = ',gamsq_e/true_gamsq)
print('diff = ',gamsq_e-true_gamsq)
print('max diff = ',max(abs(gamsq_e - true_gamsq)))
np.testing.assert_allclose(gamsq_e, true_gamsq, rtol=0.1)
print('gamsq_b = ',gamsq_b)
np.testing.assert_allclose(gamsq_b, 0, atol=1.e-6)
# Not valid with TwoD or Linear binning
gg2 = treecorr.GGCorrelation(bin_size=0.1, min_sep=1., max_sep=100., sep_units='arcmin',
bin_type='Linear')
with assert_raises(ValueError):
gg2.calculateMapSq()
with assert_raises(ValueError):
gg2.calculateGamSq()
gg3 = treecorr.GGCorrelation(bin_size=0.1, min_sep=1., max_sep=100., sep_units='arcmin',
bin_type='TwoD')
with assert_raises(ValueError):
gg3.calculateMapSq()
with assert_raises(ValueError):
gg3.calculateGamSq()
@timer
def test_spherical():
# This is the same field we used for test_gg, but put into spherical coords.
# We do the spherical trig by hand using the obvious formulae, rather than the clever
# optimizations that are used by the TreeCorr code, thus serving as a useful test of
# the latter.
gamma0 = 0.05
r0 = 10. * coord.arcmin / coord.radians
if __name__ == "__main__":
nsource = 1000000
L = 50.*r0 # Not infinity, so this introduces some error. Our integrals were to infinity.
tol_factor = 1
else:
nsource = 100000
L = 50.*r0
tol_factor = 5
rng = np.random.RandomState(8675309)
x = (rng.random_sample(nsource)-0.5) * L
y = (rng.random_sample(nsource)-0.5) * L
r2 = x**2 + y**2
g1 = -gamma0 * np.exp(-r2/2./r0**2) * (x**2-y**2)/r0**2
g2 = -gamma0 * np.exp(-r2/2./r0**2) * (2.*x*y)/r0**2
r = np.sqrt(r2)
theta = arctan2(y,x)
gg = treecorr.GGCorrelation(bin_size=0.1, min_sep=1., max_sep=100., sep_units='arcmin',
verbose=1)
r1 = np.exp(gg.logr) * (coord.arcmin / coord.radians)
temp = np.pi/16. * gamma0**2 * (r0/L)**2 * np.exp(-0.25*r1**2/r0**2)
true_xip = temp * (r1**4 - 16.*r1**2*r0**2 + 32.*r0**4)/r0**4
true_xim = temp * r1**4/r0**4
# Test this around several central points
if __name__ == '__main__':
ra0_list = [ 0., 1., 1.3, 232., 0. ]
dec0_list = [ 0., -0.3, 1.3, -1.4, pi/2.-1.e-6 ]
else:
ra0_list = [ 232.]
dec0_list = [ -1.4 ]
for ra0, dec0 in zip(ra0_list, dec0_list):
# Use spherical triangle with A = point, B = (ra0,dec0), C = N. pole
# a = Pi/2-dec0
# c = 2*asin(r/2) (lambert projection)
# B = Pi/2 - theta
c = 2.*arcsin(r/2.)
a = pi/2. - dec0
B = pi/2. - theta
B[x<0] *= -1.
B[B<-pi] += 2.*pi
B[B>pi] -= 2.*pi
# Solve the rest of the triangle with spherical trig:
cosb = cos(a)*cos(c) + sin(a)*sin(c)*cos(B)
b = arccos(cosb)
cosA = (cos(a) - cos(b)*cos(c)) / (sin(b)*sin(c))
#A = arccos(cosA)
A = np.zeros_like(cosA)
A[abs(cosA)<1] = arccos(cosA[abs(cosA)<1])
A[cosA<=-1] = pi
cosC = (cos(c) - cos(a)*cos(b)) / (sin(a)*sin(b))
#C = arccos(cosC)
C = np.zeros_like(cosC)
C[abs(cosC)<1] = arccos(cosC[abs(cosC)<1])
C[cosC<=-1] = pi
C[x<0] *= -1.
ra = ra0 - C
dec = pi/2. - b
# Rotate shear relative to local west
# gamma_sph = exp(2i beta) * gamma
# where beta = pi - (A+B) is the angle between north and "up" in the tangent plane.
beta = pi - (A+B)
beta[x>0] *= -1.
cos2beta = cos(2.*beta)
sin2beta = sin(2.*beta)
g1_sph = g1 * cos2beta - g2 * sin2beta
g2_sph = g2 * cos2beta + g1 * sin2beta
cat = treecorr.Catalog(ra=ra, dec=dec, g1=g1_sph, g2=g2_sph, ra_units='rad',
dec_units='rad')
gg = treecorr.GGCorrelation(bin_size=0.1, min_sep=1., max_sep=100., sep_units='arcmin',
verbose=1)
gg.process(cat)
print('ra0, dec0 = ',ra0,dec0)
print('gg.xip = ',gg.xip)
print('true_xip = ',true_xip)
print('ratio = ',gg.xip / true_xip)
print('diff = ',gg.xip - true_xip)
print('max diff = ',max(abs(gg.xip - true_xip)))
# The 3rd and 4th centers are somewhat less accurate. Not sure why.
# The math seems to be right, since the last one that gets all the way to the pole
# works, so I'm not sure what is going on. It's just a few bins that get a bit less
# accurate. Possibly worth investigating further at some point...
assert max(abs(gg.xip - true_xip)) < 3.e-7 * tol_factor
print('gg.xim = ',gg.xim)
print('true_xim = ',true_xim)
print('ratio = ',gg.xim / true_xim)
print('diff = ',gg.xim - true_xim)
print('max diff = ',max(abs(gg.xim - true_xim)))
assert max(abs(gg.xim - true_xim)) < 2.e-7 * tol_factor
# One more center that can be done very easily. If the center is the north pole, then all
# the tangential shears are pure (positive) g1.
ra0 = 0
dec0 = pi/2.
ra = theta
dec = pi/2. - 2.*arcsin(r/2.)
gammat = -gamma0 * r2/r0**2 * np.exp(-r2/2./r0**2)
cat = treecorr.Catalog(ra=ra, dec=dec, g1=gammat, g2=np.zeros_like(gammat), ra_units='rad',
dec_units='rad')
gg.process(cat)
print('gg.xip = ',gg.xip)
print('gg.xip_im = ',gg.xip_im)
print('true_xip = ',true_xip)
print('ratio = ',gg.xip / true_xip)
print('diff = ',gg.xip - true_xip)
print('max diff = ',max(abs(gg.xip - true_xip)))
assert max(abs(gg.xip - true_xip)) < 3.e-7 * tol_factor
assert max(abs(gg.xip_im)) < 3.e-7 * tol_factor
print('gg.xim = ',gg.xim)
print('gg.xim_im = ',gg.xim_im)
print('true_xim = ',true_xim)
print('ratio = ',gg.xim / true_xim)
print('diff = ',gg.xim - true_xim)
print('max diff = ',max(abs(gg.xim - true_xim)))
assert max(abs(gg.xim - true_xim)) < 2.e-7 * tol_factor
assert max(abs(gg.xim_im)) < 2.e-7 * tol_factor
# Check that we get the same result using the corr2 function
cat.write(os.path.join('data','gg_spherical.dat'))
config = treecorr.read_config('configs/gg_spherical.yaml')
config['verbose'] = 0
treecorr.corr2(config)
corr2_output = np.genfromtxt(os.path.join('output','gg_spherical.out'), names=True,
skip_header=1)
print('gg.xip = ',gg.xip)
print('from corr2 output = ',corr2_output['xip'])
print('ratio = ',corr2_output['xip']/gg.xip)
print('diff = ',corr2_output['xip']-gg.xip)
np.testing.assert_allclose(corr2_output['xip'], gg.xip, rtol=1.e-3)
print('gg.xim = ',gg.xim)
print('from corr2 output = ',corr2_output['xim'])
print('ratio = ',corr2_output['xim']/gg.xim)
print('diff = ',corr2_output['xim']-gg.xim)
np.testing.assert_allclose(corr2_output['xim'], gg.xim, rtol=1.e-3)
print('xip_im from corr2 output = ',corr2_output['xip_im'])
assert max(abs(corr2_output['xip_im'])) < 3.e-7 * tol_factor
print('xim_im from corr2 output = ',corr2_output['xim_im'])
assert max(abs(corr2_output['xim_im'])) < 2.e-7 * tol_factor
@timer
def test_aardvark():
try:
import fitsio
except ImportError:
print('Skipping Aardvark test, since fitsio is not installed')
return
# <NAME> did a brute force calculation of the Aardvark catalog, so it is useful to
# compare the output from my code with that.
get_from_wiki('Aardvark.fit')
file_name = os.path.join('data','Aardvark.fit')
config = treecorr.read_config('Aardvark.yaml')
config['verbose'] = 1
cat1 = treecorr.Catalog(file_name, config)
gg = treecorr.GGCorrelation(config)
gg.process(cat1)
direct_file_name = os.path.join('data','Aardvark.direct')
direct_data = np.genfromtxt(direct_file_name)
direct_xip = direct_data[:,3]
direct_xim = direct_data[:,4]
#print('gg.xip = ',gg.xip)
#print('direct.xip = ',direct_xip)
xip_err = gg.xip - direct_xip
print('xip_err = ',xip_err)
print('max = ',max(abs(xip_err)))
assert max(abs(xip_err)) < 2.e-7
print('xip_im = ',gg.xip_im)
print('max = ',max(abs(gg.xip_im)))
assert max(abs(gg.xip_im)) < 3.e-7
xim_err = gg.xim - direct_xim
print('xim_err = ',xim_err)
print('max = ',max(abs(xim_err)))
assert max(abs(xim_err)) < 1.e-7
print('xim_im = ',gg.xim_im)
print('max = ',max(abs(gg.xim_im)))
assert max(abs(gg.xim_im)) < 1.e-7
# However, after some back and forth about the calculation, we concluded that Eric hadn't
# done the spherical trig correctly to get the shears relative to the great circle joining
# the two positions. So let's compare with my own brute force calculation.
# This also has the advantage that the radial bins are done the same way -- uniformly
# spaced in log of the chord distance, rather than the great circle distance.
bs0_file_name = os.path.join('data','Aardvark.bs0')
bs0_data = np.genfromtxt(bs0_file_name)
bs0_xip = bs0_data[:,2]
bs0_xim = bs0_data[:,3]
#print('gg.xip = ',gg.xip)
#print('bs0.xip = ',bs0_xip)
xip_err = gg.xip - bs0_xip
print('xip_err = ',xip_err)
print('max = ',max(abs(xip_err)))
assert max(abs(xip_err)) < 1.e-7
xim_err = gg.xim - bs0_xim
print('xim_err = ',xim_err)
print('max = ',max(abs(xim_err)))
assert max(abs(xim_err)) < 5.e-8
# Check that we get the same result using the corr2 function
# There's nothing new here coverage-wise, so only do this when running from command line.
if __name__ == '__main__':
treecorr.corr2(config)
corr2_output = np.genfromtxt(os.path.join('output','Aardvark.out'), names=True,
skip_header=1)
print('gg.xip = ',gg.xip)
print('from corr2 output = ',corr2_output['xip'])
print('ratio = ',corr2_output['xip']/gg.xip)
print('diff = ',corr2_output['xip']-gg.xip)
np.testing.assert_allclose(corr2_output['xip'], gg.xip, rtol=1.e-3)
print('gg.xim = ',gg.xim)
print('from corr2 output = ',corr2_output['xim'])
print('ratio = ',corr2_output['xim']/gg.xim)
print('diff = ',corr2_output['xim']-gg.xim)
np.testing.assert_allclose(corr2_output['xim'], gg.xim, rtol=1.e-3)
print('xip_im from corr2 output = ',corr2_output['xip_im'])
print('max err = ',max(abs(corr2_output['xip_im'])))
assert max(abs(corr2_output['xip_im'])) < 3.e-7
print('xim_im from corr2 output = ',corr2_output['xim_im'])
print('max err = ',max(abs(corr2_output['xim_im'])))
assert max(abs(corr2_output['xim_im'])) < 1.e-7
# As bin_slop decreases, the agreement should get even better.
# This test is slow, so only do it if running test_gg.py directly.
if __name__ == '__main__':
config['bin_slop'] = 0.2
gg = treecorr.GGCorrelation(config)
gg.process(cat1)
#print('gg.xip = ',gg.xip)
#print('bs0.xip = ',bs0_xip)
xip_err = gg.xip - bs0_xip
print('xip_err = ',xip_err)
print('max = ',max(abs(xip_err)))
assert max(abs(xip_err)) < 2.e-8
xim_err = gg.xim - bs0_xim
print('xim_err = ',xim_err)
print('max = ',max(abs(xim_err)))
assert max(abs(xim_err)) < 3.e-8
@timer
def test_shuffle():
# Check that the code is insensitive to shuffling the input data vectors.
# Might as well use the same function as above, although I reduce L a bit.
ngal = 10000
gamma0 = 0.05
r0 = 10.
L = 5. * r0
rng = np.random.RandomState(8675309)
x = (rng.random_sample(ngal)-0.5) * L
y = (rng.random_sample(ngal)-0.5) * L
r2 = (x**2 + y**2)/r0**2
g1 = -gamma0 * np.exp(-r2/2.) * (x**2-y**2)/r0**2
g2 = -gamma0 * np.exp(-r2/2.) * (2.*x*y)/r0**2
cat_u = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
gg_u = treecorr.GGCorrelation(bin_size=0.1, min_sep=1., max_sep=30., verbose=1)
gg_u.process(cat_u)
# Put these in a single 2d array so we can easily use np.random.shuffle
data = np.array( [x, y, g1, g2] ).T
print('data = ',data)
rng.shuffle(data)
cat_s = treecorr.Catalog(x=data[:,0], y=data[:,1], g1=data[:,2], g2=data[:,3])
gg_s = treecorr.GGCorrelation(bin_size=0.1, min_sep=1., max_sep=30., verbose=1)
gg_s.process(cat_s)
print('gg_u.xip = ',gg_u.xip)
print('gg_s.xip = ',gg_s.xip)
print('ratio = ',gg_u.xip / gg_s.xip)
print('diff = ',gg_u.xip - gg_s.xip)
print('max diff = ',max(abs(gg_u.xip - gg_s.xip)))
assert max(abs(gg_u.xip - gg_s.xip)) < 1.e-14
@timer
def test_haloellip():
"""Test that the constant and quadrupole versions of the Clampitt halo ellipticity calculation
are equivalent to xi+ and xi- (respectively) of the shear-shear cross correlation, where
the halo ellipticities are normalized to |g_lens|=1.
Joseph's original formulation: (cf. Issue #36, although I correct what I believe is an error
in his gamma_Qx formula.)
gamma_Q = Sum_i (w_i * g1_i * cos(4theta) + w_i * g2_i * sin(4theta)) / Sum_i (w_i)
gamma_C = Sum_i (w_i * g1_i) / Sum_i (w_i)
gamma_Qx = Sum_i (w_i * g2_i * cos(4theta) - w_i * g1_i * sin(4theta)) / Sum_i (w_i)
gamma_Cx = Sum_i (w_i * g2_i) / Sum_i (w_i)
where g1,g2 and theta are measured w.r.t. the coordinate system where the halo ellipticity
is along the x-axis. Converting this to complex notation, we obtain:
gamma_C + i gamma_Cx = < g1 + i g2 >
= < gobs exp(-2iphi) >
= < gobs elens* >
gamma_Q + i gamma_Qx = < (g1 + i g2) (cos(4t) - i sin(4t)) >
= < gobs exp(-2iphi) exp(-4itheta) >
= < gobs exp(2iphi) exp(-4i(theta+phi)) >
= < gobs elens exp(-4i(theta+phi)) >
where gobs is the observed shape of the source in the normal world coordinate system, and
elens = exp(2iphi) is the unit-normalized shape of the lens in that same coordinate system.
Note that the combination theta+phi is the angle between the line joining the two points
and the E-W coordinate, which means that
gamma_C + i gamma_Cx = xi+(elens, gobs)
gamma_Q + i gamma_Qx = xi-(elens, gobs)
We test this result here using the above formulation with both unit weights and weights
proportional to the halo ellipticity. We also try keeping the magnitude of elens rather
than normalizing it.
"""
if __name__ == '__main__':
# It's hard to get enough sources/lenses to get very high precision on these tests.
# We settle on numbers that lead to 3% accuracy. Increasing nlens and nsource
# would lead to higher accuracy.
nlens = 1000
nsource = 10000 # sources per lens
tol = 3.e-2
else:
# For nosetests runs, use 10x fewer lenses and 2x larger tolerance
nlens = 100
nsource = 10000
tol = 6.e-2
ntot = nsource * nlens
L = 100000. # The side length in which the lenses are placed
R = 10. # The (rms) radius of the associated sources from the lenses
# In this case, we want L >> R so that most sources are only associated
# with the one lens we used for assigning its shear value.
# Lenses are randomly located with random shapes.
rng = np.random.RandomState(8675309)
lens_g1 = rng.normal(0., 0.1, (nlens,))
lens_g2 = rng.normal(0., 0.1, (nlens,))
lens_g = lens_g1 + 1j * lens_g2
lens_absg = np.abs(lens_g)
lens_x = (rng.random_sample(nlens)-0.5) * L
lens_y = (rng.random_sample(nlens)-0.5) * L
print('Made lenses')
e_a = 0.17 # The amplitude of the constant part of the signal
e_b = 0.23 # The amplitude of the quadrupole part of the signal
source_g1 = np.empty(ntot)
source_g2 = np.empty(ntot)
source_x = np.empty(ntot)
source_y = np.empty(ntot)
# For the sources, place nsource galaxies around each lens with the expected azimuthal pattern
# I just use a constant |g| for the amplitude, not a real radial pattern.
for i in range(nlens):
# First build the signal as it appears in the coordinate system where the halo
# is oriented along the x-axis
dx = rng.normal(0., 10., (nsource,))
dy = rng.normal(0., 10., (nsource,))
z = dx + 1j * dy
exp2iphi = z**2 / np.abs(z)**2
source_g = e_a + e_b * exp2iphi**2
# Now rotate the whole system by the phase of the lens ellipticity.
exp2ialpha = lens_g[i] / lens_absg[i]
expialpha = np.sqrt(exp2ialpha)
source_g *= exp2ialpha
z *= expialpha
# Also scale the signal by |lens_g|
source_g *= lens_absg[i]
# Place the source galaxies at this dx,dy with this shape
source_x[i*nsource: (i+1)*nsource] = lens_x[i] + z.real
source_y[i*nsource: (i+1)*nsource] = lens_y[i] + z.imag
source_g1[i*nsource: (i+1)*nsource] = source_g.real
source_g2[i*nsource: (i+1)*nsource] = source_g.imag
print('Made sources')
source_cat = treecorr.Catalog(x=source_x, y=source_y, g1=source_g1, g2=source_g2)
gg = treecorr.GGCorrelation(min_sep=1, bin_size=0.1, nbins=35)
lens_mean_absg = np.mean(lens_absg)
print('mean_absg = ',lens_mean_absg)
# First the original version where we only use the phase of the lens ellipticities:
lens_cat1 = treecorr.Catalog(x=lens_x, y=lens_y, g1=lens_g1/lens_absg, g2=lens_g2/lens_absg)
gg.process(lens_cat1, source_cat)
print('gg.xim = ',gg.xim)
# The net signal here is just <absg> * e_b
print('expected signal = ',e_b * lens_mean_absg)
np.testing.assert_allclose(gg.xim, e_b * lens_mean_absg, rtol=tol)
print('gg.xip = ',gg.xip)
print('expected signal = ',e_a * lens_mean_absg)
np.testing.assert_allclose(gg.xip, e_a * lens_mean_absg, rtol=tol)
# Next weight the lenses by their absg.
lens_cat2 = treecorr.Catalog(x=lens_x, y=lens_y, g1=lens_g1/lens_absg, g2=lens_g2/lens_absg,
w=lens_absg)
gg.process(lens_cat2, source_cat)
print('gg.xim = ',gg.xim)
# Now the net signal is
# sum(w * e_b*absg[i]) / sum(w)
# = sum(absg[i]^2 * e_b) / sum(absg[i])
# = <absg^2> * e_b / <absg>
lens_mean_gsq = np.mean(lens_absg**2)
print('expected signal = ',e_b * lens_mean_gsq / lens_mean_absg)
np.testing.assert_allclose(gg.xim, e_b * lens_mean_gsq / lens_mean_absg, rtol=tol)
print('gg.xip = ',gg.xip)
print('expected signal = ',e_a * lens_mean_gsq / lens_mean_absg)
np.testing.assert_allclose(gg.xip, e_a * lens_mean_gsq / lens_mean_absg, rtol=tol)
# Finally, use the unnormalized lens_g for the lens ellipticities
lens_cat3 = treecorr.Catalog(x=lens_x, y=lens_y, g1=lens_g1, g2=lens_g2)
gg.process(lens_cat3, source_cat)
print('gg.xim = ',gg.xim)
# Now the net signal is
# sum(absg[i] * e_b*absg[i]) / N
# = sum(absg[i]^2 * e_b) / N
# = <absg^2> * e_b
print('expected signal = ',e_b * lens_mean_gsq)
# This one is slightly less accurate. But easily passes at 4% accuracy.
np.testing.assert_allclose(gg.xim, e_b * lens_mean_gsq, rtol=tol*1.5)
print('gg.xip = ',gg.xip)
print('expected signal = ',e_a * lens_mean_gsq)
np.testing.assert_allclose(gg.xip, e_a * lens_mean_gsq, rtol=tol*1.5)
# It's worth noting that exactly half the signal is in each of g1, g2, so for things
# like SDSS, you can use only g2, for instance, which avoids some insidious systematic
# errors related to the scan direction.
source_cat2 = treecorr.Catalog(x=source_x, y=source_y,
g1=np.zeros_like(source_g2), g2=source_g2)
gg.process(lens_cat1, source_cat2)
print('gg.xim = ',gg.xim)
print('expected signal = ',e_b * lens_mean_absg / 2.)
# The precision of this is a bit lower though, since we now have more shape noise.
# Naively, I would expect it to be only sqrt(2) worse, but since the agreement in this test
# is largely artificial (the exact signal was placed down with no shape noise), the added
# shape noise matters much more here than it did above. So the tolerance is relaxed by a
# factor of 5 relative to the earlier checks.
np.testing.assert_allclose(gg.xim, e_b * lens_mean_absg/2., rtol=tol*5)
print('gg.xip = ',gg.xip)
print('expected signal = ',e_a * lens_mean_absg / 2.)
np.testing.assert_allclose(gg.xip, e_a * lens_mean_absg/2., rtol=tol*5)
@timer
def test_varxi():
# Test that varxip, varxim are correct (or close) based on actual variance of many runs.
# Same gamma pattern as in test_gg(). Although the signal doesn't actually matter at all here.
gamma0 = 0.05
r0 = 10.
L = 50.*r0
rng = np.random.RandomState(8675309)
# Note: to get a good estimate of var(xi), you need a lot of runs. The number of
# runs matters much more than the number of galaxies for getting this to pass.
if __name__ == '__main__':
ngal = 1000
nruns = 50000
tol_factor = 1
else:
ngal = 100
nruns = 5000
tol_factor = 5
all_ggs = []
for run in range(nruns):
# In addition to the shape noise below, there is shot noise from the random x,y positions.
x = (rng.random_sample(ngal)-0.5) * L
y = (rng.random_sample(ngal)-0.5) * L
# Varied weights are hard, but at least check that non-unit weights work correctly.
w = np.ones_like(x) * 5
r2 = (x**2 + y**2)/r0**2
g1 = -gamma0 * np.exp(-r2/2.) * (x**2-y**2)/r0**2
g2 = -gamma0 * np.exp(-r2/2.) * (2.*x*y)/r0**2
# This time, add some shape noise (different each run).
g1 += rng.normal(0, 0.3, size=ngal)
g2 += rng.normal(0, 0.3, size=ngal)
cat = treecorr.Catalog(x=x, y=y, w=w, g1=g1, g2=g2, x_units='arcmin', y_units='arcmin')
gg = treecorr.GGCorrelation(bin_size=0.1, min_sep=10., max_sep=100., sep_units='arcmin',
verbose=1)
gg.process(cat)
all_ggs.append(gg)
mean_xip = np.mean([gg.xip for gg in all_ggs], axis=0)
var_xip = np.var([gg.xip for gg in all_ggs], axis=0)
mean_xim = np.mean([gg.xim for gg in all_ggs], axis=0)
var_xim = np.var([gg.xim for gg in all_ggs], axis=0)
mean_varxip = np.mean([gg.varxip for gg in all_ggs], axis=0)
mean_varxim = np.mean([gg.varxim for gg in all_ggs], axis=0)
print('mean_xip = ',mean_xip)
print('mean_xim = ',mean_xim)
print('mean_varxip = ',mean_varxip)
print('mean_varxim = ',mean_varxim)
print('var_xip = ',var_xip)
print('ratio = ',var_xip / mean_varxip)
print('var_xim = ',var_xim)
print('ratio = ',var_xim / mean_varxim)
print('max relerr for xip = ',np.max(np.abs((var_xip - mean_varxip)/var_xip)))
print('max relerr for xim = ',np.max(np.abs((var_xim - mean_varxim)/var_xim)))
np.testing.assert_allclose(mean_varxip, var_xip, rtol=0.02 * tol_factor)
np.testing.assert_allclose(mean_varxim, var_xim, rtol=0.02 * tol_factor)
if __name__ == '__main__':
test_direct()
test_direct_spherical()
test_pairwise()
test_gg()
test_mapsq()
test_spherical()
test_aardvark()
test_shuffle()
test_haloellip()
test_varxi()
```
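The long comment block in `test_mapsq` above quotes the Crittenden relation between <Map^2> and the T+/T- filtered integrals of xi+/xi-, together with the analytic xi+/xi- of the Gaussian shear profile. The following standalone sketch (illustrative only, not part of the TreeCorr test suite; it assumes nothing beyond numpy) integrates those filters against the analytic xi+/xi- and compares with the closed-form <Map^2> quoted in the test:
```python
import numpy as np

# Same profile parameters as test_mapsq: gamma(r) = gamma0 exp(-r^2/2r0^2), box side L = 50 r0.
gamma0, r0, L = 0.05, 10., 500.

def xip(r):   # analytic xi+ quoted in the tests
    t = np.pi/16. * gamma0**2 * (r0/L)**2 * np.exp(-0.25*r**2/r0**2)
    return t * (r**4 - 16.*r**2*r0**2 + 32.*r0**4) / r0**4

def xim(r):   # analytic xi-
    t = np.pi/16. * gamma0**2 * (r0/L)**2 * np.exp(-0.25*r**2/r0**2)
    return t * r**4 / r0**4

def Tp(s):    # Crittenden T+ filter
    return (s**4 - 16.*s**2 + 32.)/128. * np.exp(-s**2/4.)

def Tm(s):    # Crittenden T- filter
    return s**4/128. * np.exp(-s**2/4.)

r = np.linspace(1.e-3, 500., 200001)   # integration grid out to 50 r0
dr = np.diff(r)
for R in [5., 10., 20.]:
    integrand = 0.5 * r/R**2 * (Tp(r/R)*xip(r) + Tm(r/R)*xim(r))
    mapsq_num = np.sum(0.5*(integrand[1:] + integrand[:-1]) * dr)   # trapezoid rule
    mapsq_true = 6.*np.pi * gamma0**2 * r0**8 * R**4 / (L**2 * (r0**2 + R**2)**5)
    print(R, mapsq_num, mapsq_true, mapsq_num/mapsq_true)   # ratio should be close to 1
```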
#### File: TreeCorr/treecorr/util.py
```python
import treecorr
import numpy as np
import os
import warnings
import coord
def ensure_dir(target):
d = os.path.dirname(target)
if d != '':
if not os.path.exists(d):
os.makedirs(d)
def set_omp_threads(num_threads, logger=None):
"""Set the number of OpenMP threads to use in the C++ layer.
:param num_threads: The target number of threads to use
:param logger: If desired, a logger object for logging any warnings here. (default: None)
:returns: The number of threads OpenMP reports that it will use. Typically this
matches the input, but OpenMP reserves the right not to comply with
the requested number of threads.
"""
input_num_threads = num_threads # Save the input value.
# If num_threads is auto, get it from cpu_count
if num_threads is None or num_threads <= 0:
import multiprocessing
num_threads = multiprocessing.cpu_count()
if logger:
logger.debug('multiprocessing.cpu_count() = %d',num_threads)
# Tell OpenMP to use this many threads
if logger:
logger.debug('Telling OpenMP to use %d threads',num_threads)
num_threads = treecorr._lib.SetOMPThreads(num_threads)
# Report back appropriately.
if logger:
logger.debug('OpenMP reports that it will use %d threads',num_threads)
if num_threads > 1:
logger.info('Using %d threads.',num_threads)
elif input_num_threads is not None and input_num_threads != 1:
# Only warn if the user specifically asked for num_threads != 1.
logger.warning("Unable to use multiple threads, since OpenMP is not enabled.")
return num_threads
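# [Illustrative usage] set_omp_threads(None) (or any value <= 0) requests one thread per CPU
# core via multiprocessing.cpu_count(), set_omp_threads(1) requests a single thread (no
# parallelism), and the return value reports how many threads OpenMP actually agreed to use.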
def get_omp_threads():
"""Get the number of OpenMP threads currently set to be used in the C++ layer.
:returns: The number of threads OpenMP reports that it will use.
"""
return treecorr._lib.GetOMPThreads()
def gen_write(file_name, col_names, columns, params=None, precision=4, file_type=None, logger=None):
"""Write some columns to an output file with the given column names.
We do this basic functionality a lot, so put the code to do it in one place.
:param file_name: The name of the file to write to.
:param col_names: A list of columns names for the given columns.
:param columns: A list of numpy arrays with the data to write.
:param params: A dict of extra parameters to write at the top of the output file (for
ASCII output) or in the header (for FITS output). (default: None)
:param precision: Output precision for ASCII. (default: 4)
:param file_type: Which kind of file to write to. (default: determine from the file_name
extension)
:param logger: If desired, a logger object for logging. (default: None)
"""
if len(col_names) != len(columns):
raise ValueError("col_names and columns are not the same length.")
if len(columns) == 0:
raise ValueError("len(columns) == 0")
for col in columns[1:]:
if col.shape != columns[0].shape:
raise ValueError("columns are not all the same shape")
columns = [ col.flatten() for col in columns ]
ensure_dir(file_name)
# Figure out which file type the catalog is
if file_type is None:
import os
name, ext = os.path.splitext(file_name)
if ext.lower().startswith('.fit'):
file_type = 'FITS'
else:
file_type = 'ASCII'
if logger: # pragma: no branch (We always provide a logger.)
logger.info("file_type assumed to be %s from the file name.",file_type)
if file_type.upper() == 'FITS':
try:
import fitsio
except ImportError:
logger.error("Unable to import fitsio. Cannot write to %s"%file_name)
raise
gen_write_fits(file_name, col_names, columns, params)
elif file_type.upper() == 'ASCII':
gen_write_ascii(file_name, col_names, columns, params, precision=precision)
else:
raise ValueError("Invalid file_type %s"%file_type)
def gen_write_ascii(file_name, col_names, columns, params, precision=4):
"""Write some columns to an output ASCII file with the given column names.
:param file_name: The name of the file to write to.
:param col_names: A list of columns names for the given columns. These will be written
in a header comment line at the top of the output file.
:param columns: A list of numpy arrays with the data to write.
:param params: A dict of extra parameters to write at the top of the output file.
:param precision: Output precision for ASCII. (default: 4)
"""
ncol = len(col_names)
data = np.empty( (len(columns[0]), ncol) )
for i,col in enumerate(columns):
data[:,i] = col
width = precision+8
# Note: python 2.6 needs the numbers, so can't just do "{:^%d}"*ncol
# Also, I have the first one be 1 shorter to allow space for the initial #.
header_form = "{0:^%d}"%(width-1)
for i in range(1,ncol):
header_form += " {%d:^%d}"%(i,width)
header = header_form.format(*col_names)
fmt = '%%%d.%de'%(width,precision)
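# e.g. with ncol=3 and the default precision=4 (so width=12), header_form becomes
# "{0:^11} {1:^12} {2:^12}" and fmt becomes '%12.4e'.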
ensure_dir(file_name)
with open(file_name, 'wb') as fid:
if params is not None:
s = '## %r\n'%(params)
fid.write(s.encode())
h = '#' + header + '\n'
fid.write(h.encode())
np.savetxt(fid, data, fmt=fmt)
def gen_write_fits(file_name, col_names, columns, params):
"""Write some columns to an output FITS file with the given column names.
:param file_name: The name of the file to write to.
:param col_names: A list of columns names for the given columns.
:param columns: A list of numpy arrays with the data to write.
:param params: A dict of extra parameters to write in the FITS header.
"""
import fitsio
ensure_dir(file_name)
data = np.empty(len(columns[0]), dtype=[ (name,'f8') for name in col_names ])
for (name, col) in zip(col_names, columns):
data[name] = col
fitsio.write(file_name, data, header=params, clobber=True)
def gen_read(file_name, file_type=None, logger=None):
"""Read some columns from an input file.
We do this basic functionality a lot, so put the code to do it in one place.
.. note::
The input file is expected to have been written by TreeCorr using the
`gen_write` function, so we don't have a lot of flexibility in the input structure.
:param file_name: The name of the file to read.
:param file_type: Which kind of file to read. (default: determine from the file_name
extension)
:param logger: If desired, a logger object for logging. (default: None)
:returns: (data, params), a numpy ndarray with named columns, and a dict of extra parameters.
"""
# Figure out which file type the catalog is
if file_type is None:
import os
name, ext = os.path.splitext(file_name)
if ext.lower().startswith('.fit'):
file_type = 'FITS'
else:
file_type = 'ASCII'
if logger: # pragma: no branch (We always provide a logger.)
logger.info("file_type assumed to be %s from the file name.",file_type)
if file_type.upper() == 'FITS':
try:
import fitsio
except ImportError:
logger.error("Unable to import fitsio. Cannot read %s"%file_name)
raise
data = fitsio.read(file_name)
params = fitsio.read_header(file_name, 1)
elif file_type.upper() == 'ASCII':
with open(file_name) as fid:
header = fid.readline()
params = {}
skip = 0
if header[1] == '#': # pragma: no branch (All our files have this.)
assert header[0] == '#'
params = eval(header[2:].strip())
header = fid.readline()
skip = 1
data = np.genfromtxt(file_name, names=True, skip_header=skip)
else:
raise ValueError("Invalid file_type %s"%file_type)
return data, params
class LRU_Cache:
""" Simplified Least Recently Used Cache.
Mostly stolen from http://code.activestate.com/recipes/577970-simplified-lru-cache/,
but added a method for dynamic resizing. The least recently used cached item is
overwritten on a cache miss.
:param user_function: A python function to cache.
:param maxsize: Maximum number of inputs to cache. [Default: 1024]
Usage
-----
>>> def slow_function(*args):  # A slow-to-evaluate python function
>>> ...
>>>
>>> v1 = slow_function(*k1) # Calling function is slow
>>> v1 = slow_function(*k1) # Calling again with same args is still slow
>>> cache = treecorr.util.LRU_Cache(slow_function)
>>> v1 = cache(*k1) # Returns slow_function(*k1), slowly the first time
>>> v1 = cache(*k1) # Returns slow_function(*k1) again, but fast this time.
Methods
-------
>>> cache.resize(maxsize) # Resize the cache, either upwards or downwards. Upwards resizing
# is non-destructive. Downwards resizing will remove the least
# recently used items first.
"""
def __init__(self, user_function, maxsize=1024):
# Link layout: [PREV, NEXT, KEY, RESULT]
self.root = [None, None, None, None]
self.user_function = user_function
self.cache = {}
last = self.root
for i in range(maxsize):
key = object()
self.cache[key] = last[1] = last = [last, self.root, key, None]
self.root[0] = last
self.count = 0
def __call__(self, *key, **kwargs):
link = self.cache.get(key)
if link is not None:
# Cache hit: move link to last position
link_prev, link_next, _, result = link
link_prev[1] = link_next
link_next[0] = link_prev
last = self.root[0]
last[1] = self.root[0] = link
link[0] = last
link[1] = self.root
return result
# Cache miss: evaluate and insert new key/value at root, then increment root
# so that just-evaluated value is in last position.
result = self.user_function(*key, **kwargs)
self.root[2] = key
self.root[3] = result
oldroot = self.root
self.root = self.root[1]
self.root[2], oldkey = None, self.root[2]
self.root[3], oldvalue = None, self.root[3]
self.cache[key] = oldroot
del self.cache[oldkey]
if self.count < self.size: self.count += 1
return result
def values(self):
"""Lists all items stored in the cache"""
return list([v[3] for v in self.cache.values() if v[3] is not None])
@property
def last_value(self):
"""Return the most recently used value"""
return self.root[0][3]
def resize(self, maxsize):
""" Resize the cache. Increasing the size of the cache is non-destructive, i.e.,
previously cached inputs remain in the cache. Decreasing the size of the cache will
necessarily remove items from the cache if the cache is already filled. Items are removed
in least recently used order.
:param maxsize: The new maximum number of inputs to cache.
"""
oldsize = len(self.cache)
if maxsize == oldsize:
return
else:
if maxsize < 0:
raise ValueError("Invalid maxsize")
elif maxsize < oldsize:
for i in range(oldsize - maxsize):
# Delete root.next
current_next_link = self.root[1]
new_next_link = self.root[1] = self.root[1][1]
new_next_link[0] = self.root
del self.cache[current_next_link[2]]
self.count = min(self.count, maxsize)
else: # maxsize > oldsize:
for i in range(maxsize - oldsize):
# Insert between root and root.next
key = object()
self.cache[key] = link = [self.root, self.root[1], key, None]
self.root[1][0] = link
self.root[1] = link
def clear(self):
""" Clear all items from the cache.
"""
maxsize = len(self.cache)
self.cache.clear()
last = self.root
for i in range(maxsize):
last[3] = None # Sever pointer to any existing result.
key = object()
self.cache[key] = last[1] = last = [last, self.root, key, None]
self.root[0] = last
self.count = 0
@property
def size(self):
return len(self.cache)
def double_ptr(x):
"""
Cast x as a double* to pass to library C functions
:param x: A numpy array assumed to have dtype = float.
:returns: A version of the array that can be passed to cffi C functions.
"""
if x is None:
return treecorr._ffi.cast('double*', 0)
else:
# This fails if x is read_only
#return treecorr._ffi.cast('double*', treecorr._ffi.from_buffer(x))
# This works, presumably by ignoring the numpy read_only flag. Although, I think it's ok.
return treecorr._ffi.cast('double*', x.ctypes.data)
def long_ptr(x):
"""
Cast x as a long* to pass to library C functions
:param x: A numpy array assumed to have dtype = int.
:returns: A version of the array that can be passed to cffi C functions.
"""
if x is None: # pragma: no cover (I don't ever have x=None for this one.)
return treecorr._ffi.cast('long*', 0)
else:
return treecorr._ffi.cast('long*', x.ctypes.data)
def parse_metric(metric, coords, coords2=None, coords3=None):
"""
Convert a string metric into the corresponding enum to pass to the C code.
"""
if coords2 is None:
auto = True
else:
auto = False
# Special Rlens doesn't care about the distance to the sources, so spherical is fine
# for cat2, cat3 in that case.
if metric == 'Rlens':
if coords2 == 'spherical': coords2 = '3d'
if coords3 == 'spherical': coords3 = '3d'
if metric == 'Arc':
# If all coords are 3d, then leave it 3d, but if any are spherical,
# then convert to spherical.
if all([c in [None, '3d'] for c in [coords, coords2, coords3]]):
# Leave coords as '3d'
pass
elif any([c not in [None, 'spherical', '3d'] for c in [coords, coords2, coords3]]):
raise ValueError("Arc metric is only valid for catalogs with spherical positions.")
elif any([c == 'spherical' for c in [coords, coords2, coords3]]):
# Switch to spherical
coords = 'spherical'
else: # pragma: no cover
# This is impossible now, but here in case we add additional coordinates.
raise ValueError("Cannot correlate catalogs with different coordinate systems.")
else:
if ( (coords2 != coords) or (coords3 is not None and coords3 != coords) ):
raise ValueError("Cannot correlate catalogs with different coordinate systems.")
if coords not in ['flat', 'spherical', '3d']:
raise ValueError("Invalid coords %s"%coords)
if metric not in ['Euclidean', 'Rperp', 'OldRperp', 'FisherRperp', 'Rlens', 'Arc', 'Periodic']:
raise ValueError("Invalid metric %s"%metric)
if metric in ['Rperp', 'OldRperp', 'FisherRperp'] and coords != '3d':
raise ValueError("%s metric is only valid for catalogs with 3d positions."%metric)
if metric == 'Rlens' and auto:
raise ValueError("Rlens metric is only valid for cross correlations.")
if metric == 'Rlens' and coords != '3d':
raise ValueError("Rlens metric is only valid for catalogs with 3d positions.")
if metric == 'Arc' and coords not in ['spherical', '3d']:
raise ValueError("Arc metric is only valid for catalogs with spherical positions.")
return coords, metric
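# [Illustrative examples] parse_metric('Arc', 'spherical', '3d') returns ('spherical', 'Arc'),
# promoting everything to spherical coordinates, while parse_metric('Rperp', 'flat', 'flat')
# raises a ValueError since Rperp requires 3d positions.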
def coord_enum(coords):
"""Return the C++-layer enum for the given string value of coords.
"""
if coords == 'flat':
return treecorr._lib.Flat
elif coords == 'spherical':
return treecorr._lib.Sphere
elif coords == '3d':
return treecorr._lib.ThreeD
else:
raise ValueError("Invalid coords %s"%coords)
def metric_enum(metric):
"""Return the C++-layer enum for the given string value of metric.
"""
if metric == 'Euclidean':
return treecorr._lib.Euclidean
elif metric == 'Rperp':
return metric_enum(treecorr.Rperp_alias)
elif metric == 'FisherRperp':
return treecorr._lib.Rperp
elif metric in ['OldRperp']:
return treecorr._lib.OldRperp
elif metric == 'Rlens':
return treecorr._lib.Rlens
elif metric == 'Arc':
return treecorr._lib.Arc
elif metric == 'Periodic':
return treecorr._lib.Periodic
else:
raise ValueError("Invalid metric %s"%metric)
def parse_xyzsep(args, kwargs, _coords):
"""Parse the different options for passing a coordinate and separation.
The allowed parameters are:
1. If _coords == Flat:
:param x: The x coordinate of the location for which to count nearby points.
:param y: The y coordinate of the location for which to count nearby points.
:param sep: The separation distance
2. If _coords == ThreeD:
Either
:param x: The x coordinate of the location for which to count nearby points.
:param y: The y coordinate of the location for which to count nearby points.
:param z: The z coordinate of the location for which to count nearby points.
:param sep: The separation distance
Or
:param ra: The right ascension of the location for which to count nearby points.
:param dec: The declination of the location for which to count nearby points.
:param r: The distance to the location for which to count nearby points.
:param sep: The separation distance
3. If _coords == Sphere:
:param ra: The right ascension of the location for which to count nearby points.
:param dec: The declination of the location for which to count nearby points.
:param sep: The separation distance as an angle
For all angle parameters (ra, dec, sep), this quantity may be a coord.Angle instance, or
units may be provided as ra_units, dec_units or sep_units respectively.
Finally, in cases where ra, dec are allowed, a coord.CelestialCoord instance may be
provided as the first argument.
:returns: The effective (x, y, z, sep) as a tuple.
"""
radec = False
if _coords == treecorr._lib.Flat:
if len(args) == 0:
if 'x' not in kwargs:
raise TypeError("Missing required argument x")
if 'y' not in kwargs:
raise TypeError("Missing required argument y")
if 'sep' not in kwargs:
raise TypeError("Missing required argument sep")
x = kwargs.pop('x')
y = kwargs.pop('y')
sep = kwargs.pop('sep')
elif len(args) == 1:
raise TypeError("x,y should be given as either args or kwargs, not mixed.")
elif len(args) == 2:
if 'sep' not in kwargs:
raise TypeError("Missing required argument sep")
x,y = args
sep = kwargs.pop('sep')
elif len(args) == 3:
x,y,sep = args
else:
raise TypeError("Too many positional args")
z = 0
elif _coords == treecorr._lib.ThreeD:
if len(args) == 0:
if 'x' in kwargs:
if 'y' not in kwargs:
raise TypeError("Missing required argument y")
if 'z' not in kwargs:
raise TypeError("Missing required argument z")
x = kwargs.pop('x')
y = kwargs.pop('y')
z = kwargs.pop('z')
else:
if 'ra' not in kwargs:
raise TypeError("Missing required argument ra")
if 'dec' not in kwargs:
raise TypeError("Missing required argument dec")
ra = kwargs.pop('ra')
dec = kwargs.pop('dec')
radec = True
if 'r' not in kwargs:
raise TypeError("Missing required argument r")
r = kwargs.pop('r')
if 'sep' not in kwargs:
raise TypeError("Missing required argument sep")
sep = kwargs.pop('sep')
elif len(args) == 1:
if not isinstance(args[0], coord.CelestialCoord):
raise TypeError("Invalid unnamed argument %r"%args[0])
ra = args[0].ra
dec = args[0].dec
radec = True
if 'r' not in kwargs:
raise TypeError("Missing required argument r")
r = kwargs.pop('r')
if 'sep' not in kwargs:
raise TypeError("Missing required argument sep")
sep = kwargs.pop('sep')
elif len(args) == 2:
if isinstance(args[0], coord.CelestialCoord):
ra = args[0].ra
dec = args[0].dec
radec = True
r = args[1]
else:
ra, dec = args
radec = True
if 'r' not in kwargs:
raise TypeError("Missing required argument r")
r = kwargs.pop('r')
if 'sep' not in kwargs:
raise TypeError("Missing required argument sep")
sep = kwargs.pop('sep')
elif len(args) == 3:
if isinstance(args[0], coord.CelestialCoord):
ra = args[0].ra
dec = args[0].dec
radec = True
r = args[1]
sep = args[2]
elif isinstance(args[0], coord.Angle):
ra, dec, r = args
radec = True
if 'sep' not in kwargs:
raise TypeError("Missing required argument sep")
sep = kwargs.pop('sep')
elif 'ra_units' in kwargs or 'dec_units' in kwargs:
ra, dec, r = args
radec = True
if 'sep' not in kwargs:
raise TypeError("Missing required argument sep")
sep = kwargs.pop('sep')
else:
x, y, z = args
if 'sep' not in kwargs:
raise TypeError("Missing required argument sep")
sep = kwargs.pop('sep')
elif len(args) == 4:
if isinstance(args[0], coord.Angle):
ra, dec, r, sep = args
radec = True
elif 'ra_units' in kwargs or 'dec_units' in kwargs:
ra, dec, r, sep = args
radec = True
else:
x, y, z, sep = args
else:
raise TypeError("Too many positional args")
else: # Sphere
if len(args) == 0:
if 'ra' not in kwargs:
raise TypeError("Missing required argument ra")
if 'dec' not in kwargs:
raise TypeError("Missing required argument dec")
ra = kwargs.pop('ra')
dec = kwargs.pop('dec')
radec = True
if 'sep' not in kwargs:
raise TypeError("Missing required argument sep")
sep = kwargs.pop('sep')
elif len(args) == 1:
if not isinstance(args[0], coord.CelestialCoord):
raise TypeError("Invalid unnamed argument %r"%args[0])
ra = args[0].ra
dec = args[0].dec
radec = True
if 'sep' not in kwargs:
raise TypeError("Missing required argument sep")
sep = kwargs.pop('sep')
elif len(args) == 2:
if isinstance(args[0], coord.CelestialCoord):
ra = args[0].ra
dec = args[0].dec
radec = True
sep = args[1]
else:
ra, dec = args
radec = True
if 'sep' not in kwargs:
raise TypeError("Missing required argument sep")
sep = kwargs.pop('sep')
elif len(args) == 3:
ra, dec, sep = args
radec = True
else:
raise TypeError("Too many positional args")
if not isinstance(sep, coord.Angle):
if 'sep_units' not in kwargs:
raise TypeError("Missing required argument sep_units")
sep = sep * coord.AngleUnit.from_name(kwargs.pop('sep_units'))
# We actually want the chord distance for this angle.
sep = 2. * np.sin(sep/2.)
if radec:
if not isinstance(ra, coord.Angle):
if 'ra_units' not in kwargs:
raise TypeError("Missing required argument ra_units")
ra = ra * coord.AngleUnit.from_name(kwargs.pop('ra_units'))
if not isinstance(dec, coord.Angle):
if 'dec_units' not in kwargs:
raise TypeError("Missing required argument dec_units")
dec = dec * coord.AngleUnit.from_name(kwargs.pop('dec_units'))
x,y,z = coord.CelestialCoord(ra, dec).get_xyz()
if _coords == treecorr._lib.ThreeD:
x *= r
y *= r
z *= r
if len(kwargs) > 0:
raise TypeError("Invalid kwargs: %s"%(kwargs))
return float(x), float(y), float(z), float(sep)
```
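As a small usage illustration of the I/O helpers defined above (a sketch assuming this version of TreeCorr is installed so that `gen_write` and `gen_read` are importable from `treecorr.util`; the file name, column names, and values are made up):
```python
import numpy as np
from treecorr.util import gen_write, gen_read

r = np.logspace(0., 2., 20)     # some made-up separations
xi = 1.e-4 / r                  # and a made-up correlation function
params = {'coords': 'flat', 'metric': 'Euclidean'}

# The .out extension selects the ASCII writer; the 'output' directory is created if needed.
gen_write('output/demo.out', ['r', 'xi'], [r, xi], params=params, precision=8)

# gen_read returns the named columns and the params dict written in the '##' header line.
data, params2 = gen_read('output/demo.out')
np.testing.assert_allclose(data['r'], r, rtol=1.e-7)    # round trip at precision=8
np.testing.assert_allclose(data['xi'], xi, rtol=1.e-7)
assert params2 == params
```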