metadata (dict) | text (string, lengths 60 to 3.49M) |
---|---|
{
"source": "jmlipman/RatLesNetv2",
"score": 3
} |
#### File: RatLesNetv2/lib/metric.py
```python
import numpy as np
from skimage import measure
from scipy import ndimage
def _border_np(y):
"""Calculates the border of a 3D binary map.
From NiftyNet.
"""
return y - ndimage.binary_erosion(y)
def _border_distance(y_pred, y_true):
"""Distance between two borders.
From NiftyNet.
y_pred and y_true are WHD
"""
border_seg = _border_np(y_pred)
border_ref = _border_np(y_true)
distance_ref = ndimage.distance_transform_edt(1 - border_ref)
distance_seg = ndimage.distance_transform_edt(1 - border_seg)
distance_border_seg = border_ref * distance_seg
distance_border_ref = border_seg * distance_ref
return distance_border_ref, distance_border_seg
class Metric:
def __init__(self, y_pred, y_true):
self.y_pred = y_pred
self.y_true = y_true
def dice(self):
"""This function calculates the Dice coefficient.
Args:
`y_pred`: batch containing the predictions, shaped BCWHD (classes on axis 1).
`y_true`: batch containing the ground-truth labels, same shape as `y_pred`.
Returns:
Dice coefficient. BC (B: batch, C: classes)
"""
num_samples = self.y_pred.shape[0]
num_classes = self.y_pred.shape[1]
results = np.zeros((num_samples, num_classes))
y_pred = np.argmax(self.y_pred, axis=1)
y_true = np.argmax(self.y_true, axis=1)
for i in range(num_samples):
for c in range(num_classes):
a = y_pred[i] == c
b = y_true[i] == c
if np.sum(b) == 0: # If no lesion in the y_true
if np.sum(a) == 0: # No lesion predicted
result = 1.0
else:
result = (np.sum(b==0)-np.sum(a))*1.0 / np.sum(b==0)
else: # Actual Dice
num = 2 * np.sum(a * b)
denom = np.sum(a) + np.sum(b)
result = num / denom
results[i, c] = result
return results
def hausdorff_distance(self):
"""Hausdorff distance.
From NiftyNet.
2-classes only!
"""
num_samples = self.y_pred.shape[0]
results = np.zeros(num_samples)
for i in range(num_samples):
y_pred = np.argmax(self.y_pred[i], axis=0)
y_true = np.argmax(self.y_true[i], axis=0)
ref_border_dist, seg_border_dist = _border_distance(y_pred, y_true)
results[i] = np.max([np.max(ref_border_dist), np.max(seg_border_dist)])
return results
def compactness(self):
"""surface^1.5 / volume
"""
num_samples = self.y_pred.shape[0]
results = np.zeros(num_samples)
for i in range(num_samples):
pred = np.argmax(self.y_pred[i], axis=0)
surface = np.sum(_border_np(pred))
volume = np.sum(pred)
results[i] = (surface**1.5)/volume
return results
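# Minimal usage sketch (not part of the original file): Metric expects arrays
# shaped (batch, classes, W, H, D); a perfect prediction yields a Dice of 1.
if __name__ == "__main__":
    y_true = np.zeros((1, 2, 4, 4, 4))
    y_true[0, 1, 1:3, 1:3, 1:3] = 1        # small "lesion" cube in class 1
    y_true[0, 0] = 1 - y_true[0, 1]        # background channel is the complement
    print(Metric(y_true.copy(), y_true).dice())   # -> [[1. 1.]]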
```
#### File: RatLesNetv2/lib/RatLesNetv2Blocks.py
```python
from torch import nn
from torch.nn import Conv3d, BatchNorm3d, ReLU, Sigmoid
import numpy as np
import torch
class ResNet(nn.Module):
def __init__(self, in_filters):
super(ResNet, self).__init__()
self.seq = nn.Sequential(
ReLU(),
BatchNorm3d(in_filters),
Conv3d(in_filters, in_filters, 3, padding=1),
ReLU(),
BatchNorm3d(in_filters),
Conv3d(in_filters, in_filters, 3, padding=1)
)
def forward(self, x):
return x + self.seq(x)
def __str__(self):
return "ResNet"
class Bottleneck(nn.Module):
def __init__(self, in_filters, out_filters):
super(Bottleneck, self).__init__()
self.seq = nn.Sequential(
ReLU(),
BatchNorm3d(in_filters),
Conv3d(in_filters, out_filters, 1)
)
def forward(self, x):
return self.seq(x)
def __str__(self):
return "Bottleneck"
``` |
{
"source": "jml/omnimetrics",
"score": 2
} |
#### File: src/omnimetrics/_database.py
```python
from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime
from pprint import pprint
from typing import Any, Callable, Iterable, Optional, TypeVar
from appscript import app, k
OMNIFOCUS = app("OmniFocus")
S = TypeVar("S")
T = TypeVar("T")
def optional(f: Callable[[S], T], x: Optional[S]) -> Optional[T]:
"""Apply ``f`` to ``x`` if ``x`` is not ``None``."""
if x is None:
return None
return f(x)
def resolve_missing_value(value: T) -> Optional[T]:
return value if value != k.missing_value else None
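# For illustration: optional(str, 3) returns '3' while optional(str, None) returns None;
# resolve_missing_value maps appscript's k.missing_value marker to None so that
# optional() can skip properties OmniFocus did not set.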
@dataclass(frozen=True)
class TagReference:
name: str
@classmethod
def from_reference(cls, tag_reference) -> TagReference:
return cls(name=tag_reference.name())
@dataclass(frozen=True)
class ProjectReference:
name: str
@classmethod
def from_reference(cls, project_reference) -> ProjectReference:
return cls(name=project_reference.name())
@dataclass(frozen=True)
class Task:
"""An OmniFocus Task.
See https://omni-automation.com/omnifocus/task.html
"""
id: str
# The title of the task.
name: str
creation_date: datetime
modification_date: datetime
# If set, the Task should be completed by this date.
due_date: Optional[datetime]
# Returns the computed effective due date for the Task, based on its local dateDue and those of its containers.
effective_due_date: Optional[datetime]
next_due_date: Optional[datetime]
# If set, the Task is not actionable until this date.
defer_date: Optional[datetime]
# Returns the computed effective defer date for the Task, based on its local deferDate and those of its containers.
effective_defer_date: Optional[datetime]
next_defer_date: Optional[datetime]
# If set, the Task is dropped.
dropped_date: Optional[datetime]
# If set, the Task is completed.
completion_date: Optional[datetime]
primary_tag: Optional[TagReference]
parent_task: Optional[TaskReference]
# The estimated number of minutes this task will take to finish, or null if no estimate has been made.
estimated_minutes: Optional[int]
containing_project: Optional[ProjectReference]
is_next: bool
num_available_tasks: int
num_completed_tasks: int
num_tasks: int
# Returns true if the task is a direct child of the inbox, but not if the task is contained by another task that is in the inbox.
is_in_inbox: bool
is_sequential: bool
# The flagged status of the task.
is_flagged: bool
# True if the task has been marked completed.
# Note that a task may be effectively considered completed if a containing task is marked completed.
is_completed: bool
is_dropped: bool
is_blocked: bool
# If set, the Task will be automatically marked completed when its last child Task is marked completed.
is_completed_by_children: bool
is_effectively_dropped: bool
is_effectively_completed: bool
should_use_floating_timezone: bool
#repetition = "<null>"
# The object holding the repetition properties for this task, or null if it is not repeating. See related documentation.
#repetitionRule = "<null>";
"""Missing properties.
This list comes from the OmniFocus Task documentation.
They are properties of the task that are not returned by the ``properties`` method.
after (Task.ChildInsertionLocation r/o) • A positional indicator that references the list position directly following this task instance.
assignedContainer (Project, Task, or Inbox or null) • For tasks in the inbox, the tentatively assigned project or parent task, which will be applied on cleanup.
attachments (Array of FileWrapper) • An array of FileWrapper objects representing the attachments associated with the task. See related documentation.
before (Task.ChildInsertionLocation r/o) • A positional indicator that references the list position immediately preceding this task instance.
beginning (Task.ChildInsertionLocation r/o) • A positional indicator that references the very start of the task’s container object.
children (Array of Task r/o) • Returns all the child tasks of this task, sorted by library order.
containingProject (Project or null r/o) • The Project that this Task is contained in, either as the root of the project or indirectly from a parent task. If this task is in the inbox, then this will be null.
effectiveCompletedDate (Date or null r/o) • (v3.8) Returns the computed effective completion date for the Task, based on its local completionDate and those of its containers.
effectiveDropDate (Date or null r/o) • (v3.8) Returns the computed effective drop date for the Task, based on its local dropDate and those of its containers.
effectiveFlagged (Boolean r/o) • Returns the computed effective flagged status for the Task, based on its local flagged and those of its containers.
ending (Task.ChildInsertionLocation r/o) • A positional indicator that references the position at the very end of the task’s container object.
flattenedChildren (TaskArray r/o) • An alias for flattenedTasks.
flattenedTasks (TaskArray r/o) • Returns a flat array of all tasks contained within this task. Tasks are sorted by their order in the database. This flat array is often used for processing the entire task hierarchy of a specific task.
hasChildren (Boolean r/o) • Returns true if this task has children, more efficiently than checking if children is empty.
linkedFileURLs (Array of URL r/o) • The list of file URLs linked to this task. The files at these URLs are not present in the database, rather the database holds bookmarks leading to these files. These links can be read on iOS, but not written to.
note (String) • The note of the task.
notifications (Array of Task.Notification r/o) • An array of the notifications that are active for this task. (see related documentation)
project (Project or null r/o) • The Project that this Task is the root task of, or null if this task is in the inbox or contained by another task.
sequential (Boolean) • If true, then children of this task form a dependency chain. For example, the first task blocks the second one until the first is completed.
shouldUseFloatingTimeZone (Boolean) • (v3.6) When set, the dueDate and deferDate properties will use floating time zones. (Note: if a Task has no due or defer dates assigned, this property will revert to the database’s default setting.)
tags (TagArray r/o) • Returns the Tags associated with this Task.
taskStatus (Task.Status r/o) • Returns the current status of the task.
tasks (TaskArray r/o) • Returns all the tasks contained directly in this task, sorted by their library order.
"""
@classmethod
def from_omnifocus_task(cls, task) -> Task:
properties = task.properties()
try:
return cls(
id=properties[k.id],
name=properties[k.name],
creation_date=properties[k.creation_date],
modification_date=properties[k.modification_date],
due_date=resolve_missing_value(properties[k.due_date]),
effective_due_date=resolve_missing_value(properties[k.effective_due_date]),
next_due_date=resolve_missing_value(properties[k.next_due_date]),
defer_date=resolve_missing_value(properties[k.defer_date]),
effective_defer_date=resolve_missing_value(properties[k.effective_defer_date]),
next_defer_date=resolve_missing_value(properties[k.next_defer_date]),
dropped_date=resolve_missing_value(properties[k.dropped_date]),
completion_date=resolve_missing_value(properties[k.completion_date]),
primary_tag=optional(TagReference.from_reference, resolve_missing_value(properties[k.primary_tag])),
parent_task=optional(TaskReference.from_reference, resolve_missing_value(properties[k.parent_task])),
estimated_minutes=resolve_missing_value(properties[k.estimated_minutes]),
containing_project=optional(ProjectReference.from_reference, resolve_missing_value(properties[k.containing_project])),
is_next=properties[k.next_],
num_available_tasks=properties[k.number_of_available_tasks],
num_completed_tasks=properties[k.number_of_completed_tasks],
num_tasks=properties[k.number_of_tasks],
is_in_inbox=properties[k.in_inbox],
is_sequential=properties[k.sequential],
is_flagged=properties[k.flagged],
is_completed=properties[k.completed],
is_dropped=properties[k.dropped],
is_blocked=properties[k.blocked],
is_completed_by_children=properties[k.completed_by_children],
is_effectively_dropped=properties[k.effectively_dropped],
is_effectively_completed=properties[k.effectively_completed],
should_use_floating_timezone=properties[k.should_use_floating_time_zone],
)
except KeyError:
pprint(properties)
raise
@dataclass(frozen=True)
class TaskReference:
id: str
name: str
@classmethod
def from_reference(cls, task_reference) -> TaskReference:
return cls(id=task_reference.id(), name=task_reference.name())
def load_tasks(omni_database: Any) -> Iterable[Task]:
for task in omni_database.flattened_tasks():
yield Task.from_omnifocus_task(task)
```
#### File: src/omnimetrics/_script.py
```python
import json
import tempfile
from dataclasses import asdict
from datetime import datetime
from pathlib import Path
from typing import IO
import click
from google.cloud import bigquery, storage
from omnimetrics._database import OMNIFOCUS, load_tasks
@click.group()
def omnimetrics():
"""Top-level omnimetrics command."""
@omnimetrics.command()
@click.argument("output", type=click.File("w"))
def dump_file(output: IO[str]) -> None:
_dump_omnifocus(output)
@omnimetrics.command()
@click.option("--filename", default="omnifocus-%Y%m%d-%H%M%S.json", type=str)
@click.argument("directory", type=click.Path(file_okay=False, dir_okay=True, exists=True))
def dump(filename: str, directory: str) -> None:
now = datetime.now()
filename = now.strftime(filename)
path = Path(directory).joinpath(filename)
with path.open("w") as output:
_dump_omnifocus(output)
def _dump_omnifocus(output: IO[str]) -> None:
for task in load_tasks(OMNIFOCUS.default_document):
task_dict = asdict(task)
output.write(json.dumps(task_dict, default=jsonify))
output.write("\n")
@omnimetrics.command()
@click.option("--gcs-bucket-prefix", type=str, default="")
@click.option("--filename", default="omnifocus-%Y%m%d-%H%M%S.json", type=str)
@click.argument("gcs-bucket", type=str)
@click.argument("destination-table", type=str)
def run_pipeline(gcs_bucket_prefix: str, filename: str, gcs_bucket: str, destination_table: str) -> None:
"""Extract data from Omnifocus and load it to GCS and BigQuery.
The BigQuery destination table needs to exist, and needs to be partitioned.
"""
now = datetime.now()
with tempfile.NamedTemporaryFile("w") as temp_file:
# Extract Omnifocus data to a file
_dump_omnifocus(temp_file)
temp_file.flush()  # make sure the dump is fully written before uploading by filename
# Load it to GCS
storage_client = storage.Client()
bucket = storage_client.bucket(gcs_bucket)
filename = now.strftime(filename)
gcs_path = str(Path(gcs_bucket_prefix) / Path(filename))
blob = bucket.blob(gcs_path)
blob.upload_from_filename(temp_file.name)
# TODO: Create the table if it doesn't exist.
_load_to_bigquery(gcs_bucket, gcs_path, f"{destination_table}${now.strftime('%Y%m%d')}")
def _load_to_bigquery(gcs_bucket: str, gcs_path: str, destination_table: str) -> None:
gcs_url = f"gs://{gcs_bucket}/{gcs_path}"
bigquery_client = bigquery.Client()
job = bigquery_client.load_table_from_uri(
[gcs_url],
destination_table,
job_config=bigquery.LoadJobConfig(
autodetect=True,
source_format=bigquery.SourceFormat.NEWLINE_DELIMITED_JSON,
schema_update_options=[
bigquery.SchemaUpdateOption.ALLOW_FIELD_ADDITION,
bigquery.SchemaUpdateOption.ALLOW_FIELD_RELAXATION,
],
write_disposition=bigquery.WriteDisposition.WRITE_TRUNCATE
)
)
job.result()
def jsonify(o):
if isinstance(o, datetime):
return o.isoformat()
return o
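# jsonify is passed to json.dumps as ``default`` so datetimes become ISO strings,
# e.g. (illustrative): json.dumps({"due": datetime(2021, 1, 1)}, default=jsonify)
# -> '{"due": "2021-01-01T00:00:00"}'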
"""
Here's what we need to do.
- [x] use default_document to get the whole database
- [ ] use `flattened` versions to get at the actual contents
- [x] `flattened_tasks`
- [ ] `flattened_folders`
- [ ] `flattened_tags`
- [ ] `flattened_projects`
- build types for each of those flattened versions
- [x] https://omni-automation.com/omnifocus/OF-API.html#Task
- [ ] folder
- [ ] tag
- [ ] project
- serialisers for each of these
- [x] JSON
- [ ] Avro? https://avro.apache.org/docs/1.10.0/gettingstartedpython.html
- [x] Figure out how to use launchd to run a script daily
- [x] https://www.launchd.info/
- [x] Put a plist file in ~/Library/LaunchAgents
- [ ] Create a Terraform module that:
- [ ] creates a GCP project
- [x] creates a GCS bucket
- [ ] creates a BigQuery dataset
- [ ] Create a script that uploads the serialised, flattened things to GCS
- [ ] Create another script that loads those flattened things to BigQuery
- [ ] Build a dbt module that transforms the raw data into something useful
- [ ] Build a DataStudio dashboard based on that data.
Strategy
- don't worry about serialising *everything* to start with
- also don't worry about fully correctly exporting any given type
- focus on getting all the pieces working end-to-end, such that we're exporting *something* from omnifocus to bigquery
"""
``` |
{
"source": "jmlon/trafficgenerator",
"score": 3
} |
#### File: jmlon/trafficgenerator/parser.py
```python
import logging
# logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("scene_parser")
logger.setLevel(logging.INFO)
scenario = { 'flows': [] }
def parse_scenario(filename):
with open(filename, 'rt') as fh:
for line in fh:
tokens = line.strip().split()
# print(tokens)
if len(tokens)>0 and not tokens[0].startswith('#'):
first = tokens.pop(0)
# print(f'||{l}||')
if first.startswith("type:"):
set_type(tokens)
elif first.startswith("repeat:"):
set_repeat(tokens)
elif first.startswith("on:"):
set_on(tokens)
elif first.startswith("off:"):
set_off(tokens)
elif first.startswith("end"):
set_end(tokens)
return scenario
def set_type(t):
scenario['type'] = t.pop(0)
def set_repeat(t):
scenario['repeat'] = int(t.pop(0))
def set_off(t):
if t.pop(0).startswith('time:'):
offtime = float(t.pop(0))
scenario['flows'].append({ 'on': False, 'offtime':offtime })
def set_end(t):
pass
def set_on(t):
quantity = on_quantity(t)
interval = on_interval(t)
size = on_size(t)
destination = on_destination(t)
flow = { 'on':True, 'quantity':quantity, 'interval':interval, 'size':size, 'dest':destination }
scenario['flows'].append(flow)
def on_quantity(t):
token = t.pop(0)
qty = {} # { 'mode':None, 'secs':None, 'packets':None }
if token=="time:":
qty['mode']='time'
qty['secs']=float(t.pop(0))
elif token=="packet:":
qty['mode']='packet'
qty['packets']=int(t.pop(0))
else:
logger.error('Invalid quantity')
logger.debug(qty)
return qty
def on_interval(t):
token = t.pop(0)
interval = {}
if token=='greedy':
interval['mode']='greedy'
elif token=='const':
interval['mode']='const'
interval['time']=float(t.pop(0))
elif token=='uniform':
interval['mode']='uniform'
interval['min'] = float(t.pop(0))
interval['max'] = float(t.pop(0))
elif token=='exponential':
interval['mode']= 'exponential'
interval['mean']= float(t.pop(0))
interval['min'] = float(t.pop(0))
interval['max'] = float(t.pop(0))
else:
logger.error('Invalid interval')
logger.debug(interval)
return interval
def on_size(t):
token = t.pop(0)
assert(token=='length:')
token = t.pop(0)
size = {}
if token=='const':
size['mode']='const'
size['packet_size']=int(t.pop(0))
elif token=='uniform':
size['mode']='uniform'
size['min'] = int(t.pop(0))
size['max'] = int(t.pop(0))
elif token=='exponential':
size['mode']= 'exponential'
size['mean']= float(t.pop(0))
size['min'] = int(t.pop(0))
size['max'] = int(t.pop(0))
else:
logger.error('Invalid size')
logger.debug(size)
return size
def on_destination(t):
token = t.pop(0)
dst = {}
if token=='dest:':
dst['addr']=t.pop(0)
dst['port']=int(t.pop(0))
dst['mac']=t.pop(0)
else:
logger.error('Invalid destination')
return dst
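# Illustrative scenario file (hypothetical contents; the keywords match what the
# parser accepts, the values are made up):
#
#   type: udp
#   repeat: 2
#   on: time: 5.0 const 0.01 length: const 512 dest: 10.0.0.2 5000 ff:ff:ff:ff:ff:ff
#   off: time: 1.0
#   end
#
# parse_scenario() would then return something like:
#   {'flows': [{'on': True,
#               'quantity': {'mode': 'time', 'secs': 5.0},
#               'interval': {'mode': 'const', 'time': 0.01},
#               'size': {'mode': 'const', 'packet_size': 512},
#               'dest': {'addr': '10.0.0.2', 'port': 5000, 'mac': 'ff:ff:ff:ff:ff:ff'}},
#              {'on': False, 'offtime': 1.0}],
#    'type': 'udp', 'repeat': 2}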
if __name__ == "__main__":
parse_scenario('flow-10.0.0.2') #('sample_scenery')
print(scenario)
``` |
{
"source": "jmlopez-rod/assist",
"score": 3
} |
#### File: assist/command/install.py
```python
import sys
import site
import textwrap
import os.path as pth
DESC = """
Create the .assistrc file and source it in your .bash_profile or
.bashrc file.
"""
def add_parser(subp, raw):
"""Add a parser to the main subparser. """
subp.add_parser('install', help='install assist',
formatter_class=raw,
description=textwrap.dedent(DESC))
def disp(msg):
"Print message to stdout. "
sys.stdout.write(msg)
def source_assistrc():
"""Source the .assistrc file in the .bashrc file. """
bash_map = {
'Darwin': '.bash_profile',
'darwin': '.bash_profile',
'Linux': '.bashrc',
'linux': '.bashrc',
}
platform = sys.platform
try:
bashrc_path = pth.expandvars('$HOME/%s' % bash_map[platform])
except KeyError:
disp('Error: %s platform not supported\n' % platform)
sys.exit(2)
disp('checking %s ... ' % bashrc_path)
if pth.exists(bashrc_path):
expr = [
'source ~/.assistrc\n',
'source $HOME/.assistrc\n',
pth.expandvars('source $HOME/.assistrc\n'),
]
for content_line in open(bashrc_path, 'r'):
for line in expr:
if line == content_line:
disp('ok\n')
return
with open(bashrc_path, 'a') as content_file:
disp('\n including .assistrc\n')
content_file.write('source ~/.assistrc\n')
def append_variable(cfile, var, val):
"""Writes a bash command to check if a variable is missing a
value."""
cfile.write('if [[ ":$%s:" != *":%s:"* ]]; then\n' % (var, val))
cfile.write(' export %s=%s:$%s\n' % (var, val, var))
cfile.write('fi\n')
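# For example, append_variable(rcfile, 'PATH', '/usr/local/bin') writes:
#   if [[ ":$PATH:" != *":/usr/local/bin:"* ]]; then
#     export PATH=/usr/local/bin:$PATH
#   fi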
def assistrc():
"""Create the assistrc file. """
rc_path = pth.expandvars('$HOME/.assistrc')
disp('writing %s ... ' % rc_path)
with open(rc_path, 'w') as rcfile:
disp('\n system wide path: %s/bin\n' % sys.prefix)
append_variable(rcfile, 'PATH', '%s/bin' % sys.prefix)
disp(' user path: %s/bin\n' % site.getuserbase())
append_variable(rcfile, 'PATH', '%s/bin' % site.getuserbase())
path = pth.abspath(pth.dirname(__file__)+'/../include')
disp(' include: %s\n' % path)
append_variable(rcfile, 'C_INCLUDE_PATH', path)
append_variable(rcfile, 'CPLUS_INCLUDE_PATH', path)
def run(_):
"""Run the command. """
source_assistrc()
assistrc()
``` |
{
"source": "jmlopez-rod/pyjoint",
"score": 3
} |
#### File: pyjoint/pyjoint/__main__.py
```python
import argparse
import textwrap
from pyjoint.__version__ import VERSION
def parse_options():
"""Interpret the command line inputs and options. """
desc = """
pyjoint computes the miter and tilt angles of polyhedron faces.
"""
ver = "pyjoint %s" % VERSION
epi = """
more info:
http://jmlopez-rod.github.com/pyjoint
version:
pyjoint %s
""" % VERSION
raw = argparse.RawDescriptionHelpFormatter
argp = argparse.ArgumentParser(formatter_class=raw, version=ver,
description=textwrap.dedent(desc),
epilog=textwrap.dedent(epi))
argp.add_argument('inputfile', type=str,
help='input file to process')
return argp.parse_args()
def run():
"""Run pyjoint from the command line. """
arg = parse_options()
print arg
if __name__ == '__main__':
run()
```
#### File: pyjoint/pyjoint/__version__.py
```python
VERSION_INFO = (0, 0, 0, 'alpha', 0)
def get_version(version_info):
"""Return a PEP-386 compliant version number from version_info."""
assert len(version_info) == 5
assert version_info[3] in ('alpha', 'beta', 'rc', 'final')
parts = 2 if version_info[2] == 0 else 3
main = '.'.join([str(part) for part in version_info[:parts]])
sub = ''
if version_info[3] == 'alpha' and version_info[4] == 0:
sub = '.dev'
elif version_info[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version_info[3]] + str(version_info[4])
return str(main + sub)
VERSION = get_version(VERSION_INFO)
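# Illustrative mappings produced by get_version:
#   (0, 0, 0, 'alpha', 0) -> '0.0.dev'
#   (1, 2, 0, 'beta', 3)  -> '1.2b3'
#   (1, 2, 3, 'final', 0) -> '1.2.3'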
``` |
{
"source": "JMLourier/PySyft",
"score": 2
} |
#### File: syft/ast/method_test.py
```python
from syft.ast.callable import Callable
from syft.ast.method import Method
def test_method_constructor() -> None:
method = Method()
assert issubclass(type(method), Callable)
```
#### File: lib/torch/device_test.py
```python
import torch as th
# syft absolute
import syft as sy
from syft.core.common.uid import UID
from syft.lib.python import String
def test_device() -> None:
device = th.device("cuda")
assert device.type == "cuda"
assert device.index is None
def test_device_init() -> None:
bob = sy.VirtualMachine(name="Bob")
client = bob.get_client()
torch = client.torch
type_str = String("cuda:0")
str_pointer = type_str.send(client)
device_pointer = torch.device(str_pointer)
assert type(device_pointer).__name__ == "devicePointer"
assert isinstance(device_pointer.id_at_location, UID)
``` |
{
"source": "jmloveyj/autoSweep",
"score": 3
} |
#### File: jmloveyj/autoSweep/mkMap.py
```python
import numpy as np
import cv2
from matplotlib import pyplot as plt
currentWindowId = 0
def prepareWindow(img, title, ifGray = False):
global currentWindowId
currentWindowId = currentWindowId +1
plt.subplot(3,3,currentWindowId)
plt.title(title)
plt.xticks([])
plt.yticks([])
if(ifGray):
plt.imshow(img,'gray')
else:
plt.imshow(img)
img = cv2.imread('room2.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
bg = thresh
prepareWindow(bg,"original", True)
kernel = np.ones((3,3),np.uint8)
# Remove noise
bigObstacle = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE,kernel, iterations=2)
prepareWindow(bigObstacle,"real map", True)
cv2.imwrite('multiRoom.pgm',bigObstacle)
greyBigObstacle = bigObstacle.copy()  # copy so the "real map" image above is not modified in place
greyBigObstacle[bigObstacle < 256] = 220
prepareWindow(greyBigObstacle,"graph",True)
cv2.imwrite('room_grey.pgm',greyBigObstacle)
plt.show()
``` |
{
"source": "jmlowe/amieclient",
"score": 3
} |
#### File: amieclient/packet/user.py
```python
from .base import Packet, PacketInvalidData
class NotifyUserModify(Packet):
_packet_type = 'notify_user_modify'
_expected_reply = [{'type': 'inform_transaction_complete', 'timeout': 30240}]
_data_keys_required = [
'ActionType',
'PersonID',
]
_data_keys_not_required_in_reply = []
_data_keys_allowed = [
'AcademicDegree',
'BusinessPhoneComment',
'BusinessPhoneExtension',
'BusinessPhoneNumber',
'City',
'Country',
'Department',
'DnList',
'Email',
'Fax',
'FirstName',
'HomePhoneComment',
'HomePhoneExtension',
'HomePhoneNumber',
'LastName',
'MiddleName',
'Organization',
'OrgCode',
'State'
]
def validate_data(self, raise_on_invalid=False):
action_type = self.ActionType
if action_type not in ['add', 'delete', 'replace']:
if raise_on_invalid:
error_str = 'Invalid action type for notify_user_modify: "{}"'.format(action_type)
raise PacketInvalidData(error_str)
else:
return False
return super().validate_data(raise_on_invalid)
class RequestUserModify(Packet):
_packet_type = 'request_user_modify'
_expected_reply = [{'type': 'inform_transaction_complete', 'timeout': 30240}]
_data_keys_required = [
'ActionType',
'PersonID',
]
_data_keys_not_required_in_reply = []
_data_keys_allowed = [
'AcademicDegree',
'BusinessPhoneComment',
'BusinessPhoneExtension',
'BusinessPhoneNumber',
'CitizenshipList',
'City',
'Country',
'Department',
'DnList',
'Email',
'Fax',
'FirstName',
'HomePhoneComment',
'HomePhoneExtension',
'HomePhoneNumber',
'LastName',
'MiddleName',
'NsfStatusCode',
'Organization',
'OrgCode',
'State',
'StreetAddress',
'StreetAddress2',
'Title',
'Zip',
]
def validate_data(self, raise_on_invalid=False):
action_type = self.ActionType
if action_type not in ['add', 'delete', 'replace']:
if raise_on_invalid:
error_str = 'Invalid action type for request_user_modify: "{}"'.format(action_type)
raise PacketInvalidData(error_str)
else:
return False
return super().validate_data(raise_on_invalid)
``` |
{
"source": "jmloyola/early-classification",
"score": 2
} |
#### File: early-classification/src/early_text_classifier.py
```python
import glob
from preprocess_dataset import PreprocessDataset
from context_information import ContextInformation
from partial_information_classifier import PartialInformationClassifier
from decision_classifier import DecisionClassifier
import numpy as np
from sklearn.metrics import recall_score, accuracy_score, f1_score, precision_score, confusion_matrix,\
classification_report
import pprint as pp
import pickle
import os
class EarlyTextClassifier:
def __init__(self, etc_kwargs, preprocess_kwargs, cpi_kwargs, context_kwargs, dmc_kwargs, unique_labels=None,
dictionary=None, verbose=True):
self.dataset_path = etc_kwargs['dataset_path']
self.dataset_name = etc_kwargs['dataset_name']
self.initial_step = etc_kwargs['initial_step']
self.step_size = etc_kwargs['step_size']
self.preprocess_kwargs = preprocess_kwargs
self.cpi_kwargs = cpi_kwargs
self.context_kwargs = context_kwargs
self.dmc_kwargs = dmc_kwargs
self.cpi_kwargs['initial_step'] = self.initial_step
self.cpi_kwargs['step_size'] = self.step_size
self.context_kwargs['initial_step'] = self.initial_step
self.context_kwargs['step_size'] = self.step_size
self.dictionary = dictionary
self.ci = None
self.cpi = None
self.dmc = None
self.unique_labels = unique_labels
self.is_loaded = False
self.verbose = verbose
self.load_model()
def verboseprint(self, *args, **kwargs):
if self.verbose:
print(*args, **kwargs)
def has_same_parameters(self, model):
if (self.dataset_name == model.dataset_name) and \
(self.initial_step == model.initial_step) and \
(self.step_size == model.step_size) and \
(self.preprocess_kwargs == model.preprocess_kwargs) and \
(self.cpi_kwargs['train_dataset_percentage'] == model.cpi_kwargs['train_dataset_percentage']) and \
(self.cpi_kwargs['test_dataset_percentage'] == model.cpi_kwargs['test_dataset_percentage']) and \
(self.cpi_kwargs['doc_rep'] == model.cpi_kwargs['doc_rep']) and \
(self.cpi_kwargs['cpi_clf'].get_params() == model.cpi_kwargs['cpi_clf'].get_params()) and \
(self.context_kwargs == model.context_kwargs) and \
(self.dmc_kwargs['train_dataset_percentage'] == model.dmc_kwargs['train_dataset_percentage']) and \
(self.dmc_kwargs['test_dataset_percentage'] == model.dmc_kwargs['test_dataset_percentage']) and \
(self.dmc_kwargs['dmc_clf'].get_params() == model.dmc_kwargs['dmc_clf'].get_params()):
return True
else:
return False
def copy_attributes(self, model):
self.ci = model.ci
self.cpi = model.cpi
self.dmc = model.dmc
def load_model(self):
possible_files = glob.glob(f'models/{self.dataset_name}/*.pickle')
for file in possible_files:
with open(file, 'rb') as f:
loaded_model = pickle.load(f)
if self.has_same_parameters(loaded_model):
self.verboseprint('Model already trained. Loading it.')
self.copy_attributes(loaded_model)
self.is_loaded = True
def print_params_information(self):
print("Dataset name: {}".format(self.dataset_name))
print("Dataset path: {}".format(self.dataset_path))
print('-'*80)
print('Pre-process params:')
pp.pprint(self.preprocess_kwargs)
print('-' * 80)
print('CPI params:')
pp.pprint(self.cpi_kwargs)
print('-' * 80)
print('Context Information params:')
pp.pprint(self.context_kwargs)
print('-' * 80)
print('DMC params:')
pp.pprint(self.dmc_kwargs)
print('-' * 80)
def preprocess_dataset(self):
self.verboseprint('Pre-processing dataset')
# Search the dataset path for the training and test set files.
train_path = glob.glob(f'{self.dataset_path}/**/{self.dataset_name}_train.txt', recursive=True)[0]
test_path = glob.glob(f'{self.dataset_path}/**/{self.dataset_name}_test.txt', recursive=True)[0]
prep_data = PreprocessDataset(self.preprocess_kwargs, self.verbose)
self.dictionary = prep_data.build_dict(train_path)
Xtrain = prep_data.transform_into_numeric_array(train_path)
ytrain, self.unique_labels = prep_data.get_labels(train_path)
Xtest = prep_data.transform_into_numeric_array(test_path)
ytest, _ = prep_data.get_labels(test_path)
self.verboseprint(f'Xtrain.shape: {Xtrain.shape}')
self.verboseprint(f'ytrain.shape: {ytrain.shape}')
self.verboseprint(f'Xtest.shape: {Xtest.shape}')
self.verboseprint(f'ytest.shape: {ytest.shape}')
return Xtrain, ytrain, Xtest, ytest
def fit(self, Xtrain, ytrain):
if not self.is_loaded:
self.verboseprint('Training EarlyTextClassifier model')
self.ci = ContextInformation(self.context_kwargs, self.dictionary, self.verbose)
self.ci.get_training_information(Xtrain, ytrain)
self.cpi = PartialInformationClassifier(self.cpi_kwargs, self.dictionary, self.verbose)
cpi_Xtrain, cpi_ytrain, cpi_Xtest, cpi_ytest = self.cpi.split_dataset(Xtrain, ytrain)
self.verboseprint(f'cpi_Xtrain.shape: {cpi_Xtrain.shape}')
self.verboseprint(f'cpi_ytrain.shape: {cpi_ytrain.shape}')
self.verboseprint(f'cpi_Xtest.shape: {cpi_Xtest.shape}')
self.verboseprint(f'cpi_ytest.shape: {cpi_ytest.shape}')
self.cpi.fit(cpi_Xtrain, cpi_ytrain)
cpi_predictions, cpi_percentages = self.cpi.predict(cpi_Xtest)
dmc_X, dmc_y = self.ci.generate_dmc_dataset(cpi_Xtest, cpi_ytest, cpi_predictions)
self.dmc = DecisionClassifier(self.dmc_kwargs, self.verbose)
dmc_Xtrain, dmc_ytrain, dmc_Xtest, dmc_ytest = self.dmc.split_dataset(dmc_X, dmc_y)
self.verboseprint(f'dmc_Xtrain.shape: {dmc_Xtrain.shape}')
self.verboseprint(f'dmc_ytrain.shape: {dmc_ytrain.shape}')
self.verboseprint(f'dmc_Xtest.shape: {dmc_Xtest.shape}')
self.verboseprint(f'dmc_ytest.shape: {dmc_ytest.shape}')
self.dmc.fit(dmc_Xtrain, dmc_ytrain)
dmc_prediction, _ = self.dmc.predict(dmc_Xtest)
else:
self.verboseprint('EarlyTextClassifier model already trained')
def predict(self, Xtest, ytest):
self.verboseprint('Predicting with the EarlyTextClassifier model')
cpi_predictions, cpi_percentages = self.cpi.predict(Xtest)
dmc_X, dmc_y = self.ci.generate_dmc_dataset(Xtest, ytest, cpi_predictions)
dmc_prediction, prediction_time = self.dmc.predict(dmc_X)
return cpi_percentages, cpi_predictions, dmc_prediction, prediction_time, dmc_y
def time_penalization(self, k, penalization_type, time_threshold):
if penalization_type == 'Losada-Crestani':
return 1.0 - ((1.0 + np.exp(k - time_threshold))**(-1))
return 0.0
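# The 'Losada-Crestani' penalization is 1 - 1 / (1 + exp(k - time_threshold)),
# e.g. with time_threshold = 5: penalty(1) ~ 0.018, penalty(5) = 0.5, penalty(20) ~ 1.0,
# so later (even if correct) decisions are penalized more heavily.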
def score(self, y_true, cpi_prediction, cpi_percentages, prediction_time, penalization_type, time_threshold, costs,
print_output=True):
y_pred = []
k = []
num_docs = len(y_true)
for i in range(num_docs):
t = prediction_time[i]
p = cpi_percentages[prediction_time[i]]
y_pred.append(cpi_prediction[t, i])
k.append(p)
y_pred = np.array(y_pred)
k = np.array(k)
error_score = np.zeros(num_docs)
if len(self.unique_labels) > 2:
for idx in range(num_docs):
if y_true[idx] == y_pred[idx]:
error_score[idx] = self.time_penalization(k[idx], penalization_type, time_threshold) * costs['c_tp']
else:
error_score[idx] = costs['c_fn'] + np.sum(y_true == y_true[idx]) / num_docs
else:
for idx in range(num_docs):
if (y_true[idx] == 1) and (y_pred[idx] == 1):
error_score[idx] = self.time_penalization(k[idx], penalization_type, time_threshold) * costs['c_tp']
elif (y_true[idx] == 1) and (y_pred[idx] == 0):
error_score[idx] = costs['c_fn']
elif (y_true[idx] == 0) and (y_pred[idx] == 1):
if costs['c_fp'] == 'proportion_positive_cases':
# TODO: document this case.
error_score[idx] = np.sum(y_true == 1) / num_docs
else:
error_score[idx] = costs['c_fp']
elif (y_true[idx] == 0) and (y_pred[idx] == 0):
error_score[idx] = 0
precision_etc = precision_score(y_true, y_pred, average='macro')
recall_etc = recall_score(y_true, y_pred, average='macro')
f1_etc = f1_score(y_true, y_pred, average='macro')
accuracy_etc = accuracy_score(y_true, y_pred)
ede_score = error_score.mean()
confusion_matrix_etc = confusion_matrix(y_true, y_pred)
if print_output:
print(f'{"Score ETC":^50}')
print('-'*50)
print(f'{"Precision average=macro:":>25} {precision_etc:.3}')
print(f'{"Recall average=macro:":>25} {recall_etc:.3}')
print(f'{"F1 Measure average=macro:":>25} {f1_etc:.3}')
print(f'{"Accuracy:":>25} {accuracy_etc:.3}')
print(f'{"EDE o=":>21}{time_threshold:<3}: {ede_score:.3}')
print('-' * 50)
print(classification_report(y_true, y_pred, target_names=self.unique_labels))
# The reported averages are a prevalence-weighted macro-average across classes (equivalent to
# precision_recall_fscore_support with average='weighted').
# 'weighted': Calculate metrics for each label, and find their average, weighted by support (the number of
# true instances for each label). This alters ‘macro’ to account for label imbalance; it can result in an
# F-score that is not between precision and recall.
print('-' * 50)
print('Confusion matrix:')
pp.pprint(confusion_matrix_etc)
return precision_etc, recall_etc, f1_etc, accuracy_etc, ede_score
def save_model(self):
if not self.is_loaded:
self.verboseprint('Saving model')
existing_files = glob.glob(f'models/{self.dataset_name}/*.pickle')
existing_file_names = [int(os.path.splitext(os.path.basename(x))[0]) for x in existing_files]
max_file_name = max(existing_file_names) if existing_files != [] else 0
file_path = f'models/{self.dataset_name}/{max_file_name + 1}.pickle'
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, 'wb') as fp:
pickle.dump(self, fp)
else:
self.verboseprint('Model already in disk')
```
#### File: early-classification/src/preprocess_dataset.py
```python
import numpy as np
class PreprocessDataset:
def __init__(self, preprocess_kwargs, verbose=True):
self.min_word_length = preprocess_kwargs['min_word_length']
self.max_number_words = preprocess_kwargs['max_number_words']
self.representation = 'word_tf'
self.dictionary = None
self.unique_labels = None
self.verbose = verbose
def verboseprint(self, *args, **kwargs):
if self.verbose:
print(*args, **kwargs)
def read_raw_dataset(self, path):
""""
Read raw dataset and returns a list of tuples (label, document) for every document in the dataset.
Inputs:
- path: path to the file of the raw dataset
Output: List of tuples (label, document)
"""
# Store the dataset as a list of tuples (label, document).
dataset = []
i = 0
with open(path, 'r') as f:
for line in f:
try:
label, document = line.split(maxsplit=1)
except ValueError:
print(
'Error while reading dataset: {}. Line {} does not follow the form \"label\tdocument\"'.format(
path, i))
continue
# Remove new line character from document.
document = document[:-1]
dataset.append((label, document))
i = i + 1
return dataset
def build_dict(self, path):
"""
Returns a dictionary with the words of the train dataset.
The dictionary maps each of the most relevant words to an index indicating its rank by number of
occurrences.
Note that index 0 is reserved for the UNKNOWN token.
Inputs:
- path: path to the file containing the raw dataset.
- min_word_length: minimum number of characters that every word in the new dictionary must have.
- max_number_words: maximum number of words for the dictionary. Use 'all' to consider all the terms in training.
- representation: document representation ['word_tf', 'word_tf_idf', 'character_3_gram_tf',
'character_3_gram_tf_idf', 'word_3_gram_tf', 'word_3_gram_tf_idf'].
Output: dictionary containing the most relevant words in the corpus ordered by the amount of times they appear.
"""
dataset = self.read_raw_dataset(path)
documents = [x[1] for x in dataset]
self.verboseprint('Building dictionary')
wordcount = dict()
for ss in documents:
words = ss.strip().lower().split()
for w in words:
if len(w) < self.min_word_length:
continue
if w not in wordcount:
wordcount[w] = 1
else:
wordcount[w] += 1
# In Python 3, dict views need to be converted to lists, e.g. list(wordcount.values())
counts = list(wordcount.values())
keys = list(wordcount.keys())
self.verboseprint(np.sum(counts), ' total words ', len(keys), ' unique words')
# Slice the array of indices that sort counts.
# The slice takes every element but with a negative step, so the indices are processed in reverse
# order (most frequent words first).
# numpy.argsort(counts)[::-1] is the same as numpy.argsort(counts)[0:len(counts):-1]
# Assume n is the number of elements in the dimension being sliced. Then, if i is not given it defaults to 0 for
# k > 0 and n - 1 for k < 0 . If j is not given it defaults to n for k > 0 and -n-1 for k < 0 . If k is not given
# it defaults to 1. Note that :: is the same as : and means select all indices along this axis.
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
sorted_idx = np.argsort(counts)[::-1]
worddict = dict()
for idx, ss in enumerate(sorted_idx):
if (self.max_number_words != 'all') and (self.max_number_words <= idx):
self.verboseprint(f'Considering only {self.max_number_words} unique terms plus the UNKNOWN token')
break
worddict[keys[ss]] = idx + 1 # leave 0 (UNK)
self.dictionary = worddict
return worddict
def transform_into_numeric_array(self, path):
"""
Given the path to a dataset, this function transforms the list of documents into a numpy array of shape
(num_docs, max_length+1), where each word is replaced by the index given by the dictionary.
Inputs:
- path: path to the file containing the raw dataset.
- dictionary: dictionary with the words that matter.
Output: Numpy array of shape (num_docs, max_length+1) with the words replaced by their dictionary index.
Note: the index given by the dictionary is the position of the word when words are ranked from most to least
frequent in the training dataset.
The number 0 is reserved for the UNKNOWN token and the number -1 indicates the end of the document.
"""
dataset = self.read_raw_dataset(path)
num_docs = len(dataset)
seqs = [None] * num_docs
max_length = 0
for idx, line in enumerate(dataset):
document = line[1]
words = document.strip().lower().split()
seqs[idx] = [self.dictionary[w] if w in self.dictionary else 0 for w in words]
length_doc = len(words)
if max_length < length_doc:
max_length = length_doc
preprocess_dataset = -2 * np.ones((num_docs, max_length + 1), dtype=int)
for idx in range(num_docs):
length_doc = len(seqs[idx])
preprocess_dataset[idx, 0:length_doc] = seqs[idx]
preprocess_dataset[idx, length_doc] = -1
return preprocess_dataset
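# Small illustrative example: with dictionary {'the': 1, 'cat': 2} and the two
# documents "the cat" and "the dog sat", the returned array is
#   [[ 1,  2, -1, -2],
#    [ 1,  0,  0, -1]]
# (0 = unknown word, -1 = end of document, -2 = padding).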
def get_labels(self, path):
""""
Read raw dataset and returns a tuple (final_labels, unique_labels).
final_labels is a numpy.array of integers containing the label of every document.
unique_labels is list containing every label (without repetition) ordered. Only used for the test set.
Inputs:
- path: path to the file of the raw dataset
- unique_labels: list containing every label (without repetition) ordered.
Output: tuple (final_labels, unique_labels).
- final_labels is a numpy.array of integers containing the label of every document.
- unique_labels is list containing every label (without repetition) ordered.
"""
labels = []
ul = [] if self.unique_labels is None else self.unique_labels
with open(path, 'r') as f:
for idx, line in enumerate(f):
try:
label, _ = line.split(maxsplit=1)
except ValueError:
print(
'Error while reading dataset: {}. Line {} does not follow the form \"label\tdocument\"'.format(
path, idx))
continue
labels.append(label)
if (self.unique_labels is None) and (label not in ul):
ul.append(label)
ul.sort() # Sort list of unique_labels.
num_documents = len(labels)
final_labels = np.empty([num_documents], dtype=int)
for idx, l in enumerate(labels):
final_labels[idx] = ul.index(l)
self.unique_labels = ul
return final_labels, ul
``` |
{
"source": "jml/pyhazard",
"score": 2
} |
#### File: jml/pyhazard/example.py
```python
import random
from pyrsistent import pmap
# TODO: Export from public names.
from hazard._client import (
get_game_info,
get_round_info,
join_game,
play_turn,
register_game,
register_user,
)
from hazard._rules import iter_valid_plays
from hazard._client import _make_credentials
"""
The server running Hazard. https://haverer.jml.io/ in production.
"""
BASE_URL = 'http://localhost:3000'
# TODO: These endpoints ought to be in the library, rather than something that
# users need to know.
USERS_ENDPOINT = BASE_URL + '/users'
GAMES_ENDPOINT = BASE_URL + '/games'
def get_game_endpoint(game):
# TODO: This also should be in the library.
return BASE_URL + game['url']
def get_round_endpoint(round_url):
# TODO: This also should be in the library.
return BASE_URL + round_url
def player_info(round_info, player_id):
for info in round_info['players']:
if info['id'] == player_id:
return info
def get_status(player):
if player['active']:
if player['protected']:
return ' (protected)'
else:
return ''
else:
return ' (eliminated)'
def print_round_info(round_info):
current_player = round_info['currentPlayer']
print 'Players:'
for player in round_info['players']:
status = get_status(player)
if player['id'] == current_player:
current = '* '
else:
current = ' '
print '{}{}{}: {}'.format(
current, player['id'], status, player['discards'])
print
def choose_play(hand, dealt_card, myself, others):
valid_plays = list(iter_valid_plays(hand, dealt_card, myself, others))
try:
return random.choice(valid_plays)
except IndexError:
return None
def play_round(users, round_url):
while True:
# Figure out whose turn it is
round_info = get_round_info(None, round_url)
print_round_info(round_info)
current_player_id = round_info.get('currentPlayer', None)
if not current_player_id:
return round_info['winners']
# Play as that person
current_player = users[current_player_id]
current_player_creds = _make_credentials(current_player)
current_player_view = get_round_info(current_player_creds, round_url)
# Figure out their hand
dealt_card = current_player_view['dealtCard']
hand = player_info(current_player_view, current_player_id)['hand']
others = [
p['id'] for p in round_info['players']
if p['id'] != current_player_id]
# Choose a play at random.
play = choose_play(hand, dealt_card, current_player_id, others)
print 'Playing: {}'.format(play)
response = play_turn(current_player_creds, round_url, play)
print 'Result: {}'.format(response)
def main():
# Register two users, 'foo' and 'bar'.
foo = register_user(USERS_ENDPOINT, 'foo')
foo_creds = _make_credentials(foo)
bar = register_user(USERS_ENDPOINT, 'bar')
bar_creds = _make_credentials(bar)
users = pmap({
foo['id']: foo,
bar['id']: bar,
})
# 'foo' creates a 2-player game
game = register_game(foo_creds, GAMES_ENDPOINT, 2)
game_url = get_game_endpoint(game)
# 'bar' joins the game, and the game begins.
join_game(bar_creds, game_url)
while True:
game = get_game_info(None, game_url)
print 'Game: {}'.format(game)
if game['state'] != 'in-progress':
break
current_round_url = get_round_endpoint(game['currentRound'])
winners = play_round(users, current_round_url)
print 'Round over. Winners: {}'.format(winners)
print 'Game over. Winners: {}'.format(game['winners'])
if __name__ == '__main__':
main()
```
#### File: pyhazard/hazard/_client.py
```python
import json
import requests
from pyrsistent import pmap
def register_user(users_endpoint, username):
data = {'username': username}
result = pmap(requests.post(users_endpoint, data=json.dumps(data)).json())
return result.update(data)
def _make_credentials(registration_data):
return (registration_data['username'], registration_data['password'])
def get_user_info(credentials, user_info_endpoint):
return pmap(
requests.get(
user_info_endpoint, auth=credentials,
headers={'Accept': 'application/json'}
).json())
# TODO: Handle errors
# TODO: Encode routes in functions, rather than requiring full URLs to be
# passed in.
# TODO: Double check that when we play Clown we actually find out what they've
# got.
def register_game(credentials, game_endpoint, num_players, turn_timeout=3600):
data = {'numPlayers': num_players, 'turnTimeout': turn_timeout}
response = requests.post(
game_endpoint, data=json.dumps(data), auth=credentials)
result = response.json()
result['url'] = response.headers['location']
return pmap(result)
def join_game(credentials, game_endpoint):
return pmap(requests.post(game_endpoint, data='', auth=credentials).json())
def get_game_info(credentials, game_endpoint):
return pmap(
requests.get(
game_endpoint, auth=credentials,
headers={'Accept': 'application/json'},
).json())
def get_round_info(credentials, round_endpoint):
return pmap(
requests.get(
round_endpoint, auth=credentials,
headers={'Accept': 'application/json'},
).json())
def play_turn(credentials, round_endpoint, play):
# XXX: Necessary for JSON serialization. Is there a better way?
if play is not None:
play = dict(play.items())
return pmap(
requests.post(
round_endpoint, auth=credentials, data=json.dumps(play)).json())
``` |
{
"source": "jml/quay-admin",
"score": 2
} |
#### File: quay-admin/quayadmin/_impl.py
```python
import argparse
import json
import os
import sys
# We need to make sure we know where our SSL certificates are.
# See https://stackoverflow.com/questions/34358935/python-treq-fails-with-twisted-openssl-error-due-to-empty-trust-store-on-windows
import certifi
os.environ["SSL_CERT_FILE"] = certifi.where()
import attr
import treq
from twisted.internet.defer import gatherResults, inlineCallbacks
QUAY_IO_ENDPOINT = 'https://quay.io/api/v1'
QUAY_TOKEN_ENV_NAME = 'QUAY_TOKEN'
@attr.s(frozen=True)
class Registry(object):
"""A quay.io registry."""
endpoint = attr.ib(default=QUAY_IO_ENDPOINT)
token = attr.ib(default=None)
def _request(self, method, path, headers=None, **kwargs):
url = '%s/%s' % (self.endpoint, path)
headers = headers if headers else {}
if self.token:
headers['Authorization'] = 'Bearer %s' % (self.token,)
return treq.request(method, url, headers=headers, **kwargs).addCallback(treq.json_content)
@inlineCallbacks
def list_repositories(self, namespace):
"""List all the repositories in a given namespace.
Ignores pagination completely.
"""
repos = yield self._request(
'GET', 'repository', params={'namespace': namespace})
return [Repository(**repo) for repo in repos['repositories']]
@inlineCallbacks
def get_user_permissions(self, repo_spec):
"""Get the user permissions for a repository."""
path = 'repository/%s/permissions/user/' % (repo_spec,)
perms = yield self._request('GET', path)
return list(map(UserPermission.from_dict, perms['permissions'].values()))  # materialize so results can be re-iterated and JSON-serialized
@inlineCallbacks
def get_team_permissions(self, repo_spec):
path = 'repository/%s/permissions/team/' % (repo_spec,)
perms = yield self._request('GET', path)
return list(map(TeamPermission.from_dict, perms['permissions'].values()))
@attr.s(frozen=True, cmp=True)
class Repository(object):
"""A quay.io repository."""
namespace = attr.ib()
name = attr.ib()
kind = attr.ib()
is_starred = attr.ib()
is_public = attr.ib()
description = attr.ib()
@property
def spec(self):
return '%s/%s' % (self.namespace, self.name)
@classmethod
def from_dict(cls, data):
return cls(**data)
@inlineCallbacks
def get_repository_permissions(repository, registry):
"""Get the user and team permissions for a repository.
Returns a Deferred RepositoryPermissions.
"""
[user_perms, team_perms] = yield gatherResults([
registry.get_user_permissions(repository.spec),
registry.get_team_permissions(repository.spec),
])
return RepositoryPermissions(
repository=repository,
user_permissions=user_perms,
team_permissions=team_perms,
)
@attr.s(frozen=True)
class Avatar(object):
color = attr.ib()
hash = attr.ib()
kind = attr.ib()
name = attr.ib()
@attr.s(frozen=True)
class Permission(object):
"""Base permission class."""
avatar = attr.ib()
name = attr.ib()
role = attr.ib()
@classmethod
def from_dict(cls, data):
avatar_data = data.pop('avatar')
avatar = Avatar(**avatar_data)
return cls(avatar=avatar, **data)
@attr.s(frozen=True)
class UserPermission(Permission):
"""A permission a user has."""
is_org_member = attr.ib()
is_robot = attr.ib()
@attr.s(frozen=True)
class TeamPermission(Permission):
"""A permission a team has."""
@attr.s(frozen=True)
class RepositoryPermissions(object):
"""The permissions for a repository."""
repository = attr.ib()
user_permissions = attr.ib()
team_permissions = attr.ib()
@classmethod
def from_dict(cls, data):
return cls(
repository=Repository.from_dict(data['repository']),
user_permissions=list(map(UserPermission.from_dict, data['user_permissions'])),
team_permissions=list(map(TeamPermission.from_dict, data['team_permissions'])),
)
@attr.s(frozen=True)
class AllRepositoryPermissions(object):
_repository_permissions = attr.ib()
@classmethod
@inlineCallbacks
def from_registry(cls, registry, namespace):
repos = yield registry.list_repositories(namespace)
perms = yield map_concurrently(get_repository_permissions, repos, registry)
return cls(perms)
@classmethod
def from_json_file(cls, state_file_path):
with open(state_file_path, 'r') as state_file:
raw_perms = json.load(state_file)
return cls([RepositoryPermissions.from_dict(perm) for perm in raw_perms])
def to_json_file(self, state_file_path):
with open(state_file_path, 'w') as state_file:
json.dump([attr.asdict(perm) for perm in self._repository_permissions], state_file)
def find_repos_with_external_users(self):
repos = {}
for perm in self._repository_permissions:
external_users = [user for user in perm.user_permissions
if not user.is_org_member]
if external_users:
repos[perm.repository] = external_users
return repos
def map_concurrently(f, xs, *args, **kwargs):
"""Run 'f' concurrently over each 'x' in 'xs'.
Also passes through '*args' and '**kwargs'.
Assumes 'f' returns Deferred values.
"""
deferreds = [f(x, *args, **kwargs) for x in xs]
return gatherResults(deferreds)
def make_argument_parser():
parser = argparse.ArgumentParser(description='Show information about quay.io permissions')
parser.add_argument('namespace', type=str, help='Namespace to look in')
parser.add_argument(
'--from-state', type=str,
help='If provided, get quay.io state from a file, rather than an API')
parser.add_argument(
'--api-root', type=str,
default=QUAY_IO_ENDPOINT,
help='Root of quay.io API. Ignored if --from-state provided.')
parser.add_argument(
'--dump-state', type=str,
help='If provided, dump state to a file. Will overwrite file if it exists.')
return parser
@inlineCallbacks
def main(reactor, *args):
parser = make_argument_parser()
config = parser.parse_args(args)
if config.from_state:
perms = AllRepositoryPermissions.from_json_file(config.from_state)
else:
quay_token = os.environ.get(QUAY_TOKEN_ENV_NAME, None)
registry = Registry(endpoint=config.api_root, token=quay_token)
perms = yield AllRepositoryPermissions.from_registry(registry, config.namespace)
external = perms.find_repos_with_external_users()
for repo, users in external.items():
print(repo.spec)
for user in users:
print('- %s [%s]%s' % (
user.name,
user.role,
' (robot)' if user.is_robot else '',
))
print()
if config.dump_state:
perms.to_json_file(config.dump_state)
if external:
sys.exit(1)
def script():
"""Command-line script for quay-admin."""
from twisted.internet.task import react
react(main, sys.argv[1:])
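# Typical invocation (assuming the console script is installed as ``quay-admin``;
# the namespace and file name below are placeholders):
#   QUAY_TOKEN=... quay-admin my-org --dump-state state.json
# It prints repositories that grant access to users outside the organization and
# exits with status 1 if any are found.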
``` |
{
"source": "jmlrt/redis-tools",
"score": 3
} |
#### File: redis-tools/redistools/tools.py
```python
import logging
import os
import time
from .redis_instances import RedisInstance
def get_config():
"""
Get configuration from environment variables
"""
config = {'dry_run': os.getenv('DRY_RUN', "yes"),
'interval': int(os.getenv('INTERVAL', '30')),
'redis_endpoint': os.getenv('REDIS_ENDPOINT', 'localhost:6379'),
'redis_namespace': os.getenv('REDIS_NAMESPACE', '*'),
'redis_target_endpoint': os.getenv('REDIS_TARGET_ENDPOINT', 'none')}
# Get configuration from environment variables
# Logging configuration
log_level = os.getenv('LOG_LEVEL', 'INFO')
logging.basicConfig(
format='%(name)s %(asctime)s %(levelname)s %(message)s',
level=log_level
)
return config
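# Example environment for a one-shot (non-looping) sync run; values are illustrative only:
#   DRY_RUN=no INTERVAL=0 REDIS_ENDPOINT=localhost:6379 \
#   REDIS_TARGET_ENDPOINT=localhost:6380 REDIS_NAMESPACE='cache:*'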
def compare_keys(namespace, redis_instance, redis_target_instance):
"""
Compare Redis keys between source and target Redis instances
"""
# Get keys for both Redis
source_keys = redis_instance.list_keys(namespace)
target_keys = redis_target_instance.list_keys(namespace)
# Get keys present only in source Redis
new_keys = source_keys.difference(target_keys)
if len(new_keys) > 0:
logging.info('New keys: {0}'.format(new_keys))
else:
logging.info('No new keys found on {0}'.format(redis_instance.get_endpoint()))
return new_keys
def sync():
"""
Copy new keys from source Redis to target Redis
"""
# Initialize configuration
config = get_config()
# Initialize redis instances
redis_instance = RedisInstance(config['redis_endpoint'])
redis_target_instance = RedisInstance(config['redis_target_endpoint'])
while True:
new_keys = compare_keys(config['redis_namespace'], redis_instance, redis_target_instance)
# Do nothing in dry run mode
if config['dry_run'] != "yes":
# Start copy keys only if there in new keys to copy
if len(new_keys) > 0:
logging.info('New key to add on {0}'.format(redis_target_instance.get_endpoint()))
keys_detailed = redis_instance.get_keys(new_keys)
redis_target_instance.set_keys(keys_detailed)
else:
logging.info('All keys already exist in {}'.format(redis_target_instance.get_endpoint()))
# Sleep if interval is set or exit
if config['interval'] > 0:
time.sleep(config['interval'])
else:
break
def monitor():
"""
Monitor Redis instances
:return:
"""
# Initialize configuration
config = get_config()
# Initialize source Redis Instance
redis_instance = RedisInstance(config['redis_endpoint'])
while True:
# List keys
source_keys = redis_instance.list_keys(config['redis_namespace'])
logging.info('Keys for Redis Source {0}: {1}'.format(redis_instance.get_endpoint(), source_keys))
# Initialize target Redis
if config['redis_target_endpoint'] != 'none':
redis_target_instance = RedisInstance(config['redis_target_endpoint'])
target_keys = redis_target_instance.list_keys(config['redis_namespace'])
logging.info('Keys for Redis Target {0}: {1}'.format(redis_target_instance.get_endpoint(), target_keys))
# Sleep if interval is set or exit
if config['interval'] > 0:
time.sleep(config['interval'])
else:
break
``` |
{
"source": "jmlrt/spreadsheet-api",
"score": 3
} |
#### File: spreadsheet-api/spreadsheet_api/worksheet.py
```python
import json
from collections import namedtuple
from aiogoogle import Aiogoogle
from aiogoogle.auth.creds import ServiceAccountCreds
SCOPES = ["https://www.googleapis.com/auth/spreadsheets.readonly"]
class Worksheet:
"""A class representing a worksheet from a Google Spreadsheet"""
def __init__(self, service_account, sid, srange, column_names):
"""Initialize a worksheet from a Google Spreadsheet"""
self.service_account = service_account
self.id = sid
self.range = srange
row_default_values = [""] * (len(column_names) - 1)
self.row_named_tuple = namedtuple(
"Row", column_names, defaults=row_default_values
)
self.rows = {}
async def load_sheet(self, clear=False):
"""Load all worksheet rows"""
if clear:
self.rows.clear()
else:
# TODO check file modification date instead
# https://developers.google.com/drive/api/v3/reference/files
if len(self.rows) > 0:
return
async for row in self.load_sheet_row():
try:
key = len(self.rows) + 1
row = self.row_named_tuple(*row)
self.rows[key] = row
# IndexError handle error for row with 0 columns
# TypeError handle error for row returning None
except (IndexError, TypeError):
continue
async def load_sheet_row(self):
"""Fetch the worksheet values and return line by line"""
with open(self.service_account) as f:
service_account_key = json.load(f)
creds = ServiceAccountCreds(scopes=SCOPES, **service_account_key)
async with Aiogoogle(service_account_creds=creds) as aiogoogle:
service = await aiogoogle.discover("sheets", "v4")
sheet = await aiogoogle.as_service_account(
service.spreadsheets.values.get(spreadsheetId=self.id, range=self.range)
)
for line in sheet["values"]:
yield line
async def get_sheet(self):
"""Return all sheet rows"""
await self.load_sheet()
rows = []
for key in self.rows.keys():
rows.append(await self.get_row(key))
return rows
async def get_row(self, key):
"""Return the row for a given key"""
await self.load_sheet()
# TODO: filter fields starting by "empty_columns"
row = {}
row["id"] = key
# This is using the union operator for merging dict introduced in
# Python 3.9 (https://peps.python.org/pep-0584/)
row |= self.rows[key]._asdict()
return row
``` |
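A sketch of how the class above might be used; the key file, spreadsheet id, range and column names are placeholders, and a real service-account key plus a reachable spreadsheet are required:

```python
import asyncio

ws = Worksheet(
    service_account="service-account.json",   # placeholder path to a key file
    sid="spreadsheet-id-goes-here",           # placeholder spreadsheet id
    srange="Sheet1!A2:C",
    column_names=["name", "email", "notes"],
)

async def show_rows():
    rows = await ws.get_sheet()   # first call loads and caches the worksheet
    first = await ws.get_row(1)   # e.g. {'id': 1, 'name': ..., 'email': ..., 'notes': ...}
    print(len(rows), first)

asyncio.run(show_rows())
```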
{
"source": "jmlrt/toolbox",
"score": 2
} |
#### File: toolbox/python/backup_datas.py
```python
from utils.files import (
create_checksum_file,
compare_files,
decrypt_file,
encrypt_file,
rename_file,
shred_file,
)
from utils.shell import check_command_in_path, check_environment_variable
import argparse
from datetime import datetime
from getpass import getpass
import logging
import os
import subprocess
DATE = datetime.today()
FORMATED_DATE = DATE.strftime("%Y%m%d")
WORK_DIR = f"{os.environ['HOME']}/Downloads"
FILES_TO_RENAME = {
f"{FORMATED_DATE}_pocket_bookmarks.html": "ril_export.html",
f"{FORMATED_DATE}_shaarli_bookmarks.html": f"bookmarks_all_{FORMATED_DATE}_*.html",
f"{FORMATED_DATE}_firefox_bookmarks.html": f"bookmarks.html",
f"{FORMATED_DATE}_firefox_bookmarks.json": f"bookmarks-{DATE.strftime('%Y-%m-%d')}.json",
f"{FORMATED_DATE}_chrome_bookmarks.html": f"bookmarks_{DATE.strftime('%d_%m_%Y')}.html",
f"{FORMATED_DATE}_todoist.zip": f"Todoist backup {DATE.strftime('%Y-%m-%d')}.zip",
}
BITWARDEN_EXPORTS = [
("personal", "json", f"{WORK_DIR}/{FORMATED_DATE}_bitwarden_perso_export.json"),
("personal", "csv", f"{WORK_DIR}/{FORMATED_DATE}_bitwarden_perso_export.csv"),
("organization", "json", f"{WORK_DIR}/{FORMATED_DATE}_bitwarden_org_export.json"),
("organization", "csv", f"{WORK_DIR}/{FORMATED_DATE}_bitwarden_org_export.csv"),
]
def backup_files(files_patterns):
for file in files_patterns.keys():
try:
rename_file(f"{WORK_DIR}/{files_patterns[file]}", f"{WORK_DIR}/{file}")
except FileNotFoundError:
logging.info(f"No file found matching {files_patterns[file]}")
def encrypt_backup(file, passphrase):
encrypted_file = encrypt_file(file, passphrase)
# verify encrypted file
decrypted_file = decrypt_file(encrypted_file, passphrase, f"{encrypted_file}.tmp")
if compare_files(file, decrypted_file) is not True:
raise Exception(f"{decrypted_file} hash doesn't match {file} hash")
# write gpg file digest
create_checksum_file(file)
# shred original and tmp files
shred_file(file)
shred_file(decrypted_file)
def export_bitwarden_secrets():
check_environment_variable("BW_SESSION", "You must be logged in to Bitwarden")
gpg_passphrase = check_environment_variable("GPG_PASSPHRASE")
organization_id = check_environment_variable("BW_ORGANIZATION_ID")
check_command_in_path(
"bw", "Please install Bitwarden CLI: https://bitwarden.com/help/article/cli/"
)
def export_bitwarden_command(vault, password, file_format, file_path):
bw_cmd = [
"bw",
"export",
password,
"--output",
file_path,
"--format",
file_format,
]
if vault == "organization":
bw_cmd.extend(["--organizationid", organization_id])
subprocess.run(bw_cmd)
logging.info(f"Bitwarden {vault} vault exported in {file_format.upper()}")
password = getpass("Enter Bitwarden password: ")
for vault, file_format, file_path in BITWARDEN_EXPORTS:
export_bitwarden_command(vault, password, file_format, file_path)
encrypt_backup(file_path, gpg_passphrase)
def rename_exports():
for file in FILES_TO_RENAME:
try:
rename_file(f"{WORK_DIR}/{FILES_TO_RENAME[file]}", f"{WORK_DIR}/{file}")
except FileNotFoundError:
logging.info(f"No file found matching {FILES_TO_RENAME[file]}")
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("command")
command = parser.parse_args().command
print(command)
if command == "export-secrets":
export_bitwarden_secrets()
elif command == "rename-exports":
rename_exports()
else:
logging.error("Command not found")
if __name__ == "__main__":
main()
```
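The round-trip verification in `encrypt_backup` leans on helpers from `utils.files` that are not shown in this file. Purely as an illustration of the idea (not the project's actual implementation), hash-based comparison and checksum writing could look like this:

```python
import hashlib

def _sha256(path, chunk_size=1 << 20):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

def compare_files(path_a, path_b):
    # True when both files hash to the same value.
    return _sha256(path_a) == _sha256(path_b)

def create_checksum_file(path):
    # Writes "<hexdigest>  <path>" alongside the original file.
    with open(f"{path}.sha256", "w") as f:
        f.write(f"{_sha256(path)}  {path}\n")
```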
#### File: snippets/clirunner_logging/hello.py
```python
import click
import logging
logging.basicConfig(level=logging.INFO)
@click.command()
@click.argument("name")
def hello(name):
logging.info(f"Hello, {name}!")
if __name__ == "__main__":
hello()
```
#### File: snippets/clirunner_pdb/test_hello.py
```python
from click.testing import CliRunner
from hello import hello
def test_hello_world():
runner = CliRunner()
import pytest
pytest.set_trace()
result = runner.invoke(hello, ["world"], catch_exceptions=False)
assert result.exit_code == 0
assert result.output == "Hello, world!\n"
``` |
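The two snippets above pair a click command that reports via `logging` with a `CliRunner` test. Because the logging handler is bound to the interpreter's real stderr when `hello` is imported, the log line may not show up in `result.output`; asserting on the captured log records with pytest's `caplog` fixture is a more robust variant (a sketch, assuming pytest is the test runner):

```python
import logging

from click.testing import CliRunner
from hello import hello

def test_hello_world_logs(caplog):
    caplog.set_level(logging.INFO)
    runner = CliRunner()
    result = runner.invoke(hello, ["world"], catch_exceptions=False)
    assert result.exit_code == 0
    assert "Hello, world!" in caplog.text
```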
{
"source": "jml/rules_haskell-old",
"score": 2
} |
#### File: rules_haskell-old/haskell/def.bzl
```python
"""Valid Haskell source files."""
HASKELL_FILETYPE = [
"hs",
"lhs",
]
# TODO: Once Bazel 0.6.0 is released, specify allowed fields using 'fields'
# parameter.
ghc_output = provider()
def _haskell_toolchain(ctx):
"""Everything we need to build Haskell with GHC."""
# TODO: Assemble this from something like 'repositories', which fetches the
# toolchain and uses that to build things, rather than assuming a system GHC
# is installed.
return struct(
ghc_path = "ghc",
)
def _dirname(path_str):
if '/' in path_str:
return path_str[:path_str.rfind('/')]
return ''
def _get_output_dir(ctx):
return '/'.join([ctx.bin_dir.path, _dirname(ctx.build_file_path)])
def _path_segments(path):
return [s for s in path.split('/') if s not in ('.', '')]
def _declare_output_file(actions, build_dir, src_dir, src_file, extension):
"""Declare the output file of a GHC process.
:param actions: The ``ctx.actions`` object
:param build_dir: The directory BUILD is in.
:param src_dir: The root of the module hierarchy
:param src_file: File within 'src_dir' that's being compiled.
e.g. ``$src_dir/Module/Hierarchy/Name.hs``
:param extension: The extension of the new file, either ``'.o'`` or ``'.hi'``.
:return: A 'File' pointing at ``Module/Hierarchy/Name.(o|hi)``
"""
src_segments = _path_segments(src_file.path)
module_root = _path_segments(build_dir) + _path_segments(src_dir)
if src_segments[:len(module_root)] != module_root:
fail("Expected source file, %s, to be underneath source directory, %s" % (src_file, src_dir))
module_segments = src_segments[len(module_root):]
if len(module_segments) == 0:
fail("No source file left after trimming source directory (src_file=%s, src_dir=%s)" % (src_file, src_dir))
basename = module_segments[-1]
extension_index = basename.rfind('.')
if extension_index == -1:
fail("Somehow got unexpected source filename, %s. Must be one of %s" % (basename, HASKELL_FILETYPE))
new_basename = basename[:extension_index] + extension
new_path = '/'.join(module_segments[:-1] + [new_basename])
return actions.declare_file(new_path)
def _hs_compile(toolchain, name, actions, srcs, deps, build_dir, output_dir, main_file=None, src_dir=None):
"""Compile a single Haskell module.
To be able to use this, a reverse dependency is going to have to either
1. explicitly add the right directory to its search path with '-i', or
2. explicitly include the *.o (and maybe the *.hi?) in its args
Still trying to figure out how to express "the right directory" in Bazel
language. It's a directory such that the path to the *.o file looks like the
Haskell module name. e.g. for module Foo.Bar.Baz, the directory, $dir, has
to be something where $dir/Foo/Bar/Baz.{o,hi} exist.
Not sure if I need to create a directory and use that as the -odir & -hidir
for GHC, or whether I need to calculate the base directory that we're using,
or whether Bazel already has that calculated for me.
I haven't actually tried invoking ghc with *.o files in the args directly.
Or maybe I have but I've forgotten the results.
"""
object_files = []
interface_files = []
for src in srcs:
if src == main_file:
object_files.append(actions.declare_file("Main.o"))
interface_files.append(actions.declare_file("Main.hi"))
else:
object_files.append(_declare_output_file(actions, build_dir, src_dir, src, '.o'))
interface_files.append(_declare_output_file(actions, build_dir, src_dir, src, '.hi'))
output_files = object_files + interface_files
import_directories = []
immediate_hs_objects = depset([])
transitive_hs_objects = depset([])
transitive_hs_interfaces = depset([])
for dep in deps:
# XXX: We get duplicate directories. Should probably de-dupe them.
import_directories.append('-i%s' % dep[ghc_output].import_directory)
immediate_hs_objects += dep[ghc_output].hs_objects
transitive_hs_objects += dep[ghc_output].transitive_hs_objects
transitive_hs_interfaces += dep[ghc_output].transitive_hs_interfaces
#print('srcs = %s, deps = %s, dirs = %s --> %s' % (srcs, dep_files, import_directories, output_files))
ghc_args = [
'-c', # So we just compile things, no linking
'-i', # Empty the import directory list
'-hide-all-packages', # Don't let the global package database infect our sacred purity
# TODO: Rather than hard-coding a dependency on the base package,
# allow packages to be specified as dependencies
'-package base', # XXX: Infect our sacred purity
] + import_directories + [
'-odir', output_dir,
'-hidir', output_dir,
] + [src.path for src in srcs]
# XXX: stack also includes
# -ddump-hi
# -ddump-to-file
# -optP-include
# -optP.stack-work/.../cabal_macros.h
#
# - various output dir controls
# - various package db controls
#
# Also what about...
# - optimizations
# - warnings
# - concurrent builds (-j4)
# - -threaded (I guess only relevant for executables)
actions.run(
inputs = srcs + (immediate_hs_objects + transitive_hs_interfaces).to_list(),
outputs = output_files,
executable = toolchain.ghc_path,
arguments = ghc_args,
progress_message = ("Compiling Haskell modules %s" % (srcs,)),
mnemonic = 'HsCompile',
# TODO: Figure out how we can do without this.
use_default_shell_env = True,
)
return ghc_output(
files = depset(output_files),
hs_objects = depset(object_files),
hs_interfaces = depset(interface_files),
transitive_hs_objects = transitive_hs_objects + depset(object_files),
transitive_hs_interfaces = transitive_hs_interfaces + depset(interface_files),
# XXX: Would really like to have a better answer than this.
import_directory = output_dir,
)
def _hs_module_impl(ctx):
"""A single Haskell module.
At the moment this only works with a single file in srcs.
Assumes that every source file is named after the module name that it
contains (with dots replaced by directory separators). For example, the
module Data.Person would be in the file Data/Person.hs on Unix/Linux/Mac, or
Data\Person.hs on Windows.
See https://downloads.haskell.org/~ghc/latest/docs/html/users_guide/using.html#getting-started-compiling-programs
"""
toolchain = _haskell_toolchain(ctx)
return _hs_compile(
toolchain, ctx.label.name, ctx.actions, ctx.files.srcs, ctx.attr.deps,
_dirname(ctx.build_file_path), _get_output_dir(ctx), src_dir=ctx.attr.src_dir)
def _hs_binary_impl(ctx):
"""A Haskell executable."""
toolchain = _haskell_toolchain(ctx)
lib_self = _hs_compile(
toolchain, ctx.label.name, ctx.actions, ctx.files.srcs, ctx.attr.deps,
_dirname(ctx.build_file_path), _get_output_dir(ctx),
main_file=ctx.file.main_is, src_dir=ctx.attr.src_dir)
# XXX: I guess we have to use ghc to link executables.
ctx.actions.run(
inputs = lib_self.transitive_hs_objects + ctx.files.data,
outputs = [ctx.outputs.executable],
executable = toolchain.ghc_path,
arguments = [
"-o", ctx.outputs.executable.path,
] + [obj.path for obj in lib_self.transitive_hs_objects],
use_default_shell_env = True,
)
def _hs_test_impl(ctx):
return _hs_binary_impl(ctx)
_hs_attrs = {
"srcs": attr.label_list(
allow_files = HASKELL_FILETYPE,
),
"deps": attr.label_list(
allow_files = False,
),
"data": attr.label_list(
allow_files = True,
),
"src_dir": attr.string(
doc = 'The root of the module hierarchy',
),
}
_hs_binary_attrs = {
"main_is": attr.label(
allow_single_file = HASKELL_FILETYPE,
),
}
hs_library = rule(
attrs = _hs_attrs,
implementation = _hs_module_impl,
)
hs_binary = rule(
attrs = dict(_hs_attrs.items() + _hs_binary_attrs.items()),
executable = True,
implementation = _hs_binary_impl,
)
hs_test = rule(
attrs = dict(_hs_attrs.items() + _hs_binary_attrs.items()),
executable = True,
implementation = _hs_test_impl,
test = True,
)
```
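A hypothetical BUILD file using these rules might look as follows; the target names, file names and load label are invented for illustration, and only the attributes defined above (`srcs`, `deps`, `src_dir`, `main_is`) are used:

```python
load("//haskell:def.bzl", "hs_binary", "hs_library")

hs_library(
    name = "person",
    srcs = ["src/Data/Person.hs"],
    src_dir = "src",
)

hs_binary(
    name = "hello",
    srcs = ["src/Main.hs"],
    main_is = "src/Main.hs",
    src_dir = "src",
    deps = [":person"],
)
```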
#### File: rules_haskell-old/tests/bazel_tests.bzl
```python
TOOLCHAINS = []
_bazelrc = """
startup --batch
build --verbose_failures
build --sandbox_debug
build --test_output=errors
build --spawn_strategy=standalone
build --genrule_strategy=standalone
test --test_strategy=standalone
build:isolate --fetch=False
"""
def _md5_sum_impl(ctx):
out = ctx.new_file(ctx.label.name+".md5")
ctx.actions.run_shell(
inputs = ctx.files.srcs,
outputs = [out],
command = "md5sum %s > %s" % (' '.join([src.path for src in ctx.files.srcs]), out.path),
)
return struct(files=depset([out]))
md5_sum = rule(
_md5_sum_impl,
attrs = {
"srcs": attr.label_list(allow_files = True),
},
)
"""Output the MD5 sums of all the given files."""
def _bazel_test_script_impl(ctx):
script_content = ''
workspace_content = ''
subdir = ""
if ctx.attr.subdir:
subdir = ctx.attr.subdir + "/"
# Build the bazel startup args
bazelrc = ctx.new_file(subdir + ".bazelrc")
args = ["--bazelrc={0}".format(bazelrc.basename), "--nomaster_blazerc"]
# Add the command and any command specific args
args += [ctx.attr.command]
if ctx.attr.config:
args += ["--config", ctx.attr.config]
for ext in ctx.attr.externals:
root = ext.label.workspace_root
_,_,ws = root.rpartition("/")
workspace_content += 'local_repository(name = "{0}", path = "{1}/{2}")\n'.format(ws, ctx.attr._execroot.path, root)
# finalise the workspace file
if ctx.attr.workspace:
workspace_content += ctx.attr.workspace
workspace_file = ctx.new_file(subdir + "WORKSPACE")
ctx.file_action(output=workspace_file, content=workspace_content)
# finalise the script
args += ctx.attr.args + [ctx.attr.target]
# TODO(jml): Maybe factor this out into separate function? Try to use
# '\n'.join() rather than appending lines quadratically.
script_content += 'BASE=$(pwd)\n'
script_content += 'cd {0}\n'.format(ctx.label.package)
script_content += 'PACKAGE=$(pwd)\n'
if ctx.attr.subdir:
script_content += 'cd {0}\n'.format(ctx.attr.subdir)
script_content += 'cp BUILD.in BUILD.bazel\n'
script_content += 'WORKSPACE=$(pwd)\n'
if ctx.attr.prepare:
script_content += ctx.attr.prepare
script_content += 'cd $WORKSPACE\n'
script_content += 'echo {0} {1}\n'.format(ctx.attr._execroot.bazel, " ".join(args))
script_content += '{0} {1}\n'.format(ctx.attr._execroot.bazel, " ".join(args))
script_content += 'result=$?\n'
if ctx.attr.check:
script_content += ctx.attr.check
script_content += "exit $result\n"
script_file = ctx.new_file(ctx.label.name+".bash")
ctx.file_action(output=script_file, executable=True, content=script_content)
# finalise the bazel options
ctx.file_action(output=bazelrc, content=_bazelrc)
return struct(
files = depset([script_file]),
runfiles = ctx.runfiles([workspace_file, bazelrc])
)
_bazel_test_script = rule(
_bazel_test_script_impl,
attrs = {
"command": attr.string(
mandatory = True,
values = [
"build",
"test",
"coverage",
"run",
],
),
"args": attr.string_list(default = []),
"subdir": attr.string(),
"target": attr.string(mandatory = True),
"externals": attr.label_list(allow_files = True),
"data": attr.label_list(allow_files = True),
"workspace": attr.string(),
"prepare": attr.string(),
"check": attr.string(),
"config": attr.string(default = "isolate"),
"_execroot": attr.label(default = Label("@test_environment//:execroot")),
},
toolchains = TOOLCHAINS,
)
def bazel_test(name, command = None, args=None, subdir = None, target = None, tags=[], externals=[], data=[], workspace="", prepare="", check="", config=None):
"""Test a Bazel rule.
Runs the `bazel` executable.
Args:
name: The name of the test
command: The Bazel subcommand to run (e.g. 'run', 'build', 'test')
args: A list of arguments to pass to Bazel
subdir: ???
target: ???
tags: ???
externals: ???
data: ???
workspace: Extra content to add to the WORKSPACE file used by tests.
prepare: ???
check: ???
config: ???
"""
script_name = name+"_script"
_bazel_test_script(
name = script_name,
command = command,
args = args,
subdir = subdir,
target = target,
externals = externals,
workspace = workspace,
prepare = prepare,
check = check,
config = config,
)
native.sh_test(
name = name,
size = "large",
timeout = "moderate",
srcs = [script_name],
tags = ["local", "bazel"] + tags,
data = native.glob(["**/*"]) + externals + data,
)
def _test_environment_impl(ctx):
execroot, _, ws = str(ctx.path(".")).rpartition("/external/")
bazel = ""
if "BAZEL" in ctx.os.environ:
bazel = ctx.os.environ["BAZEL"]
elif "BAZEL_VERSION" in ctx.os.environ:
home = ctx.os.environ["HOME"]
bazel = home + "/.bazel/{0}/bin/bazel".format(ctx.os.environ["BAZEL_VERSION"])
if bazel == "" or not ctx.path(bazel).exists:
bazel = ctx.which("bazel")
if ctx.name != ws:
fail("workspace did not match, expected:", ctx.name, "got:", ws)
ctx.file("WORKSPACE", """
workspace(name = "%s")
""" % ctx.name)
ctx.file("BUILD", """
load("@io_jml_rules_haskell//tests:bazel_tests.bzl", "execroot")
execroot(
name = "execroot",
path = "{0}",
bazel = "{1}",
visibility = ["//visibility:public"],
)
""".format(execroot, bazel))
_test_environment = repository_rule(
attrs = {},
environ = [
"BAZEL",
"BAZEL_VERSION",
"HOME",
],
implementation = _test_environment_impl,
)
def test_environment():
_test_environment(name="test_environment")
def _execroot_impl(ctx):
return struct(
path = ctx.attr.path,
bazel = ctx.attr.bazel,
)
execroot = rule(
_execroot_impl,
attrs = {
"path": attr.string(mandatory = True),
"bazel": attr.string(mandatory = True),
},
)
``` |
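A hypothetical use of the `bazel_test` macro from a test package BUILD file; the names and the `check` script are illustrative. `check` is simply appended to the generated shell script, so it can inspect the build outputs and adjust `result`:

```python
load(":bazel_tests.bzl", "bazel_test")

bazel_test(
    name = "hello_build_test",
    command = "build",
    target = "//:hello",
    check = """
if [ ! -e bazel-bin/hello ]; then
  echo "hello binary was not produced" >&2
  result=1
fi
""",
)
```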
{
"source": "jml/testtools",
"score": 2
} |
#### File: testtools/twistedsupport/_spinner.py
```python
__all__ = [
'NoResultError',
'not_reentrant',
'ReentryError',
'Spinner',
'StaleJunkError',
'TimeoutError',
'trap_unhandled_errors',
]
from fixtures import Fixture
import signal
from ._deferreddebug import DebugTwisted
from twisted.internet import defer
from twisted.internet.interfaces import IReactorThreads
from twisted.python.failure import Failure
from twisted.python.util import mergeFunctionMetadata
class ReentryError(Exception):
"""Raised when we try to re-enter a function that forbids it."""
def __init__(self, function):
Exception.__init__(self,
"%r in not re-entrant but was called within a call to itself."
% (function,))
def not_reentrant(function, _calls={}):
"""Decorates a function as not being re-entrant.
The decorated function will raise an error if called from within itself.
"""
def decorated(*args, **kwargs):
if _calls.get(function, False):
raise ReentryError(function)
_calls[function] = True
try:
return function(*args, **kwargs)
finally:
_calls[function] = False
return mergeFunctionMetadata(function, decorated)
def trap_unhandled_errors(function, *args, **kwargs):
"""Run a function, trapping any unhandled errors in Deferreds.
Assumes that 'function' will have handled any errors in Deferreds by the
time it is complete. This is almost never true of any Twisted code, since
you can never tell when someone has added an errback to a Deferred.
If 'function' raises, then don't bother doing any unhandled error
jiggery-pokery, since something horrible has probably happened anyway.
:return: A tuple of '(result, error)', where 'result' is the value
returned by 'function' and 'error' is a list of 'defer.DebugInfo'
objects that have unhandled errors in Deferreds.
"""
real_DebugInfo = defer.DebugInfo
debug_infos = []
def DebugInfo():
info = real_DebugInfo()
debug_infos.append(info)
return info
defer.DebugInfo = DebugInfo
try:
result = function(*args, **kwargs)
finally:
defer.DebugInfo = real_DebugInfo
errors = []
for info in debug_infos:
if info.failResult is not None:
errors.append(info)
# Disable the destructor that logs to error. We are already
# catching the error here.
info.__del__ = lambda: None
return result, errors
class TimeoutError(Exception):
"""Raised when run_in_reactor takes too long to run a function."""
def __init__(self, function, timeout):
Exception.__init__(self,
"%r took longer than %s seconds" % (function, timeout))
class NoResultError(Exception):
"""Raised when the reactor has stopped but we don't have any result."""
def __init__(self):
Exception.__init__(self,
"Tried to get test's result from Deferred when no result is "
"available. Probably means we received SIGINT or similar.")
class StaleJunkError(Exception):
"""Raised when there's junk in the spinner from a previous run."""
def __init__(self, junk):
Exception.__init__(self,
"There was junk in the spinner from a previous run. "
"Use clear_junk() to clear it out: %r" % (junk,))
class Spinner(object):
"""Spin the reactor until a function is done.
This class emulates the behaviour of twisted.trial in that it grotesquely
and horribly spins the Twisted reactor while a function is running, and
then kills the reactor when that function is complete and all the
callbacks in its chains are done.
"""
_UNSET = object()
# Signals that we save and restore for each spin.
_PRESERVED_SIGNALS = [
'SIGINT',
'SIGTERM',
'SIGCHLD',
]
# There are many APIs within Twisted itself where a Deferred fires but
# leaves cleanup work scheduled for the reactor to do. Arguably, many of
# these are bugs. As such, we provide a facility to iterate the reactor
# event loop a number of times after every call, in order to shake out
# these buggy-but-commonplace events. The default is 0, because that is
# the ideal, and it actually works for many cases.
_OBLIGATORY_REACTOR_ITERATIONS = 0
def __init__(self, reactor, debug=False):
"""Construct a Spinner.
:param reactor: A Twisted reactor.
:param debug: Whether or not to enable Twisted's debugging. Defaults
to False.
"""
self._reactor = reactor
self._timeout_call = None
self._success = self._UNSET
self._failure = self._UNSET
self._saved_signals = []
self._junk = []
self._debug = debug
self._spinning = False
def _cancel_timeout(self):
if self._timeout_call:
self._timeout_call.cancel()
def _get_result(self):
if self._failure is not self._UNSET:
self._failure.raiseException()
if self._success is not self._UNSET:
return self._success
raise NoResultError()
def _got_failure(self, result):
self._cancel_timeout()
self._failure = result
def _got_success(self, result):
self._cancel_timeout()
self._success = result
def _fake_stop(self):
"""Use to replace ``reactor.stop`` while running a test.
        Calling ``reactor.stop`` makes it impossible to re-start the reactor.
Since the default signal handlers for TERM, BREAK and INT all call
``reactor.stop()``, we patch it over with ``reactor.crash()``
Spinner never calls this method.
"""
self._reactor.crash()
def _stop_reactor(self, ignored=None):
"""Stop the reactor!"""
# XXX: Would like to emit a warning when called when *not* spinning.
if self._spinning:
self._reactor.crash()
self._spinning = False
def _timed_out(self, function, timeout):
e = TimeoutError(function, timeout)
self._failure = Failure(e)
self._stop_reactor()
def _clean(self):
"""Clean up any junk in the reactor.
Will always iterate the reactor a number of times equal to
``Spinner._OBLIGATORY_REACTOR_ITERATIONS``. This is to work around
bugs in various Twisted APIs where a Deferred fires but still leaves
work (e.g. cancelling a call, actually closing a connection) for the
reactor to do.
"""
for i in range(self._OBLIGATORY_REACTOR_ITERATIONS):
self._reactor.iterate(0)
junk = []
for delayed_call in self._reactor.getDelayedCalls():
delayed_call.cancel()
junk.append(delayed_call)
for selectable in self._reactor.removeAll():
# Twisted sends a 'KILL' signal to selectables that provide
# IProcessTransport. Since only _dumbwin32proc processes do this,
# we aren't going to bother.
junk.append(selectable)
if IReactorThreads.providedBy(self._reactor):
if self._reactor.threadpool is not None:
self._reactor._stopThreadPool()
self._junk.extend(junk)
return junk
def clear_junk(self):
"""Clear out our recorded junk.
:return: Whatever junk was there before.
"""
junk = self._junk
self._junk = []
return junk
def get_junk(self):
"""Return any junk that has been found on the reactor."""
return self._junk
def _save_signals(self):
available_signals = [
getattr(signal, name, None) for name in self._PRESERVED_SIGNALS]
self._saved_signals = [
(sig, signal.getsignal(sig)) for sig in available_signals if sig]
def _restore_signals(self):
for sig, hdlr in self._saved_signals:
signal.signal(sig, hdlr)
self._saved_signals = []
@not_reentrant
def run(self, timeout, function, *args, **kwargs):
"""Run 'function' in a reactor.
If 'function' returns a Deferred, the reactor will keep spinning until
the Deferred fires and its chain completes or until the timeout is
reached -- whichever comes first.
:raise TimeoutError: If 'timeout' is reached before the Deferred
returned by 'function' has completed its callback chain.
:raise NoResultError: If the reactor is somehow interrupted before
the Deferred returned by 'function' has completed its callback
chain.
:raise StaleJunkError: If there's junk in the spinner from a previous
run.
:return: Whatever is at the end of the function's callback chain. If
it's an error, then raise that.
"""
if self._debug:
debug_settings = DebugTwisted(True)
else:
debug_settings = Fixture()
with debug_settings:
junk = self.get_junk()
if junk:
raise StaleJunkError(junk)
self._save_signals()
self._timeout_call = self._reactor.callLater(
timeout, self._timed_out, function, timeout)
# Calling 'stop' on the reactor will make it impossible to
# re-start the reactor. Since the default signal handlers for
# TERM, BREAK and INT all call reactor.stop(), we'll patch it over
# with crash. XXX: It might be a better idea to either install
# custom signal handlers or to override the methods that are
# Twisted's signal handlers.
real_stop, self._reactor.stop = self._reactor.stop, self._fake_stop
def run_function():
d = defer.maybeDeferred(function, *args, **kwargs)
d.addCallbacks(self._got_success, self._got_failure)
d.addBoth(self._stop_reactor)
try:
self._reactor.callWhenRunning(run_function)
self._spinning = True
self._reactor.run()
finally:
self._reactor.stop = real_stop
self._restore_signals()
try:
return self._get_result()
finally:
self._clean()
``` |
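A sketch of driving `Spinner` directly with the global reactor (in practice it is wrapped by higher-level test helpers; this assumes the reactor has not already been run in the process):

```python
from twisted.internet import reactor, task

def delayed_value():
    # A Deferred that fires with 42 after a tenth of a second.
    return task.deferLater(reactor, 0.1, lambda: 42)

spinner = Spinner(reactor)
result = spinner.run(5.0, delayed_value)   # spins the reactor for at most 5 seconds
assert result == 42
print(spinner.get_junk())                  # leftover delayed calls/selectables, if any
```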
{
"source": "jml/trial-eliot",
"score": 2
} |
#### File: trial-eliot/eliotreporter/_reporter.py
```python
import json
from eliot import add_destination
from pyrsistent import PClass, field
from twisted.plugin import IPlugin
from twisted.trial.itrial import IReporter
from zope.interface import implementer
from ._types import (
ERROR,
FAILURE,
SKIP,
TEST,
UNEXPECTED_SUCCESS,
make_error_message,
make_expected_failure_message,
)
class InvalidStateError(Exception):
"""
Raised when someone attempts to put an EliotReporter into an invalid state.
"""
# TODO: The Trial base reporter does some sort of warning capturing. It would
# be good to do something similar here so that *everything* that the test
# emits is captured in a single, coherent Eliot log.
# TODO: Ideally we'd also capture stdout & stderr and encode those as Eliot
# messages. Probably can't be done at the reporter level, but we can provide
# functions for tests to be able to use that.
# TODO: Should setUp, tearDown, the test itself and cleanup also be Eliot
# actions? If so, that's a thing for the base test case rather than the
# reporter.
# TODO: Currently Eliot has support for capturing the eliot logs and dumping
# them to the Twisted log, which Trial stores as _trial_temp/test.log. If
# we're using something like this, then we actually want all of those log
# messages to be included as part of the test action, included in the same
# log.
# TODO: "The value is in the output". No one is going to care about this
# unless there's something that consumes the output and displays the results
# as something that matters to humans.
# TODO: Currently the action of a test "succeeds" whether or not the test
# passes. It's unclear whether this is the right behaviour. Factors:
#
# - when reading eliot output, it makes it harder to see whether a test
# passed or failed.
# - tests can have multiple errors, if we made the action fail on test failure,
# then we'd have to aggregate these errors somehow.
# - aggregating the errors would mean that we either would not see them at all
# until the test completes, or that we would log duplicate actions
@implementer(IReporter)
class EliotReporter(object):
def __init__(self, stream, tbformat='default', realtime=False,
publisher=None, logger=None):
# TODO: Trial has a pretty confusing set of expectations for
# reporters. In particular, it's not clear what it needs to construct
# a reporter. It's also not clear what it expects as public
# properties. The IReporter interface and the tests for the reporter
# interface cover somewhat different things.
self._stream = stream
self.tbformat = tbformat
self.shouldStop = False
self.testsRun = 0
add_destination(self._write_message)
self._current_test = None
self._successful = True
self._logger = logger
def _write_message(self, message):
self._stream.write(json.dumps(message) + "\n")
def _ensure_test_running(self, expected_test):
current = self._current_test
if current and current.id() != expected_test.id():
raise InvalidStateError(
'Expected {} to be running, was {} instead'.format(
expected_test, self._current_test))
def startTest(self, method):
"""
Report the beginning of a run of a single test method.
@param method: an object that is adaptable to ITestMethod
"""
if self._current_test:
raise InvalidStateError(
'Trying to start {}, but {} already started'.format(
method, self._current_test))
self._current_test = method
self._action = TEST(test=method, logger=self._logger)
# TODO: This isn't using Eliot the way it was intended. Probably a
# better way is to have a test case (or a testtools-style TestCase
# runner!) that does all of this.
self._action.__enter__()
def stopTest(self, method):
"""
Report the status of a single test method
@param method: an object that is adaptable to ITestMethod
"""
if not self._current_test:
raise InvalidStateError(
'Trying to stop {} without starting it first'.format(method))
self._ensure_test_running(method)
self._current_test = None
self._action.__exit__(None, None, None)
def addSuccess(self, test):
"""
Record that test passed.
"""
self._ensure_test_running(test)
def addError(self, test, error):
"""
Record that a test has raised an unexpected exception.
"""
self._ensure_test_running(test)
make_error_message(ERROR, error).write(self._logger)
self._successful = False
def addFailure(self, test, failure):
"""
Record that a test has failed with the given failure.
"""
self._ensure_test_running(test)
make_error_message(FAILURE, failure).write(self._logger)
self._successful = False
def addExpectedFailure(self, test, failure, todo):
"""
Record that the given test failed, and was expected to do so.
"""
self._ensure_test_running(test)
make_expected_failure_message(todo, failure).write(self._logger)
def addUnexpectedSuccess(self, test, todo):
"""
        Record that the given test succeeded unexpectedly (it was expected to fail).
"""
self._ensure_test_running(test)
UNEXPECTED_SUCCESS(todo=todo).write(self._logger)
def addSkip(self, test, reason):
"""
Record that a test has been skipped for the given reason.
"""
self._ensure_test_running(test)
SKIP(reason=reason).write(self._logger)
def wasSuccessful(self):
return self._successful
def stop(self):
self.shouldStop = True
def done(self):
"""
Called when the test run is complete.
"""
@implementer(IReporter, IPlugin)
class TrialReporter(PClass):
name = field()
module = field()
description = field()
longOpt = field()
shortOpt = field()
klass = field()
eliot_plugin = TrialReporter(
name="Eliot reporter",
description="Output all test results as eliot logs",
longOpt="eliot",
shortOpt=None,
module="eliotreporter",
klass="EliotReporter",
)
``` |
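Since `_write_message` serializes each Eliot message as one JSON object per line, a run's output can be replayed afterwards. A small sketch; the file name is hypothetical, and `action_type`/`message_type`/`action_status` are the usual Eliot fields:

```python
import json

with open("eliot-test-run.log") as f:
    messages = [json.loads(line) for line in f if line.strip()]

for message in messages:
    kind = message.get("action_type") or message.get("message_type")
    print(kind, message.get("action_status", ""), sep="\t")
```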
{
"source": "jml/txapply",
"score": 2
} |
#### File: jml/txapply/setup.py
```python
import codecs
import os
import versioneer
from setuptools import find_packages, setup
def read(*parts):
"""
Build an absolute path from C{parts} and and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, *parts), 'r', 'utf-8') as f:
return f.read()
if __name__ == "__main__":
setup(
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules'
],
description="Call functions with Deferred arguments",
long_description=read('README.md'),
keywords="twisted",
license="MIT",
name="txapply",
packages=find_packages(),
url="https://github.com/jml/txapply",
maintainer='<NAME>',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
install_requires=[
'Twisted',
],
extras_require={
'tests': [
'testtools>=1.9.0',
'hypothesis>=1.18.1',
],
},
)
```
#### File: txapply/txapply/_combinators.py
```python
from functools import wraps
def nop(*args, **kwargs):
"""
Do nothing.
"""
def transparent(value, function, *args, **kwargs):
"""
Invoke ``function`` with ``value`` and other arguments, return ``value``.
Use this to add a function to a callback chain without disrupting the
value of the callback chain::
d = defer.succeed(42)
d.addCallback(transparent, print)
d.addCallback(lambda x: x == 42)
"""
function(value, *args, **kwargs)
return value
def transparently(function):
"""
Wrap ``function`` so that it is called, but that the first parameter is
returned.
"""
@wraps(function)
def decorated(value, *args, **kwargs):
function(value, *args, **kwargs)
return value
return decorated
def ignore(value, function, *args, **kwargs):
"""
Invoke ``function`` with ``*args`` and ``*kwargs``.
Use this to add a function to a callback chain that just ignores the
previous value in the chain::
>>> d = defer.succeed(42)
>>> d.addCallback(ignore, print, 37)
37
"""
return function(*args, **kwargs)
def ignored(function):
"""
Wrap ``function`` so that it discards its first parameter.
"""
@wraps(function)
def decorated(value, *args, **kwargs):
return function(*args, **kwargs)
return decorated
def combine(value, function, *args, **kwargs):
"""
Call ``function``, return its result and ``value`` as a tuple.
``function`` is invoked with ``value``, ``*args`` and ``**kwargs`` and
then we return a 2-tuple of whatever ``function`` returned and ``value``.
Use this to add a function to a callback chain that combines its return
value with the previous value::
>>> d = defer.succeed(42)
        >>> d.addCallback(combine, lambda x: 37)
>>> d.addCallback(print)
(37, 42)
"""
y = function(value, *args, **kwargs)
return (y, value)
def combined(function):
"""
Wrap ``function`` so that it returns a tuple of its return value and its
first parameter.
"""
@wraps(function)
def decorated(value, *args, **kwargs):
y = function(value, *args, **kwargs)
return (y, value)
return decorated
```
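The docstrings above show the plain `transparent`/`ignore`/`combine` forms; the decorator variants compose the same way. A sketch, assuming the combinators are importable from `txapply._combinators`:

```python
from twisted.internet import defer

from txapply._combinators import ignored, transparently

@transparently
def record(value, log):
    log.append(value)           # return value is discarded, 42 passes through

@ignored
def make_constant():
    return "done"               # previous chain value is ignored

log = []
d = defer.succeed(42)
d.addCallback(record, log)      # log == [42], chain value is still 42
d.addCallback(make_constant)    # chain value becomes "done"
d.addCallback(log.append)
assert log == [42, "done"]
```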
#### File: txapply/tests/test_combinators.py
```python
from hypothesis import given
from testtools import TestCase
from testtools.matchers import Equals, Is
from testtools.twistedsupport import succeeded
from twisted.internet.defer import succeed
from .._combinators import (
combine, combined,
ignore, ignored,
nop,
transparent, transparently,
)
from .strategies import any_value, arguments, keyword_arguments
class TestNop(TestCase):
"""
Tests for ``nop``.
"""
@given(args=arguments(), kwargs=keyword_arguments())
def test_none(self, args, kwargs):
"""
``nop`` always returns ``None``, regardless of what you call it with.
"""
self.assertThat(nop(*args, **kwargs), Is(None))
class TestTransparent(TestCase):
"""
Tests for ``transparent``.
"""
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
def test_calls_transparent_callback(self, first, second, args, kwargs):
"""
Even though the return value of a "transparent" callback is ignored,
it *is* invoked and is passed all the arguments and keyword arguments
it would have if it were a normal callback.
"""
log = []
def callback(value, *a, **kw):
log.append((value, a, kw))
return second
d = succeed(first)
d.addCallback(transparent, callback, *args, **kwargs)
self.assertThat(log, Equals([(first, args, kwargs)]))
self.assertThat(d, succeeded(Is(first)))
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
def test_decorated(self, first, second, args, kwargs):
log = []
def callback(value, *a, **kw):
log.append((value, a, kw))
return second
d = succeed(first)
d.addCallback(transparently(callback), *args, **kwargs)
self.assertThat(log, Equals([(first, args, kwargs)]))
self.assertThat(d, succeeded(Is(first)))
class TestIgnore(TestCase):
"""
Tests for ``ignore``.
"""
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
    def test_ignores_callback_value(self, first, second, args, kwargs):
"""
Callbacks added with ``ignore`` don't get passed the return value of
the previous callback.
"""
log = []
def callback(*a, **kw):
log.append((a, kw))
return second
d = succeed(first)
d.addCallback(ignore, callback, *args, **kwargs)
self.assertThat(d, succeeded(Is(second)))
self.assertThat(log, Equals([(args, kwargs)]))
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
    def test_decorator(self, first, second, args, kwargs):
log = []
def callback(*a, **kw):
log.append((a, kw))
return second
d = succeed(first)
d.addCallback(ignored(callback), *args, **kwargs)
self.assertThat(d, succeeded(Is(second)))
self.assertThat(log, Equals([(args, kwargs)]))
class TestCombine(TestCase):
"""
Tests for ``combine``.
"""
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
    def test_combines_results(self, first, second, args, kwargs):
"""
The return value of a callback added with ``combine`` is a tuple made
up of the *previous* callback value and whatever the new callback
returns.
"""
log = []
def callback(value, *a, **kw):
log.append((value, a, kw))
return second
d = succeed(first)
d.addCallback(combine, callback, *args, **kwargs)
self.assertThat(d, succeeded(Equals((second, first))))
self.assertThat(log, Equals([(first, args, kwargs)]))
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
    def test_decorator(self, first, second, args, kwargs):
log = []
def callback(value, *a, **kw):
log.append((value, a, kw))
return second
d = succeed(first)
d.addCallback(combined(callback), *args, **kwargs)
self.assertThat(d, succeeded(Equals((second, first))))
self.assertThat(log, Equals([(first, args, kwargs)]))
class TestComposition(TestCase):
"""
Make sure all this stuff can be used together.
Not so much describing desired behavior as documenting actual behavior.
"""
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
def test_transparent_ignore(self, first, second, args, kwargs):
log = []
def callback(*a, **kw):
log.append((a, kw))
return second
d = succeed(first)
d.addCallback(transparent, ignore, callback, *args, **kwargs)
self.assertThat(d, succeeded(Is(first)))
self.assertThat(log, Equals([(args, kwargs)]))
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
def test_transparent_combine(self, first, second, args, kwargs):
log = []
def callback(value, *a, **kw):
log.append((value, a, kw))
return second
d = succeed(first)
d.addCallback(transparent, combine, callback, *args, **kwargs)
self.assertThat(d, succeeded(Is(first)))
self.assertThat(log, Equals([(first, args, kwargs)]))
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
def test_combine_transparent(self, first, second, args, kwargs):
log = []
def callback(value, *a, **kw):
log.append((value, a, kw))
return second
d = succeed(first)
d.addCallback(combine, transparent, callback, *args, **kwargs)
self.assertThat(d, succeeded(Equals((first, first))))
self.assertThat(log, Equals([(first, args, kwargs)]))
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
def test_combine_ignore(self, first, second, args, kwargs):
log = []
def callback(*a, **kw):
log.append((a, kw))
return second
d = succeed(first)
d.addCallback(combine, ignore, callback, *args, **kwargs)
self.assertThat(d, succeeded(Equals((second, first))))
self.assertThat(log, Equals([(args, kwargs)]))
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
def test_combine_combine(self, first, second, args, kwargs):
log = []
def callback(value, *a, **kw):
log.append((value, a, kw))
return second
d = succeed(first)
d.addCallback(combine, combine, callback, *args, **kwargs)
self.assertThat(d, succeeded(Equals(((second, first), first))))
self.assertThat(log, Equals([(first, args, kwargs)]))
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
def test_transparent_transparent(self, first, second, args, kwargs):
log = []
def callback(value, *a, **kw):
log.append((value, a, kw))
return second
d = succeed(first)
d.addCallback(transparent, transparent, callback, *args, **kwargs)
self.assertThat(d, succeeded(Is(first)))
self.assertThat(log, Equals([(first, args, kwargs)]))
class TestDecoratorComposition(TestCase):
"""
Make sure all the decorators can be used together.
Not so much describing desired behavior as documenting actual behavior.
"""
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
def test_transparent_ignore(self, first, second, args, kwargs):
log = []
def callback(*a, **kw):
log.append((a, kw))
return second
d = succeed(first)
d.addCallback(transparently(ignored(callback)), *args, **kwargs)
self.assertThat(d, succeeded(Is(first)))
self.assertThat(log, Equals([(args, kwargs)]))
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
def test_transparent_combine(self, first, second, args, kwargs):
log = []
def callback(value, *a, **kw):
log.append((value, a, kw))
return second
d = succeed(first)
d.addCallback(transparently(combined(callback)), *args, **kwargs)
self.assertThat(d, succeeded(Is(first)))
self.assertThat(log, Equals([(first, args, kwargs)]))
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
def test_combine_transparent(self, first, second, args, kwargs):
log = []
def callback(value, *a, **kw):
log.append((value, a, kw))
return second
d = succeed(first)
d.addCallback(combined(transparently(callback)), *args, **kwargs)
self.assertThat(d, succeeded(Equals((first, first))))
self.assertThat(log, Equals([(first, args, kwargs)]))
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
def test_combine_ignore(self, first, second, args, kwargs):
log = []
def callback(*a, **kw):
log.append((a, kw))
return second
d = succeed(first)
d.addCallback(combined(ignored(callback)), *args, **kwargs)
self.assertThat(d, succeeded(Equals((second, first))))
self.assertThat(log, Equals([(args, kwargs)]))
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
def test_combine_combine(self, first, second, args, kwargs):
log = []
def callback(value, *a, **kw):
log.append((value, a, kw))
return second
d = succeed(first)
d.addCallback(combined(combined(callback)), *args, **kwargs)
self.assertThat(d, succeeded(Equals(((second, first), first))))
self.assertThat(log, Equals([(first, args, kwargs)]))
@given(first=any_value(), second=any_value(), args=arguments(),
kwargs=keyword_arguments())
def test_transparent_transparent(self, first, second, args, kwargs):
log = []
def callback(value, *a, **kw):
log.append((value, a, kw))
return second
d = succeed(first)
d.addCallback(transparently(transparently(callback)), *args, **kwargs)
self.assertThat(d, succeeded(Is(first)))
self.assertThat(log, Equals([(first, args, kwargs)]))
``` |
{
"source": "jmluang/text-to-ascii",
"score": 3
} |
#### File: jmluang/text-to-ascii/text-to-ascii-generator.py
```python
import os
try:
import pyfiglet
from termcolor import colored
import colorama
from PIL import Image
from PIL import ImageDraw
except ImportError as ImpErr:
raise ImportError("Import couldn't find module, or couldn't find name" +\
"in module {}".format(ImpErr))
colorama.init()
__ver__ = "0.1"
class FontGenerator:
# fonts from pyfiglet
font_list = [
"3-d", "3x5", "5lineoblique", "acrobatic", "alligator", "alligator2", "alphabet", "avatar", "banner",
"banner3-D",
"banner3", "banner4", "barbwire", "basic", "bell", "big", "bigchief", "binary", "block", "bubble", "bulbhead",
"calgphy2", "caligraphy", "catwalk", "chunky", "coinstak", "colossal", "computer", "contessa", "contrast",
"cosmic",
"cosmike", "cricket", "cursive", "cyberlarge", "cybermedium", "cybersmall", "diamond", "digital", "doh", "doom",
"dotmatrix",
"drpepper", "eftichess", "eftifont", "eftipiti", "eftirobot", "eftitalic", "eftiwall", "eftiwater", "epic",
"fender", "fourtops", "fuzzy", "goofy", "gothic", "graffiti", "hollywood", "invita", "isometric1", "isometric2",
"isometric3", "isometric4", "italic", "ivrit", "jazmine", "jerusalem", "katakana", "kban", "larry3d", "lcd",
"lean",
"letters", "linux", "lockergnome", "madrid", "marquee", "maxfour", "mike", "mini", "mirror", "mnemonic",
"morse",
"moscow", "nancyj-fancy", "nancyj-underlined", "nancyj", "nipples", "ntgreek", "o8", "ogre", "pawp", "peaks",
"pebbles", "pepper", "poison", "puffy", "pyramid", "rectangles", "relief", "relief2", "rev", "roman", "rot13",
"rounded", "rowancap", "rozzo", "runic", "runyc", "sblood", "script", "serifcap", "shadow", "short", "slant",
"slide", "slscript", "small", "smisome1", "smkeyboard", "smscript", "smshadow", "smslant", "smtengwar", "speed",
"stampatello", "standard", "starwars", "stellar", "stop", "straight", "tanja", "tengwar", "term", "thick",
"thin",
"threepoint", "ticks", "ticksslant", "tinker-toy", "tombstone", "trek", "tsalagi", "twopoint", "univers",
"usaflag", "wavy",
"weird"
]
def __init__(self):
"""
Program introduction
"""
os.system("cls")
print(colored(pyfiglet.figlet_format("Text to ASCII generator",
font="slant"), "green"))
print("Version: " + __ver__ + "\n")
self.menu()
def menu(self):
"""
Set a font
"""
error = False
while True:
            if error:
                print(error)
                error = False
            else:
print(colored("What kind of font do you want? ( q:exit , p:show all fonts )", "white", "on_green"))
            font = input("> ")
if font == "q":
return
if font == "p":
self.show_all_fonts()
continue
if font not in self.font_list:
error = "Do not have this font type, choose another one.\n"
error += "All Fonts in: https://github.com/jmluang/text-to-ascii/blob/master/EXAMPLE"
break
self.introduction_and_generation(font)
def introduction_and_generation(self, font):
"""
Font example.
It also accepts user's text.
"""
while True:
os.system("cls")
print(pyfiglet.figlet_format("Sample Text", font=font))
print(colored("Input text to convert: ", "white", "on_green"))
            text = input("> ")
ascii_text = pyfiglet.figlet_format(text, font=font)
print(ascii_text)
# Loop until a user type a correct command
while True:
print(colored("Save? t/i/b/q (text/image/back/exit)", "white", "on_green"))
                command = input("> ")
if command in ["b", "t", "i", "q"]:
if command == "b":
break
if command == "q":
return
                    name = input("File name: ")
if command == "t":
file = open(name + ".txt", "w")
file.write(ascii_text)
file.close()
elif command == "i":
img = Image.new("RGB", (1, 1), (255, 255, 255))
d = ImageDraw.Draw(img)
text_width, text_height = d.textsize(ascii_text)
img = img.resize((text_width, text_height))
d = ImageDraw.Draw(img)
d.text((0, 0), ascii_text, (0, 0, 0))
img.save(name + ".png")
print(colored("Done!", "green"))
os.system("pause")
return
def show_all_fonts(self):
os.system("cls")
print "-----------------------------------"
for i,font in enumerate(self.font_list):
print "%s" % font,
if i % 10 == 0:
print "\n"
print "\n-----------------------------------"
return
def main():
font_generator = FontGenerator()
if __name__ == "__main__":
main()
``` |
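Stripped of the interactive menu, the rendering boils down to two library calls, both already used above:

```python
import pyfiglet
from termcolor import colored

ascii_text = pyfiglet.figlet_format("Hello", font="slant")
print(colored(ascii_text, "green"))
```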
{
"source": "jmmaki/BlamePipeline",
"score": 3
} |
#### File: blamepipeline/preprocess/match_article_entry.py
```python
from collections import defaultdict
import argparse
from blamepipeline.preprocess.dataloader import Dataset
case1, case2 = 0, 0
def match_data(source):
dataset = Dataset(source)
articles = dataset.get_articles()
entries = dataset.get_entries()
date_articles = defaultdict(list)
for article in articles:
date_articles[article['date']].append(article)
print('{} dates of {} articles loaded.'.format(len(date_articles), len(articles)))
print('{} entries loaded.'.format(len(entries)))
title_match = 0
subtitle_match = 0
pairs = []
def matches(entry_title, article_title):
if not entry_title or len(entry_title) < 10:
return False
elif entry_title and article_title and entry_title == article_title:
return True
        elif entry_title and article_title and entry_title in article_title:
return True
return False
for entry in entries:
for article in date_articles[entry['date']]:
if matches(entry['title'], article['title']):
title_match += 1
pairs.append((entry, article))
break
elif matches(entry['title'], article['subtitle']):
subtitle_match += 1
pairs.append((entry, article))
break
print('title match:', title_match)
print('subtitle match:', subtitle_match)
return pairs
def main(args):
if args.source == 'all':
sources = ['FOX']
else:
sources = [args.source.upper()]
for source in sources:
print(source)
pairs = match_data(source)
print('matched pairs:', len(pairs))
print('---')
global case1, case2
print(f'{case1}, {case2}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='match articles and entries')
parser.add_argument('--source', type=str, choices=['all', 'fox'], default='all')
args = parser.parse_args()
main(args)
```
#### File: blamepipeline/tokenizers/__init__.py
```python
import os
import blamepipeline
import spacy
from blamepipeline import tokenizers
try:
from spacy.spacy_tokenizer import SpacyTokenizer
except ImportError:
pass
DEFAULTS = {
}
def set_default(key, value):
global DEFAULTS
DEFAULTS[key] = value
from blamepipeline.tokenizers.corenlp_tokenizer import CoreNLPTokenizer
# Spacy is optional
def get_class(name):
if name == 'spacy':
return SpacyTokenizer
if name == 'corenlp':
return CoreNLPTokenizer
raise RuntimeError('Invalid tokenizer: %s' % name)
```
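A sketch of the factory in use; the `'tokenizer'` key is hypothetical (the `DEFAULTS` dict starts out empty), and constructor arguments are omitted because they depend on the concrete tokenizer:

```python
set_default('tokenizer', 'corenlp')
tokenizer_class = get_class(DEFAULTS['tokenizer'])
assert tokenizer_class is CoreNLPTokenizer
```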
#### File: script/simplebaseline/run.py
```python
import argparse
import json
import os
import sys
import logging
import subprocess
from collections import defaultdict
from termcolor import colored
import random
import numpy as np
from tqdm import tqdm
import torch
from blamepipeline import DATA_DIR as DATA_ROOT
from blamepipeline.simplebaseline import BaselineModel
from blamepipeline.simplebaseline import utils, config
logger = logging.getLogger()
# ------------------------------------------------------------------------------
# Training arguments.
# ------------------------------------------------------------------------------
# Defaults
DATA_DIR = os.path.join(DATA_ROOT, 'datasets')
LOG_DIR = os.path.join(DATA_ROOT, 'models/simplebaseline')
def str2bool(v):
return v.lower() in ('yes', 'true', 't', '1', 'y')
def add_train_args(parser):
"""Adds commandline arguments pertaining to training a model. These
are different from the arguments dictating the model architecture.
"""
parser.register('type', 'bool', str2bool)
# Runtime environment
runtime = parser.add_argument_group('Environment')
runtime.add_argument('--random-seed', type=int, default=712,
help=('Random seed for all numpy/torch/cuda '
'operations (for reproducibility)'))
# Files
files = parser.add_argument_group('Filesystem')
files.add_argument('--data-dir', type=str, default=DATA_DIR,
help='Directory of training/validation data')
files.add_argument('--log-dir', type=str, default=LOG_DIR,
help='Directory for saved models/checkpoints/logs')
files.add_argument('--train-file', type=str, default='samples-directed-train.json',
help='train file')
files.add_argument('--dev-file', type=str, default='samples-directed-dev.json',
help='dev file')
files.add_argument('--test-file', type=str, default='samples-directed-test.json',
help='test file')
files.add_argument('--aggressiveness-file', type=str, default='aggressiveness.txt')
files.add_argument('--blame-lexicons', type=str, default='blame_lexicons.txt')
# General
general = parser.add_argument_group('General')
general.add_argument('--metrics', type=str, choices=['precision', 'recall', 'F1', 'acc'],
help='metrics to display when training', nargs='+',
default=['precision', 'recall', 'F1', 'acc'])
general.add_argument('--valid-metric', type=str, default='F1',
help='The evaluation metric used for model selection')
def set_defaults(args):
"""Make sure the commandline arguments are initialized properly."""
# Check critical files exist
args.train_file = os.path.join(args.data_dir, args.train_file)
if not os.path.isfile(args.train_file):
raise IOError(f'No such file: {args.train_file}')
if args.dev_file:
args.dev_file = os.path.join(args.data_dir, args.dev_file)
if not os.path.isfile(args.dev_file):
raise IOError(f'No such file: {args.dev_file}')
if args.test_file:
args.test_file = os.path.join(args.data_dir, args.test_file)
if not os.path.isfile(args.test_file):
raise IOError(f'No such file: {args.test_file}')
if args.blame_lexicons:
args.blame_lexicons = os.path.join(args.data_dir, args.blame_lexicons)
if not os.path.isfile(args.blame_lexicons):
raise IOError(f'No such file {args.blame_lexicons}')
# Set log file names
# Set model directory
subprocess.call(['mkdir', '-p', args.log_dir])
args.log_file = os.path.join(args.log_dir, 'baseline.txt')
if args.aggressiveness_file:
args.aggressiveness_file = os.path.join(args.data_dir, args.aggressiveness_file)
return args
def evaluate(pred, true, eps=1e-9):
true_positive = (pred * true).sum().item()
precision = true_positive / (pred.sum().item() + eps)
recall = true_positive / (true.sum().item() + eps)
F1 = 2 * (precision * recall) / (precision + recall + eps)
acc = (pred == true).sum().item() / len(pred)
return {'precision': precision, 'recall': recall, 'F1': F1, 'acc': acc}
def validate(args, data_loader, model, mode):
"""Run one full validation.
"""
eval_time = utils.Timer()
# Make predictions
examples = 0
preds = []
trues = []
for ex in tqdm(data_loader, total=len(data_loader), desc=f'validate {mode}'):
batch_size = len(ex[-1])
inputs = ex[:-1]
pred = model.predict(inputs)
true = ex[-1]
preds += pred
trues += true
# If getting train accuracies, sample max 10k
examples += batch_size
if mode == 'train' and examples >= 1e4:
break
metrics = evaluate(np.array(preds), np.array(trues))
logger.info(f'{mode} valid: ' +
f'examples = {examples} | valid time = {eval_time.time():.2f} (s).')
logger.info(' | '.join([f'{k}: {metrics[k]*100:.2f}%' for k in metrics]))
return {args.valid_metric: metrics[args.valid_metric]}
# ------------------------------------------------------------------------------
# Main.
# ------------------------------------------------------------------------------
def main(args):
# --------------------------------------------------------------------------
# DATA
logger.info('-' * 100)
logger.info('Load data files')
train_exs = utils.load_data(args.train_file)
logger.info(f'Num train examples = {len(train_exs)}')
if args.dev_file:
dev_exs = utils.load_data(args.dev_file)
logger.info(f'Num dev examples = {len(dev_exs)}')
else:
dev_exs = []
logger.info('No dev data. Randomly choose 10% of train data to validate.')
if args.test_file:
test_exs = utils.load_data(args.test_file)
logger.info(f'Num test examples = {len(test_exs)}')
else:
test_exs = []
logger.info('No test data. Use 10 fold cv to evaluate.')
logger.info(f'Total {len(train_exs) + len(dev_exs) + len(test_exs)} examples.')
logger.info(f'Loading blame lexicons from {args.blame_lexicons}...')
with open(args.blame_lexicons) as f:
lexicons = [w.strip().lower() for w in f.read().strip().split(' or ')]
logging.info(f'{len(lexicons)} blame lexicons loaded.')
# -------------------------------------------------------------------------
# PRINT CONFIG
logger.info('-' * 100)
logger.info('CONFIG:\n%s' %
json.dumps(vars(args), indent=4, sort_keys=True))
# --------------------------------------------------------------------------
# DATA ITERATORS
logger.info('-' * 100)
logger.info('Make data loaders')
if args.test_file:
aggressiveness = {}
if args.aggressiveness_file:
with open(args.aggressiveness_file) as f:
for line in f:
entity, score = line.split(':')
score = float(score)
aggressiveness[entity] = score
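            # (Each line of the aggressiveness file is assumed to look like
            #  "<entity>:<score>", e.g. "some_entity:0.73" -- an illustrative format
            #  inferred from the split(':') above.)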
model = BaselineModel(config.get_model_args(args), lexicons, aggressiveness=aggressiveness)
train_loader, dev_loader, test_loader = utils.split_loader(train_exs, test_exs, args, model,
dev_exs=dev_exs)
# Validate train
validate(args, train_loader, model, mode='train')
# Validate dev
validate(args, dev_loader, model, mode='dev')
# validate test
result = validate(args, test_loader, model, mode='test')
logger.info('-' * 100)
logger.info(f'Test {args.valid_metric}: {result[args.valid_metric]*100:.2f}%')
else:
        # 10-fold cross-validation
results = []
samples_fold = [np.random.randint(10) for _ in range(len(train_exs))]
fold_samples = defaultdict(list)
for sample_idx, sample_fold in enumerate(samples_fold):
fold_samples[sample_fold].append(sample_idx)
model = BaselineModel(config.get_model_args(args), lexicons)
for fold in range(10):
fold_info = f'for fold {fold}' if fold is not None else ''
logger.info(colored(f'Starting training {fold_info}...', 'blue'))
train_loader, dev_loader = utils.split_loader_cv(
train_exs, args, model, fold_samples[fold])
result = validate(args, dev_loader, model, mode='dev')
results.append(result[args.valid_metric])
# logger.debug(colored('DEBUG: Run for 1 folds. Stop.', 'red'))
# break
result = np.mean(results).item()
logger.info('-' * 100)
logger.info(f'CV {args.valid_metric}: {result*100:.2f}%')
if __name__ == '__main__':
# Parse cmdline args and setup environment
parser = argparse.ArgumentParser(
'Run Blame Extractor Baseline',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
add_train_args(parser)
config.add_model_args(parser)
args = parser.parse_args()
set_defaults(args)
# Set random state
random.seed(args.random_seed)
np.random.seed(args.random_seed)
torch.manual_seed(args.random_seed)
# Set logging
logger.setLevel(logging.DEBUG)
fmt = logging.Formatter('%(asctime)s: [ %(message)s ]',
'%m/%d/%Y %I:%M:%S %p')
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
if args.log_file:
logfile = logging.FileHandler(args.log_file, 'w')
logfile.setFormatter(fmt)
logger.addHandler(logfile)
logger.info('COMMAND: %s' % ' '.join(sys.argv))
# Run!
main(args)
```
#### File: venv/bin/pydev.py
```python
import argparse
import logging
import subprocess
import os
from os import path
from pathlib import Path
import glob
from typing import List
import re
import sys
from pydantic import BaseModel
import colorama
class Plugin(BaseModel):
path: str
name: str
url: str = None
description: str = None
def docker_works() -> bool:
logging.debug('running docker --help to make sure docker is installed')
process = subprocess.run("docker --help",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
    logging.debug('docker --help return code: %s', process.returncode)
return process.returncode == 0
def get_context_info() -> dict:
return {
'PYDEV_PROJECT_ROOT': os.getcwd()
}
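# Note: PYDEV_PROJECT_ROOT is merged into the environment passed to docker-compose
# below, so plugin compose files can presumably reference it as ${PYDEV_PROJECT_ROOT}
# (an assumption about how plugins use it, not verified against any shipped plugin).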
def stop_plugin(plugin: Plugin):
logging.debug(f'Shutting down plugin: {plugin.name}')
env = {**os.environ.copy(), **get_context_info()}
process = subprocess.run(f"docker-compose -f {plugin.path} down --remove-orphans",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
logging.debug('shutdown complete with return code: %s', process.returncode)
if process.returncode != 0:
logging.debug('stdout: %s', process.stdout)
logging.error(process.stderr)
def start_plugin(plugin: Plugin):
logging.debug(f'Starting plugin: {plugin.name}')
env = {**os.environ.copy(), **get_context_info()}
process = subprocess.run(f"docker-compose -f {plugin.path} up -d",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
logging.debug('start complete with return code: %s', process.returncode)
if process.returncode != 0:
logging.debug('stdout: %s', process.stdout)
logging.error(process.stderr)
def get_plugins(plugins_dir: str) -> List[Plugin]:
logging.debug('Searching for plugins in %s', plugins_dir)
plugin_files = glob.glob(path.join(plugins_dir, '*.yml'))
plugin_files = plugin_files + glob.glob(path.join(plugins_dir, '*.yaml'))
plugin_files = list(set(plugin_files))
logging.debug('Plugin files: %s', plugin_files)
plugins = []
for plugin_file in plugin_files:
name, ext = os.path.splitext(os.path.basename(plugin_file))
comment_lines = []
with open(plugin_file) as f:
for line in f:
if line.startswith('#'):
comment_lines.append(line)
else:
break
url = None
description = None
for line in comment_lines:
matches = re.findall(r'^\s*#\s*url:\s*(.*?)\s*$', line)
if matches and len(matches) == 1:
url = matches[0]
matches = re.findall(r'^\s*#\s*description:\s*(.*?)\s*$', line)
if matches and len(matches) == 1:
description = matches[0]
plugins.append(Plugin(
name=name,
path=plugin_file,
url=url,
description=description
))
return plugins
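# Illustrative plugin file header that the regexes above would recognise
# (hypothetical file, not shipped with the tool):
#
#   # url: http://localhost:8080
#   # description: local Jupyter service
#   version: "3"
#   services:
#     ...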
def docker_compose_works() -> bool:
logging.debug('running docker-compose --help to make sure it is installed')
process = subprocess.run("docker-compose --help",
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
    logging.debug('docker-compose --help return code: %s', process.returncode)
return process.returncode == 0
def main(plugins_dir: str, verify: bool = False, *args, **kwargs):
if verify:
if not docker_works():
logging.error('Docker does not seem to be installed, terminating')
exit(1)
if not docker_compose_works():
logging.error('Docker compose does not seem to be installed, terminating')
exit(1)
logging.debug('Creating plugins dir at %s', plugins_dir)
os.makedirs(plugins_dir, exist_ok=True)
plugins = get_plugins(plugins_dir)
logging.debug('Found %s plugins', len(plugins))
colorama.init(autoreset=True)
for plugin in plugins:
sys.stdout.write(f'Stopping {plugin.name}... ')
stop_plugin(plugin)
print(colorama.Fore.GREEN + 'Done')
logging.debug('Plugin %s stopped', plugin.name)
for plugin in plugins:
sys.stdout.write(f'Starting {plugin.name}... ')
start_plugin(plugin)
print(colorama.Fore.GREEN + 'Done')
print('pydev is up and running!')
for plugin in plugins:
if plugin.url:
print(f'\t{plugin.name} - {plugin.url}')
else:
print(f'\t{plugin.name}')
if plugin.description:
print(f'\t\t{plugin.description}')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
prog='pydev',
description='''
A tool for deploying a local containerised python development environment, with a
full web interface''',
epilog='''
Notes:
* You need to have docker installed (You can use Docker for windows as well)
* You need to install docker-compose
'''
)
parser.add_argument('-p', '--plugins',
dest='plugins_dir',
type=str,
help='The path to the pydev plugins directory, containing docker-compose files',
required=False,
default=path.join(Path.home(), '.pydev'))
parser.add_argument('--no-verify',
action='store_false',
dest='verify',
default=True,
help='If specified the installation of docker & docker-compose will not be verified')
parser.add_argument('-l', '--log-level',
dest='log_level',
choices=['DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'],
default='INFO',
help='Sets the log level')
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=getattr(logging, args.log_level))
logging.debug(f'args: {vars(args)}')
main(**vars(args))
``` |
{
"source": "jmmaloney3/gapstat",
"score": 3
} |
#### File: gapstat/src/gapstat.py
```python
import math
import numpy as np
from random import randint
from sklearn.base import BaseEstimator, ClusterMixin, TransformerMixin
from sklearn.cluster import KMeans
from sklearn.exceptions import NotFittedError
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import check_array
from sklearn.utils import check_X_y
from sklearn.utils.validation import column_or_1d
from sklearn.utils.validation import check_is_fitted
class GapStatClustering(BaseEstimator, ClusterMixin, TransformerMixin):
"""A clusterer that uses the gap statistic to estimate the optimal
number of clusters.
For details on the gap statistic method for estimating the optimal
number of clusters see [1]_.
Parameters
----------
base_clusterer : object or None, optional (default=None)
The base clusterer to use to cluster the data.
If None, then the base clusterer is K-Means.
max_k : int, optional, default: 10
The maximum number of clusters to consider when estimating the
optimal number of clusters for the data set.
B1 : int, optional, default: 10
The number of null reference data sets that are generated and
clustered in order to estimate the optimal number of clusters
for the data set.
B2 : int, optional, default: 1
The number of times the input data set is clustered in order to
        estimate the average pooled within-cluster sum of squares. This
can be used to improve the stability of the results.
Attributes
----------
n_clusters_ : int
The estimate of the optimal number of clusters identified using
the gap statistic method.
labels_ :
Labels of each point
Examples
--------
>>> from gapstat import GapStatClustering
>>> from sklearn.cluster import AgglomerativeClustering
>>> from sklearn.datasets import make_blobs
>>> X,_ = make_blobs(n_samples=16, centers=[[4,4],[-4,4],[-4,-4],[4,-4]],
... n_features=2, random_state=2)
>>>
>>> gstat_km = GapStatClustering(max_k=5).fit(X)
>>> gstat_km.n_clusters_
4
>>> gstat_km.labels_
array([0, 0, 3, 1, 2, 0, 3, 2, 2, 1, 3, 0, 1, 2, 1, 3])
>>> gstat_km.predict([[-3, -3], [3, 3]])
array([4, 3], dtype=int32)
>>>
>>> gstat_ac = GapStatClustering(base_clusterer=AgglomerativeClustering(),
... max_k=5).fit(X)
>>> gstat_ac.n_clusters_
4
>>> gstat_ac.labels_
array([3, 3, 2, 0, 1, 3, 2, 1, 1, 0, 2, 3, 0, 1, 0, 2])
References
----------
.. [1] <NAME>. , <NAME>. and <NAME>. (2001), Estimating the
number of clusters in a data set via the gap statistic. Journal of
the Royal Statistical Society: Series B (Statistical Methodology),
63: 411-423. doi:10.1111/1467-9868.00293
"""
def __init__(self,
base_clusterer=None,
max_k=10,
B1=10,
B2=1):
# create default base clusterer if necessary
self.base_clusterer = _check_clusterer(base_clusterer)
self.max_k = max_k
self.B1 = B1
self.B2 = B2
def fit(self, X, y=None):
"""Compute the clustering. The gap statistic method is used to estimate
the optimal number of clusters.
TO DO: allow optional fit parameters to be passed to the base clusterer
Parameters
----------
X : array-like, sparse matrix or dataframe,shape=[n_samples,n_features]
The observations to cluster.
y : Ignored
not used, present here for API consistency by convention.
Raises
------
NotFittedError
If the data set contains more clusters than k_max.
"""
n_clusters, labels = \
gapstat(X, clusterer=self.base_clusterer,
max_k=self.max_k, B1=self.B1, B2=self.B2)
if ((n_clusters is None) | (labels is None)):
msg = "The estimated optimal number of clusters is greater than " \
"max_k=%d"
raise NotFittedError(msg % self.max_k)
else:
self.n_clusters_, self.labels_ = (n_clusters, labels)
return self
def fit_predict(self, X, y=None):
"""Compute the clustering and return the cluster label for each
observation.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
TO DO: allow optional fit parameters to be passed to the base clusterer
Parameters
----------
X : array-like, sparse matrix or dataframe,shape=[n_samples,n_features]
The observations to cluster.
y : Ignored
not used, present here for API consistency by convention.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
Raises
------
NotFittedError
If the data set contains more clusters than k_max.
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute the clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
        If the base clusterer does not implement the transform() method then
        X is returned untransformed.
TO DO: allow optional fit parameters to be passed to the base clusterer
Parameters
----------
X : array-like, sparse matrix or dataframe,shape=[n_samples,n_features]
New data to transform.
y : Ignored
not used, present here for API consistency by convention.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
Raises
------
NotFittedError
If the data set contains more clusters than k_max.
AttributeError
If the base_clusterer does not implement transform().
"""
# make sure the base cluster implements transform()
# -- raises AttributeError if it doesn't
getattr(self.base_clusterer, 'transform')
# fit the data and then call transform
return self.fit(X).transform(X)
def transform(self, X):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
        If the base clusterer does not implement the transform() method then
        X is returned untransformed.
Parameters
----------
X : array-like, sparse matrix or dataframe,shape=[n_samples,n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
Raises
------
NotFittedError
If the estimator has not been fitted to a data set.
AttributeError
If the base_clusterer does not implement transform().
"""
check_is_fitted(self)
# call transform on the base clusterer
return self.base_clusterer.transform(X)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : array-like, sparse matrix or dataframe,shape=[n_samples,n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
Raises
------
NotFittedError
If the estimator has not been fitted to a data set.
AttributeError
If the base_clusterer does not implement predict().
"""
check_is_fitted(self)
# call predict on the base clusterer
return self.base_clusterer.predict(X)
def gapstat(X, clusterer=None, max_k=10, B1=10, B2=1, calcStats=False):
"""Gap statistic clustering algorithm. Uses the gap statistic method
to estimate the optimal number of clusters and uses that estimate
to generate a clustering.
TO DO: Provide a way to pass additionl parameters to the base clusterer.
Parameters
----------
X : array-like, sparse matrix or dataframe, shape (n_samples, n_features)
The observations to cluster.
clusterer : object or None, optional (default=None)
The base clusterer to use to cluster the data.
If None, then the base clusterer is K-Means.
max_k : int, optional, default: 10
The maximum number of clusters to consider when estimating the
optimal number of clusters for the data set.
B1 : int, optional, default: 10
The number of null reference data sets that are generated and
clustered in order to estimate the optimal number of clusters
for the data set.
B2 : int, optional, default: 1
        The number of times the input data set is clustered in order to
estimate the average pooled with-in cluster sum of squares. This
can be used to improve the stability of the results.
calcStats : boolean, optional, default: False
Calculate and return the statistics for all values of k from
1 through max_k. The statistics include W, log(W), log(W*),
gap and standard error. Otherwise, stop when the estimated optimal
k is determined and only return n_clusters and labels.
Returns
-------
n_clusters : int
The estimate of the optimal number of clusters identified using the
gap statistic method.
labels : int array, shape = [n_samples]
The labels identifying the cluster that each sample belongs to.
label[i] is the index of the cluster for the i-th observation. The
clustering includes n_clusters clusters.
stats : dict, optional
When calcStats is true, the statistics are returned in a dictionary
with three entries: data, index and columns. The data entry is a numpy
two-dimensional array that includes the statistics described below.
The index and columns entries provide additional information that can
be used to create a pandas dataframe containing the statistics. Each
row of the data matrix provides the following statistics for each value
of k considered:
        W : The mean pooled within-cluster sum of squares around the cluster
means for the input data set. The value returned for each value of
k is the mean of B2 clusterings.
log(W) : the logarithm of W (see above)
log(W*) : The expectation of log(W) under an appropriate null reference
distribution of the data. This is calculated as the mean log
            pooled within-cluster sum of squares around the cluster means
for B2 generated null reference data sets.
Gap : The gap statistic calculated as log(W*) - log(W).
Std Err : The standard error of log(W*).
Examples
--------
>>> from gapstat import gapstat
>>> from sklearn.cluster import AgglomerativeClustering
>>> from sklearn.datasets import make_blobs
>>>
>>> X,_ = make_blobs(n_samples=16, centers=[[4,4],[-4,4],[-4,-4],[4,-4]],
... n_features=2, random_state=2)
>>>
>>> k, labels = gapstat(X, clusterer=AgglomerativeClustering(),
... max_k=5)
>>> k
4
>>> labels
array([3, 3, 2, 0, 1, 3, 2, 1, 1, 0, 2, 3, 0, 1, 0, 2])
"""
# validate input parameters
if max_k <= 0: # TO DO: also check if it is an integer
raise ValueError("Maximum number of clusters to consider should be "
"a positive integer, got %d instead" % max_k)
if B1 <= 0: # TO DO: also check if it is an integer
raise ValueError("The number of null reference data sets to generate "
"should be a positive integer, got %d instead" % B1)
if B2 <= 0: # TO DO: also check if it is an integer
raise ValueError("The number of times to cluster the data set to find "
"a stable W value should be a positive integer, got "
"%d instead" % B2)
# check the clusterer and create a default clusterer if necessary
clusterer = _check_clusterer(clusterer)
# to determine whether a particular value of k is optimal
# requires calculating the gap statistic for k+1, so
    # iterate through all values of k up to max_k+1
# check that the number of samples is consistent with (max_k+1)
X, _, _ = _check_inputs(X=X, k=max_k+1)
# create arrays to hold statistics
# -- "pooled within-cluster sum of squares around cluster means"
W = np.zeros(max_k+1)
log_W = np.empty(max_k+1)
log_W[:] = np.nan
# -- "expected W_k under a null reference distribution of the data"
log_W_star = np.empty(max_k+1)
log_W_star[:] = np.nan
# -- the gap statistic
gap = np.empty(max_k+1)
gap[:] = np.nan
# -- standard error
s = np.empty(max_k+1)
s[:] = np.nan
# -- labels for each value of k
labels = np.full((max_k+1, X.shape[0]), -1) # labels for each b
# -- the estimated optimal number of clusters
k_hat = None # if max_k is too small then k_hat will be None
for k in range(max_k+1):
# calculate W and log(W)
        # -- k is zero-based iterator, num clusters is one greater
W[k], log_W[k], labels[k, :] = _calc_W(X, k+1,
clusterer=clusterer, B=B2)
# calculate log(W*) and the standard error
        # -- k is zero-based iterator, num clusters is one greater
log_W_star[k], s[k] = _calc_exp_W(X, k+1, clusterer=clusterer, B=B1)
# calculate the gap statistic for k
gap[k] = log_W_star[k] - log_W[k]
# if W for ref data is less than W for input matrix
# then set gap to zero and see if adding more clusters
# reduces the value of W for the input matrix
if (gap[k] < 0):
gap[k] = 0
# determine whether the previous value of k is the estimated optimal
# number of clusters
# -- (1) make sure the optimal has not been found
# -- (2) make sure there is a previous value (k-1) for comparison
# -- (3) make sure clustering of X is actually better than the
# -- clustering of null ref data
# -- (4) use gap statistic to determine if optimal k has been found
if ((k_hat is None) & # (1)
(k > 0) & # (2)
(gap[k-1] != 0) & # (3)
(gap[k-1] >= (gap[k] - s[k]))): # (4)
# found an estimate of the optimal number of clusters!
# -- # k is zero-based iteration index, num of clusters is +1
k_hat = k # previous value of k is the estimate: ((k-1)+1) = k
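            # (This is the 1-standard-error rule from the referenced Tibshirani et al.
            #  paper: pick the smallest k with Gap(k) >= Gap(k+1) - s_{k+1}.)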
# if we are not calculating statistics then stop
if (not calcStats):
break
# -- end for k
# fit the clusterer using the estimated optimal k &
# identify labels for optimal k
if (k_hat is not None):
# fit the clusterer using k_hat as the number of clusters
clusterer.set_params(n_clusters=k_hat)
k_hat_labels = clusterer.fit_predict(X)
else:
k_hat_labels = None
# return the results
if (calcStats):
stats = {}
# create array of k values (index)
stats["index"] = np.arange(1,max_k+2)
# create an array of column headers (columns)
stats["columns"] = np.array(["W", "log(W)", "log(W*)", "Gap", "Std Err"])
# create a multi-dimensional array with the statistics (data)
stats["data"] = np.stack((W, log_W, log_W_star, gap, s), axis=1)
return k_hat, k_hat_labels, stats
else:
return k_hat, k_hat_labels
# end function
def gapstat_score(X, labels, k=None, clusterer=None, B=10, calcStats=False):
"""Compute the gap statistic score (metric) for the given clustering.
The gap statistic is the difference between the log of the pooled
    within-cluster sum of squares for the candidate clustering and the
    expectation of that value under an appropriate null reference
distribution.
For more details on the gap statistic see [1]_.
Parameters
----------
X : array [n_samples_a, n_features]
The observations that were clustered.
labels : array, shape = [n_samples]
Predicted labels for each observation.
k : int, optional, default: None
The number of clusters in the clustering. If set to None then the
number of clusters will be calculated based on the supplied labels.
clusterer : object or None, optional (default=None)
        The clusterer to use to cluster the null reference data sets.
If None, then the base clusterer is K-Means.
B : int, optional, default: 10
The number of null reference data sets that are generated and
clustered in order to estimate the optimal number of clusters
for the data set.
calcStats : boolean, optional, default: False
Calculate and return the underlying statistics used to calculate
the gap statistic score. The statistics include W, log(W), log(W*),
and standard error. Otherwise, only the gap statistic score is
returned.
Returns
-------
gap : float
The value of the gap statistic for the clustering.
W : float, optional
        The mean pooled within-cluster sum of squares around the cluster means
for the provided clustering. This is only returned when calcStats is
True.
log_W : float, optional
log(W). This is only returned when calcStats is True.
log_W_star : float, optional
The expectation of log(W) under an appropriate null reference
distribution of the data. This is calculated as the mean log pooled
        within-cluster sum of squares around the cluster means for B generated
null reference data sets. This is only returned when calcStats is
True.
s : float, optional
The standard error of log(W*). This is only returned when calcStats
is True.
Examples
--------
>>> from gapstat import gapstat
>>> from sklearn.cluster import AgglomerativeClustering
>>> from sklearn.datasets import make_blobs
>>>
>>> X,_ = make_blobs(n_samples=16, centers=[[4,4],[-4,4],[-4,-4],[4,-4]],
... n_features=2, random_state=2)
>>>
>>> ac = AgglomerativeClustering().fit(X)
>>> gapstat_score(X, ac.labels_)
-0.6028585939536981
References
----------
.. [1] <NAME>. , <NAME>. and <NAME>. (2001), Estimating the
number of clusters in a data set via the gap statistic. Journal of
the Royal Statistical Society: Series B (Statistical Methodology),
63: 411-423. doi:10.1111/1467-9868.00293
"""
if B <= 0: # TO DO: also check if it is an integer
raise ValueError("The number of null reference data sets to generate "
"should be a positive integer, got %d instead" % B)
# check that the inputs are valid and consistent
X, labels, k = _check_inputs(X=X, y=labels, k=k)
# check the clusterer and create a default clusterer if necessary
clusterer = _check_clusterer(clusterer)
# calculate W for supplied clustering
W = _pooled_within_cluster_sum_of_squares(X, labels, k)
log_W = _safeLog(W)
# calculate log(W*) and standard error
log_W_star, s = _calc_exp_W(X, k, clusterer, B)
# calculate the gap statistic for the clustering
gap = log_W_star - log_W
if (calcStats):
return gap, W, log_W, log_W_star, s
else:
return gap
def _calc_W(X, k, clusterer=None, B=1):
"""Calculate the expected pooled within-in cluster sum of squares
for the data set and the specified number of clusters k.
Parameters
----------
X : array [n_samples_a, n_features]
The observations that were clustered.
k : int
The number of clusters to use when clustering the data sets.
clusterer : object or None, optional (default=None)
The clusterer to use to cluster the data set. If None, then
the clusterer is K-Means.
    B : int, optional, default: 1
The number of times the data set should be clustered in order to
determine an average pooled within cluster sum of squares. This
helps smooth out random differences introduced by random starting
states for some clusterers.
Returns
-------
W : float
The mean pooled with-in cluster sum of squares for the B
clusterings that were generated.
log_W : float
The mean log(W) for the B clusterings that were generated
labels : array [n_samples]
        The labels of one randomly selected clustering out of the B
        clusterings that were generated
"""
# check the clusterer and create a default clusterer if necessary
clusterer = _check_clusterer(clusterer)
# handle degenerate case when there is 1 sample per cluster
if (k == X.shape[0]):
# return:
# -- W: one sample per cluster, so W is zero
# -- log(W): log(0) is undefined, return NaN
# -- labels: return unique label for each sample
return 0.0, np.nan, np.array(range(k))
# cluster the data set B times and calculate the average W
# arrays to hold stats for the B iterations
W = np.zeros(B)
log_W = np.empty(B)
log_W[:] = np.nan
labels = np.full((B, X.shape[0]), -1) # labels for each b
# set the number for clusters in the clustered data set
clusterer.set_params(n_clusters=k)
for b in range(B):
# generate clusters
labels[b, :] = clusterer.fit_predict(X)
# calculate W and log(W) for the b-th iteration
W[b] = _pooled_within_cluster_sum_of_squares(X, labels[b, :], k)
log_W[b] = _safeLog(W[b])
# -- end for b
# find the mean of W and log(W) for the B clusterings
avg_W = np.sum(W)/B
avg_log_W = np.sum(log_W)/B
# randomly select one of the clusterings to return
i = randint(0, B-1)
ret_labels = labels[i, :]
return avg_W, avg_log_W, ret_labels
def _calc_exp_W(X, k, clusterer=None, B=10):
"""Calculate the expected pooled within-in cluster sum of squares
for the null reference disribution for the data set and the
specified number of clusters k.
Parameters
----------
X : array [n_samples, n_features]
The observations that were clustered.
k : int
The number of clusters to use when clustering the null ref
data sets.
clusterer : object or None, optional (default=None)
        The clusterer to use to cluster the null reference data sets.
If None, then the clusterer is K-Means.
B : int, optional, default: 10
The number of null reference data sets that are generated and
clustered in order to calculate the null reference pooled within
cluster sum of squares.
Returns
-------
log_W_star : float
        The expected pooled within-cluster sum of squares for the null
        reference distribution.
std_err : float
The standard error for the means of the B null reference data sets
"""
# check the clusterer and create a default clusterer if necessary
clusterer = _check_clusterer(clusterer)
n_samples, n_features = X.shape
# handle degenerate case when there is 1 sample per cluster
if (k == n_samples):
# return:
# -- log(W*): W* is 0, log(0) is undefined, return NaN
# -- standard error: return NaN
return np.nan, np.nan
# calculate min & max for samples
X_min = X.min(axis=0)
X_max = X.max(axis=0)
    # generate B null ref sets, cluster each set and calculate the statistic
# arrays to hold stats for the B null ref data sets for current k
null_ref_W = np.zeros(B) # value of W* for the B null ref sets
log_null_ref_W = np.empty(B) # value of log(W*) for the B null ref sets
log_null_ref_W[:] = np.nan
# set the number for clusters in the clustered data set
clusterer.set_params(n_clusters=k)
for b in range(B):
# generate a new "null reference data set"
null_ref = _gen_null_ref(n_samples, n_features, X_min, X_max)
# generate clusters for the "null reference data set"
labels = clusterer.fit_predict(null_ref)
# calculate W* and log(W*) for the b-th null reference data set
null_ref_W[b] = _pooled_within_cluster_sum_of_squares(null_ref,
labels, k)
log_null_ref_W[b] = _safeLog(null_ref_W[b])
# -- end for b
# end generation and clustering of B null ref data sets
# find the mean of log(W*) for the B ref data set samples
log_W_star = np.sum(log_null_ref_W)/B # log(W*) (aka, l_bar)
# calculate the standard deviation
sd = math.sqrt(np.mean(np.power(log_null_ref_W - log_W_star, 2)))
# calculate the standard error
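    # (s = sd * sqrt(1 + 1/B), the simulation-error correction from the referenced
    #  Tibshirani et al. paper)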
s = sd*math.sqrt(1 + 1/B)
return log_W_star, s
def _gen_null_ref(n_samples, n_features, f_min, f_max):
"""Generate a data set with the specified number of samples and
values for features chosen from a uniform distributions with the
specified minimum and maximum values.
Parameters
----------
n_samples : int
The number of samples to generate.
n_features : int
The number of features to generate.
    f_min : array, float [n_features]
The minimum values for the features.
f_max : array, float [n_features]
The maximum value for the features.
Returns
-------
null_ref : array, float [ n_sample, n_features ]
The generated samples.
"""
# create 2D array to hold null reference data set
null_ref = np.empty((n_samples, n_features))
null_ref[:] = np.nan
# generate a "null reference data set"
for f in range(n_features):
null_ref[:, f] = np.random.uniform(low=f_min[f], high=f_max[f],
size=n_samples)
# null ref set generated ---
return null_ref
def _check_clusterer(clusterer=None):
"""Check that the clusterer is a valid clusterer (it implements
    the required methods). If no clusterer is provided, create
a default clusterer.
Parameters
----------
clusterer : object or None, optional (default=None)
The clusterer to use to cluster the data sets.
If None, then the clusterer is K-Means.
Returns
-------
clusterer : object
The supplied clusterer or a default clusterer if none was provided.
"""
if (clusterer is None): # create default clusterer if necessary
# default Cluster is KMeans
clusterer = KMeans()
else:
# make sure base clusterer implements set_params()
getattr(clusterer, 'set_params')
# make sure base clusterer implements fit_predict()
getattr(clusterer, 'fit_predict')
# make sure base clusterer has n_clusters attribute
getattr(clusterer, 'n_clusters')
return clusterer
def _check_inputs(X=None, y=None, k=None):
"""Input validation for gapstat.
Depending on the inputs provided, use one or more of the following
validation utilities to validate the inputs:
sklearn.utils.check_array()
sklearn.utils.check_X_y()
sklearn.utils.validation.column_or_1d()
In addition, if k is provided, validate the following:
0 < k <= X.shape[0]
k == number of unique value in y
Parameters
----------
X : array [n_samples, n_features], optional
The data set to be clustered
y : array-like, shape=[n_samples], optional
The labels identifying the cluster that each sample belongs to.
k : int
The number of clusters.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
k_validated: int
The calculated or validated k.
"""
if (X is None) & (y is None):
raise ValueError("One of 'X' or 'y' must not be 'None'")
n_labels = None
if (y is not None):
le = LabelEncoder()
y = le.fit_transform(y)
n_labels = len(le.classes_)
if (X is not None) & (y is not None):
X, y = check_X_y(X, y)
if (k is not None):
if (not 0 < k <= X.shape[0]):
raise ValueError("Number of clusters (k) is %d. Valid values "
"are 1 to n_samples (inclusive)" % k)
if (n_labels != k):
raise ValueError("Number of unique labels (%d) does not equal "
"the number of clusters (k=%d)."
% (n_labels, k))
else: # (k is None)
k = n_labels
if (X is not None) & (y is None):
X = check_array(X)
if (k is not None) & (not 0 < k <= X.shape[0]):
raise ValueError("Number of clusters (k) is %d. Valid values "
"are 1 to n_samples=%d (inclusive)"
% (k, X.shape[0]))
if (X is None) & (y is not None):
y = column_or_1d(y)
if (k is not None) & (n_labels != k):
raise ValueError("Number of unique labels (%d) does not equal the "
"number of clusters (k=%d)." % (n_labels, k))
else:
k = n_labels
return X, y, k
def _pooled_within_cluster_sum_of_squares(X, labels, k):
"""Calculate the pooled within-cluster sum of squares (W) for
the clustering defined by the specified labels.
Parameters
----------
X : array-like, sparse matrix or dataframe, shape=[n_samples, n_features]
The observations that were clustered.
labels : array-like, shape=[n_samples]
The labels identifying the cluster that each sample belongs to.
k: integer
The number of unique labels - number of clusters.
"""
n_samples, _ = X.shape
# initialize W to zero
W = 0
# -- iterate over the clusters and calculate the pairwise distances for
# -- the points
for c_label in range(k):
c_k = X[labels == c_label]
d_k = euclidean_distances(c_k, c_k, squared=True)
n_k = len(c_k)
# multiply by 0.5 because each distance is included in the sum twice
W = W + (0.5*d_k.sum())/(2*n_k)
# return the result
return W
# end pooled_within_cluster_sum_of_squares
def _safeLog(x):
"""Return the log of the specified number. If the number
is zero (or close to zero) return NaN.
Parameters
==========
x : float
Returns
=======
log_x : float
The value of log(x) or np.nan if x is zero.
"""
if (math.isclose(x, 0.0)):
# return a very small number
return np.nan
else:
return math.log(x)
def _get_column_indices(X):
"""Get the column indices for the input matrix. Determines the data
type of the input matrix and uses the appropriate method to retrieve
the column indices.
"""
try:
# this will fail if X is not a pandas DataFrame
return list(X.columns)
except AttributeError:
pass
# X is an array-like
return list(range(X.shape[1]))
# -- end function
``` |
{
"source": "jmmaloney4/renovate-helm-releases",
"score": 2
} |
#### File: jmmaloney4/renovate-helm-releases/test_renovate.py
```python
from click.testing import CliRunner
from renovate import cli
def test_renovate():
runner = CliRunner()
result = runner.invoke(cli, ['--cluster-path', './tests/'])
assert result.exit_code == 0
if __name__ == '__main__':
test_renovate()
``` |
{
"source": "jmmarino723/pyprotocol",
"score": 3
} |
#### File: jmmarino723/pyprotocol/Parser.py
```python
import logging
from time import sleep
from queue import Queue
from protocol.package.package import Package
def worker(input_queue: Queue, output_queue: Queue):
while True:
payload = input_queue.get()
pack = Package(payload=payload, size=payload.length())
logging.info(f"Package added to Queue {pack.dict()}")
        output_queue.put(pack.dict())
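# Illustrative wiring (not part of the original module; assumes a payload object with
# .length() and a Package.dict() method, as used above):
#   from threading import Thread
#   inbound, outbound = Queue(), Queue()
#   Thread(target=worker, args=(inbound, outbound), daemon=True).start()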
``` |
{
"source": "Jmmaroli/eqn-gen",
"score": 2
} |
#### File: eqn-gen/lib/analyze_model.py
```python
import time
import itertools
import multiprocessing
import numpy as np
from joblib import Parallel, delayed
from scipy.optimize import curve_fit
import pyprind # Progress bar
import torch
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import mat4py
import os
from lib.evaluate_function import evaluate_function
# Format of function parameters.
FORMAT = '%.3e'
def analyze_model(analysis_parameters, model_dictionary, input_data, output_data, input_mask=1):
functions = analysis_parameters["functions"]
sweep_initial = analysis_parameters["sweep_initial"]
sweep_detailed = analysis_parameters["sweep_detailed"]
contrib_thresh = analysis_parameters["contrib_thresh"]
contrib_thresh_omit = analysis_parameters["contrib_thresh_omit"]
use_f_weight = analysis_parameters["use_f_weight"]
seed = analysis_parameters["seed"]
np.random.seed(seed)
verbose = analysis_parameters["verbose"]
visual = analysis_parameters["visual"]
save_visual = analysis_parameters["save_visual"]
# Check inputs for validity.
if sweep_initial < 1:
print("ERROR: analyze_model parameter sweep_initial must be >= 1")
return None, None
if sweep_detailed < 100:
print("ERROR: analyze_model parameter sweep_detailed must be >= 100")
return None, None
# Function for indexing the large impulse array.
    def array_to_int(num_list): # e.g. [1, 0, 1]
        str_list = map(str, num_list) # ['1', '0', '1']
        num_str = ''.join(str_list) # '101'
        num = int(num_str, 2) # 5 (binary string parsed base 2)
        return num
model = model_dictionary["model"]
history = model_dictionary["model_parameters"]["history"]
history_eff = model_dictionary["history_eff"]
mu_x = model_dictionary["mu_x"]
sig_x = model_dictionary["sig_x"]
mu_y = model_dictionary["mu_y"]
sig_y = model_dictionary["sig_y"]
input_channels = model_dictionary["input_channels"]
output_channels = model_dictionary["output_channels"]
input_range = model_dictionary["input_range"]
input_shift = model_dictionary["input_shift"]
# Establish tensor types of certain variables for computation.
mu_y_t = torch.tensor(mu_y, dtype=torch.float)
sig_y_t = torch.tensor(sig_y, dtype=torch.float)
# Get the current data output folder if saving data and plots.
if save_visual == True:
if not os.path.exists('./output'):
os.mkdir('./output')
analysis_dir_count = 1
while os.path.exists('./output/analysis_{}'.format(analysis_dir_count)):
analysis_dir_count = analysis_dir_count + 1
os.mkdir('./output/analysis_{}'.format(analysis_dir_count))
# Generate every possible combination of impulses.
if history < history_eff:
history_eff = history
combination_count = pow(2, input_channels*(history_eff))
combinations = [x for x in range(0, input_channels*(history_eff))]
impulse_array = np.zeros([combination_count, input_channels, history])
# Loop through every combination of subsets of constituants
for combination_id in range(0, len(combinations)+1):
for subset in itertools.combinations(combinations, combination_id):
impulse = np.zeros([1, 1, input_channels*(history_eff)])
for element in subset:
impulse[0, 0, input_channels*(history_eff)-1-element] = 1
index = array_to_int(impulse[0, 0, :].astype(int))
impulse_shaped = np.reshape(impulse, [input_channels, history_eff])
# Add buffer elements to account for a history longer than scope.
impulse_array[index, :, (history-history_eff):history] = impulse_shaped
# Generate the impulse sweep set for creating multiples of impulses.
if sweep_initial != 1:
impulse_sweep_set = 2*np.random.rand(sweep_initial, input_channels, history)-1
# Bound sweep set to be within range of the original input data.
for i in range(0, input_channels):
min_value = input_range[i][0]
max_value = input_range[i][1]
impulse_sweep_set[:, i, :] = impulse_sweep_set[:, i, :]*(max_value-min_value)+min_value
# Obtain the output for input impulses.
print("Exciting model...")
model.cpu()
if sweep_initial != 1:
impulse_response = np.zeros([combination_count, output_channels, sweep_initial])
else:
impulse_response = np.zeros([combination_count, output_channels, 1])
batch_idx = 1
batch_size_analyze = 256
progress_bar = pyprind.ProgBar(len(range(0, combination_count, batch_size_analyze)), monitor=True)
# Calculate the bias at the zero point.
model_input = np.copy(impulse_array[0:1, :, :])
bias = model(torch.tensor((model_input-mu_x)/sig_x, dtype=torch.float))*sig_y_t+mu_y_t
# Calculate the response from all impulse combinations.
for i in range(0, combination_count, batch_size_analyze):
if i + batch_size_analyze > combination_count:
# Handle the last batch.
impulse = impulse_array[i:]
if sweep_initial > 1:
for j in range(0, sweep_initial):
mult = impulse_sweep_set[j, :, :]
model_input = mult*impulse*input_mask
output = (model(torch.tensor((model_input-mu_x)/sig_x, dtype=torch.float))*sig_y_t+mu_y_t).detach().cpu().numpy()
impulse_response[i:, :, j] = output
else:
model_input = impulse*input_mask
output = (model(torch.tensor((model_input-mu_x)/sig_x, dtype=torch.float))*sig_y_t+mu_y_t).detach().cpu().numpy()
impulse_response[i:, :, 0] = output
else:
# Handle a standard size batch.
impulse = impulse_array[i:(i+batch_size_analyze)]
if sweep_initial > 1:
for j in range(0, sweep_initial):
mult = impulse_sweep_set[j, :, :]
model_input = mult*impulse*input_mask
output = (model(torch.tensor((model_input-mu_x)/sig_x, dtype=torch.float))*sig_y_t+mu_y_t).detach().cpu().numpy()
impulse_response[i:(i+batch_size_analyze), :, j] = output
else:
model_input = impulse*input_mask
output = (model(torch.tensor((model_input-mu_x)/sig_x, dtype=torch.float))*sig_y_t+mu_y_t).detach().cpu().numpy()
impulse_response[i:(i+batch_size_analyze), :, 0] = output
batch_idx += 1
progress_bar.update()
#impulse_response = impulse_response.detach().numpy()
time.sleep(0.5) # Allows progress bar to finish printing elapsed time.
print()
def process_subcombination(subcombination):
sub_impulse = np.zeros([input_channels*history])
# Determine index of combination in impulse_response
for element in subcombination:
sub_impulse[input_channels*history-1-element] = 1
sub_index = array_to_int(sub_impulse.astype(int))
# Loop through all subcombinations
subsub_indices = []
for l in range(0, len(subcombination)+1):
for subsubcombination in itertools.combinations(subcombination, l):
if subcombination != subsubcombination:
subsub_impulse = np.zeros([input_channels*history])
# Determine index of subcombination in impulse_response
for element in subsubcombination:
subsub_impulse[input_channels*history-1-element] = 1
subsub_index = array_to_int(subsub_impulse.astype(int))
subsub_indices.append(subsub_index)
return sub_index, subsub_indices
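    # Subtracting every proper-subset response from each combination (below) isolates
    # the marginal contribution of that exact set of inputs -- an inclusion-exclusion
    # style decomposition of the network's response.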
# Analyze responses (note: progress bar is not linear with computation time)
print("Analyzing responses...")
progress_bar = pyprind.ProgBar(combination_count, monitor=True)
num_cores = multiprocessing.cpu_count()
for combination_id in range(0, len(combinations)+1):
# Loop all combinations
results = Parallel(n_jobs=num_cores)(delayed(process_subcombination)(subcombination) \
for subcombination in itertools.combinations(combinations, combination_id))
for each in results:
sub_index = each[0]
subsub_indices = each[1]
for subsub_index in subsub_indices:
impulse_response[sub_index, :, :] = impulse_response[sub_index, :, :] - \
impulse_response[subsub_index, :, :]
progress_bar.update()
time.sleep(0.5) # Allows progress bar to finish printing elapsed time.
print()
# Examine the impulse response for all combinations and generate a function.
print("Estimating system equation...")
# Create a mask of relevant inputs for later model retraining.
new_mask = np.zeros([input_channels, history])
# Create a sweep set for curve fitting.
fit_sweep_set = np.random.rand(sweep_detailed, input_channels, history)
for i in range(0, input_channels):
min_value = input_range[i][0]
max_value = input_range[i][1]
fit_sweep_set[:, i, :] = fit_sweep_set[:, i, :]*(max_value-min_value)+min_value
model_function = []
for channel_id in range(0, output_channels):
# Function for the output channel is a sum of product functions.
channel_function = []
# Get the magnitude average point value of each product function contribution.
Z = np.sum(abs(impulse_response[:, channel_id, :]), 1)/sweep_initial
# Get the variance of each product function.
S = np.var(impulse_response[:, channel_id, :], 1)
total_variance = sum(S)
# Get indices of responses from largest to smallest.
response_indices = np.flip(np.argsort(Z), 0)
# Get indices of variances from largest to smallest.
variance_indices = np.flip(np.argsort(S), 0)
# Identify the top responses.
if verbose:
print("############################################################")
print("Estimate of channel " + str(channel_id+1))
print("############################################################")
candidate_limit = min(25, len(response_indices))
sig_indexes = []
for k in range(0, candidate_limit):
sig_index = response_indices[k]
sig_response = Z[sig_index]
z_sorted = np.flip(np.sort(Z[1:], 0), 0)
contribution_magnitude = sig_response/sum(z_sorted)
if contribution_magnitude > contrib_thresh:
sig_indexes.append(sig_index)
for k in range(0, candidate_limit):
sig_index = variance_indices[k]
sig_variance = S[sig_index]
contribution_variance = sig_variance/total_variance
if contribution_variance > contrib_thresh and sig_index not in sig_indexes:
sig_indexes.append(sig_index)
# Estimate equations for top responses.
for sig_index in sig_indexes:
sig_response = Z[sig_index]
sig_variance = S[sig_index]
sig_impulse = impulse_array[sig_index:sig_index+1, :, :]
if verbose: print("Response ID " + str(sig_index) + " contribution:")
# Process a product function if the response is significant.
# Significance is % contribution to total magnitude or variance.
# Bias is not included in magnitude significance.
z_sorted = np.flip(np.sort(Z[1:], 0), 0)
contribution_magnitude = sig_response/sum(z_sorted)
contribution_variance = sig_variance/total_variance
            if sig_index != 0:
if verbose: print("Magnitude : " + str('%.1f'%(contribution_magnitude*100)) + "%")
if verbose: print("Variance : " + str('%.1f'%(contribution_variance*100)) + "%")
else:
if verbose: print("Bias contribution omitted from calculation.")
if verbose: print("============================================================")
if contribution_magnitude > contrib_thresh or contribution_variance > contrib_thresh:
# Determine the arguments of the product function.
arg_list = []
for input_id in range(0, input_channels):
for element_id, element in enumerate(sig_impulse[0, input_id, :].astype(int)):
if element == 1:
delay = history - 1 - element_id
arg_list.append({"input_channel": input_id, "delay": delay})
new_mask[input_id, element_id] = 1
# Create the product function template string.
f_list = []
f_str = "f("
for _, arg in enumerate(arg_list):
f_list.append("x" + str(arg["input_channel"]+1) + "(k-" + str(arg["delay"]) + ")")
for arg_num, arg_str in enumerate(f_list):
f_str = f_str + arg_str
if arg_num < len(f_list)-1:
f_str = f_str + ","
if len(arg_list) == 0:
f_str = f_str + "0"
f_str = f_str + ")"
# Estimate the product function.
def fcn_empty(_):
return 0
def txt_empty(_):
return ""
dct_empty = {
"txt": "?",
"txt_fcn": txt_empty,
"fcn": fcn_empty,
"upper": [],
"lower": [],
"weight": 1.0
}
product_function = {
"arg_list": arg_list,
"template_string": f_str,
"estimate_string": "f(?)",
"parameters": [],
"function": dct_empty,
"shift": []
}
if len(arg_list) > 0:
# Obtain sample points for curve fitting.
x_data = np.zeros([sweep_detailed, input_channels, history])
y_data = np.zeros([sweep_detailed, output_channels])
for idx in range(0, sweep_detailed):
mult = fit_sweep_set[idx, :, :]
model_input = mult*sig_impulse*input_mask
x_data[idx, :, :] = model_input
y_data[idx, :] = (model(torch.tensor((model_input-mu_x)/sig_x, dtype=torch.float))).detach().numpy()*sig_y+mu_y
# Recursively subtract contributions from product functions of arguments.
contribution_list = []
for idf in range(0, len(arg_list)):
new_contributions = []
for arg_combination in itertools.combinations(arg_list, idf):
arg_impulse = np.zeros([sweep_detailed, input_channels, history])
for arg in arg_combination:
arg_impulse[:, arg["input_channel"], history-1-arg["delay"]] = 1
model_input = arg_impulse * fit_sweep_set
output = (model(torch.tensor((model_input-mu_x)/sig_x, dtype=torch.float))).detach().numpy()*sig_y+mu_y
for contribution in contribution_list:
output = output - contribution
new_contributions.append(output)
contribution_list[0:0] = new_contributions
for contribution in contribution_list:
y_data = y_data - contribution
# Format data for curve fitting
arg_count = len(arg_list)
x_data_fit = np.zeros([arg_count, sweep_detailed])
y_data_fit = np.zeros([sweep_detailed])
arg = 0
for i in range(0, input_channels):
for j in range(0, history):
if sig_impulse[0, i, j] == 1:
x_data_fit[arg, :] = x_data[:, i, j]
y_data_fit[:] = y_data[:, channel_id]
product_function["shift"].append(input_shift[i])
arg = arg + 1
# Plot 2D and 3D data for visual inspection.
if save_visual == True or visual == True:
if arg_count == 1:
plt.figure()
plt.scatter(x_data_fit[0], y_data_fit, marker='.')
plt.title(product_function["template_string"])
plt.xlabel(f_list[0])
if save_visual == True:
plt.savefig('./output/analysis_{}/{}.pdf'.format(analysis_dir_count, \
product_function["template_string"]))
pltDict = {"x": x_data_fit[0].tolist(),
"y": y_data_fit.tolist()}
mat4py.savemat('./output/analysis_{}/{}.mat'.format(analysis_dir_count, \
product_function["template_string"]), pltDict)
if visual == True: plt.show()
if arg_count == 2:
plt.figure()
ax = plt.axes(projection='3d')
ax.scatter3D(x_data_fit[0], x_data_fit[1], y_data_fit, c=y_data_fit, marker='o')
ax.set_title(product_function["template_string"])
ax.set_xlabel(f_list[0])
ax.set_ylabel(f_list[1])
if save_visual == True:
plt.savefig('./output/analysis_{}/{}.pdf'.format(analysis_dir_count, \
product_function["template_string"]))
pltDict = {"x": x_data_fit[0].tolist(),
"y": x_data_fit[1].tolist(),
"z": y_data_fit.tolist()}
mat4py.savemat('./output/analysis_{}/{}.mat'.format(analysis_dir_count, \
product_function["template_string"]), pltDict)
if visual == True: plt.show()
# Estimate the product function using curve fitting.
if arg_count in functions:
candidate_functions = functions[arg_count]
else:
candidate_functions = []
product_function["estimate_string"] = product_function["template_string"]
best_fit = 100
for f in candidate_functions:
try:
popt, pcov = curve_fit(f["fcn"],
x_data_fit,
y_data_fit,
bounds=(f["lower"], f["upper"]),
maxfev=250000)
pcount = len(popt)
err = y_data_fit-f["fcn"](x_data_fit, *popt)
# Compute root mean squared error.
rmse = np.sqrt(sum(pow(err, 2))/sweep_detailed)
# Compute mean average error.
mae = np.mean(abs(err))
# Compute one standard deviation errors (just the normal std).
#std = np.sqrt(np.diag(pcov))
if verbose:
print("Fit for " + f["txt_fcn"](arg_list, product_function["shift"], *popt))
print("MAE : " + str(FORMAT%mae))
print("RMSE : " + str(FORMAT%rmse))
#print("STD : " + str(std))
f_weight = 1.0
if use_f_weight == True: f_weight = f["weight"]
if mae/f_weight < best_fit:
best_fit = mae/f_weight
product_function["parameters"] = popt
product_function["function"] = f
product_function["estimate_string"] = f["txt_fcn"](arg_list,
product_function["shift"],
*popt)
if verbose: print("Current best fit for Response " + str(sig_index))
if verbose: print()
# Perform curve fitting with different parameter initializations in attempt to improve fit.
fit_iterations = 5*pcount
for _ in range(1, fit_iterations):
pinit = np.random.rand(pcount)*(np.array(f["upper"])-np.array(f["lower"])) \
+ np.array(f["lower"])
popt_new, pcov = curve_fit(f["fcn"],
x_data_fit,
y_data_fit,
bounds=(f["lower"], f["upper"]),
p0=pinit,
maxfev=10000)
err = y_data_fit-f["fcn"](x_data_fit, *popt_new)
# Compute root mean squared error.
rmse = np.sqrt(sum(pow(err, 2))/sweep_detailed)
# Compute mean average error.
mae = np.mean(abs(err))
if mae/f_weight < 0.999*best_fit:
best_fit = mae/f_weight
product_function["parameters"] = popt_new
product_function["function"] = f
product_function["estimate_string"] = f["txt_fcn"](arg_list,
product_function["shift"],
*popt_new)
if verbose:
print("Revised fit for " + f["txt_fcn"](arg_list,
product_function["shift"],
*popt_new))
print("MAE : " + str(FORMAT%mae))
print("RMSE : " + str(FORMAT%rmse))
print("Current best fit for Response " + str(sig_index))
print()
except Exception as e:
if best_fit == 100:
product_function["estimate_string"] = product_function["template_string"]
if verbose:
print("Warning: Fit could not be estimated for " + f["txt"] + ",")
print(" " + str(e))
print("")
else:
# Handle constant bias at the zero point.
channel_bias = bias[0, channel_id].detach().numpy()
channel_bias_str = str('%.3f'%channel_bias)
product_function["parameters"] = [channel_bias]
def fcn_bias(x, a):
return a
def txt_bias(argList, argShift, a):
return str('%.3f'%a)
dct_bias = {
"txt": "a",
"fcn": fcn_bias,
"txt_fcn": txt_bias,
"upper": [2*channel_bias],
"lower": [0],
"weight": 1.0
}
product_function["function"] = dct_bias
product_function["estimate_string"] = channel_bias_str
if verbose:
print("Constant " + channel_bias_str)
print()
# Check if the candidate product function improves the accuracy of the model.
if sig_index > 0:
current_function = [channel_function]
candidate_function = [channel_function + [product_function]]
current_metrics = evaluate_function(current_function,
input_data,
output_data[:, channel_id:channel_id+1])
candidate_metrics = evaluate_function(candidate_function,
input_data,
output_data[:, channel_id:channel_id+1])
# Include product functions that are above a threshold and improve the overall MAE.
if candidate_metrics[0]["MAE"] > current_metrics[0]["MAE"]:
if verbose:
print("Warning: Candidate product function worsens overall MAE.")
print(" MAE increases from " + str(FORMAT%current_metrics[0]["MAE"])+\
" to " + str(FORMAT%candidate_metrics[0]["MAE"]) + ".")
if contribution_magnitude < contrib_thresh_omit \
and contribution_variance < contrib_thresh_omit:
if verbose: print(" Candidate product function omitted.")
else:
channel_function.append(product_function)
if verbose: print(" Candidate product function added.")
else:
if verbose: print("Overall MAE declines from " + str(FORMAT%current_metrics[0]["MAE"]) \
+ " to " + str(FORMAT%candidate_metrics[0]["MAE"]) + ".")
channel_function.append(product_function)
else:
channel_function.append(product_function)
else:
# Stop building the channel equation.
if verbose:
print("Insignificant product function response.")
print()
print("############################################################")
print("Channel " + str(channel_id+1) + " function completed.")
print("############################################################")
break
if verbose: print()
# Print the completed equation for the current output channel.
if verbose: print("System equation")
if verbose: print("============================================================")
# Print the function template for the current output channel.
y_str = "y" + str(channel_id+1) + "[k] = "
for idf, product_function in enumerate(channel_function):
y_str = y_str + product_function["template_string"]
if idf < len(channel_function) - 1:
y_str = y_str + " + "
print(y_str)
y_str = "y" + str(channel_id+1) + "[k] = "
for idf, product_function in enumerate(channel_function):
if product_function["estimate_string"] != None:
y_str = y_str + product_function["estimate_string"]
if idf < len(channel_function) - 1:
y_str = y_str + " + "
print(y_str)
print()
model_function.append(channel_function)
return model_function, new_mask
# Future work: Use better fit metric than weighted MAE.
# https://autarkaw.org/2008/07/05/finding-the-optimum-polynomial-order-to-use-for-regression/
``` |
{
"source": "JM-Maynard/12stepsCFD",
"score": 4
} |
#### File: 12stepsCFD/JM_code/Step3_CFL_Condition.py
```python
import numpy #numpy is a library for array operations akin to MATLAB
from matplotlib import pyplot #matplotlib is 2D plotting library
def linearconv(nx):
dx = 2.0 / (nx - 1)
nt = 20 #nt is the number of timesteps we want to calculate
dt = .025 #dt is the amount of time each timestep covers (delta t)
c = 1
u = numpy.ones(nx) #defining a numpy array which is nx elements long with every value equal to 1.
u[int(.5/dx):int(1 / dx + 1)] = 2 #setting u = 2 between 0.5 and 1 as per our I.C.s
un = numpy.ones(nx) #initializing our placeholder array, un, to hold the values we calculate for the n+1 timestep
for n in range(nt): #iterate through time
un = u.copy() ##copy the existing values of u into un
for i in range(1, nx):
u[i] = un[i] - c * dt / dx * (un[i] - un[i-1])
pyplot.figure()
pyplot.plot(numpy.linspace(0, 2, nx), u);
# Now let's examine the results of our linear convection problem with an increasingly fine mesh.
linearconv(41) #convection using 41 grid points
linearconv(61)
linearconv(71)
linearconv(85)
# In[8]:
def linearconv(nx):
dx = 2.0 / (nx - 1)
nt = 20 #nt is the number of timesteps we want to calculate
c = 1 # this is the wave speed
sigma = .5 # This is the condition on the CFL number
dt = sigma * dx
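    # With c = 1 the CFL number is C = c*dt/dx = sigma = 0.5, which stays within the
    # stability limit C <= 1 for this explicit upwind scheme.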
u = numpy.ones(nx)
u[int(.5/dx):int(1 / dx + 1)] = 2
un = numpy.ones(nx)
for n in range(nt): #iterate through time
un = u.copy() ##copy the existing values of u into un
for i in range(1, nx):
u[i] = un[i] - c * dt / dx * (un[i] - un[i-1])
pyplot.figure()
pyplot.plot(numpy.linspace(0, 2, nx), u)
linearconv(41)
linearconv(61)
linearconv(81)
linearconv(101)
linearconv(121)
# Notice that as the number of points `nx` increases, the wave convects a shorter and shorter distance.
# The number of time iterations is held constant at `nt = 20`, but because `dt` now shrinks with `dx`,
# a shorter time window is examined overall.
# In some cases it is possible to do a rigorous analysis of the stability of numerical schemes.
# Watch Prof. Barba's presentation of this topic in **Video Lecture 9** on YouTube.
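# --- Illustrative check (added for clarity; not part of the original lesson code) ---
# The update u[i] = un[i] - c*dt/dx*(un[i] - un[i-1]) is stable only while the CFL number
# sigma = c*dt/dx stays at or below 1, which is why the second version fixes sigma = 0.5
# and derives dt from dx.
def cfl_number(c, dt, dx):
    """Courant number of the 1-D linear convection update."""
    return c * dt / dx

# With the fixed dt = 0.025 of the first version, refining the grid pushes sigma past 1:
for nx_points in (41, 61, 71, 85):
    dx = 2.0 / (nx_points - 1)
    print(nx_points, "points -> sigma =", cfl_number(c=1, dt=0.025, dx=dx))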
``` |
{
"source": "jmmccray/pycryptobot",
"score": 2
} |
#### File: models/telegram/actions.py
```python
import os
import json
import subprocess
# import logging
import csv
from datetime import datetime
from time import sleep
from models.telegram.helper import TelegramHelper
from models.telegram.settings import SettingsEditor
# # Enable logging
# logging.basicConfig(
# format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
# )
# logger = logging.getLogger(__name__)
class TelegramActions:
''' Telegram Bot Action Class '''
def __init__(self, datafolder, tg_helper: TelegramHelper) -> None:
self.datafolder = datafolder
self.helper = tg_helper
self.settings = SettingsEditor(datafolder, tg_helper)
def _get_margin_text(self, market):
''' Get margin text '''
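# Pick a green status icon pair when the margin string has no minus sign, a red pair otherwise.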
light_icon, margin_icon = (
"\U0001F7E2" if "-" not in self.helper.data["margin"] else "\U0001F534",
"\U0001F973" if "-" not in self.helper.data["margin"] else "\U0001F97A",
)
result = (
f"{light_icon} <b>{market}</b> (<i>{self.helper.data['exchange']}</i>)\n"
f"{margin_icon} Margin: {self.helper.data['margin']} "
f"\U0001F4B0 P/L: {self.helper.data['delta']}\n"
f"TSL Trg: {self.helper.data['trailingstoplosstriggered']} "
f"TSL Change: {float(self.helper.data['change_pcnt_high']).__round__(4)}\n"
# f"TPL Trg: {self.helper.data['preventlosstriggered']} "
# f"TPL Change: {float(self.helper.data['change_pcnt_high']).__round__(4)}\n"
)
return result
@staticmethod
def _get_uptime(date: str):
''' Get uptime '''
now = str(datetime.now())
# If date passed from datetime.now() remove milliseconds
if date.find(".") != -1:
date_time = date.split(".")[0]
date = date_time
if now.find(".") != -1:
date_time = now.split(".", maxsplit=1)[0]
now = date_time
now = now.replace("T", " ")
now = f"{now}"
# Add time in case only a date is passed in
# new_date_str = f"{date} 00:00:00" if len(date) == 10 else date
date = date.replace("T", " ") if date.find("T") != -1 else date
# Add time in case only a date is passed in
new_date_str = f"{date} 00:00:00" if len(date) == 10 else date
started = datetime.strptime(new_date_str, "%Y-%m-%d %H:%M:%S")
now = datetime.strptime(now, "%Y-%m-%d %H:%M:%S")
duration = now - started
duration_in_s = duration.total_seconds()
hours = divmod(duration_in_s, 3600)[0]
duration_in_s -= 3600 * hours
minutes = divmod(duration_in_s, 60)[0]
return f"{round(hours)}h {round(minutes)}m"
def start_open_orders(self, update, context):
''' Start bots for open trades (data.json) '''
self.helper.logger.info("called start_open_orders")
query = update.callback_query
if query is not None:
query.answer()
self.helper.send_telegram_message(
update, "<b>Starting markets with open trades..</b>", context=context
)
else:
self.helper.send_telegram_message(
update, "<b>Starting markets with open trades..</b>", context=context
)
# update.effective_message.reply_html("<b>Starting markets with open trades..</b>")
self.helper.read_data()
for market in self.helper.data["opentrades"]:
if not self.helper.is_bot_running(market):
# update.effective_message.reply_html(f"<i>Starting {market} crypto bot</i>")
self.helper.start_process(
market,
self.helper.data["opentrades"][market]["exchange"],
"",
"scanner",
)
sleep(10)
self.helper.send_telegram_message(update, "<i>Markets have been started</i>", context=context)
# update.effective_message.reply_html("<i>Markets have been started</i>")
sleep(1)
self.get_bot_info(update, context)
def sell_response(self, update, context):
"""create the manual sell order"""
query = update.callback_query
self.helper.logger.info("called sell_response - %s", query.data)
if query.data.__contains__("all"):
self.helper.send_telegram_message(
update, "<b><i>Initiating sell orders..</i></b>", context=context
)
for market in self.helper.get_active_bot_list("active"):
while self.helper.read_data(market) is False:
sleep(0.2)
if "margin" in self.helper.data and self.helper.data["margin"] != " ":
while self.helper.read_data(market) is False:
sleep(0.2)
if "botcontrol" in self.helper.data:
self.helper.data["botcontrol"]["manualsell"] = True
self.helper.write_data(market)
self.helper.send_telegram_message(
update,
f"Selling: {market}\n<i>Please wait for sale notification...</i>", context=context
)
sleep(0.2)
else:
while (
self.helper.read_data(query.data.replace("confirm_sell_", "")) is False
):
sleep(0.2)
if "botcontrol" in self.helper.data:
self.helper.data["botcontrol"]["manualsell"] = True
self.helper.write_data(query.data.replace("confirm_sell_", ""))
self.helper.send_telegram_message(
update,
f"Selling: {query.data.replace('confirm_sell_', '').replace('.json','')}"
"\n<i>Please wait for sale notification...</i>", context=context
)
def buy_response(self, update, context):
"""create the manual buy order"""
query = update.callback_query
self.helper.logger.info("called buy_response - %s", query.data)
# if self.helper.read_data(query.data.replace("confirm_buy_", "")):
while self.helper.read_data(query.data.replace("confirm_buy_", "")) is False:
sleep(0.2)
if "botcontrol" in self.helper.data:
self.helper.data["botcontrol"]["manualbuy"] = True
self.helper.write_data(query.data.replace("confirm_buy_", ""))
self.helper.send_telegram_message(
update,
f"Buying: {query.data.replace('confirm_buy_', '').replace('.json','')}"
"\n<i>Please wait for sale notification...</i>", context=context
)
def show_config_response(self, update):
"""display config settings based on exchanged selected"""
self.helper.read_config()
# with open(os.path.join(self.helper.config_file), "r", encoding="utf8") as json_file:
# self.helper.config = json.load(json_file)
query = update.callback_query
self.helper.logger.info("called show_config_response - %s", query.data)
if query.data == "ex_scanner":
pbot = self.helper.config[query.data.replace("ex_", "")]
else:
pbot = self.helper.config[query.data.replace("ex_", "")]["config"]
self.helper.send_telegram_message(
update, query.data.replace("ex_", "") + "\n" + json.dumps(pbot, indent=4)
)
def get_bot_info(self, update, context):
''' Get running bot information '''
count = 0
for file in self.helper.get_active_bot_list():
output = ""
count += 1
while self.helper.read_data(file) is False:
sleep(0.2)
output = output + f"\U0001F4C8 <b>{file} ({self.helper.data['exchange']})</b> "
last_modified = datetime.now() - datetime.fromtimestamp(
os.path.getmtime(
os.path.join(self.datafolder, "telegram_data", f"{file}.json")
)
)
icon = "\U0001F6D1" # red dot
if last_modified.seconds > 90 and last_modified.seconds != 86399:
output = f"{output} {icon} <b>Status</b>: <i>defaulted</i>"
elif (
"botcontrol" in self.helper.data
and "status" in self.helper.data["botcontrol"]
):
if self.helper.data["botcontrol"]["status"] == "active":
icon = "\U00002705" # green tick
if self.helper.data["botcontrol"]["status"] == "paused":
icon = "\U000023F8" # pause icon
if self.helper.data["botcontrol"]["status"] == "exit":
icon = "\U0000274C" # stop icon
output = f"{output} {icon} <b>Status</b>: <i> {self.helper.data['botcontrol']['status']}</i>"
output = f"{output} \u23F1 <b>Uptime</b>: <i> {self._get_uptime(self.helper.data['botcontrol']['started'])}</i>\n"
else:
output = f"{output} {icon} <b>Status</b>: <i>stopped</i> "
if count == 1:
self.helper.send_telegram_message(update, output, context=context)
else:
update.effective_message.reply_html(f"{output}")
sleep(0.2)
if count == 0:
self.helper.send_telegram_message(update, f"<b>Bot Count ({count})</b>", context=context)
else:
update.effective_message.reply_html(f"<b>Bot Count ({count})</b>")
def get_margins(self, update):
''' Get margins '''
query = update.callback_query
self.helper.send_telegram_message(update, "<i>Getting Margins..</i>")
closed_output = []
open_output = []
closed_count = 0
open_count = 0
# print(self.helper.get_active_bot_list())
for market in self.helper.get_active_bot_list():
while self.helper.read_data(market) is False:
sleep(0.2)
closed_output_text = ""
open_output_text = ""
if "margin" in self.helper.data:
if "margin" in self.helper.data and self.helper.data["margin"] == " ":
closed_output_text = closed_output_text + f"<b>{market}</b>"
closed_output_text = (
closed_output_text + f"\n<i>{self.helper.data['message']}</i>\n"
)
closed_output.append(closed_output_text)
closed_count += 1
elif len(self.helper.data) > 2:
open_output_text = open_output_text + self._get_margin_text(market)
open_output.append(open_output_text)
open_count += 1
if (
query.data.__contains__("orders") or query.data.__contains__("all")
) and open_count > 0:
for output in open_output:
update.effective_message.reply_html(f"{output}")
sleep(0.5)
elif (
query.data.__contains__("orders") or query.data.__contains__("all")
) and open_count == 0:
update.effective_message.reply_html("<b>No open orders found.</b>")
if (
query.data.__contains__("pairs") or query.data.__contains__("all")
) and closed_count > 0:
for output in closed_output:
update.effective_message.reply_html(f"{output}")
sleep(1)
elif (
query.data.__contains__("pairs") or query.data.__contains__("all")
) and closed_count == 0:
update.effective_message.reply_html("<b>No active pairs found.</b>")
def start_market_scan(
self,
update,
context,
use_default_scanner: bool = True,
scanmarkets: bool = True,
startbots: bool = True,
debug: bool = False,
):
''' Start market scanner/screener '''
# Check whether using the scanner or the screener - use correct config file etc
if use_default_scanner is True:
scanner_config_file = "scanner.json"
scanner_script_file = "scanner.py"
elif use_default_scanner is False:
scanner_config_file = "screener.json"
scanner_script_file = "screener.py"
self.helper.logger.info("called start_market_scan - %s", scanner_script_file)
try:
with open(f"{scanner_config_file}", encoding="utf8") as json_file:
config = json.load(json_file)
except IOError as err:
self.helper.send_telegram_message(
update, f"<i>{scanner_config_file} config error</i>\n{err}", context=context
)
return
# If a bulk load file for the exchange exists - start up all the bulk bots for this
for ex in config:
for quote in config[ex]["quote_currency"]:
if os.path.exists(
os.path.join(
self.datafolder, "telegram_data", f"{ex}_bulkstart.csv"
)
):
update.effective_message.reply_html(
f"<i>Found bulk load CSV file for {ex}... Loading pairs</i>"
)
try:
with open(
os.path.join(
self.datafolder, "telegram_data", f"{ex}_bulkstart.csv"
),
newline="",
encoding="utf-8",
) as csv_obj:
csv_file = csv.DictReader(csv_obj)
for row in csv_file:
# update.effective_message.reply_html(row["market"])
if (
"market" in row
and row["market"] is not None
and quote in row["market"]
):
# Start the process disregarding bot limits for the moment
update.effective_message.reply_html(
f"Bulk Starting {row['market']} on {ex}..."
)
self.helper.start_process(
row["market"], ex, "", "scanner"
)
sleep(7)
except IOError:
pass
else:
# No Bulk Start File Found
pass
if scanmarkets:
if bool(self.helper.settings["notifications"]["enable_screener"]):
reply = "<i>Gathering market data\nplease wait...</i> \u23F3"
self.helper.send_telegram_message(update, reply, context=context)
try:
self.helper.logger.info("Starting Market Scanner")
subprocess.getoutput(f"python3 {scanner_script_file}")
except Exception as err:
update.effective_message.reply_html("<b>scanning failed.</b>")
self.helper.logger.error(err)
raise
if bool(self.helper.settings["notifications"]["enable_screener"]):
update.effective_message.reply_html("<b>Scan Complete.</b>")
# Watchdog process - check for hung bots and force restart them
if bool(self.helper.settings["notifications"]["enable_screener"]):
update.effective_message.reply_html("<i>Fido checking for hung bots..</i>")
for file in self.helper.get_hung_bot_list():
ex = self.helper.get_running_bot_exchange(file)
self.helper.stop_running_bot(file, "exit", True)
sleep(3)
os.remove(os.path.join(self.datafolder, "telegram_data", f"{file}.json"))
sleep(1)
if bool(self.helper.settings["notifications"]["enable_screener"]):
update.effective_message.reply_html(
f"Restarting {file} as it appears to have hung..."
)
self.helper.start_process(file, ex, "", "scanner")
sleep(1)
if not startbots:
if bool(self.helper.settings["notifications"]["enable_screener"]):
update.effective_message.reply_html(
"<b>Operation Complete (0 started)</b>"
)
return
# Check to see if the bot would be restarted anyways from the scanner
# and dont stop to maintain trailingbuypcnt etc
scanned_bots = []
for ex in config:
for quote in config[ex]["quote_currency"]:
try:
with open(
os.path.join(
self.datafolder,
"telegram_data",
f"{ex}_{quote}_output.json",
),
"r",
encoding="utf8",
) as json_file:
data = json.load(json_file)
for row in data:
if data[row]["atr72_pcnt"] is not None:
if (
data[row]["atr72_pcnt"]
>= self.helper.config["scanner"]["atr72_pcnt"]
):
scanned_bots.append(row)
except:
pass
if bool(self.helper.settings["notifications"]["enable_screener"]):
update.effective_message.reply_html("<i>stopping bots..</i>")
active_bots_list = self.helper.get_active_bot_list()
open_order_bot_list = self.helper.get_active_bot_list_with_open_orders()
for file in active_bots_list:
if (file not in scanned_bots) or (file not in open_order_bot_list):
self.helper.stop_running_bot(file, "exit")
sleep(3)
else:
if bool(self.helper.settings["notifications"]["enable_screener"]):
update.effective_message.reply_html(
f"Not stopping {file} - in scanner list, or has open order..."
)
botcounter = 0
runningcounter = len(self.helper.get_active_bot_list())
maxbotcount = (
self.helper.config["scanner"]["maxbotcount"]
if "maxbotcount" in self.helper.config["scanner"]
else 0
)
self.helper.read_data()
for ex in config:
if maxbotcount > 0 and (botcounter + runningcounter) >= maxbotcount:
break
for quote in config[ex]["quote_currency"]:
if bool(self.helper.settings["notifications"]["enable_screener"]):
update.effective_message.reply_html(
f"Starting {ex} ({quote}) bots..."
)
self.helper.logger.info("%s - (%s)", ex, quote)
if not os.path.isfile(
os.path.join(
self.datafolder, "telegram_data", f"{ex}_{quote}_output.json"
)
):
continue
with open(
os.path.join(
self.datafolder, "telegram_data", f"{ex}_{quote}_output.json"
),
"r",
encoding="utf8",
) as json_file:
data = json.load(json_file)
outputmsg = f"<b>{ex} ({quote})</b> \u23F3 \n"
msg_cnt = 1
for row in data:
if debug:
self.helper.logger.info("%s", row)
if maxbotcount > 0 and (botcounter + runningcounter) >= maxbotcount:
break
if self.helper.config["scanner"]["enableleverage"] is not False and (
str(row).__contains__(f"DOWN{quote}")
or str(row).__contains__(f"UP{quote}")
or str(row).__contains__(f"3L-{quote}")
or str(row).__contains__(f"3S-{quote}")
):
if msg_cnt == 1:
if bool(
self.helper.settings["notifications"]["enable_screener"]
):
update.effective_message.reply_html(
f"Ignoring {ex} ({quote}) "\
"Leverage Pairs (enableleverage is disabled)..."
)
msg_cnt += 1
continue
if row in self.helper.data["scannerexceptions"]:
outputmsg = (
outputmsg
+ f"*** {row} found on scanner exception list ***\n"
)
else:
if data[row]["atr72_pcnt"] is not None:
if (
data[row]["atr72_pcnt"]
>= self.helper.config["scanner"]["atr72_pcnt"]
):
if (
self.helper.config["scanner"]["enable_buy_next"]
and data[row]["buy_next"]
):
outputmsg = (
outputmsg
+ f"<i><b>{row}</b> //--// "\
f"<b>atr72_pcnt:</b> {data[row]['atr72_pcnt']}% //--//"\
f" <b>buy_next:</b> {data[row]['buy_next']}</i>\n"
)
self.helper.start_process(row, ex, "", "scanner")
botcounter += 1
elif not self.helper.config["scanner"][
"enable_buy_next"
]:
outputmsg = (
outputmsg
+ f"<i><b>{row}</b> //--// "\
f"<b>atr72_pcnt:</b> {data[row]['atr72_pcnt']}%</i>\n"
)
self.helper.start_process(row, ex, "", "scanner")
botcounter += 1
if debug is False:
sleep(6)
if bool(self.helper.settings["notifications"]["enable_screener"]):
update.effective_message.reply_html(f"{outputmsg}")
# if bool(self.helper.settings["notifications"]["enable_screener"]):
update.effective_message.reply_html(
f"<b>{scanner_config_file.replace('.json', '').capitalize()} " \
f"Operation Complete.</b><i>\n- {botcounter} started"\
f"\n- {runningcounter + botcounter} running</i>"
)
def delete_response(self, update):
"""delete selected bot"""
query = update.callback_query
self.helper.logger.info("called delete_response - %s", query.data)
write_ok, try_cnt = False, 0
while not write_ok and try_cnt <= 5:
try_cnt += 1
self.helper.read_data()
self.helper.data["markets"].pop(str(query.data).replace("delete_", ""))
write_ok = self.helper.write_data()
if not write_ok:
sleep(1)
self.helper.send_telegram_message(
update,
f"<i>Deleted {str(query.data).replace('delete_', '')} crypto bot</i>",
)
def remove_exception_callback(self, update):
"""remove bot exception """
query = update.callback_query
self.helper.logger.info("called remove_exception_callback")
write_ok, try_cnt = False, 0
while not write_ok and try_cnt <= 5:
try_cnt += 1
self.helper.read_data()
self.helper.data["scannerexceptions"].pop(
str(query.data).replace("delexcep_", "")
)
write_ok = self.helper.write_data()
if not write_ok:
sleep(1)
self.helper.send_telegram_message(
update,
f"<i>Removed {str(query.data).replace('delexcep_', '')} from exception list. bot</i>",
)
``` |
{
"source": "jmmcd/networkx",
"score": 4
} |
#### File: examples/advanced/plot_parallel_betweenness.py
```python
from multiprocessing import Pool
import time
import itertools
import matplotlib.pyplot as plt
import networkx as nx
def chunks(l, n):
"""Divide a list of nodes `l` in `n` chunks"""
l_c = iter(l)
while 1:
x = tuple(itertools.islice(l_c, n))
if not x:
return
yield x
def betweenness_centrality_parallel(G, processes=None):
"""Parallel betweenness centrality function"""
p = Pool(processes=processes)
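# Split the node list into roughly four chunks per worker process before dispatching.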
node_divisor = len(p._pool) * 4
node_chunks = list(chunks(G.nodes(), int(G.order() / node_divisor)))
num_chunks = len(node_chunks)
bt_sc = p.starmap(
nx.betweenness_centrality_source,
zip([G] * num_chunks, [True] * num_chunks, [None] * num_chunks, node_chunks),
)
# Reduce the partial solutions
bt_c = bt_sc[0]
for bt in bt_sc[1:]:
for n in bt:
bt_c[n] += bt[n]
return bt_c
if __name__ == "__main__":
G_ba = nx.barabasi_albert_graph(1000, 3)
G_er = nx.gnp_random_graph(1000, 0.01)
G_ws = nx.connected_watts_strogatz_graph(1000, 4, 0.1)
for G in [G_ba, G_er, G_ws]:
print("")
print("Computing betweenness centrality for:")
print(nx.info(G))
print("\tParallel version")
start = time.time()
bt = betweenness_centrality_parallel(G)
print("\t\tTime: %.4F" % (time.time() - start))
print("\t\tBetweenness centrality for node 0: %.5f" % (bt[0]))
print("\tNon-Parallel version")
start = time.time()
bt = nx.betweenness_centrality(G)
print("\t\tTime: %.4F seconds" % (time.time() - start))
print("\t\tBetweenness centrality for node 0: %.5f" % (bt[0]))
print("")
nx.draw(G_ba)
plt.show()
```
#### File: approximation/tests/test_clique.py
```python
import networkx as nx
from networkx.algorithms.approximation import max_clique
from networkx.algorithms.approximation import clique_removal
from networkx.algorithms.approximation import large_clique_size
def is_independent_set(G, nodes):
"""Returns True if and only if `nodes` is a clique in `G`.
`G` is a NetworkX graph. `nodes` is an iterable of nodes in
`G`.
"""
return G.subgraph(nodes).number_of_edges() == 0
def is_clique(G, nodes):
"""Returns True if and only if `nodes` is an independent set
in `G`.
`G` is an undirected simple graph. `nodes` is an iterable of
nodes in `G`.
"""
H = G.subgraph(nodes)
n = len(H)
return H.number_of_edges() == n * (n - 1) // 2
class TestCliqueRemoval(object):
"""Unit tests for the
:func:`~networkx.algorithms.approximation.clique_removal` function.
"""
def test_trivial_graph(self):
G = nx.trivial_graph()
independent_set, cliques = clique_removal(G)
assert is_independent_set(G, independent_set)
assert all(is_clique(G, clique) for clique in cliques)
# In fact, we should only have 1-cliques, that is, singleton nodes.
assert all(len(clique) == 1 for clique in cliques)
def test_complete_graph(self):
G = nx.complete_graph(10)
independent_set, cliques = clique_removal(G)
assert is_independent_set(G, independent_set)
assert all(is_clique(G, clique) for clique in cliques)
def test_barbell_graph(self):
G = nx.barbell_graph(10, 5)
independent_set, cliques = clique_removal(G)
assert is_independent_set(G, independent_set)
assert all(is_clique(G, clique) for clique in cliques)
class TestMaxClique(object):
"""Unit tests for the :func:`networkx.algorithms.approximation.max_clique`
function.
"""
def test_null_graph(self):
G = nx.null_graph()
assert len(max_clique(G)) == 0
def test_complete_graph(self):
graph = nx.complete_graph(30)
# this should return the entire graph
mc = max_clique(graph)
assert 30 == len(mc)
def test_maximal_by_cardinality(self):
"""Tests that the maximal clique is computed according to maximum
cardinality of the sets.
For more information, see pull request #1531.
"""
G = nx.complete_graph(5)
G.add_edge(4, 5)
clique = max_clique(G)
assert len(clique) > 1
G = nx.lollipop_graph(30, 2)
clique = max_clique(G)
assert len(clique) > 2
def test_large_clique_size():
G = nx.complete_graph(9)
nx.add_cycle(G, [9, 10, 11])
G.add_edge(8, 9)
G.add_edge(1, 12)
G.add_node(13)
assert large_clique_size(G) == 9
G.remove_node(5)
assert large_clique_size(G) == 8
G.remove_edge(2, 3)
assert large_clique_size(G) == 7
```
#### File: isomorphism/tests/test_isomorphism.py
```python
import networkx as nx
from networkx.algorithms import isomorphism as iso
class TestIsomorph:
@classmethod
def setup_class(cls):
cls.G1 = nx.Graph()
cls.G2 = nx.Graph()
cls.G3 = nx.Graph()
cls.G4 = nx.Graph()
cls.G1.add_edges_from([[1, 2], [1, 3], [1, 5], [2, 3]])
cls.G2.add_edges_from([[10, 20], [20, 30], [10, 30], [10, 50]])
cls.G3.add_edges_from([[1, 2], [1, 3], [1, 5], [2, 5]])
cls.G4.add_edges_from([[1, 2], [1, 3], [1, 5], [2, 4]])
def test_could_be_isomorphic(self):
assert iso.could_be_isomorphic(self.G1, self.G2)
assert iso.could_be_isomorphic(self.G1, self.G3)
assert not iso.could_be_isomorphic(self.G1, self.G4)
assert iso.could_be_isomorphic(self.G3, self.G2)
def test_fast_could_be_isomorphic(self):
assert iso.fast_could_be_isomorphic(self.G3, self.G2)
def test_faster_could_be_isomorphic(self):
assert iso.faster_could_be_isomorphic(self.G3, self.G2)
def test_is_isomorphic(self):
assert iso.is_isomorphic(self.G1, self.G2)
assert not iso.is_isomorphic(self.G1, self.G4)
```
#### File: algorithms/node_classification/utils.py
```python
def _propagate(P, F, B):
"""Propagate labels by one step
Parameters
----------
P : scipy sparse matrix, shape = [n_samples, n_samples]
Propagation matrix
F : numpy array, shape = [n_samples, n_classes]
Label matrix
B : numpy array, shape = [n_samples, n_classes]
Base matrix
Returns
----------
F_new : array, shape = [n_samples, n_classes]
Label matrix
"""
F_new = P.dot(F) + B
return F_new
def _get_label_info(G, label_name):
"""Get and return information of labels from the input graph
Parameters
----------
G : NetworkX graph
label_name : string
Name of the target label
Returns
----------
labels : numpy array, shape = [n_labeled_samples, 2]
Array of pairs of labeled node ID and label ID
label_dict : numpy array, shape = [n_classes]
Array of labels
i-th element contains the label corresponding to label ID `i`
"""
import numpy as np
labels = []
label_to_id = {}
lid = 0
for i, n in enumerate(G.nodes(data=True)):
if label_name in n[1]:
label = n[1][label_name]
if label not in label_to_id:
label_to_id[label] = lid
lid += 1
labels.append([i, label_to_id[label]])
labels = np.array(labels)
label_dict = np.array([label for label, _ in sorted(
label_to_id.items(), key=lambda x:x[1])])
return (labels, label_dict)
def _init_label_matrix(n_samples, n_classes):
"""Create and return zero matrix
Parameters
----------
n_samples : integer
The number of nodes (samples) on the input graph
n_classes : integer
The number of classes (distinct labels) on the input graph
Returns
----------
F : numpy array, shape = [n_samples, n_classes]
Label matrix
"""
import numpy as np
F = np.zeros((n_samples, n_classes))
return F
def _predict(F, label_dict):
"""Predict labels by learnt label matrix
Parameters
----------
F : numpy array, shape = [n_samples, n_classes]
Learnt (resulting) label matrix
label_dict : numpy array, shape = [n_classes]
Array of labels
i-th element contains the label corresponding to label ID `i`
Returns
----------
predicted : numpy array, shape = [n_samples]
Array of predicted labels
"""
import numpy as np
predicted_label_ids = np.argmax(F, axis=1)
predicted = label_dict[predicted_label_ids].tolist()
return predicted
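# --- Illustrative usage sketch (added; not part of the original module) ---
# A toy end-to-end pass over the helpers above. The propagation matrix `P` and base
# matrix `B` are hand-built stand-ins for what the node-classification algorithms
# (e.g. harmonic_function) would normally construct from the graph Laplacian.
if __name__ == "__main__":
    import numpy as np
    import networkx as nx

    G = nx.path_graph(4)
    G.nodes[0]["label"] = "A"
    G.nodes[3]["label"] = "B"

    labels, label_dict = _get_label_info(G, "label")
    F = _init_label_matrix(n_samples=4, n_classes=len(label_dict))
    F[labels[:, 0], labels[:, 1]] = 1  # clamp the known labels
    B = F.copy()
    P = np.full((4, 4), 0.25)  # uniform stand-in propagation matrix

    for _ in range(10):
        F = _propagate(P, F, B)

    print(_predict(F, label_dict))  # e.g. ['A', 'A', 'A', 'B'] (ties default to the first class)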
```
#### File: algorithms/tests/test_distance_measures.py
```python
from random import Random
import pytest
import networkx as nx
from networkx import convert_node_labels_to_integers as cnlti
class TestDistance:
def setup_method(self):
G = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted")
self.G = G
def test_eccentricity(self):
assert nx.eccentricity(self.G, 1) == 6
e = nx.eccentricity(self.G)
assert e[1] == 6
sp = dict(nx.shortest_path_length(self.G))
e = nx.eccentricity(self.G, sp=sp)
assert e[1] == 6
e = nx.eccentricity(self.G, v=1)
assert e == 6
# This behavior changed in version 1.8 (ticket #739)
e = nx.eccentricity(self.G, v=[1, 1])
assert e[1] == 6
e = nx.eccentricity(self.G, v=[1, 2])
assert e[1] == 6
# test against graph with one node
G = nx.path_graph(1)
e = nx.eccentricity(G)
assert e[0] == 0
e = nx.eccentricity(G, v=0)
assert e == 0
pytest.raises(nx.NetworkXError, nx.eccentricity, G, 1)
# test against empty graph
G = nx.empty_graph()
e = nx.eccentricity(G)
assert e == {}
def test_diameter(self):
assert nx.diameter(self.G) == 6
def test_radius(self):
assert nx.radius(self.G) == 4
def test_periphery(self):
assert set(nx.periphery(self.G)) == set([1, 4, 13, 16])
def test_center(self):
assert set(nx.center(self.G)) == set([6, 7, 10, 11])
def test_bound_diameter(self):
assert nx.diameter(self.G, usebounds=True) == 6
def test_bound_radius(self):
assert nx.radius(self.G, usebounds=True) == 4
def test_bound_periphery(self):
result = set([1, 4, 13, 16])
assert set(nx.periphery(self.G, usebounds=True)) == result
def test_bound_center(self):
result = set([6, 7, 10, 11])
assert set(nx.center(self.G, usebounds=True)) == result
def test_radius_exception(self):
G = nx.Graph()
G.add_edge(1, 2)
G.add_edge(3, 4)
pytest.raises(nx.NetworkXError, nx.diameter, G)
def test_eccentricity_infinite(self):
with pytest.raises(nx.NetworkXError):
G = nx.Graph([(1, 2), (3, 4)])
e = nx.eccentricity(G)
def test_eccentricity_undirected_not_connected(self):
with pytest.raises(nx.NetworkXError):
G = nx.Graph([(1, 2), (3, 4)])
e = nx.eccentricity(G, sp=1)
def test_eccentricity_directed_weakly_connected(self):
with pytest.raises(nx.NetworkXError):
DG = nx.DiGraph([(1, 2), (1, 3)])
nx.eccentricity(DG)
class TestResistanceDistance:
@classmethod
def setup_class(cls):
global np
global sp_sparse
np = pytest.importorskip('numpy')
scipy = pytest.importorskip('scipy')
sp_sparse = pytest.importorskip('scipy.sparse')
def setup_method(self):
G = nx.Graph()
G.add_edge(1, 2, weight=2)
G.add_edge(2, 3, weight=4)
G.add_edge(3, 4, weight=1)
G.add_edge(1, 4, weight=3)
self.G = G
def test_laplacian_submatrix(self):
from networkx.algorithms.distance_measures import _laplacian_submatrix
M = sp_sparse.csr_matrix([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=np.float32)
N = sp_sparse.csr_matrix([[5, 6],
[8, 9]], dtype=np.float32)
Mn, Mn_nodelist = _laplacian_submatrix(1, M, [1, 2, 3])
assert Mn_nodelist == [2, 3]
assert np.allclose(Mn.toarray(), N.toarray())
def test_laplacian_submatrix_square(self):
with pytest.raises(nx.NetworkXError):
from networkx.algorithms.distance_measures import _laplacian_submatrix
M = sp_sparse.csr_matrix([[1, 2],
[4, 5],
[7, 8]], dtype=np.float32)
_laplacian_submatrix(1, M, [1, 2, 3])
def test_laplacian_submatrix_matrix_node_dim(self):
with pytest.raises(nx.NetworkXError):
from networkx.algorithms.distance_measures import _laplacian_submatrix
M = sp_sparse.csr_matrix([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=np.float32)
_laplacian_submatrix(1, M, [1, 2, 3, 4])
def test_resistance_distance(self):
rd = nx.resistance_distance(self.G, 1, 3, 'weight', True)
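# Nodes 1 and 3 are joined by two parallel paths: 1-2-3 (2 + 4 ohms) and 1-4-3 (3 + 1 ohms), combined in parallel below.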
test_data = 1/(1/(2+4) + 1/(1+3))
assert round(rd, 5) == round(test_data, 5)
def test_resistance_distance_noinv(self):
rd = nx.resistance_distance(self.G, 1, 3, 'weight', False)
test_data = 1/(1/(1/2+1/4) + 1/(1/1+1/3))
assert round(rd, 5) == round(test_data, 5)
def test_resistance_distance_no_weight(self):
rd = nx.resistance_distance(self.G, 1, 3)
assert round(rd, 5) == 1
def test_resistance_distance_neg_weight(self):
self.G[2][3]['weight'] = -4
rd = nx.resistance_distance(self.G, 1, 3, 'weight', True)
test_data = 1/(1/(2+-4) + 1/(1+3))
assert round(rd, 5) == round(test_data, 5)
def test_multigraph(self):
G = nx.MultiGraph()
G.add_edge(1, 2, weight=2)
G.add_edge(2, 3, weight=4)
G.add_edge(3, 4, weight=1)
G.add_edge(1, 4, weight=3)
rd = nx.resistance_distance(G, 1, 3, 'weight', True)
assert np.isclose(rd, 1/(1/(2+4) + 1/(1+3)))
def test_resistance_distance_div0(self):
with pytest.raises(ZeroDivisionError):
self.G[1][2]['weight'] = 0
nx.resistance_distance(self.G, 1, 3, 'weight')
def test_resistance_distance_not_connected(self):
with pytest.raises(nx.NetworkXError):
self.G.add_node(5)
nx.resistance_distance(self.G, 1, 5)
def test_resistance_distance_same_node(self):
with pytest.raises(nx.NetworkXError):
nx.resistance_distance(self.G, 1, 1)
def test_resistance_distance_nodeA_not_in_graph(self):
with pytest.raises(nx.NetworkXError):
nx.resistance_distance(self.G, 9, 1)
def test_resistance_distance_nodeB_not_in_graph(self):
with pytest.raises(nx.NetworkXError):
nx.resistance_distance(self.G, 1, 9)
class TestBarycenter(object):
"""Test :func:`networkx.algorithms.distance_measures.barycenter`."""
def barycenter_as_subgraph(self, g, **kwargs):
"""Return the subgraph induced on the barycenter of g"""
b = nx.barycenter(g, **kwargs)
assert isinstance(b, list)
assert set(b) <= set(g)
return g.subgraph(b)
def test_must_be_connected(self):
pytest.raises(nx.NetworkXNoPath, nx.barycenter, nx.empty_graph(5))
def test_sp_kwarg(self):
# Complete graph K_5. Normally it works...
K_5 = nx.complete_graph(5)
sp = dict(nx.shortest_path_length(K_5))
assert nx.barycenter(K_5, sp=sp) == list(K_5)
# ...but not with the weight argument
for u, v, data in K_5.edges.data():
data['weight'] = 1
pytest.raises(ValueError, nx.barycenter, K_5, sp=sp, weight='weight')
# ...and a corrupted sp can make it seem like K_5 is disconnected
del sp[0][1]
pytest.raises(nx.NetworkXNoPath, nx.barycenter, K_5, sp=sp)
def test_trees(self):
"""The barycenter of a tree is a single vertex or an edge.
See [West01]_, p. 78.
"""
prng = Random(0xdeadbeef)
for i in range(50):
RT = nx.random_tree(prng.randint(1, 75), prng)
b = self.barycenter_as_subgraph(RT)
if len(b) == 2:
assert b.size() == 1
else:
assert len(b) == 1
assert b.size() == 0
def test_this_one_specific_tree(self):
"""Test the tree pictured at the bottom of [West01]_, p. 78."""
g = nx.Graph({
'a': ['b'],
'b': ['a', 'x'],
'x': ['b', 'y'],
'y': ['x', 'z'],
'z': ['y', 0, 1, 2, 3, 4],
0: ['z'], 1: ['z'], 2: ['z'], 3: ['z'], 4: ['z']})
b = self.barycenter_as_subgraph(g, attr='barycentricity')
assert list(b) == ['z']
assert not b.edges
expected_barycentricity = {0: 23, 1: 23, 2: 23, 3: 23, 4: 23,
'a': 35, 'b': 27, 'x': 21, 'y': 17, 'z': 15
}
for node, barycentricity in expected_barycentricity.items():
assert g.nodes[node]['barycentricity'] == barycentricity
# Doubling weights should do nothing but double the barycentricities
for edge in g.edges:
g.edges[edge]['weight'] = 2
b = self.barycenter_as_subgraph(g, weight='weight',
attr='barycentricity2')
assert list(b) == ['z']
assert not b.edges
for node, barycentricity in expected_barycentricity.items():
assert g.nodes[node]['barycentricity2'] == barycentricity*2
```
#### File: algorithms/tests/test_smallworld.py
```python
import pytest
numpy = pytest.importorskip('numpy')
import random
from networkx import random_reference, lattice_reference, sigma, omega
import networkx as nx
rng = 42  # fixed integer seed keeps the randomized graphs reproducible
def test_random_reference():
G = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng)
Gr = random_reference(G, niter=1, seed=rng)
C = nx.average_clustering(G)
Cr = nx.average_clustering(Gr)
assert C > Cr
pytest.raises(nx.NetworkXError, random_reference, nx.Graph())
pytest.raises(nx.NetworkXNotImplemented, random_reference, nx.DiGraph())
H = nx.Graph(((0, 1), (2, 3)))
Hl = random_reference(H, niter=1, seed=rng)
def test_lattice_reference():
G = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng)
Gl = lattice_reference(G, niter=1, seed=rng)
L = nx.average_shortest_path_length(G)
Ll = nx.average_shortest_path_length(Gl)
assert Ll > L
pytest.raises(nx.NetworkXError, lattice_reference, nx.Graph())
pytest.raises(nx.NetworkXNotImplemented, lattice_reference, nx.DiGraph())
H = nx.Graph(((0, 1), (2, 3)))
Hl = lattice_reference(H, niter=1)
def test_sigma():
Gs = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng)
Gr = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng)
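# Gs (p=0.1) keeps lattice-like clustering while Gr (p=1) is fully rewired, so Gs should score the higher small-world sigma.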
sigmas = sigma(Gs, niter=1, nrand=2, seed=rng)
sigmar = sigma(Gr, niter=1, nrand=2, seed=rng)
assert sigmar < sigmas
def test_omega():
Gl = nx.connected_watts_strogatz_graph(50, 6, 0, seed=rng)
Gr = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng)
Gs = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng)
omegal = omega(Gl, niter=1, nrand=1, seed=rng)
omegar = omega(Gr, niter=1, nrand=1, seed=rng)
omegas = omega(Gs, niter=1, nrand=1, seed=rng)
print("omegas, omegal, omegar")
print(omegas, omegal, omegar)
assert omegal < omegas and omegas < omegar
```
#### File: traversal/tests/test_dfs.py
```python
import networkx as nx
class TestDFS:
@classmethod
def setup_class(cls):
# simple graph
G = nx.Graph()
G.add_edges_from([(0, 1), (1, 2), (1, 3), (2, 4), (3, 4)])
cls.G = G
# simple graph, disconnected
D = nx.Graph()
D.add_edges_from([(0, 1), (2, 3)])
cls.D = D
def test_preorder_nodes(self):
assert (list(nx.dfs_preorder_nodes(self.G, source=0)) ==
[0, 1, 2, 4, 3])
assert list(nx.dfs_preorder_nodes(self.D)) == [0, 1, 2, 3]
def test_postorder_nodes(self):
assert (list(nx.dfs_postorder_nodes(self.G, source=0)) ==
[3, 4, 2, 1, 0])
assert list(nx.dfs_postorder_nodes(self.D)) == [1, 0, 3, 2]
def test_successor(self):
assert (nx.dfs_successors(self.G, source=0) ==
{0: [1], 1: [2], 2: [4], 4: [3]})
assert nx.dfs_successors(self.D) == {0: [1], 2: [3]}
def test_predecessor(self):
assert (nx.dfs_predecessors(self.G, source=0) ==
{1: 0, 2: 1, 3: 4, 4: 2})
assert nx.dfs_predecessors(self.D) == {1: 0, 3: 2}
def test_dfs_tree(self):
exp_nodes = sorted(self.G.nodes())
exp_edges = [(0, 1), (1, 2), (2, 4), (4, 3)]
# Search from first node
T = nx.dfs_tree(self.G, source=0)
assert sorted(T.nodes()) == exp_nodes
assert sorted(T.edges()) == exp_edges
# Check source=None
T = nx.dfs_tree(self.G, source=None)
assert sorted(T.nodes()) == exp_nodes
assert sorted(T.edges()) == exp_edges
# Check source=None is the default
T = nx.dfs_tree(self.G)
assert sorted(T.nodes()) == exp_nodes
assert sorted(T.edges()) == exp_edges
def test_dfs_edges(self):
edges = nx.dfs_edges(self.G, source=0)
assert list(edges) == [(0, 1), (1, 2), (2, 4), (4, 3)]
edges = nx.dfs_edges(self.D)
assert list(edges) == [(0, 1), (2, 3)]
def test_dfs_labeled_edges(self):
edges = list(nx.dfs_labeled_edges(self.G, source=0))
forward = [(u, v) for (u, v, d) in edges if d == 'forward']
assert forward == [(0, 0), (0, 1), (1, 2), (2, 4), (4, 3)]
def test_dfs_labeled_disconnected_edges(self):
edges = list(nx.dfs_labeled_edges(self.D))
forward = [(u, v) for (u, v, d) in edges if d == 'forward']
assert forward == [(0, 0), (0, 1), (2, 2), (2, 3)]
def test_dfs_tree_isolates(self):
G = nx.Graph()
G.add_node(1)
G.add_node(2)
T = nx.dfs_tree(G, source=1)
assert sorted(T.nodes()) == [1]
assert sorted(T.edges()) == []
T = nx.dfs_tree(G, source=None)
assert sorted(T.nodes()) == [1, 2]
assert sorted(T.edges()) == []
class TestDepthLimitedSearch:
@classmethod
def setup_class(cls):
# a tree
G = nx.Graph()
nx.add_path(G, [0, 1, 2, 3, 4, 5, 6])
nx.add_path(G, [2, 7, 8, 9, 10])
cls.G = G
# a disconnected graph
D = nx.Graph()
D.add_edges_from([(0, 1), (2, 3)])
nx.add_path(D, [2, 7, 8, 9, 10])
cls.D = D
def test_dls_preorder_nodes(self):
assert list(nx.dfs_preorder_nodes(self.G, source=0,
depth_limit=2)) == [0, 1, 2]
assert list(nx.dfs_preorder_nodes(self.D, source=1,
depth_limit=2)) == ([1, 0])
def test_dls_postorder_nodes(self):
assert list(nx.dfs_postorder_nodes(self.G,
source=3, depth_limit=3)) == [1, 7, 2, 5, 4, 3]
assert list(nx.dfs_postorder_nodes(self.D,
source=2, depth_limit=2)) == ([3, 7, 2])
def test_dls_successor(self):
result = nx.dfs_successors(self.G, source=4, depth_limit=3)
assert ({n: set(v) for n, v in result.items()} ==
{2: {1, 7}, 3: {2}, 4: {3, 5}, 5: {6}})
result = nx.dfs_successors(self.D, source=7, depth_limit=2)
assert ({n: set(v) for n, v in result.items()} ==
{8: {9}, 2: {3}, 7: {8, 2}})
def test_dls_predecessor(self):
assert (nx.dfs_predecessors(self.G, source=0, depth_limit=3) ==
{1: 0, 2: 1, 3: 2, 7: 2})
assert (nx.dfs_predecessors(self.D, source=2, depth_limit=3) ==
{8: 7, 9: 8, 3: 2, 7: 2})
def test_dls_tree(self):
T = nx.dfs_tree(self.G, source=3, depth_limit=1)
assert sorted(T.edges()) == [(3, 2), (3, 4)]
def test_dls_edges(self):
edges = nx.dfs_edges(self.G, source=9, depth_limit=4)
assert list(edges) == [(9, 8), (8, 7),
(7, 2), (2, 1), (2, 3), (9, 10)]
def test_dls_labeled_edges(self):
edges = list(nx.dfs_labeled_edges(self.G, source=5, depth_limit=1))
forward = [(u, v) for (u, v, d) in edges if d == 'forward']
assert forward == [(5, 5), (5, 4), (5, 6)]
def test_dls_labeled_disconnected_edges(self):
edges = list(nx.dfs_labeled_edges(self.G, source=6, depth_limit=2))
forward = [(u, v) for (u, v, d) in edges if d == 'forward']
assert forward == [(6, 6), (6, 5), (5, 4)]
```
#### File: classes/tests/test_filters.py
```python
import pytest
import networkx as nx
class TestFilterFactory(object):
def test_no_filter(self):
nf = nx.filters.no_filter
assert nf()
assert nf(1)
assert nf(2, 1)
def test_hide_nodes(self):
f = nx.classes.filters.hide_nodes([1, 2, 3])
assert not f(1)
assert not f(2)
assert not f(3)
assert f(4)
assert f(0)
assert f('a')
pytest.raises(TypeError, f, 1, 2)
pytest.raises(TypeError, f)
def test_show_nodes(self):
f = nx.classes.filters.show_nodes([1, 2, 3])
assert f(1)
assert f(2)
assert f(3)
assert not f(4)
assert not f(0)
assert not f('a')
pytest.raises(TypeError, f, 1, 2)
pytest.raises(TypeError, f)
def test_hide_edges(self):
factory = nx.classes.filters.hide_edges
f = factory([(1, 2), (3, 4)])
assert not f(1, 2)
assert not f(3, 4)
assert not f(4, 3)
assert f(2, 3)
assert f(0, -1)
assert f('a', 'b')
pytest.raises(TypeError, f, 1, 2, 3)
pytest.raises(TypeError, f, 1)
pytest.raises(TypeError, f)
pytest.raises(TypeError, factory, [1, 2, 3])
pytest.raises(ValueError, factory, [(1, 2, 3)])
def test_show_edges(self):
factory = nx.classes.filters.show_edges
f = factory([(1, 2), (3, 4)])
assert f(1, 2)
assert f(3, 4)
assert f(4, 3)
assert not f(2, 3)
assert not f(0, -1)
assert not f('a', 'b')
pytest.raises(TypeError, f, 1, 2, 3)
pytest.raises(TypeError, f, 1)
pytest.raises(TypeError, f)
pytest.raises(TypeError, factory, [1, 2, 3])
pytest.raises(ValueError, factory, [(1, 2, 3)])
def test_hide_diedges(self):
factory = nx.classes.filters.hide_diedges
f = factory([(1, 2), (3, 4)])
assert not f(1, 2)
assert not f(3, 4)
assert f(4, 3)
assert f(2, 3)
assert f(0, -1)
assert f('a', 'b')
pytest.raises(TypeError, f, 1, 2, 3)
pytest.raises(TypeError, f, 1)
pytest.raises(TypeError, f)
pytest.raises(TypeError, factory, [1, 2, 3])
pytest.raises(ValueError, factory, [(1, 2, 3)])
def test_show_diedges(self):
factory = nx.classes.filters.show_diedges
f = factory([(1, 2), (3, 4)])
assert f(1, 2)
assert f(3, 4)
assert not f(4, 3)
assert not f(2, 3)
assert not f(0, -1)
assert not f('a', 'b')
pytest.raises(TypeError, f, 1, 2, 3)
pytest.raises(TypeError, f, 1)
pytest.raises(TypeError, f)
pytest.raises(TypeError, factory, [1, 2, 3])
pytest.raises(ValueError, factory, [(1, 2, 3)])
def test_hide_multiedges(self):
factory = nx.classes.filters.hide_multiedges
f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)])
assert not f(1, 2, 0)
assert not f(1, 2, 1)
assert f(1, 2, 2)
assert f(3, 4, 0)
assert not f(3, 4, 1)
assert not f(4, 3, 1)
assert f(4, 3, 0)
assert f(2, 3, 0)
assert f(0, -1, 0)
assert f('a', 'b', 0)
pytest.raises(TypeError, f, 1, 2, 3, 4)
pytest.raises(TypeError, f, 1, 2)
pytest.raises(TypeError, f, 1)
pytest.raises(TypeError, f)
pytest.raises(TypeError, factory, [1, 2, 3])
pytest.raises(ValueError, factory, [(1, 2)])
pytest.raises(ValueError, factory, [(1, 2, 3, 4)])
def test_show_multiedges(self):
factory = nx.classes.filters.show_multiedges
f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)])
assert f(1, 2, 0)
assert f(1, 2, 1)
assert not f(1, 2, 2)
assert not f(3, 4, 0)
assert f(3, 4, 1)
assert f(4, 3, 1)
assert not f(4, 3, 0)
assert not f(2, 3, 0)
assert not f(0, -1, 0)
assert not f('a', 'b', 0)
pytest.raises(TypeError, f, 1, 2, 3, 4)
pytest.raises(TypeError, f, 1, 2)
pytest.raises(TypeError, f, 1)
pytest.raises(TypeError, f)
pytest.raises(TypeError, factory, [1, 2, 3])
pytest.raises(ValueError, factory, [(1, 2)])
pytest.raises(ValueError, factory, [(1, 2, 3, 4)])
def test_hide_multidiedges(self):
factory = nx.classes.filters.hide_multidiedges
f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)])
assert not f(1, 2, 0)
assert not f(1, 2, 1)
assert f(1, 2, 2)
assert f(3, 4, 0)
assert not f(3, 4, 1)
assert f(4, 3, 1)
assert f(4, 3, 0)
assert f(2, 3, 0)
assert f(0, -1, 0)
assert f('a', 'b', 0)
pytest.raises(TypeError, f, 1, 2, 3, 4)
pytest.raises(TypeError, f, 1, 2)
pytest.raises(TypeError, f, 1)
pytest.raises(TypeError, f)
pytest.raises(TypeError, factory, [1, 2, 3])
pytest.raises(ValueError, factory, [(1, 2)])
pytest.raises(ValueError, factory, [(1, 2, 3, 4)])
def test_show_multidiedges(self):
factory = nx.classes.filters.show_multidiedges
f = factory([(1, 2, 0), (3, 4, 1), (1, 2, 1)])
assert f(1, 2, 0)
assert f(1, 2, 1)
assert not f(1, 2, 2)
assert not f(3, 4, 0)
assert f(3, 4, 1)
assert not f(4, 3, 1)
assert not f(4, 3, 0)
assert not f(2, 3, 0)
assert not f(0, -1, 0)
assert not f('a', 'b', 0)
pytest.raises(TypeError, f, 1, 2, 3, 4)
pytest.raises(TypeError, f, 1, 2)
pytest.raises(TypeError, f, 1)
pytest.raises(TypeError, f)
pytest.raises(TypeError, factory, [1, 2, 3])
pytest.raises(ValueError, factory, [(1, 2)])
pytest.raises(ValueError, factory, [(1, 2, 3, 4)])
```
#### File: classes/tests/test_reportviews.py
```python
import pytest
import networkx as nx
# Nodes
class TestNodeView(object):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9)
cls.nv = cls.G.nodes # NodeView(G)
def test_pickle(self):
import pickle
nv = self.nv
pnv = pickle.loads(pickle.dumps(nv, -1))
assert nv == pnv
assert nv.__slots__ == pnv.__slots__
def test_str(self):
assert str(self.nv) == "[0, 1, 2, 3, 4, 5, 6, 7, 8]"
def test_repr(self):
assert repr(self.nv) == "NodeView((0, 1, 2, 3, 4, 5, 6, 7, 8))"
def test_contains(self):
G = self.G.copy()
nv = G.nodes
assert 7 in nv
assert 9 not in nv
G.remove_node(7)
G.add_node(9)
assert 7 not in nv
assert 9 in nv
def test_getitem(self):
G = self.G.copy()
nv = G.nodes
G.nodes[3]['foo'] = 'bar'
assert nv[7] == {}
assert nv[3] == {'foo': 'bar'}
def test_iter(self):
nv = self.nv
for i, n in enumerate(nv):
assert i == n
inv = iter(nv)
assert next(inv) == 0
assert iter(nv) != nv
assert iter(inv) == inv
inv2 = iter(nv)
next(inv2)
assert list(inv) == list(inv2)
# odd case where NodeView calls NodeDataView with data=False
nnv = nv(data=False)
for i, n in enumerate(nnv):
assert i == n
def test_call(self):
nodes = self.nv
assert nodes is nodes()
assert nodes is not nodes(data=True)
assert nodes is not nodes(data='weight')
class TestNodeDataView(object):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9)
cls.nv = cls.G.nodes.data() # NodeDataView(G)
cls.ndv = cls.G.nodes.data(True)
cls.nwv = cls.G.nodes.data('foo')
def test_viewtype(self):
nv = self.G.nodes
ndvfalse = nv.data(False)
assert nv is ndvfalse
assert nv is not self.ndv
def test_pickle(self):
import pickle
nv = self.nv
pnv = pickle.loads(pickle.dumps(nv, -1))
assert nv == pnv
assert nv.__slots__ == pnv.__slots__
def test_str(self):
msg = str([(n, {}) for n in range(9)])
assert str(self.ndv) == msg
def test_repr(self):
msg = "NodeDataView({0: {}, 1: {}, 2: {}, 3: {}, " + \
"4: {}, 5: {}, 6: {}, 7: {}, 8: {}})"
assert repr(self.ndv) == msg
def test_contains(self):
G = self.G.copy()
nv = G.nodes.data()
nwv = G.nodes.data('foo')
G.nodes[3]['foo'] = 'bar'
assert (7, {}) in nv
assert (3, {'foo': 'bar'}) in nv
assert (3, 'bar') in nwv
assert (7, None) in nwv
# default
nwv_def = G.nodes(data='foo', default='biz')
assert (7, 'biz') in nwv_def
assert (3, 'bar') in nwv_def
def test_getitem(self):
G = self.G.copy()
nv = G.nodes
G.nodes[3]['foo'] = 'bar'
assert nv[3] == {'foo': 'bar'}
# default
nwv_def = G.nodes(data='foo', default='biz')
assert nwv_def[7] == 'biz'
assert nwv_def[3] == 'bar'
def test_iter(self):
G = self.G.copy()
nv = G.nodes.data()
ndv = G.nodes.data(True)
nwv = G.nodes.data('foo')
for i, (n, d) in enumerate(nv):
assert i == n
assert d == {}
inv = iter(nv)
assert next(inv) == (0, {})
G.nodes[3]['foo'] = 'bar'
# default
for n, d in nv:
if n == 3:
assert d == {'foo': 'bar'}
else:
assert d == {}
# data=True
for n, d in ndv:
if n == 3:
assert d == {'foo': 'bar'}
else:
assert d == {}
# data='foo'
for n, d in nwv:
if n == 3:
assert d == 'bar'
else:
assert d is None
# data='foo', default=1
for n, d in G.nodes.data('foo', default=1):
if n == 3:
assert d == 'bar'
else:
assert d == 1
def test_nodedataview_unhashable():
G = nx.path_graph(9)
G.nodes[3]['foo'] = 'bar'
nvs = [G.nodes.data()]
nvs.append(G.nodes.data(True))
H = G.copy()
H.nodes[4]['foo'] = {1, 2, 3}
nvs.append(H.nodes.data(True))
# raise unhashable
for nv in nvs:
pytest.raises(TypeError, set, nv)
pytest.raises(TypeError, eval, 'nv | nv', locals())
# no raise... hashable
Gn = G.nodes.data(False)
set(Gn)
Gn | Gn
Gn = G.nodes.data('foo')
set(Gn)
Gn | Gn
class TestNodeViewSetOps(object):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9)
cls.G.nodes[3]['foo'] = 'bar'
cls.nv = cls.G.nodes
def n_its(self, nodes):
return {node for node in nodes}
def test_len(self):
G = self.G.copy()
nv = G.nodes
assert len(nv) == 9
G.remove_node(7)
assert len(nv) == 8
G.add_node(9)
assert len(nv) == 9
def test_and(self):
# print("G & H nodes:", gnv & hnv)
nv = self.nv
some_nodes = self.n_its(range(5, 12))
assert nv & some_nodes == self.n_its(range(5, 9))
assert some_nodes & nv == self.n_its(range(5, 9))
def test_or(self):
# print("G | H nodes:", gnv | hnv)
nv = self.nv
some_nodes = self.n_its(range(5, 12))
assert nv | some_nodes == self.n_its(range(12))
assert some_nodes | nv == self.n_its(range(12))
def test_xor(self):
# print("G ^ H nodes:", gnv ^ hnv)
nv = self.nv
some_nodes = self.n_its(range(5, 12))
nodes = {0, 1, 2, 3, 4, 9, 10, 11}
assert nv ^ some_nodes == self.n_its(nodes)
assert some_nodes ^ nv == self.n_its(nodes)
def test_sub(self):
# print("G - H nodes:", gnv - hnv)
nv = self.nv
some_nodes = self.n_its(range(5, 12))
assert nv - some_nodes == self.n_its(range(5))
assert some_nodes - nv == self.n_its(range(9, 12))
class TestNodeDataViewSetOps(TestNodeViewSetOps):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9)
cls.G.nodes[3]['foo'] = 'bar'
cls.nv = cls.G.nodes.data('foo')
def n_its(self, nodes):
return {(node, 'bar' if node == 3 else None) for node in nodes}
class TestNodeDataViewDefaultSetOps(TestNodeDataViewSetOps):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9)
cls.G.nodes[3]['foo'] = 'bar'
cls.nv = cls.G.nodes.data('foo', default=1)
def n_its(self, nodes):
return {(node, 'bar' if node == 3 else 1) for node in nodes}
# Edges Data View
class TestEdgeDataView(object):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9)
cls.eview = nx.reportviews.EdgeView
def test_pickle(self):
import pickle
ev = self.eview(self.G)(data=True)
pev = pickle.loads(pickle.dumps(ev, -1))
assert list(ev) == list(pev)
assert ev.__slots__ == pev.__slots__
def modify_edge(self, G, e, **kwds):
G._adj[e[0]][e[1]].update(kwds)
def test_str(self):
ev = self.eview(self.G)(data=True)
rep = str([(n, n + 1, {}) for n in range(8)])
assert str(ev) == rep
def test_repr(self):
ev = self.eview(self.G)(data=True)
rep = "EdgeDataView([(0, 1, {}), (1, 2, {}), " + \
"(2, 3, {}), (3, 4, {}), " + \
"(4, 5, {}), (5, 6, {}), " + \
"(6, 7, {}), (7, 8, {})])"
assert repr(ev) == rep
def test_iterdata(self):
G = self.G.copy()
evr = self.eview(G)
ev = evr(data=True)
ev_def = evr(data='foo', default=1)
for u, v, d in ev:
pass
assert d == {}
for u, v, wt in ev_def:
pass
assert wt == 1
self.modify_edge(G, (2, 3), foo='bar')
for e in ev:
assert len(e) == 3
if set(e[:2]) == {2, 3}:
assert e[2] == {'foo': 'bar'}
checked = True
else:
assert e[2] == {}
assert checked
for e in ev_def:
assert len(e) == 3
if set(e[:2]) == {2, 3}:
assert e[2] == 'bar'
checked_wt = True
else:
assert e[2] == 1
assert checked_wt
def test_iter(self):
evr = self.eview(self.G)
ev = evr()
for u, v in ev:
pass
iev = iter(ev)
assert next(iev) == (0, 1)
assert iter(ev) != ev
assert iter(iev) == iev
def test_contains(self):
evr = self.eview(self.G)
ev = evr()
if self.G.is_directed():
assert (1, 2) in ev and (2, 1) not in ev
else:
assert (1, 2) in ev and (2, 1) in ev
assert not (1, 4) in ev
assert not (1, 90) in ev
assert not (90, 1) in ev
def test_len(self):
evr = self.eview(self.G)
ev = evr(data='foo')
assert len(ev) == 8
assert len(evr(1)) == 2
assert len(evr([1, 2, 3])) == 4
assert len(self.G.edges(1)) == 2
assert len(self.G.edges()) == 8
assert len(self.G.edges) == 8
H = self.G.copy()
H.add_edge(1, 1)
assert len(H.edges(1)) == 3
assert len(H.edges()) == 9
assert len(H.edges) == 9
class TestOutEdgeDataView(TestEdgeDataView):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9, create_using=nx.DiGraph())
cls.eview = nx.reportviews.OutEdgeView
def test_repr(self):
ev = self.eview(self.G)(data=True)
rep = "OutEdgeDataView([(0, 1, {}), (1, 2, {}), " + \
"(2, 3, {}), (3, 4, {}), " + \
"(4, 5, {}), (5, 6, {}), " + \
"(6, 7, {}), (7, 8, {})])"
assert repr(ev) == rep
def test_len(self):
evr = self.eview(self.G)
ev = evr(data='foo')
assert len(ev) == 8
assert len(evr(1)) == 1
assert len(evr([1, 2, 3])) == 3
assert len(self.G.edges(1)) == 1
assert len(self.G.edges()) == 8
assert len(self.G.edges) == 8
H = self.G.copy()
H.add_edge(1, 1)
assert len(H.edges(1)) == 2
assert len(H.edges()) == 9
assert len(H.edges) == 9
class TestInEdgeDataView(TestOutEdgeDataView):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9, create_using=nx.DiGraph())
cls.eview = nx.reportviews.InEdgeView
def test_repr(self):
ev = self.eview(self.G)(data=True)
rep = "InEdgeDataView([(0, 1, {}), (1, 2, {}), " + \
"(2, 3, {}), (3, 4, {}), " + \
"(4, 5, {}), (5, 6, {}), " + \
"(6, 7, {}), (7, 8, {})])"
assert repr(ev) == rep
class TestMultiEdgeDataView(TestEdgeDataView):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9, create_using=nx.MultiGraph())
cls.eview = nx.reportviews.MultiEdgeView
def modify_edge(self, G, e, **kwds):
G._adj[e[0]][e[1]][0].update(kwds)
def test_repr(self):
ev = self.eview(self.G)(data=True)
rep = "MultiEdgeDataView([(0, 1, {}), (1, 2, {}), " + \
"(2, 3, {}), (3, 4, {}), " + \
"(4, 5, {}), (5, 6, {}), " + \
"(6, 7, {}), (7, 8, {})])"
assert repr(ev) == rep
class TestOutMultiEdgeDataView(TestOutEdgeDataView):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9, create_using=nx.MultiDiGraph())
cls.eview = nx.reportviews.OutMultiEdgeView
def modify_edge(self, G, e, **kwds):
G._adj[e[0]][e[1]][0].update(kwds)
def test_repr(self):
ev = self.eview(self.G)(data=True)
rep = "OutMultiEdgeDataView([(0, 1, {}), (1, 2, {}), " + \
"(2, 3, {}), (3, 4, {}), " + \
"(4, 5, {}), (5, 6, {}), " + \
"(6, 7, {}), (7, 8, {})])"
assert repr(ev) == rep
class TestInMultiEdgeDataView(TestOutMultiEdgeDataView):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9, create_using=nx.MultiDiGraph())
cls.eview = nx.reportviews.InMultiEdgeView
def test_repr(self):
ev = self.eview(self.G)(data=True)
rep = "InMultiEdgeDataView([(0, 1, {}), (1, 2, {}), " + \
"(2, 3, {}), (3, 4, {}), " + \
"(4, 5, {}), (5, 6, {}), " + \
"(6, 7, {}), (7, 8, {})])"
assert repr(ev) == rep
# Edge Views
class TestEdgeView(object):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9)
cls.eview = nx.reportviews.EdgeView
def test_pickle(self):
import pickle
ev = self.eview(self.G)
pev = pickle.loads(pickle.dumps(ev, -1))
assert ev == pev
assert ev.__slots__ == pev.__slots__
def modify_edge(self, G, e, **kwds):
G._adj[e[0]][e[1]].update(kwds)
def test_str(self):
ev = self.eview(self.G)
rep = str([(n, n + 1) for n in range(8)])
assert str(ev) == rep
def test_repr(self):
ev = self.eview(self.G)
rep = "EdgeView([(0, 1), (1, 2), (2, 3), (3, 4), " + \
"(4, 5), (5, 6), (6, 7), (7, 8)])"
assert repr(ev) == rep
def test_call(self):
ev = self.eview(self.G)
assert id(ev) == id(ev())
assert id(ev) == id(ev(data=False))
assert id(ev) != id(ev(data=True))
assert id(ev) != id(ev(nbunch=1))
def test_data(self):
ev = self.eview(self.G)
assert id(ev) != id(ev.data())
assert id(ev) == id(ev.data(data=False))
assert id(ev) != id(ev.data(data=True))
assert id(ev) != id(ev.data(nbunch=1))
def test_iter(self):
ev = self.eview(self.G)
for u, v in ev:
pass
iev = iter(ev)
assert next(iev) == (0, 1)
assert iter(ev) != ev
assert iter(iev) == iev
def test_contains(self):
ev = self.eview(self.G)
edv = ev()
if self.G.is_directed():
assert (1, 2) in ev and (2, 1) not in ev
assert (1, 2) in edv and (2, 1) not in edv
else:
assert (1, 2) in ev and (2, 1) in ev
assert (1, 2) in edv and (2, 1) in edv
assert not (1, 4) in ev
assert not (1, 4) in edv
# edge not in graph
assert not (1, 90) in ev
assert not (90, 1) in ev
assert not (1, 90) in edv
assert not (90, 1) in edv
def test_len(self):
ev = self.eview(self.G)
num_ed = 9 if self.G.is_multigraph() else 8
assert len(ev) == num_ed
H = self.G.copy()
H.add_edge(1, 1)
assert len(H.edges(1)) == 3 + H.is_multigraph() - H.is_directed()
assert len(H.edges()) == num_ed + 1
assert len(H.edges) == num_ed + 1
def test_and(self):
# print("G & H edges:", gnv & hnv)
ev = self.eview(self.G)
some_edges = {(0, 1), (1, 0), (0, 2)}
if self.G.is_directed():
assert some_edges & ev == {(0, 1)}
assert ev & some_edges == {(0, 1)}
else:
assert ev & some_edges == {(0, 1), (1, 0)}
assert some_edges & ev == {(0, 1), (1, 0)}
return
def test_or(self):
# print("G | H edges:", gnv | hnv)
ev = self.eview(self.G)
some_edges = {(0, 1), (1, 0), (0, 2)}
result1 = {(n, n + 1) for n in range(8)}
result1.update(some_edges)
result2 = {(n + 1, n) for n in range(8)}
result2.update(some_edges)
assert (ev | some_edges) in (result1, result2)
assert (some_edges | ev) in (result1, result2)
def test_xor(self):
# print("G ^ H edges:", gnv ^ hnv)
ev = self.eview(self.G)
some_edges = {(0, 1), (1, 0), (0, 2)}
if self.G.is_directed():
result = {(n, n + 1) for n in range(1, 8)}
result.update({(1, 0), (0, 2)})
assert ev ^ some_edges == result
else:
result = {(n, n + 1) for n in range(1, 8)}
result.update({(0, 2)})
assert ev ^ some_edges == result
return
def test_sub(self):
# print("G - H edges:", gnv - hnv)
ev = self.eview(self.G)
some_edges = {(0, 1), (1, 0), (0, 2)}
result = {(n, n + 1) for n in range(8)}
result.remove((0, 1))
assert ev - some_edges == result
class TestOutEdgeView(TestEdgeView):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9, nx.DiGraph())
cls.eview = nx.reportviews.OutEdgeView
def test_repr(self):
ev = self.eview(self.G)
rep = "OutEdgeView([(0, 1), (1, 2), (2, 3), (3, 4), " + \
"(4, 5), (5, 6), (6, 7), (7, 8)])"
assert repr(ev) == rep
class TestInEdgeView(TestEdgeView):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9, nx.DiGraph())
cls.eview = nx.reportviews.InEdgeView
def test_repr(self):
ev = self.eview(self.G)
rep = "InEdgeView([(0, 1), (1, 2), (2, 3), (3, 4), " + \
"(4, 5), (5, 6), (6, 7), (7, 8)])"
assert repr(ev) == rep
class TestMultiEdgeView(TestEdgeView):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9, nx.MultiGraph())
cls.G.add_edge(1, 2, key=3, foo='bar')
cls.eview = nx.reportviews.MultiEdgeView
def modify_edge(self, G, e, **kwds):
if len(e) == 2:
e = e + (0,)
G._adj[e[0]][e[1]][e[2]].update(kwds)
def test_str(self):
ev = self.eview(self.G)
replist = [(n, n + 1, 0) for n in range(8)]
replist.insert(2, (1, 2, 3))
rep = str(replist)
assert str(ev) == rep
def test_repr(self):
ev = self.eview(self.G)
rep = "MultiEdgeView([(0, 1, 0), (1, 2, 0), (1, 2, 3), (2, 3, 0), " + \
"(3, 4, 0), (4, 5, 0), (5, 6, 0), (6, 7, 0), (7, 8, 0)])"
assert repr(ev) == rep
def test_call(self):
ev = self.eview(self.G)
assert id(ev) == id(ev(keys=True))
assert id(ev) == id(ev(data=False, keys=True))
assert id(ev) != id(ev(keys=False))
assert id(ev) != id(ev(data=True))
assert id(ev) != id(ev(nbunch=1))
def test_data(self):
ev = self.eview(self.G)
assert id(ev) != id(ev.data())
assert id(ev) == id(ev.data(data=False, keys=True))
assert id(ev) != id(ev.data(keys=False))
assert id(ev) != id(ev.data(data=True))
assert id(ev) != id(ev.data(nbunch=1))
def test_iter(self):
ev = self.eview(self.G)
for u, v, k in ev:
pass
iev = iter(ev)
assert next(iev) == (0, 1, 0)
assert iter(ev) != ev
assert iter(iev) == iev
def test_iterkeys(self):
G = self.G
evr = self.eview(G)
ev = evr(keys=True)
for u, v, k in ev:
pass
assert k == 0
ev = evr(keys=True, data="foo", default=1)
for u, v, k, wt in ev:
pass
assert wt == 1
self.modify_edge(G, (2, 3, 0), foo='bar')
ev = evr(keys=True, data=True)
for e in ev:
assert len(e) == 4
print('edge:', e)
if set(e[:2]) == {2, 3}:
print(self.G._adj[2][3])
assert e[2] == 0
assert e[3] == {'foo': 'bar'}
checked = True
elif set(e[:3]) == {1, 2, 3}:
assert e[2] == 3
assert e[3] == {'foo': 'bar'}
checked_multi = True
else:
assert e[2] == 0
assert e[3] == {}
assert checked
assert checked_multi
ev = evr(keys=True, data='foo', default=1)
for e in ev:
if set(e[:2]) == {1, 2} and e[2] == 3:
assert e[3] == 'bar'
if set(e[:2]) == {1, 2} and e[2] == 0:
assert e[3] == 1
if set(e[:2]) == {2, 3}:
assert e[2] == 0
assert e[3] == 'bar'
assert len(e) == 4
checked_wt = True
assert checked_wt
ev = evr(keys=True)
for e in ev:
assert len(e) == 3
elist = sorted([(i, i + 1, 0) for i in range(8)] + [(1, 2, 3)])
assert sorted(list(ev)) == elist
# test order of arguments:graph, nbunch, data, keys, default
ev = evr((1, 2), 'foo', True, 1)
for e in ev:
if set(e[:2]) == {1, 2}:
assert e[2] in {0, 3}
if e[2] == 3:
assert e[3] == 'bar'
else: # e[2] == 0
assert e[3] == 1
if G.is_directed():
assert len(list(ev)) == 3
else:
assert len(list(ev)) == 4
def test_or(self):
# print("G | H edges:", gnv | hnv)
ev = self.eview(self.G)
some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)}
result = {(n, n + 1, 0) for n in range(8)}
result.update(some_edges)
result.update({(1, 2, 3)})
assert ev | some_edges == result
assert some_edges | ev == result
def test_sub(self):
# print("G - H edges:", gnv - hnv)
ev = self.eview(self.G)
some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)}
result = {(n, n + 1, 0) for n in range(8)}
result.remove((0, 1, 0))
result.update({(1, 2, 3)})
assert ev - some_edges, result
assert some_edges - ev, result
def test_xor(self):
# print("G ^ H edges:", gnv ^ hnv)
ev = self.eview(self.G)
some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)}
if self.G.is_directed():
result = {(n, n + 1, 0) for n in range(1, 8)}
result.update({(1, 0, 0), (0, 2, 0), (1, 2, 3)})
assert ev ^ some_edges == result
assert some_edges ^ ev == result
else:
result = {(n, n + 1, 0) for n in range(1, 8)}
result.update({(0, 2, 0), (1, 2, 3)})
assert ev ^ some_edges == result
assert some_edges ^ ev == result
def test_and(self):
# print("G & H edges:", gnv & hnv)
ev = self.eview(self.G)
some_edges = {(0, 1, 0), (1, 0, 0), (0, 2, 0)}
if self.G.is_directed():
assert ev & some_edges == {(0, 1, 0)}
assert some_edges & ev == {(0, 1, 0)}
else:
assert ev & some_edges == {(0, 1, 0), (1, 0, 0)}
assert some_edges & ev == {(0, 1, 0), (1, 0, 0)}
class TestOutMultiEdgeView(TestMultiEdgeView):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9, nx.MultiDiGraph())
cls.G.add_edge(1, 2, key=3, foo='bar')
cls.eview = nx.reportviews.OutMultiEdgeView
def modify_edge(self, G, e, **kwds):
if len(e) == 2:
e = e + (0,)
G._adj[e[0]][e[1]][e[2]].update(kwds)
def test_repr(self):
ev = self.eview(self.G)
rep = "OutMultiEdgeView([(0, 1, 0), (1, 2, 0), (1, 2, 3), (2, 3, 0),"\
+ " (3, 4, 0), (4, 5, 0), (5, 6, 0), (6, 7, 0), (7, 8, 0)])"
assert repr(ev) == rep
class TestInMultiEdgeView(TestMultiEdgeView):
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(9, nx.MultiDiGraph())
cls.G.add_edge(1, 2, key=3, foo='bar')
cls.eview = nx.reportviews.InMultiEdgeView
def modify_edge(self, G, e, **kwds):
if len(e) == 2:
e = e + (0,)
G._adj[e[0]][e[1]][e[2]].update(kwds)
def test_repr(self):
ev = self.eview(self.G)
rep = "InMultiEdgeView([(0, 1, 0), (1, 2, 0), (1, 2, 3), (2, 3, 0), "\
+ "(3, 4, 0), (4, 5, 0), (5, 6, 0), (6, 7, 0), (7, 8, 0)])"
assert repr(ev) == rep
# Degrees
class TestDegreeView(object):
GRAPH = nx.Graph
dview = nx.reportviews.DegreeView
@classmethod
def setup_class(cls):
cls.G = nx.path_graph(6, cls.GRAPH())
cls.G.add_edge(1, 3, foo=2)
cls.G.add_edge(1, 3, foo=3)
def test_pickle(self):
import pickle
deg = self.G.degree
pdeg = pickle.loads(pickle.dumps(deg, -1))
assert dict(deg) == dict(pdeg)
def test_str(self):
dv = self.dview(self.G)
rep = str([(0, 1), (1, 3), (2, 2), (3, 3), (4, 2), (5, 1)])
assert str(dv) == rep
dv = self.G.degree()
assert str(dv) == rep
def test_repr(self):
dv = self.dview(self.G)
rep = "DegreeView({0: 1, 1: 3, 2: 2, 3: 3, 4: 2, 5: 1})"
assert repr(dv) == rep
def test_iter(self):
dv = self.dview(self.G)
for n, d in dv:
pass
idv = iter(dv)
assert iter(dv) != dv
assert iter(idv) == idv
assert next(idv) == (0, dv[0])
assert next(idv) == (1, dv[1])
# weighted
dv = self.dview(self.G, weight='foo')
for n, d in dv:
pass
idv = iter(dv)
assert iter(dv) != dv
assert iter(idv) == idv
assert next(idv) == (0, dv[0])
assert next(idv) == (1, dv[1])
def test_nbunch(self):
dv = self.dview(self.G)
dvn = dv(0)
assert dvn == 1
dvn = dv([2, 3])
assert sorted(dvn) == [(2, 2), (3, 3)]
def test_getitem(self):
dv = self.dview(self.G)
assert dv[0] == 1
assert dv[1] == 3
assert dv[2] == 2
assert dv[3] == 3
dv = self.dview(self.G, weight='foo')
assert dv[0] == 1
assert dv[1] == 5
assert dv[2] == 2
assert dv[3] == 5
def test_weight(self):
dv = self.dview(self.G)
dvw = dv(0, weight='foo')
assert dvw == 1
dvw = dv(1, weight='foo')
assert dvw == 5
dvw = dv([2, 3], weight='foo')
assert sorted(dvw) == [(2, 2), (3, 5)]
dvd = dict(dv(weight='foo'))
assert dvd[0] == 1
assert dvd[1] == 5
assert dvd[2] == 2
assert dvd[3] == 5
def test_len(self):
dv = self.dview(self.G)
assert len(dv) == 6
class TestDiDegreeView(TestDegreeView):
GRAPH = nx.DiGraph
dview = nx.reportviews.DiDegreeView
def test_repr(self):
dv = self.G.degree()
rep = "DiDegreeView({0: 1, 1: 3, 2: 2, 3: 3, 4: 2, 5: 1})"
assert repr(dv) == rep
class TestOutDegreeView(TestDegreeView):
GRAPH = nx.DiGraph
dview = nx.reportviews.OutDegreeView
def test_str(self):
dv = self.dview(self.G)
rep = str([(0, 1), (1, 2), (2, 1), (3, 1), (4, 1), (5, 0)])
assert str(dv) == rep
dv = self.G.out_degree()
assert str(dv) == rep
def test_repr(self):
dv = self.G.out_degree()
rep = "OutDegreeView({0: 1, 1: 2, 2: 1, 3: 1, 4: 1, 5: 0})"
assert repr(dv) == rep
def test_nbunch(self):
dv = self.dview(self.G)
dvn = dv(0)
assert dvn == 1
dvn = dv([2, 3])
assert sorted(dvn) == [(2, 1), (3, 1)]
def test_getitem(self):
dv = self.dview(self.G)
assert dv[0] == 1
assert dv[1] == 2
assert dv[2] == 1
assert dv[3] == 1
dv = self.dview(self.G, weight='foo')
assert dv[0] == 1
assert dv[1] == 4
assert dv[2] == 1
assert dv[3] == 1
def test_weight(self):
dv = self.dview(self.G)
dvw = dv(0, weight='foo')
assert dvw == 1
dvw = dv(1, weight='foo')
assert dvw == 4
dvw = dv([2, 3], weight='foo')
assert sorted(dvw) == [(2, 1), (3, 1)]
dvd = dict(dv(weight='foo'))
assert dvd[0] == 1
assert dvd[1] == 4
assert dvd[2] == 1
assert dvd[3] == 1
class TestInDegreeView(TestDegreeView):
GRAPH = nx.DiGraph
dview = nx.reportviews.InDegreeView
def test_str(self):
dv = self.dview(self.G)
rep = str([(0, 0), (1, 1), (2, 1), (3, 2), (4, 1), (5, 1)])
assert str(dv) == rep
dv = self.G.in_degree()
assert str(dv) == rep
def test_repr(self):
dv = self.G.in_degree()
rep = "InDegreeView({0: 0, 1: 1, 2: 1, 3: 2, 4: 1, 5: 1})"
assert repr(dv) == rep
def test_nbunch(self):
dv = self.dview(self.G)
dvn = dv(0)
assert dvn == 0
dvn = dv([2, 3])
assert sorted(dvn) == [(2, 1), (3, 2)]
def test_getitem(self):
dv = self.dview(self.G)
assert dv[0] == 0
assert dv[1] == 1
assert dv[2] == 1
assert dv[3] == 2
dv = self.dview(self.G, weight='foo')
assert dv[0] == 0
assert dv[1] == 1
assert dv[2] == 1
assert dv[3] == 4
def test_weight(self):
dv = self.dview(self.G)
dvw = dv(0, weight='foo')
assert dvw == 0
dvw = dv(1, weight='foo')
assert dvw == 1
dvw = dv([2, 3], weight='foo')
assert sorted(dvw) == [(2, 1), (3, 4)]
dvd = dict(dv(weight='foo'))
assert dvd[0] == 0
assert dvd[1] == 1
assert dvd[2] == 1
assert dvd[3] == 4
class TestMultiDegreeView(TestDegreeView):
GRAPH = nx.MultiGraph
dview = nx.reportviews.MultiDegreeView
def test_str(self):
dv = self.dview(self.G)
rep = str([(0, 1), (1, 4), (2, 2), (3, 4), (4, 2), (5, 1)])
assert str(dv) == rep
dv = self.G.degree()
assert str(dv) == rep
def test_repr(self):
dv = self.G.degree()
rep = "MultiDegreeView({0: 1, 1: 4, 2: 2, 3: 4, 4: 2, 5: 1})"
assert repr(dv) == rep
def test_nbunch(self):
dv = self.dview(self.G)
dvn = dv(0)
assert dvn == 1
dvn = dv([2, 3])
assert sorted(dvn) == [(2, 2), (3, 4)]
def test_getitem(self):
dv = self.dview(self.G)
assert dv[0] == 1
assert dv[1] == 4
assert dv[2] == 2
assert dv[3] == 4
dv = self.dview(self.G, weight='foo')
assert dv[0] == 1
assert dv[1] == 7
assert dv[2] == 2
assert dv[3] == 7
def test_weight(self):
dv = self.dview(self.G)
dvw = dv(0, weight='foo')
assert dvw == 1
dvw = dv(1, weight='foo')
assert dvw == 7
dvw = dv([2, 3], weight='foo')
assert sorted(dvw) == [(2, 2), (3, 7)]
dvd = dict(dv(weight='foo'))
assert dvd[0] == 1
assert dvd[1] == 7
assert dvd[2] == 2
assert dvd[3] == 7
class TestDiMultiDegreeView(TestMultiDegreeView):
GRAPH = nx.MultiDiGraph
dview = nx.reportviews.DiMultiDegreeView
def test_repr(self):
dv = self.G.degree()
rep = "DiMultiDegreeView({0: 1, 1: 4, 2: 2, 3: 4, 4: 2, 5: 1})"
assert repr(dv) == rep
class TestOutMultiDegreeView(TestDegreeView):
GRAPH = nx.MultiDiGraph
dview = nx.reportviews.OutMultiDegreeView
def test_str(self):
dv = self.dview(self.G)
rep = str([(0, 1), (1, 3), (2, 1), (3, 1), (4, 1), (5, 0)])
assert str(dv) == rep
dv = self.G.out_degree()
assert str(dv) == rep
def test_repr(self):
dv = self.G.out_degree()
rep = "OutMultiDegreeView({0: 1, 1: 3, 2: 1, 3: 1, 4: 1, 5: 0})"
assert repr(dv) == rep
def test_nbunch(self):
dv = self.dview(self.G)
dvn = dv(0)
assert dvn == 1
dvn = dv([2, 3])
assert sorted(dvn) == [(2, 1), (3, 1)]
def test_getitem(self):
dv = self.dview(self.G)
assert dv[0] == 1
assert dv[1] == 3
assert dv[2] == 1
assert dv[3] == 1
dv = self.dview(self.G, weight='foo')
assert dv[0] == 1
assert dv[1] == 6
assert dv[2] == 1
assert dv[3] == 1
def test_weight(self):
dv = self.dview(self.G)
dvw = dv(0, weight='foo')
assert dvw == 1
dvw = dv(1, weight='foo')
assert dvw == 6
dvw = dv([2, 3], weight='foo')
assert sorted(dvw) == [(2, 1), (3, 1)]
dvd = dict(dv(weight='foo'))
assert dvd[0] == 1
assert dvd[1] == 6
assert dvd[2] == 1
assert dvd[3] == 1
class TestInMultiDegreeView(TestDegreeView):
GRAPH = nx.MultiDiGraph
dview = nx.reportviews.InMultiDegreeView
def test_str(self):
dv = self.dview(self.G)
rep = str([(0, 0), (1, 1), (2, 1), (3, 3), (4, 1), (5, 1)])
assert str(dv) == rep
dv = self.G.in_degree()
assert str(dv) == rep
def test_repr(self):
dv = self.G.in_degree()
rep = "InMultiDegreeView({0: 0, 1: 1, 2: 1, 3: 3, 4: 1, 5: 1})"
assert repr(dv) == rep
def test_nbunch(self):
dv = self.dview(self.G)
dvn = dv(0)
assert dvn == 0
dvn = dv([2, 3])
assert sorted(dvn) == [(2, 1), (3, 3)]
def test_getitem(self):
dv = self.dview(self.G)
assert dv[0] == 0
assert dv[1] == 1
assert dv[2] == 1
assert dv[3] == 3
dv = self.dview(self.G, weight='foo')
assert dv[0] == 0
assert dv[1] == 1
assert dv[2] == 1
assert dv[3] == 6
def test_weight(self):
dv = self.dview(self.G)
dvw = dv(0, weight='foo')
assert dvw == 0
dvw = dv(1, weight='foo')
assert dvw == 1
dvw = dv([2, 3], weight='foo')
assert sorted(dvw) == [(2, 1), (3, 6)]
dvd = dict(dv(weight='foo'))
assert dvd[0] == 0
assert dvd[1] == 1
assert dvd[2] == 1
assert dvd[3] == 6
```
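The tests above exercise the set-like behaviour of edge and degree views; a minimal sketch of the same behaviour on a small path graph (assuming `networkx` is installed):
```python
import networkx as nx

G = nx.path_graph(5)

# EdgeView supports membership tests and set operations.
ev = G.edges
print((1, 2) in ev)              # True (either orientation for an undirected graph)
print(ev & {(0, 1), (0, 3)})     # {(0, 1)} -- (0, 3) is not an edge of P_5

# DegreeView supports lookup, iteration and dict conversion.
dv = G.degree
print(dv[0], dv[2])              # 1 2
print(sorted(dict(dv).items()))  # [(0, 1), (1, 2), (2, 2), (3, 2), (4, 1)]
```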
#### File: networkx/generators/classic.py
```python
import itertools
import networkx as nx
from networkx.classes import Graph
from networkx.exception import NetworkXError
from itertools import accumulate
from networkx.utils import nodes_or_number
from networkx.utils import pairwise
__all__ = ['balanced_tree',
'barbell_graph',
'binomial_tree',
'complete_graph',
'complete_multipartite_graph',
'circular_ladder_graph',
'circulant_graph',
'cycle_graph',
'dorogovtsev_goltsev_mendes_graph',
'empty_graph',
'full_rary_tree',
'ladder_graph',
'lollipop_graph',
'null_graph',
'path_graph',
'star_graph',
'trivial_graph',
'turan_graph',
'wheel_graph']
# -------------------------------------------------------------------
# Some Classic Graphs
# -------------------------------------------------------------------
def _tree_edges(n, r):
if n == 0:
return
# helper function for trees
# yields edges in rooted tree at 0 with n nodes and branching ratio r
nodes = iter(range(n))
parents = [next(nodes)] # stack of max length r
while parents:
source = parents.pop(0)
for i in range(r):
try:
target = next(nodes)
parents.append(target)
yield source, target
except StopIteration:
break
def full_rary_tree(r, n, create_using=None):
"""Creates a full r-ary tree of n vertices.
Sometimes called a k-ary, n-ary, or m-ary tree.
"... all non-leaf vertices have exactly r children and all levels
are full except for some rightmost position of the bottom level
(if a leaf at the bottom level is missing, then so are all of the
leaves to its right." [1]_
Parameters
----------
r : int
branching factor of the tree
n : int
Number of nodes in the tree
create_using : NetworkX graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
Returns
-------
G : networkx Graph
An r-ary tree with n nodes
References
----------
.. [1] An introduction to data structures and algorithms,
<NAME>, Birkhauser Boston 2001, (page 225).
"""
G = empty_graph(n, create_using)
G.add_edges_from(_tree_edges(n, r))
return G
def balanced_tree(r, h, create_using=None):
"""Returns the perfectly balanced `r`-ary tree of height `h`.
Parameters
----------
r : int
Branching factor of the tree; each node will have `r`
children.
h : int
Height of the tree.
create_using : NetworkX graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
Returns
-------
G : NetworkX graph
A balanced `r`-ary tree of height `h`.
Notes
-----
This is the rooted tree where all leaves are at distance `h` from
the root. The root has degree `r` and all other internal nodes
have degree `r + 1`.
Node labels are integers, starting from zero.
A balanced tree is also known as a *complete r-ary tree*.
"""
# The number of nodes in the balanced tree is `1 + r + ... + r^h`,
# which is computed by using the closed-form formula for a geometric
# sum with ratio `r`. In the special case that `r` is 1, the number
# of nodes is simply `h + 1` (since the tree is actually a path
# graph).
if r == 1:
n = h + 1
else:
# This must be an integer if both `r` and `h` are integers. If
# they are not, we force integer division anyway.
n = (1 - r ** (h + 1)) // (1 - r)
return full_rary_tree(r, n, create_using=create_using)
def barbell_graph(m1, m2, create_using=None):
"""Returns the Barbell Graph: two complete graphs connected by a path.
For $m1 > 1$ and $m2 >= 0$.
Two identical complete graphs $K_{m1}$ form the left and right bells,
and are connected by a path $P_{m2}$.
The `2*m1+m2` nodes are numbered
`0, ..., m1-1` for the left barbell,
`m1, ..., m1+m2-1` for the path,
and `m1+m2, ..., 2*m1+m2-1` for the right barbell.
The 3 subgraphs are joined via the edges `(m1-1, m1)` and
`(m1+m2-1, m1+m2)`. If `m2=0`, this is merely two complete
graphs joined together.
This graph is an extremal example in David Aldous
and <NAME>'s e-text on Random Walks on Graphs.
"""
if m1 < 2:
raise NetworkXError(
"Invalid graph description, m1 should be >=2")
if m2 < 0:
raise NetworkXError(
"Invalid graph description, m2 should be >=0")
# left barbell
G = complete_graph(m1, create_using)
if G.is_directed():
raise NetworkXError("Directed Graph not supported")
# connecting path
G.add_nodes_from(range(m1, m1 + m2 - 1))
if m2 > 1:
G.add_edges_from(pairwise(range(m1, m1 + m2)))
# right barbell
G.add_edges_from((u, v) for u in range(m1 + m2, 2 * m1 + m2)
for v in range(u + 1, 2 * m1 + m2))
# connect it up
G.add_edge(m1 - 1, m1)
if m2 > 0:
G.add_edge(m1 + m2 - 1, m1 + m2)
return G
def binomial_tree(n):
"""Returns the Binomial Tree of order n.
The binomial tree of order 0 consists of a single vertex. A binomial tree of order k
is defined recursively by linking two binomial trees of order k-1: the root of one is
the leftmost child of the root of the other.
Parameters
----------
n : int
Order of the binomial tree.
Returns
-------
G : NetworkX graph
A binomial tree of $2^n$ vertices and $2^n - 1$ edges.
"""
G = nx.empty_graph(1)
N = 1
for i in range(n):
edges = [(u + N, v + N) for (u, v) in G.edges]
G.add_edges_from(edges)
G.add_edge(0, N)
N *= 2
return G
@nodes_or_number(0)
def complete_graph(n, create_using=None):
""" Return the complete graph `K_n` with n nodes.
Parameters
----------
n : int or iterable container of nodes
If n is an integer, nodes are from range(n).
If n is a container of nodes, those nodes appear in the graph.
create_using : NetworkX graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
Examples
--------
>>> G = nx.complete_graph(9)
>>> len(G)
9
>>> G.size()
36
>>> G = nx.complete_graph(range(11, 14))
>>> list(G.nodes())
[11, 12, 13]
>>> G = nx.complete_graph(4, nx.DiGraph())
>>> G.is_directed()
True
"""
n_name, nodes = n
G = empty_graph(n_name, create_using)
if len(nodes) > 1:
if G.is_directed():
edges = itertools.permutations(nodes, 2)
else:
edges = itertools.combinations(nodes, 2)
G.add_edges_from(edges)
return G
def circular_ladder_graph(n, create_using=None):
"""Returns the circular ladder graph $CL_n$ of length n.
$CL_n$ consists of two concentric n-cycles in which
each of the n pairs of concentric nodes are joined by an edge.
Node labels are the integers 0 to n-1
"""
G = ladder_graph(n, create_using)
G.add_edge(0, n - 1)
G.add_edge(n, 2 * n - 1)
return G
def circulant_graph(n, offsets, create_using=None):
"""Generates the circulant graph $Ci_n(x_1, x_2, ..., x_m)$ with $n$ vertices.
Returns
-------
The graph $Ci_n(x_1, ..., x_m)$ consisting of $n$ vertices $0, ..., n-1$ such
that the vertex with label $i$ is connected to the vertices labelled $(i + x)$
and $(i - x)$, for all $x$ in $x_1$ up to $x_m$, with the indices taken modulo $n$.
Parameters
----------
n : integer
The number of vertices the generated graph is to contain.
offsets : list of integers
A list of vertex offsets, $x_1$ up to $x_m$, as described above.
create_using : NetworkX graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
Examples
--------
Many well-known graph families are subfamilies of the circulant graphs;
for example, to generate the cycle graph on n points, we connect every
vertex to every other at offset plus or minus one. For n = 10,
>>> import networkx
>>> G = networkx.generators.classic.circulant_graph(10, [1])
>>> edges = [
... (0, 9), (0, 1), (1, 2), (2, 3), (3, 4),
... (4, 5), (5, 6), (6, 7), (7, 8), (8, 9)]
...
>>> sorted(edges) == sorted(G.edges())
True
Similarly, we can generate the complete graph on 5 points with the set of
offsets [1, 2]:
>>> G = networkx.generators.classic.circulant_graph(5, [1, 2])
>>> edges = [
... (0, 1), (0, 2), (0, 3), (0, 4), (1, 2),
... (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)]
...
>>> sorted(edges) == sorted(G.edges())
True
"""
G = empty_graph(n, create_using)
for i in range(n):
for j in offsets:
G.add_edge(i, (i - j) % n)
G.add_edge(i, (i + j) % n)
return G
@nodes_or_number(0)
def cycle_graph(n, create_using=None):
"""Returns the cycle graph $C_n$ of cyclically connected nodes.
$C_n$ is a path with its two end-nodes connected.
Parameters
----------
n : int or iterable container of nodes
If n is an integer, nodes are from `range(n)`.
If n is a container of nodes, those nodes appear in the graph.
create_using : NetworkX graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
Notes
-----
If create_using is directed, the direction is in increasing order.
"""
n_orig, nodes = n
G = empty_graph(nodes, create_using)
G.add_edges_from(pairwise(nodes))
G.add_edge(nodes[-1], nodes[0])
return G
def dorogovtsev_goltsev_mendes_graph(n, create_using=None):
"""Returns the hierarchically constructed Dorogovtsev-Goltsev-Mendes graph.
n is the generation.
See: arXiv:/cond-mat/0112143 by Dorogovtsev, Goltsev and Mendes.
"""
G = empty_graph(0, create_using)
if G.is_directed():
raise NetworkXError("Directed Graph not supported")
if G.is_multigraph():
raise NetworkXError("Multigraph not supported")
G.add_edge(0, 1)
if n == 0:
return G
new_node = 2 # next node to be added
for i in range(1, n + 1): # iterate over number of generations.
last_generation_edges = list(G.edges())
number_of_edges_in_last_generation = len(last_generation_edges)
for j in range(0, number_of_edges_in_last_generation):
G.add_edge(new_node, last_generation_edges[j][0])
G.add_edge(new_node, last_generation_edges[j][1])
new_node += 1
return G
@nodes_or_number(0)
def empty_graph(n=0, create_using=None, default=nx.Graph):
"""Returns the empty graph with n nodes and zero edges.
Parameters
----------
n : int or iterable container of nodes (default = 0)
If n is an integer, nodes are from `range(n)`.
If n is a container of nodes, those nodes appear in the graph.
create_using : Graph Instance, Constructor or None
Indicator of type of graph to return.
If a Graph-type instance, then clear and use it.
If None, use the `default` constructor.
If a constructor, call it to create an empty graph.
default : Graph constructor (optional, default = nx.Graph)
The constructor to use if create_using is None.
If None, then nx.Graph is used.
This is used when passing an unknown `create_using` value
through your home-grown function to `empty_graph` and
you want a default constructor other than nx.Graph.
Examples
--------
>>> G = nx.empty_graph(10)
>>> G.number_of_nodes()
10
>>> G.number_of_edges()
0
>>> G = nx.empty_graph("ABC")
>>> G.number_of_nodes()
3
>>> sorted(G)
['A', 'B', 'C']
Notes
-----
The variable create_using should be a Graph Constructor or a
"graph"-like object. Constructors, e.g. `nx.Graph` or `nx.MultiGraph`
will be used to create the returned graph. "graph"-like objects
will be cleared (nodes and edges will be removed) and refitted as
an empty "graph" with nodes specified in n. This capability
is useful for specifying the class-nature of the resulting empty
"graph" (i.e. Graph, DiGraph, MyWeirdGraphClass, etc.).
The variable create_using has three main uses:
Firstly, the variable create_using can be used to create an
empty digraph, multigraph, etc. For example,
>>> n = 10
>>> G = nx.empty_graph(n, create_using=nx.DiGraph)
will create an empty digraph on n nodes.
Secondly, one can pass an existing graph (digraph, multigraph,
etc.) via create_using. For example, if G is an existing graph
(resp. digraph, multigraph, etc.), then empty_graph(n, create_using=G)
will empty G (i.e. delete all nodes and edges using G.clear())
and then add n nodes and zero edges, and return the modified graph.
Thirdly, when constructing your home-grown graph creation function
you can use empty_graph to construct the graph by passing a user
defined create_using to empty_graph. In this case, if you want the
default constructor to be other than nx.Graph, specify `default`.
>>> def mygraph(n, create_using=None):
... G = nx.empty_graph(n, create_using, nx.MultiGraph)
... G.add_edges_from([(0, 1), (0, 1)])
... return G
>>> G = mygraph(3)
>>> G.is_multigraph()
True
>>> G = mygraph(3, nx.Graph)
>>> G.is_multigraph()
False
See also create_empty_copy(G).
"""
if create_using is None:
G = default()
elif hasattr(create_using, '_adj'):
# create_using is a NetworkX style Graph
create_using.clear()
G = create_using
else:
# try create_using as constructor
G = create_using()
n_name, nodes = n
G.add_nodes_from(nodes)
return G
def ladder_graph(n, create_using=None):
"""Returns the Ladder graph of length n.
This is two paths of n nodes, with
each pair connected by a single edge.
Node labels are the integers 0 to 2*n - 1.
"""
G = empty_graph(2 * n, create_using)
if G.is_directed():
raise NetworkXError("Directed Graph not supported")
G.add_edges_from(pairwise(range(n)))
G.add_edges_from(pairwise(range(n, 2 * n)))
G.add_edges_from((v, v + n) for v in range(n))
return G
@nodes_or_number([0, 1])
def lollipop_graph(m, n, create_using=None):
"""Returns the Lollipop Graph; `K_m` connected to `P_n`.
This is the Barbell Graph without the right barbell.
Parameters
----------
m, n : int or iterable container of nodes (default = 0)
If an integer, nodes are from `range(m)` and `range(m,m+n)`.
If a container, the entries are the coordinate of the node.
The nodes for m appear in the complete graph $K_m$ and the nodes
for n appear in the path $P_n$
create_using : NetworkX graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
Notes
-----
The 2 subgraphs are joined via an edge (m-1, m).
If n=0, this is merely a complete graph.
(This graph is an extremal example in <NAME> and Jim
Fill's etext on Random Walks on Graphs.)
"""
m, m_nodes = m
n, n_nodes = n
M = len(m_nodes)
N = len(n_nodes)
if isinstance(m, int):
n_nodes = [len(m_nodes) + i for i in n_nodes]
if M < 2:
raise NetworkXError(
"Invalid graph description, m should be >=2")
if N < 0:
raise NetworkXError(
"Invalid graph description, n should be >=0")
# the ball
G = complete_graph(m_nodes, create_using)
if G.is_directed():
raise NetworkXError("Directed Graph not supported")
# the stick
G.add_nodes_from(n_nodes)
if N > 1:
G.add_edges_from(pairwise(n_nodes))
# connect ball to stick
if M > 0 and N > 0:
G.add_edge(m_nodes[-1], n_nodes[0])
return G
def null_graph(create_using=None):
"""Returns the Null graph with no nodes or edges.
See empty_graph for the use of create_using.
"""
G = empty_graph(0, create_using)
return G
@nodes_or_number(0)
def path_graph(n, create_using=None):
"""Returns the Path graph `P_n` of linearly connected nodes.
Parameters
----------
n : int or iterable
If an integer, nodes are from `range(n)`.
If an iterable of nodes, those nodes appear in the graph, connected in the order given.
create_using : NetworkX graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
"""
n_name, nodes = n
G = empty_graph(nodes, create_using)
G.add_edges_from(pairwise(nodes))
return G
@nodes_or_number(0)
def star_graph(n, create_using=None):
""" Return the star graph
The star graph consists of one center node connected to n outer nodes.
Parameters
----------
n : int or iterable
If an integer, node labels are 0 to n with center 0.
If an iterable of nodes, the center is the first.
create_using : NetworkX graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
Notes
-----
The graph has n+1 nodes for integer n.
So star_graph(3) is the same as star_graph(range(4)).
"""
n_name, nodes = n
if isinstance(n_name, int):
nodes = nodes + [n_name] # there should be n+1 nodes
first = nodes[0]
G = empty_graph(nodes, create_using)
if G.is_directed():
raise NetworkXError("Directed Graph not supported")
G.add_edges_from((first, v) for v in nodes[1:])
return G
def trivial_graph(create_using=None):
""" Return the Trivial graph with one node (with label 0) and no edges.
"""
G = empty_graph(1, create_using)
return G
def turan_graph(n, r):
r""" Return the Turan Graph
The Turan Graph is a complete multipartite graph on $n$ vertices
with $r$ disjoint subsets. It is the graph with the maximum number of edges
for any graph with $n$ vertices and $r$ disjoint subsets.
Given $n$ and $r$, we generate a complete multipartite graph with
$r-(n \mod r)$ partitions of size $n/r$, rounded down, and
$n \mod r$ partitions of size $n/r+1$, rounded down.
Parameters
----------
n : int
The number of vertices.
r : int
The number of partitions.
Must be less than or equal to n.
Notes
-----
Must satisfy $1 <= r <= n$.
The graph has $(r-1)(n^2)/(2r)$ edges, rounded down.
"""
if not 1 <= r <= n:
raise NetworkXError("Must satisfy 1 <= r <= n")
partitions = [n // r] * (r - (n % r)) + [n // r + 1] * (n % r)
G = complete_multipartite_graph(*partitions)
return G
@nodes_or_number(0)
def wheel_graph(n, create_using=None):
""" Return the wheel graph
The wheel graph consists of a hub node connected to a cycle of (n-1) nodes.
Parameters
----------
n : int or iterable
If an integer, node labels are 0 to n with center 0.
If an iterable of nodes, the center is the first.
create_using : NetworkX graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
Node labels are the integers 0 to n - 1.
"""
n_name, nodes = n
if n_name == 0:
G = empty_graph(0, create_using)
return G
G = star_graph(nodes, create_using)
if len(G) > 2:
G.add_edges_from(pairwise(nodes[1:]))
G.add_edge(nodes[-1], nodes[1])
return G
def complete_multipartite_graph(*subset_sizes):
"""Returns the complete multipartite graph with the specified subset sizes.
Parameters
----------
subset_sizes : tuple of integers or tuple of node iterables
The arguments can either all be integer number of nodes or they
can all be iterables of nodes. If integers, they represent the
number of vertices in each subset of the multipartite graph.
If iterables, each is used to create the nodes for that subset.
The length of subset_sizes is the number of subsets.
Returns
-------
G : NetworkX Graph
Returns the complete multipartite graph with the specified subsets.
For each node, the node attribute 'subset' is an integer
indicating which subset contains the node.
Examples
--------
Creating a complete tripartite graph, with subsets of one, two, and three
vertices, respectively.
>>> import networkx as nx
>>> G = nx.complete_multipartite_graph(1, 2, 3)
>>> [G.nodes[u]['subset'] for u in G]
[0, 1, 1, 2, 2, 2]
>>> list(G.edges(0))
[(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)]
>>> list(G.edges(2))
[(2, 0), (2, 3), (2, 4), (2, 5)]
>>> list(G.edges(4))
[(4, 0), (4, 1), (4, 2)]
>>> G = nx.complete_multipartite_graph('a', 'bc', 'def')
>>> [G.nodes[u]['subset'] for u in sorted(G)]
[0, 1, 1, 2, 2, 2]
Notes
-----
This function generalizes several other graph generator functions.
- If no subset sizes are given, this returns the null graph.
- If a single subset size `n` is given, this returns the empty graph on
`n` nodes.
- If two subset sizes `m` and `n` are given, this returns the complete
bipartite graph on `m + n` nodes.
- If subset sizes `1` and `n` are given, this returns the star graph on
`n + 1` nodes.
See also
--------
complete_bipartite_graph
"""
# The complete multipartite graph is an undirected simple graph.
G = Graph()
if len(subset_sizes) == 0:
return G
# set up subsets of nodes
try:
extents = pairwise(accumulate((0,) + subset_sizes))
subsets = [range(start, end) for start, end in extents]
except TypeError:
subsets = subset_sizes
# add nodes with subset attribute
# while checking that ints are not mixed with iterables
try:
for (i, subset) in enumerate(subsets):
G.add_nodes_from(subset, subset=i)
except TypeError:
raise NetworkXError("Arguments must be all ints or all iterables")
# Across subsets, all vertices should be adjacent.
# We can use itertools.combinations() because undirected.
for subset1, subset2 in itertools.combinations(subsets, 2):
G.add_edges_from(itertools.product(subset1, subset2))
return G
```
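A minimal usage sketch of a few of the generators defined above, checking the node and edge counts stated in their docstrings:
```python
import networkx as nx

# balanced_tree(2, 3): 1 + 2 + 4 + 8 = 15 nodes (the geometric-sum formula above)
print(nx.balanced_tree(2, 3).number_of_nodes())        # 15

# turan_graph(6, 3) is K(2, 2, 2) with (r-1)*n^2/(2r) = 12 edges
print(nx.turan_graph(6, 3).number_of_edges())          # 12

# empty_graph accepts a constructor as create_using
D = nx.empty_graph(5, create_using=nx.DiGraph)
print(D.is_directed(), D.number_of_edges())            # True 0

# complete_multipartite_graph tags each node with its subset index
G = nx.complete_multipartite_graph(1, 2, 3)
print([G.nodes[u]['subset'] for u in G])               # [0, 1, 1, 2, 2, 2]
```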
#### File: generators/tests/test_small.py
```python
import pytest
import networkx as nx
from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic
is_isomorphic = graph_could_be_isomorphic
"""Generators - Small
=====================
Some small graphs
"""
null = nx.null_graph()
class TestGeneratorsSmall():
def test_make_small_graph(self):
d = ["adjacencylist", "Bull Graph", 5, [[2, 3], [1, 3, 4], [1, 2, 5],
[2], [3]]]
G = nx.make_small_graph(d)
assert is_isomorphic(G, nx.bull_graph())
# Test small graph creation error with wrong ltype
d[0] = "erroneouslist"
pytest.raises(nx.NetworkXError, nx.make_small_graph,
graph_description=d)
def test__LCF_graph(self):
# If n<=0, then return the null_graph
G = nx.LCF_graph(-10, [1, 2], 100)
assert is_isomorphic(G, null)
G = nx.LCF_graph(0, [1, 2], 3)
assert is_isomorphic(G, null)
G = nx.LCF_graph(0, [1, 2], 10)
assert is_isomorphic(G, null)
# Test that LCF(n,[],0) == cycle_graph(n)
for a, b, c in [(5, [], 0), (10, [], 0), (5, [], 1), (10, [], 10)]:
G = nx.LCF_graph(a, b, c)
assert is_isomorphic(G, nx.cycle_graph(a))
# Generate the utility graph K_{3,3}
G = nx.LCF_graph(6, [3, -3], 3)
utility_graph = nx.complete_bipartite_graph(3, 3)
assert is_isomorphic(G, utility_graph)
def test_properties_named_small_graphs(self):
G = nx.bull_graph()
assert G.number_of_nodes() == 5
assert G.number_of_edges() == 5
assert sorted(d for n, d in G.degree()) == [1, 1, 2, 3, 3]
assert nx.diameter(G) == 3
assert nx.radius(G) == 2
G = nx.chvatal_graph()
assert G.number_of_nodes() == 12
assert G.number_of_edges() == 24
assert list(d for n, d in G.degree()) == 12 * [4]
assert nx.diameter(G) == 2
assert nx.radius(G) == 2
G = nx.cubical_graph()
assert G.number_of_nodes() == 8
assert G.number_of_edges() == 12
assert list(d for n, d in G.degree()) == 8 * [3]
assert nx.diameter(G) == 3
assert nx.radius(G) == 3
G = nx.desargues_graph()
assert G.number_of_nodes() == 20
assert G.number_of_edges() == 30
assert list(d for n, d in G.degree()) == 20 * [3]
G = nx.diamond_graph()
assert G.number_of_nodes() == 4
assert sorted(d for n, d in G.degree()) == [2, 2, 3, 3]
assert nx.diameter(G) == 2
assert nx.radius(G) == 1
G = nx.dodecahedral_graph()
assert G.number_of_nodes() == 20
assert G.number_of_edges() == 30
assert list(d for n, d in G.degree()) == 20 * [3]
assert nx.diameter(G) == 5
assert nx.radius(G) == 5
G = nx.frucht_graph()
assert G.number_of_nodes() == 12
assert G.number_of_edges() == 18
assert list(d for n, d in G.degree()) == 12 * [3]
assert nx.diameter(G) == 4
assert nx.radius(G) == 3
G = nx.heawood_graph()
assert G.number_of_nodes() == 14
assert G.number_of_edges() == 21
assert list(d for n, d in G.degree()) == 14 * [3]
assert nx.diameter(G) == 3
assert nx.radius(G) == 3
G = nx.hoffman_singleton_graph()
assert G.number_of_nodes() == 50
assert G.number_of_edges() == 175
assert list(d for n, d in G.degree()) == 50 * [7]
assert nx.diameter(G) == 2
assert nx.radius(G) == 2
G = nx.house_graph()
assert G.number_of_nodes() == 5
assert G.number_of_edges() == 6
assert sorted(d for n, d in G.degree()) == [2, 2, 2, 3, 3]
assert nx.diameter(G) == 2
assert nx.radius(G) == 2
G = nx.house_x_graph()
assert G.number_of_nodes() == 5
assert G.number_of_edges() == 8
assert sorted(d for n, d in G.degree()) == [2, 3, 3, 4, 4]
assert nx.diameter(G) == 2
assert nx.radius(G) == 1
G = nx.icosahedral_graph()
assert G.number_of_nodes() == 12
assert G.number_of_edges() == 30
assert (list(d for n, d in G.degree()) ==
[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5])
assert nx.diameter(G) == 3
assert nx.radius(G) == 3
G = nx.krackhardt_kite_graph()
assert G.number_of_nodes() == 10
assert G.number_of_edges() == 18
assert (sorted(d for n, d in G.degree()) ==
[1, 2, 3, 3, 3, 4, 4, 5, 5, 6])
G = nx.moebius_kantor_graph()
assert G.number_of_nodes() == 16
assert G.number_of_edges() == 24
assert list(d for n, d in G.degree()) == 16 * [3]
assert nx.diameter(G) == 4
G = nx.octahedral_graph()
assert G.number_of_nodes() == 6
assert G.number_of_edges() == 12
assert list(d for n, d in G.degree()) == 6 * [4]
assert nx.diameter(G) == 2
assert nx.radius(G) == 2
G = nx.pappus_graph()
assert G.number_of_nodes() == 18
assert G.number_of_edges() == 27
assert list(d for n, d in G.degree()) == 18 * [3]
assert nx.diameter(G) == 4
G = nx.petersen_graph()
assert G.number_of_nodes() == 10
assert G.number_of_edges() == 15
assert list(d for n, d in G.degree()) == 10 * [3]
assert nx.diameter(G) == 2
assert nx.radius(G) == 2
G = nx.sedgewick_maze_graph()
assert G.number_of_nodes() == 8
assert G.number_of_edges() == 10
assert sorted(d for n, d in G.degree()) == [1, 2, 2, 2, 3, 3, 3, 4]
G = nx.tetrahedral_graph()
assert G.number_of_nodes() == 4
assert G.number_of_edges() == 6
assert list(d for n, d in G.degree()) == [3, 3, 3, 3]
assert nx.diameter(G) == 1
assert nx.radius(G) == 1
G = nx.truncated_cube_graph()
assert G.number_of_nodes() == 24
assert G.number_of_edges() == 36
assert list(d for n, d in G.degree()) == 24 * [3]
G = nx.truncated_tetrahedron_graph()
assert G.number_of_nodes() == 12
assert G.number_of_edges() == 18
assert list(d for n, d in G.degree()) == 12 * [3]
G = nx.tutte_graph()
assert G.number_of_nodes() == 46
assert G.number_of_edges() == 69
assert list(d for n, d in G.degree()) == 46 * [3]
# Test create_using with directed or multigraphs on small graphs
pytest.raises(nx.NetworkXError, nx.tutte_graph,
create_using=nx.DiGraph)
MG = nx.tutte_graph(create_using=nx.MultiGraph)
assert sorted(MG.edges()) == sorted(G.edges())
```
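A quick interactive spot-check of two of the named small graphs verified above:
```python
import networkx as nx

P = nx.petersen_graph()
print(P.number_of_nodes(), P.number_of_edges())   # 10 15
print(nx.diameter(P), nx.radius(P))               # 2 2

K = nx.krackhardt_kite_graph()
print(sorted(d for _, d in K.degree()))           # [1, 2, 3, 3, 3, 4, 4, 5, 5, 6]
```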
#### File: networkx/testing/test.py
```python
def run(verbosity=1, doctest=False):
"""Run NetworkX tests.
Parameters
----------
verbosity: integer, optional
Level of detail in test reports. Higher numbers provide more detail.
doctest: bool, optional
True to run doctests in code modules
"""
import pytest
pytest_args = ['-l']
if verbosity and int(verbosity) > 1:
pytest_args += ["-" + "v"*(int(verbosity)-1)]
if doctest:
pytest_args += ["--doctest-modules"]
pytest_args += ["--pyargs", "networkx"]
try:
code = pytest.main(pytest_args)
except SystemExit as exc:
code = exc.code
return (code == 0)
if __name__ == "__main__":
run()
```
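A minimal sketch of calling the runner above from a Python session; the `networkx.testing.test` import path simply mirrors the file header of this dump and is an assumption, not a documented API:
```python
# Roughly equivalent to: pytest -l -v --doctest-modules --pyargs networkx
from networkx.testing.test import run   # assumed import path

ok = run(verbosity=2, doctest=True)
print("test suite passed" if ok else "test suite reported failures")
```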
#### File: networkx/utils/rcm.py
```python
from collections import deque
from operator import itemgetter
import networkx as nx
from ..utils import arbitrary_element
__author__ = """\n""".join(['<NAME> <<EMAIL>>'])
__all__ = ['cuthill_mckee_ordering',
'reverse_cuthill_mckee_ordering']
def cuthill_mckee_ordering(G, heuristic=None):
"""Generate an ordering (permutation) of the graph nodes to make
a sparse matrix.
Uses the Cuthill-McKee heuristic (based on breadth-first search) [1]_.
Parameters
----------
G : graph
A NetworkX graph
heuristic : function, optional
Function to choose starting node for RCM algorithm. If None
a node from a pseudo-peripheral pair is used. A user-defined function
can be supplied that takes a graph object and returns a single node.
Returns
-------
nodes : generator
Generator of nodes in Cuthill-McKee ordering.
Examples
--------
>>> from networkx.utils import cuthill_mckee_ordering
>>> G = nx.path_graph(4)
>>> rcm = list(cuthill_mckee_ordering(G))
>>> A = nx.adjacency_matrix(G, nodelist=rcm)
Smallest degree node as heuristic function:
>>> def smallest_degree(G):
... return min(G, key=G.degree)
>>> rcm = list(cuthill_mckee_ordering(G, heuristic=smallest_degree))
See Also
--------
reverse_cuthill_mckee_ordering
Notes
-----
The optimal solution to the bandwidth reduction problem is NP-complete [2]_.
References
----------
.. [1] <NAME> and <NAME>.
Reducing the bandwidth of sparse symmetric matrices,
In Proc. 24th Nat. Conf. ACM, pages 157-172, 1969.
http://doi.acm.org/10.1145/800195.805928
.. [2] <NAME>. 1997. The Algorithm Design Manual.
Springer-Verlag New York, Inc., New York, NY, USA.
"""
for c in nx.connected_components(G):
for n in connected_cuthill_mckee_ordering(G.subgraph(c), heuristic):
yield n
def reverse_cuthill_mckee_ordering(G, heuristic=None):
"""Generate an ordering (permutation) of the graph nodes to make
a sparse matrix.
Uses the reverse Cuthill-McKee heuristic (based on breadth-first search)
[1]_.
Parameters
----------
G : graph
A NetworkX graph
heuristic : function, optional
Function to choose starting node for RCM algorithm. If None
a node from a pseudo-peripheral pair is used. A user-defined function
can be supplied that takes a graph object and returns a single node.
Returns
-------
nodes : generator
Generator of nodes in reverse Cuthill-McKee ordering.
Examples
--------
>>> from networkx.utils import reverse_cuthill_mckee_ordering
>>> G = nx.path_graph(4)
>>> rcm = list(reverse_cuthill_mckee_ordering(G))
>>> A = nx.adjacency_matrix(G, nodelist=rcm)
Smallest degree node as heuristic function:
>>> def smallest_degree(G):
... return min(G, key=G.degree)
>>> rcm = list(reverse_cuthill_mckee_ordering(G, heuristic=smallest_degree))
See Also
--------
cuthill_mckee_ordering
Notes
-----
The optimal solution to the bandwidth reduction problem is NP-complete [2]_.
References
----------
.. [1] <NAME> and <NAME>.
Reducing the bandwidth of sparse symmetric matrices,
In Proc. 24th Nat. Conf. ACM, pages 157-172, 1969.
http://doi.acm.org/10.1145/800195.805928
.. [2] <NAME>. 1997. The Algorithm Design Manual.
Springer-Verlag New York, Inc., New York, NY, USA.
"""
return reversed(list(cuthill_mckee_ordering(G, heuristic=heuristic)))
def connected_cuthill_mckee_ordering(G, heuristic=None):
# the cuthill mckee algorithm for connected graphs
if heuristic is None:
start = pseudo_peripheral_node(G)
else:
start = heuristic(G)
visited = {start}
queue = deque([start])
while queue:
parent = queue.popleft()
yield parent
nd = sorted(list(G.degree(set(G[parent]) - visited)),
key=itemgetter(1))
children = [n for n, d in nd]
visited.update(children)
queue.extend(children)
def pseudo_peripheral_node(G):
# helper for cuthill-mckee to find a node in a "pseudo peripheral pair"
# to use as good starting node
u = arbitrary_element(G)
lp = 0
v = u
while True:
spl = dict(nx.shortest_path_length(G, v))
l = max(spl.values())
if l <= lp:
break
lp = l
farthest = (n for n, dist in spl.items() if dist == l)
v, deg = min(G.degree(farthest), key=itemgetter(1))
return v
``` |
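A minimal sketch showing the effect of the ordering on bandwidth, using a path graph whose labels have been shuffled so that the natural order is a poor one:
```python
import random
import networkx as nx
from networkx.utils import reverse_cuthill_mckee_ordering

# Randomly relabel a path graph so that its natural node order has large bandwidth.
G = nx.path_graph(20)
H = nx.relabel_nodes(G, dict(zip(G, random.sample(list(G), len(G)))))

def bandwidth(graph, order):
    pos = {n: i for i, n in enumerate(order)}
    return max(abs(pos[u] - pos[v]) for u, v in graph.edges)

rcm = list(reverse_cuthill_mckee_ordering(H))
# RCM starts a BFS at a pseudo-peripheral node, so it recovers bandwidth 1 here.
print(bandwidth(H, sorted(H)), '->', bandwidth(H, rcm))
```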
{
"source": "jmmease/ipyplotly",
"score": 2
} |
#### File: ipyplotly/codegen/utils.py
```python
import importlib
import inspect
import textwrap
from typing import List, Dict
from io import StringIO
from yapf.yapflib.yapf_api import FormatCode
from ipyplotly.basevalidators import BaseValidator, CompoundValidator, CompoundArrayValidator
def format_source(validator_source):
formatted_source, _ = FormatCode(validator_source,
style_config={'based_on_style': 'google',
'DEDENT_CLOSING_BRACKETS': True,
'COLUMN_LIMIT': 119})
return formatted_source
custom_validator_datatypes = {
'layout.image.source': 'ipyplotly.basevalidators.ImageUriValidator',
'frame.data': 'ipyplotly.validators.DataValidator',
'frame.layout': 'ipyplotly.validators.LayoutValidator'
}
class PlotlyNode:
# Constructor
# -----------
def __init__(self, plotly_schema, node_path=(), parent=None):
self.plotly_schema = plotly_schema
if isinstance(node_path, str):
node_path = (node_path,)
self.node_path = node_path
# Compute children
if isinstance(self.node_data, dict):
self._children = [self.__class__(self.plotly_schema,
node_path=self.node_path + (c,),
parent=self)
for c in self.node_data if c and c[0] != '_']
else:
self._children = []
# Parent
self._parent = parent
def __repr__(self):
return self.dir_str
# Abstract methods
# ----------------
@property
def node_data(self) -> dict:
raise NotImplementedError()
@property
def description(self) -> str:
raise NotImplementedError()
@property
def base_datatype_class(self):
raise NotImplementedError
# Names
# -----
@property
def base_name(self):
raise NotImplementedError()
@property
def plotly_name(self) -> str:
if len(self.node_path) == 0:
return self.base_name
else:
return self.node_path[-1]
@property
def name_pascal_case(self) -> str:
return self.plotly_name.title().replace('_', '')
@property
def name_undercase(self) -> str:
if not self.plotly_name:
# Empty plotly_name
return self.plotly_name
# Lowercase leading char
# ----------------------
name1 = self.plotly_name[0].lower() + self.plotly_name[1:]
# Replace capital chars by underscore-lower
# -----------------------------------------
name2 = ''.join([('' if not c.isupper() else '_') + c.lower() for c in name1])
return name2
@property
def name_property(self) -> str:
return self.plotly_name + ('s' if self.is_array_element else '')
@property
def name_validator(self) -> str:
return self.name_pascal_case + ('s' if self.is_array_element else '') + 'Validator'
@property
def name_base_validator(self) -> str:
if self.dir_str in custom_validator_datatypes:
validator_base = f"{custom_validator_datatypes[self.dir_str]}"
else:
validator_base = f"ipyplotly.basevalidators.{self.datatype_pascal_case}Validator"
return validator_base
def get_constructor_params_docstring(self, indent=12, extra_nodes=[]):
assert self.is_compound
buffer = StringIO()
subtype_nodes = self.child_datatypes + extra_nodes
for subtype_node in subtype_nodes:
raw_description = subtype_node.description
subtype_description = '\n'.join(textwrap.wrap(raw_description,
subsequent_indent=' ' * (indent + 4),
width=80 - (indent + 4)))
buffer.write('\n' + ' ' * indent + subtype_node.name_property)
buffer.write('\n' + ' ' * (indent + 4) + subtype_description)
return buffer.getvalue()
@property
def validator_instance(self) -> BaseValidator:
module_parts = self.name_base_validator.split('.')
module_path = '.'.join(module_parts[:-1])
cls_name = module_parts[-1]
validators_module = importlib.import_module(module_path)
validator_class_list = [cls
for _, cls in inspect.getmembers(validators_module, inspect.isclass)
if cls.__name__ == cls_name]
if not validator_class_list:
raise ValueError(f"Unknown base validator '{self.name_base_validator}'")
validator_class = validator_class_list[0]
args = dict(plotly_name=self.name_property, parent_name=self.parent_dir_str)
if validator_class == CompoundValidator:
data_class_str = f"<class ipyplotly.datatypes.{self.parent_dir_str}.{self.name_class}>"
extra_args = {'data_class': data_class_str, 'data_docs': self.get_constructor_params_docstring()}
elif validator_class == CompoundArrayValidator:
element_class_str = f"<class ipyplotly.datatypes.{self.parent_dir_str}.{self.name_class}>"
extra_args = {'element_class': element_class_str, 'element_docs': self.get_constructor_params_docstring()}
else:
extra_args = {n.name_undercase: n.node_data for n in self.simple_attrs}
# Add extra properties
if self.datatype == 'color':
# Check for colorscale sibling
colorscale_node_list = [node for node in self.parent.child_datatypes
if node.datatype == 'colorscale']
if colorscale_node_list:
colorscale_path = colorscale_node_list[0].dir_str
extra_args['colorscale_path'] = repr(colorscale_path)
return validator_class(**args, **extra_args)
@property
def name_class(self) -> str:
return self.name_pascal_case
# Datatypes
# ---------
@property
def datatype(self) -> str:
if self.is_array_element:
return 'compound_array'
elif self.is_compound:
return 'compound'
elif self.is_simple:
return self.node_data.get('valType')
else:
return 'literal'
@property
def datatype_pascal_case(self) -> str:
return self.datatype.title().replace('_', '')
@property
def is_compound(self) -> bool:
return isinstance(self.node_data, dict) and not self.is_simple and self.plotly_name != 'impliedEdits'
@property
def is_literal(self) -> bool:
return isinstance(self.node_data, str)
@property
def is_simple(self) -> bool:
return isinstance(self.node_data, dict) and 'valType' in self.node_data
@property
def is_array(self) -> bool:
return isinstance(self.node_data, dict) and \
self.node_data.get('role', '') == 'object' and \
'items' in self.node_data
@property
def is_array_element(self):
if self.parent and self.parent.parent:
return self.parent.parent.is_array
else:
return False
@property
def is_datatype(self) -> bool:
return self.is_simple or self.is_compound
# Node path
# ---------
def tidy_dir_path(self, p):
return p
@property
def dir_path(self) -> List[str]:
res = [self.base_name] if self.base_name else []
for i, p in enumerate(self.node_path):
if p == 'items' or \
(i < len(self.node_path) - 1 and self.node_path[i+1] == 'items'):
# e.g. [parcoords, dimensions, items, dimension] -> [parcoords, dimension]
pass
else:
res.append(self.tidy_dir_path(p))
return res
# Node path strings
# -----------------
@property
def dir_str(self) -> str:
return '.'.join(self.dir_path)
@property
def parent_dir_str(self) -> str:
return '.'.join(self.dir_path[:-1])
@property
def pkg_str(self) -> str:
path_str = ''
for p in self.dir_path:
path_str += '.' + p
return path_str
# Children
# --------
@property
def children(self) -> List['PlotlyNode']:
return self._children
@property
def simple_attrs(self) -> List['PlotlyNode']:
if not self.is_simple:
raise ValueError(f"Cannot get simple attributes of the simple object '{self.dir_str}'")
return [n for n in self.children if n.plotly_name not in ['valType', 'description', 'role']]
@property
def parent(self) -> 'PlotlyNode':
return self._parent
@property
def child_datatypes(self) -> List['PlotlyNode']:
"""
Returns
-------
children: list of TraceNode
"""
# if self.is_array:
# items_child = [c for c in self.children if c.plotly_name == 'items'][0]
# return items_child.children
# else:
nodes = []
for n in self.children:
if n.is_array:
nodes.append(n.children[0].children[0])
elif n.is_datatype:
nodes.append(n)
return nodes
@property
def child_compound_datatypes(self) -> List['PlotlyNode']:
return [n for n in self.child_datatypes if n.is_compound]
@property
def child_simple_datatypes(self) -> List['PlotlyNode']:
return [n for n in self.child_datatypes if n.is_simple]
@property
def child_literals(self) -> List['PlotlyNode']:
return [n for n in self.children if n.is_literal]
# Static helpers
# --------------
@staticmethod
def get_all_compound_datatype_nodes(plotly_schema, node_class) -> List['PlotlyNode']:
nodes = []
nodes_to_process = [node_class(plotly_schema)]
while nodes_to_process:
node = nodes_to_process.pop()
if not node.is_array:
nodes.append(node)
nodes_to_process.extend(node.child_compound_datatypes)
return nodes
@staticmethod
def get_all_trace_layout_nodes(plotly_schema) -> Dict[str, 'LayoutNode']:
trace_names = plotly_schema['traces'].keys()
datatype_nodes = {}
nodes_to_process = [TraceLayoutNode(plotly_schema, trace_name)
for trace_name in trace_names]
while nodes_to_process:
parent_node = nodes_to_process.pop()
for node in parent_node.child_simple_datatypes:
datatype_nodes[node.dir_str] = node
return datatype_nodes
class TraceNode(PlotlyNode):
# Constructor
# -----------
def __init__(self, plotly_schema, node_path=(), parent=None):
super().__init__(plotly_schema, node_path, parent)
@property
def base_datatype_class(self):
if len(self.node_path) == 0:
return 'BaseTraceType'
else:
return 'BaseTraceHierarchyType'
@property
def base_name(self):
return 'trace'
# Raw data
# --------
@property
def node_data(self) -> dict:
if not self.node_path:
node_data = self.plotly_schema['traces']
else:
node_data = self.plotly_schema['traces'][self.node_path[0]]['attributes']
for prop_name in self.node_path[1:]:
node_data = node_data[prop_name]
return node_data
# Description
# -----------
@property
def description(self) -> str:
if len(self.node_path) == 0:
desc = ""
elif len(self.node_path) == 1:
desc = self.plotly_schema['traces'][self.node_path[0]]['meta'].get('description', '')
else:
desc = self.node_data.get('description', '')
if isinstance(desc, list):
desc = ''.join(desc)
return desc
class LayoutNode(PlotlyNode):
# Constructor
# -----------
def __init__(self, plotly_schema, node_path=(), parent=None):
super().__init__(plotly_schema, node_path, parent)
@property
def base_datatype_class(self):
if len(self.node_path) == 0:
return 'BaseLayoutType'
else:
return 'BaseLayoutHierarchyType'
@property
def base_name(self):
return ''
@property
def plotly_name(self) -> str:
if len(self.node_path) == 0:
return self.base_name
elif len(self.node_path) == 1:
return 'layout' # override 'layoutAttributes'
else:
return self.node_path[-1]
def tidy_dir_path(self, p):
return 'layout' if p == 'layoutAttributes' else p
# Description
# -----------
@property
def description(self) -> str:
desc = self.node_data.get('description', '')
if isinstance(desc, list):
desc = ''.join(desc)
return desc
# Raw data
# --------
@property
def node_data(self) -> dict:
node_data = self.plotly_schema['layout']
for prop_name in self.node_path:
node_data = node_data[prop_name]
return node_data
class TraceLayoutNode(LayoutNode):
# Constructor
# -----------
def __init__(self, plotly_schema, trace_name=None, node_path=(), parent=None):
# Handle trace name
assert parent is not None or trace_name is not None
if parent is not None:
trace_name = parent.trace_name
self.trace_name = trace_name
super().__init__(plotly_schema, node_path, parent)
@property
def base_name(self):
return 'layout'
@property
def plotly_name(self) -> str:
if len(self.node_path) == 0:
return self.base_name
else:
return self.node_path[-1]
# Raw data
# --------
@property
def node_data(self) -> dict:
try:
node_data = (self.plotly_schema['traces']
[self.trace_name]['layoutAttributes'])
for prop_name in self.node_path:
node_data = node_data[prop_name]
except KeyError:
node_data = []
return node_data
class FrameNode(PlotlyNode):
# Constructor
# -----------
def __init__(self, plotly_schema, node_path=(), parent=None):
super().__init__(plotly_schema, node_path, parent)
@property
def base_datatype_class(self):
return 'BaseFrameHierarchyType'
@property
def base_name(self):
return ''
@property
def plotly_name(self) -> str:
if len(self.node_path) < 2:
return self.base_name
elif len(self.node_path) == 2:
return 'frame' # override 'frames_entry'
else:
return self.node_path[-1]
def tidy_dir_path(self, p):
return 'frame' if p == 'frames_entry' else p
# Description
# -----------
@property
def description(self) -> str:
desc = self.node_data.get('description', '')
if isinstance(desc, list):
desc = ''.join(desc)
return desc
# Raw data
# --------
@property
def node_data(self) -> dict:
node_data = self.plotly_schema['frames']
for prop_name in self.node_path:
node_data = node_data[prop_name]
return node_data
```
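A minimal sketch of driving these node classes during code generation, assuming a plotly.js `plot-schema.json` has already been downloaded; the schema path and the import path are placeholders, not part of the project:
```python
import json
from ipyplotly.codegen.utils import PlotlyNode, TraceNode   # assumed import path

with open('plot-schema.json') as f:                          # placeholder schema location
    plotly_schema = json.load(f)

# Enumerate every compound datatype reachable from the trace hierarchy and
# list a few of the simple (leaf) properties each would expose.
for node in PlotlyNode.get_all_compound_datatype_nodes(plotly_schema, TraceNode):
    leaves = [child.name_property for child in node.child_simple_datatypes]
    print(node.dir_str, leaves[:5])
```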
#### File: ipyplotly/ipyplotly/callbacks.py
```python
import typing as typ
class InputState:
def __init__(self, ctrl=None, alt=None, shift=None, meta=None, button=None, buttons=None, **_):
self._ctrl = ctrl
self._alt = alt
self._meta = meta
self._shift = shift
self._button = button
self._buttons = buttons
def __repr__(self):
return """\
InputState(ctrl={ctrl},
alt={alt},
shift={shift},
meta={meta},
button={button},
buttons={buttons})""".format(ctrl=self._ctrl, alt=self._alt, shift=self._shift,
meta=self._meta, button=self._button, buttons=self._buttons)
@property
def alt(self) -> bool:
"""
Whether alt key pressed
Returns
-------
bool
"""
return self._alt
@property
def ctrl(self) -> bool:
"""
Whether ctrl key pressed
Returns
-------
bool
"""
return self._ctrl
@property
def shift(self) -> bool:
"""
Whether shift key pressed
Returns
-------
bool
"""
return self._shift
@property
def meta(self) -> bool:
"""
Whether meta key pressed
Returns
-------
bool
"""
return self._meta
@property
def button(self) -> int:
"""
Integer code for the button that was pressed on the mouse to trigger the event
- 0: Main button pressed, usually the left button or the un-initialized state
- 1: Auxiliary button pressed, usually the wheel button or the middle button (if present)
- 2: Secondary button pressed, usually the right button
- 3: Fourth button, typically the Browser Back button
- 4: Fifth button, typically the Browser Forward button
Returns
-------
int
"""
return self._button
@property
def buttons(self) -> int:
"""
Integer code for which combination of buttons are pressed on the mouse when the event is triggered.
- 0: No button or un-initialized
- 1: Primary button (usually left)
- 2: Secondary button (usually right)
- 4: Auxiliary button (usually middle or mouse wheel button)
- 8: 4th button (typically the "Browser Back" button)
- 16: 5th button (typically the "Browser Forward" button)
Combinations of buttons are represented as the decimal form of the bitmask of the values above.
For example, pressing both the primary (1) and auxiliary (4) buttons will result in a code of 5
Returns
-------
int
"""
return self._buttons
class Points:
def __init__(self, point_inds=None, xs=None, ys=None, trace_name=None, trace_index=None):
self._point_inds = point_inds
self._xs = xs
self._ys = ys
self._trace_name = trace_name
self._trace_index = trace_index
@property
def point_inds(self) -> typ.List[int]:
return self._point_inds
@property
def xs(self) -> typ.List:
return self._xs
@property
def ys(self) -> typ.List:
return self._ys
@property
def trace_name(self) -> str:
return self._trace_name
@property
def trace_index(self) -> int:
return self._trace_index
class BoxSelector:
def __init__(self, xrange=None, yrange=None, **_):
self._type = 'box'
self._xrange = xrange
self._yrange = yrange
@property
def type(self) -> str:
return self._type
@property
def xrange(self) -> typ.Tuple[float, float]:
return self._xrange
@property
def yrange(self) -> typ.Tuple[float, float]:
return self._yrange
class LassoSelector:
def __init__(self, xs=None, ys=None, **_):
self._type = 'lasso'
self._xs = xs
self._ys = ys
@property
def type(self) -> str:
return self._type
@property
def xs(self) -> typ.List[float]:
return self._xs
@property
def ys(self) -> typ.List[float]:
return self._ys
```
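
The `buttons` property above packs the pressed mouse buttons into a bitmask. A minimal sketch of decoding such a code outside the library (`BUTTON_FLAGS` and `decode_buttons` are illustrative helpers, not part of ipyplotly):

```python
# Illustrative helper, not part of ipyplotly: decode the bitmask documented
# in InputState.buttons into a list of human-readable button names.
BUTTON_FLAGS = {
    1: "primary (left)",
    2: "secondary (right)",
    4: "auxiliary (middle/wheel)",
    8: "browser back",
    16: "browser forward",
}

def decode_buttons(code):
    """Return the names of every button encoded in the `buttons` bitmask."""
    return [name for flag, name in BUTTON_FLAGS.items() if code & flag]

# Example from the docstring: primary (1) + auxiliary (4) -> 5
print(decode_buttons(5))  # ['primary (left)', 'auxiliary (middle/wheel)']
```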
#### File: ipyplotly/ipyplotly/serializers.py
```python
from traitlets import Undefined
import numpy as np
def _py_to_js(v, widget_manager):
# print('_py_to_js')
# print(v)
if isinstance(v, dict):
        return {key: _py_to_js(val, widget_manager) for key, val in v.items()}
    elif isinstance(v, (list, tuple)):
        return [_py_to_js(val, widget_manager) for val in v]
elif isinstance(v, np.ndarray):
if v.ndim == 1 and v.dtype.kind in ['u', 'i', 'f']: # (un)signed integer or float
return {'buffer': memoryview(v), 'dtype': str(v.dtype), 'shape': v.shape}
else:
return v.tolist()
else:
if v is Undefined:
return '_undefined_'
else:
return v
def _js_to_py(v, widget_manager):
# print('_js_to_py')
# print(v)
if isinstance(v, dict):
        return {key: _js_to_py(val, widget_manager) for key, val in v.items()}
    elif isinstance(v, (list, tuple)):
        return [_js_to_py(val, widget_manager) for val in v]
elif isinstance(v, str) and v == '_undefined_':
return Undefined
else:
return v
custom_serializers = {
'from_json': _js_to_py,
'to_json': _py_to_js
}
```
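
A short usage sketch of the conversion rules above. It assumes the package layout matches the file path, so the module is importable as `ipyplotly.serializers`; `widget_manager` is not used by these helpers, so `None` is passed:

```python
# Usage sketch (assumption: module importable as ipyplotly.serializers).
import numpy as np
from traitlets import Undefined
from ipyplotly.serializers import _py_to_js, _js_to_py

payload = {'x': np.arange(3, dtype='float64'), 'name': Undefined, 'sizes': (1, 2)}
out = _py_to_js(payload, None)
print(out['x']['dtype'], out['x']['shape'])  # float64 (3,)  (1-D numeric arrays become buffers)
print(out['name'])                           # _undefined_   (Undefined -> sentinel string)
print(out['sizes'])                          # [1, 2]        (tuples become lists)
print(_js_to_py('_undefined_', None) is Undefined)  # True
```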
#### File: datatypes/properties/test_compound_property.py
```python
from unittest import mock
import pytest
from ipyplotly.basedatatypes import BasePlotlyType
from ipyplotly.basevalidators import CompoundValidator
# Fixtures
# --------
@pytest.fixture()
def plotly_obj():
# ### Setup plotly obj (make fixture eventually) ###
plotly_obj = BasePlotlyType('plotly_obj')
# Add validator
validator = mock.Mock(spec=CompoundValidator,
wraps=CompoundValidator('prop1', 'plotly_obj', data_class=mock.Mock, data_docs=''))
plotly_obj._validators['prop1'] = validator
# Mock out _send_update
plotly_obj._send_update = mock.Mock()
return plotly_obj
# Validation
# ----------
def test_set_invalid_property(plotly_obj):
with pytest.raises(KeyError) as failure:
plotly_obj['bogus'] = 'Hello'
def test_get_invalid_property(plotly_obj):
with pytest.raises(KeyError) as failure:
p = plotly_obj['bogus']
# Orphan
# ------
@pytest.mark.xfail
def test_set_get_compound_property(plotly_obj):
# Setup value
# -----------
v = mock.Mock()
d = {'a': 23}
type(v)._data = mock.PropertyMock(return_value=d)
# Perform set_prop
# ----------------
plotly_obj['prop1'] = v
# Mutate d
# --------
# Just to make sure we copy data on assignment
d['a'] = 1
# Object Assertions
# -----------------
# ### test get object is a copy ###
assert plotly_obj['prop1'] is not v
# ### _send_update sent ###
plotly_obj._send_update.assert_called_once_with('prop1', {'a': 23})
# ### _orphan_data configured properly ###
assert plotly_obj._orphan_data == {'prop1': {'a': 23}}
# ### _data is mapped to _orphan_data
assert plotly_obj._props is plotly_obj._orphan_data
# ### validator called properly ###
plotly_obj._validators['prop1'].validate_coerce.assert_called_once_with(v)
# Value Assertions
# ----------------
# ### Parent set to plotly_obj
assert v._parent is plotly_obj
# ### Orphan data cleared ###
v._orphan_data.clear.assert_called_once()
# With parent
# -----------
@pytest.mark.xfail
def test_set_get_property_with_parent(plotly_obj, parent):
# Setup value
# -----------
v = mock.Mock()
d = {'a': 23}
type(v)._data = mock.PropertyMock(return_value=d)
# Setup parent
# ------------
plotly_obj._parent = parent
# Perform set_prop
# ----------------
plotly_obj['prop1'] = v
# Parent Assertions
# -----------------
parent._get_child_props.assert_called_with(plotly_obj)
# Object Assertions
# -----------------
# ### test get object is a copy ###
assert plotly_obj['prop1'] is not v
# ### _send_update sent ###
plotly_obj._send_update.assert_called_once_with('prop1', d)
# ### orphan data cleared ###
assert plotly_obj._orphan_data == {}
# ### _data bound to parent dict ###
assert parent._get_child_props(plotly_obj) is plotly_obj._props
# ### validator called properly ###
plotly_obj._validators['prop1'].validate_coerce.assert_called_once_with(v)
# Value Assertions
# ----------------
# ### Parent set to plotly_obj
assert v._parent is plotly_obj
# ### Orphan data cleared ###
v._orphan_data.clear.assert_called_once()
```
#### File: test/validators/test_dataarray_validator.py
```python
import pytest
from ipyplotly.basevalidators import DataArrayValidator
import numpy as np
# Fixtures
# --------
@pytest.fixture()
def validator():
return DataArrayValidator('prop', 'parent')
# Tests
# -----
# ### Acceptance ###
@pytest.mark.parametrize('val', [
[], [1], np.array([2, 3, 4]), [''], (), ('Hello, ', 'world!')
])
def test_validator_acceptance(val, validator: DataArrayValidator):
coerce_val = validator.validate_coerce(val)
assert isinstance(coerce_val, np.ndarray)
assert np.array_equal(coerce_val, val)
# ### Rejection ###
@pytest.mark.parametrize('val', [
'Hello', 23, set(), {},
])
def test_rejection(val, validator: DataArrayValidator):
with pytest.raises(ValueError) as validation_failure:
validator.validate_coerce(val)
assert 'Invalid value' in str(validation_failure.value)
```
#### File: test/validators/test_string_validator.py
```python
import pytest
from ipyplotly.basevalidators import StringValidator
import numpy as np
# Fixtures
# --------
@pytest.fixture()
def validator():
return StringValidator('prop', 'parent')
@pytest.fixture()
def validator_values():
return StringValidator('prop', 'parent', values=['foo', 'BAR', ''])
@pytest.fixture()
def validator_no_blanks():
return StringValidator('prop', 'parent', no_blank=True)
@pytest.fixture
def validator_aok():
return StringValidator('prop', 'parent', array_ok=True)
@pytest.fixture
def validator_aok_values():
return StringValidator('prop', 'parent', values=['foo', 'BAR', '', 'baz'], array_ok=True)
@pytest.fixture()
def validator_no_blanks_aok():
return StringValidator('prop', 'parent', no_blank=True, array_ok=True)
# Array not ok
# ------------
# ### Acceptance ###
@pytest.mark.parametrize('val',
['bar', 'HELLO!!!', 'world!@#$%^&*()', ''])
def test_acceptance(val, validator: StringValidator):
assert validator.validate_coerce(val) == val
# ### Rejection by value ###
@pytest.mark.parametrize('val',
[(), [], [1, 2, 3], set(), np.nan, np.pi])
def test_rejection(val, validator: StringValidator):
with pytest.raises(ValueError) as validation_failure:
validator.validate_coerce(val)
assert 'Invalid value' in str(validation_failure.value)
# Valid values
# ------------
@pytest.mark.parametrize('val',
['foo', 'BAR', ''])
def test_acceptance_values(val, validator_values: StringValidator):
assert validator_values.validate_coerce(val) == val
@pytest.mark.parametrize('val',
['FOO', 'bar', 'other', '1234'])
def test_rejection_values(val, validator_values: StringValidator):
with pytest.raises(ValueError) as validation_failure:
validator_values.validate_coerce(val)
    assert 'Invalid value' in str(validation_failure.value)
assert "['foo', 'BAR', '']" in str(validation_failure.value)
# ### No blanks ###
@pytest.mark.parametrize('val',
['bar', 'HELLO!!!', 'world!@#$%^&*()'])
def test_acceptance_no_blanks(val, validator_no_blanks: StringValidator):
assert validator_no_blanks.validate_coerce(val) == val
@pytest.mark.parametrize('val',
[''])
def test_rejection_no_blanks(val, validator_no_blanks: StringValidator):
with pytest.raises(ValueError) as validation_failure:
validator_no_blanks.validate_coerce(val)
assert 'A non-empty string' in str(validation_failure.value)
# Array ok
# --------
# ### Acceptance ###
@pytest.mark.parametrize('val',
['foo', 'BAR', '', 'baz'])
def test_acceptance_aok_scalars(val, validator_aok: StringValidator):
assert validator_aok.validate_coerce(val) == val
@pytest.mark.parametrize('val',
['foo', ['foo'], np.array(['BAR', ''], dtype='object'), ['baz', 'baz', 'baz']])
def test_acceptance_aok_list(val, validator_aok: StringValidator):
coerce_val = validator_aok.validate_coerce(val)
if isinstance(val, (list, np.ndarray)):
assert np.array_equal(coerce_val, np.array(val, dtype=coerce_val.dtype))
else:
assert coerce_val == val
# ### Rejection by type ###
@pytest.mark.parametrize('val',
[['foo', ()], ['foo', 3, 4], [3, 2, 1]])
def test_rejection_aok(val, validator_aok: StringValidator):
with pytest.raises(ValueError) as validation_failure:
validator_aok.validate_coerce(val)
assert 'Invalid element(s)' in str(validation_failure.value)
# ### Rejection by value ###
@pytest.mark.parametrize('val',
[['foo', 'bar'], ['3', '4'], ['BAR', 'BAR', 'hello!']])
def test_rejection_aok_values(val, validator_aok_values: StringValidator):
with pytest.raises(ValueError) as validation_failure:
validator_aok_values.validate_coerce(val)
assert 'Invalid element(s)' in str(validation_failure.value)
# ### No blanks ###
@pytest.mark.parametrize('val',
['123', ['bar', 'HELLO!!!'], ['world!@#$%^&*()']])
def test_acceptance_no_blanks_aok(val, validator_no_blanks_aok: StringValidator):
coerce_val = validator_no_blanks_aok.validate_coerce(val)
if isinstance(val, (list, np.ndarray)):
assert np.array_equal(coerce_val, np.array(val, dtype=coerce_val.dtype))
else:
assert coerce_val == val
@pytest.mark.parametrize('val',
['', ['foo', 'bar', ''], ['']])
def test_rejection_no_blanks_aok(val, validator_no_blanks_aok: StringValidator):
with pytest.raises(ValueError) as validation_failure:
validator_no_blanks_aok.validate_coerce(val)
assert 'A non-empty string' in str(validation_failure.value)
``` |
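
For context, a short behaviour sketch of the validator these tests exercise (assuming `ipyplotly.basevalidators` is importable, as in the imports above):

```python
# Behaviour sketch matching the tests above.
import numpy as np
from ipyplotly.basevalidators import StringValidator

v = StringValidator('prop', 'parent', values=['foo', 'BAR', ''], array_ok=True)
print(v.validate_coerce('foo'))           # scalars pass straight through
coerced = v.validate_coerce(['foo', 'BAR'])
print(isinstance(coerced, np.ndarray))    # True: lists are coerced to numpy arrays
try:
    v.validate_coerce(['foo', 'other'])
except ValueError as e:
    print('Invalid element(s)' in str(e))  # True: values outside the whitelist are rejected
```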
{
"source": "jmmedel/Python3-Data-Structure-References",
"score": 4
} |
#### File: Python3_Data_Structure/09_Node/02_Node.py
```python
class daynames:
def __init__(self, dataval=None):
self.dataval = dataval
self.nextval = None
e1 = daynames('Mon')
e2 = daynames('Wed')
e3 = daynames('Tue')
e4 = daynames('Thu')
e1.nextval = e3
e3.nextval = e2
e2.nextval = e4
thisvalue = e1
while thisvalue:
print(thisvalue.dataval)
thisvalue = thisvalue.nextval
"""
When the above code is executed, it produces the following result.
Mon
Tue
Wed
Thu
Additional operations such as insertion and deletion can be implemented as methods that use these node containers in general data structures like linked lists and trees; we study them in the next chapters.
"""
```
#### File: Python3_Data_Structure/10_Python_Linked_Lists/02_Linked_Lists.py
```python
class Node:
def __init__(self, dataval=None):
self.dataval = dataval
self.nextval = None
class SLinkedList:
def __init__(self):
self.headval = None
def listprint(self):
printval = self.headval
while printval is not None:
print (printval.dataval)
printval = printval.nextval
list = SLinkedList()
list.headval = Node("Mon")
e2 = Node("Tue")
e3 = Node("Wed")
# Link first Node to second node
list.headval.nextval = e2
# Link second Node to third node
e2.nextval = e3
list.listprint()
"""
When the above code is executed, it produces the following result:
Mon
Tue
Wed
"""
```
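
The Node chapter above notes that insertion can be implemented with methods on these containers. Below is a minimal, self-contained sketch in the same style that adds insertion at the beginning of the list; the method name `insert_at_beginning` is illustrative, not from the original repository:

```python
class Node:
    def __init__(self, dataval=None):
        self.dataval = dataval
        self.nextval = None

class SLinkedList:
    def __init__(self):
        self.headval = None

    def insert_at_beginning(self, newdata):
        new_node = Node(newdata)
        # Point the new node at the current head, then make it the new head.
        new_node.nextval = self.headval
        self.headval = new_node

    def listprint(self):
        printval = self.headval
        while printval is not None:
            print(printval.dataval)
            printval = printval.nextval

llist = SLinkedList()
for day in ("Wed", "Tue", "Mon"):   # inserting at the head reverses insertion order
    llist.insert_at_beginning(day)
llist.listprint()   # Mon, Tue, Wed
```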
#### File: Python3_Data_Structure/14_Python_Advance_Linked_List/02_Linked_List.py
```python
class Node:
def __init__(self, data):
self.data = data
self.next = None
self.prev = None
# Create the doubly linked list
class doubly_linked_list:
def __init__(self):
self.head = None
# Define the push method to add elements
def push(self, NewVal):
NewNode = Node(NewVal)
NewNode.next = self.head
if self.head is not None:
self.head.prev = NewNode
self.head = NewNode
# Define the insert method to insert the element
def insert(self, prev_node, NewVal):
if prev_node is None:
return
NewNode = Node(NewVal)
NewNode.next = prev_node.next
prev_node.next = NewNode
NewNode.prev = prev_node
if NewNode.next is not None:
NewNode.next.prev = NewNode
# Define the method to print the linked list
def listprint(self, node):
while (node is not None):
print(node.data),
last = node
node = node.next
dllist = doubly_linked_list()
dllist.push(12)
dllist.push(8)
dllist.push(62)
dllist.insert(dllist.head.next, 13)
dllist.listprint(dllist.head)
"""
When the above code is executed, it produces the following result −
62 8 13 12
"""
```
#### File: Python3_Data_Structure/14_Python_Advance_Linked_List/03_Linked_List.py
```python
class Node:
def __init__(self, data):
self.data = data
self.next = None
self.prev = None
# Create the doubly linked list class
class doubly_linked_list:
def __init__(self):
self.head = None
# Define the push method to add elements at the begining
def push(self, NewVal):
NewNode = Node(NewVal)
NewNode.next = self.head
if self.head is not None:
self.head.prev = NewNode
self.head = NewNode
# Define the append method to add elements at the end
def append(self, NewVal):
NewNode = Node(NewVal)
NewNode.next = None
if self.head is None:
NewNode.prev = None
self.head = NewNode
return
last = self.head
while (last.next is not None):
last = last.next
last.next = NewNode
NewNode.prev = last
return
# Define the method to print
def listprint(self, node):
while (node is not None):
print(node.data),
last = node
node = node.next
dllist = doubly_linked_list()
dllist.push(12)
dllist.append(9)
dllist.push(8)
dllist.push(62)
dllist.append(45)
dllist.listprint(dllist.head)
"""
When the above code is executed, it produces the following result −
62 8 12 9 45
Please note the position of the elements 9 and 45 for the append operation.
"""
```
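
The forward print above never touches the `prev` pointers; reverse traversal is what the extra pointer buys. A short sketch, assuming the `Node` class and the `dllist` built in the block above are still in scope:

```python
# Illustrative helper (not in the original file): walk to the tail via next,
# then follow the prev pointers back to print the list in reverse.
def listprint_reverse(node):
    last = None
    while node is not None:      # walk forward to find the tail
        last = node
        node = node.next
    while last is not None:      # then follow the prev pointers back
        print(last.data)
        last = last.prev

listprint_reverse(dllist.head)   # 45 9 12 8 62
```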
#### File: Python3_Data_Structure/27_Sorting_Algorithms/01_Bubble_Sort.py
```python
def bubblesort(nums):
    # Swap the elements to arrange in order
    for iter_num in range(len(nums)-1, 0, -1):
        for idx in range(iter_num):
            if nums[idx] > nums[idx+1]:
                temp = nums[idx]
                nums[idx] = nums[idx+1]
                nums[idx+1] = temp
nums = [19, 2, 31, 45, 6, 11, 121, 27]
bubblesort(nums)
print(nums)
"""
When the above code is executed, it produces the following result −
[2, 6, 11, 19, 27, 31, 45, 121]
"""
``` |
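
The explicit temporary-variable swap above mirrors how bubble sort is written in most languages; in Python the same swap is usually a single tuple assignment. A sketch of the identical algorithm with that idiom:

```python
def bubblesort(nums):
    # Same algorithm as above, using Python's tuple-assignment swap.
    for iter_num in range(len(nums) - 1, 0, -1):
        for idx in range(iter_num):
            if nums[idx] > nums[idx + 1]:
                nums[idx], nums[idx + 1] = nums[idx + 1], nums[idx]

nums = [19, 2, 31, 45, 6, 11, 121, 27]
bubblesort(nums)
print(nums)   # [2, 6, 11, 19, 27, 31, 45, 121]
```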
{
"source": "jmmedel/Python-Reference",
"score": 4
} |
#### File: Python-Reference/22_Inheritance/Inheritance3.py
```python
"""
Example
Use the Student class to create an object, and then execute the printname method:
"""
class Person:
def __init__(self, fname, lname):
self.firstname = fname
self.lastname = lname
def printname(self):
print(self.firstname, self.lastname)
class Student(Person):
pass
x = Student("Mike", "Olsen")
x.printname()
``` |
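
An empty `pass` body means `Student` inherits everything unchanged. A small follow-up sketch (not part of the original file) showing the usual next step: giving the child class its own `__init__` while reusing the parent's via `super()`; the `graduationyear` field is illustrative:

```python
class Person:
    def __init__(self, fname, lname):
        self.firstname = fname
        self.lastname = lname

    def printname(self):
        print(self.firstname, self.lastname)

class Student(Person):
    def __init__(self, fname, lname, year):
        # Let the parent class set the name fields, then add a new one.
        super().__init__(fname, lname)
        self.graduationyear = year

x = Student("Mike", "Olsen", 2019)
x.printname()          # Mike Olsen
print(x.graduationyear)  # 2019
```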
{
"source": "jmmedina00/PyRSServer",
"score": 3
} |
#### File: PyRSServer/rsserver/nodes.py
```python
import pathlib
import os.path
import json
import markdown
import xml.dom.minidom as minidom
from datetime import datetime, timezone, timedelta
import rsserver.server
class RSSConstructorException(Exception):
"""Exception thrown during RSS object generating.
This exception is thrown when, for example, the structure
requirements for building an item object or a feed object
aren't met.
"""
pass
class RSSFeed:
"""Represents a whole RSS feed.
Attributes:
info (dict): The feed's information, loaded from server_info.json
items (list of RSSItem): The feed's items.
"""
def __init__(self, path):
"""Loads all the feed's date from the specified path.
The feed's info gets loaded from a `server_info.json`, and the items
get loaded from the items folder.
Args:
path (str): The path for loading all the information from.
"""
if os.path.isdir(path):
path_info = path + "/server_info.json"
path_items = path + "/items"
if os.path.isfile(path_info) and os.path.isdir(path_items):
file_info = open(path_info)
self.info = json.load(file_info)
self.items = []
plib_items = pathlib.Path(path_items)
for file in plib_items.iterdir():
if file.is_dir():
pathtoitem = full_path(file)
self.items.append(RSSItem(pathtoitem))
self.items.sort()
else:
raise RSSConstructorException("Required information is missing")
else:
raise RSSConstructorException("Please specify a directory")
def generatedoc(self):
"""Generates the DOM document
xml.dom.minidom is used through the whole process, since CDATA nodes are
required inside description nodes in items, due to the fact that the
content gets translated from Markdown to HTML.
Returns:
xml.dom.minidom.Document: The feed's document
"""
document = minidom.Document()
root = document.createElement("rss")
root.setAttribute("version", "2.0")
document.appendChild(root)
cont = document.createElement("channel")
root.appendChild(cont)
title = document.createElement("title")
title.appendChild(document.createTextNode(self.info["title"]))
cont.appendChild(title)
link = document.createElement("link")
link.appendChild(document.createTextNode(
"http://" +
rsserver.server.address + ":" + str(rsserver.server.port)
))
cont.appendChild(link)
description = document.createElement("description")
description.appendChild(document.createTextNode(self.info["description"]))
cont.appendChild(description)
for item in self.items:
node = item.getnode(document)
cont.appendChild(node)
return document
class RSSItem:
"""Represents a single item inside an RSS feed.
Attributes:
name (str): The name of the original folder.
info (dict): The item's information, sourced from JSON file.
content (str): The item's content, written in Markdown.
"""
def __init__(self, path):
"""
Loads the required files from the selected folder, then stores
their content.
Note:
For initializing, the files `info.json` and `content.md` are
required.
Args:
path (str): The folder's path to be loaded.
Raises:
RSSConstructorException: If the path doesn't exist, is not a folder,
or doesn't contain the required files.
"""
path_info = path + "/info.json"
path_content = path + "/content.md"
parts = path.split("/")
self.name = parts[len(parts) - 1]
if os.path.isfile(path_info) and os.path.isfile(path_content):
file_info = open(path_info)
file_content = open(path_content)
self.info = json.load(file_info)
self.content = file_content.read()
file_info.close()
file_content.close()
else:
raise RSSConstructorException("Needed files not found.")
def __gt__(self, other):
"""For ordering items by reverse date.
        The first items to appear in an RSS feed are the most recent ones.
        Thus, they are ordered from newest to oldest.
Args:
other (RSSItem): The other item to compare
Returns:
True if the other's date is more recent, False otherwise
"""
self_date = dateobj(self.info["date"], self.info["time"],
self.info["timezone"])
other_date = dateobj(other.info["date"], other.info["time"],
other.info["timezone"])
return self_date < other_date
def renderHTML(self):
"""Generates a HTML document with the contents of the item.
The item's "link" could be attempted to open in a browser
(which is the original intention of the <link> node), so generating
a simple page might fill the gap.
The HTML code is constructed with strings because Markdown function
returns a string, and there was no incentive to make a XHTML document
from scratch.
Returns:
A basic HTML document for displaying in a web browser.
"""
title = self.info["title"]
body = "<h1>" + title + "</h1>"
body += markdown.markdown(self.content)
head = "<meta charset='UTF-8'>"
#Getting feed's name back from server_info.json
base_info_path = rsserver.server.base_directory + "/server_info.json"
base_info = open(base_info_path)
base_info_dict = json.load(base_info)
base_info.close()
title += " - " + base_info_dict["title"]
head += "<title>" + title +"</title>"
return "<html><head>" + head + "</head><body>" + body + "</body></html>"
def getnode(self, document):
"""Generates a node for a document.
It uses all the values gathered in the info dictionary to build the
nodes, so having all keys and values set correctly is necessary for
this method to work correctly.
Args:
document (xml.dom.minidom.Document): The document to which the final node
will be added
Returns:
xml.dom.minidom.Element: This item's node
"""
link = "http://" + rsserver.server.address + ":"
link += str(rsserver.server.port) + "/" + self.name
node = document.createElement("item")
title = document.createElement("title")
title_text = document.createTextNode(self.info["title"])
title.appendChild(title_text)
node.appendChild(title)
#Link nodes, which use the same text node
link_text = document.createTextNode(link)
link_node = document.createElement("link")
link_node.appendChild(link_text.cloneNode(True))
guid = document.createElement("guid")
guid.setAttribute("isPermaLink", "true")
guid.appendChild(link_text)
node.appendChild(link_node)
node.appendChild(guid)
description = document.createElement("description")
content = document.createCDATASection(markdown.markdown(self.content))
description.appendChild(content)
node.appendChild(description)
pubdate = document.createElement("pubDate")
pubdate.appendChild(document.createTextNode(genedate(self.info)))
node.appendChild(pubdate)
for c in self.info["categories"]:
category = document.createElement("category")
category.appendChild(document.createTextNode(c))
node.appendChild(category)
return node
def full_path(path):
"""Support function, reassembles a path from a Path object.
Used for reconstructing a directory Path object's original path.
    Args:
path (pathlib.Path): The path object.
Returns:
str: Its original path.
"""
result = ""
if isinstance(path, pathlib.Path):
parts = path.parts
for x in parts:
result += x + "/"
return result[0:len(result)-1]
def dateobj(stringdate, stringtime, stringtz):
"""Support function, creates a datetime object from an item's info attribute.
Generates a full datetime object with date, time and timezone information
from the item's dictionary.
Args:
stringdate (str): The string representing the date in the format YYYY-MM-DD
(Y: year, M: month, D: day)
stringtime (str): The string representing the time in the format HH:MM:SS
(H: hours, M: minutes, S: seconds)
stringtz (str): The string representing the timezone in the format xHHMM
(x: Plus (+) or minus (-) symbol, H: hours, M: minutes)
Returns:
datetime.datetime: A datetime object with the required data filled
"""
tz_multipliers = {"+": 1, "-": -1}
date_num = [int(n) for n in stringdate.split("-")]
time_num = [int(n) for n in stringtime.split(":")]
tz_hours = int(stringtz[1:3]) * tz_multipliers[stringtz[0]]
tz_minutes = int(stringtz[3:5]) * tz_multipliers[stringtz[0]]
delta = timedelta(hours=tz_hours, minutes=tz_minutes)
return datetime(date_num[0], date_num[1], date_num[2],
time_num[0], time_num[1], time_num[2], tzinfo=timezone(delta))
def genedate(info):
"""Generates RFC 822 compliant date string
Creates, from an item's info attribute, the string required for
the pubDate node, which must be compliant with the standard
RFC 822: https://www.w3.org/Protocols/rfc822/#z28
Args:
info (dict): A dictionary sourced from an item object
Returns:
The pubDate string
"""
days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul",
"Aug", "Sep", "Oct", "Nov", "Dec"]
obj = dateobj(info["date"], info["time"], info["timezone"])
pubdate = days[obj.weekday()] + ", " + str(obj.day)
pubdate += " " + months[obj.month - 1] + " " + str(obj.year)
pubdate += " " + info["time"] + " " + info["timezone"]
return pubdate
``` |
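
The docstrings above pin down the on-disk formats exactly: dates as YYYY-MM-DD, times as HH:MM:SS, timezones as ±HHMM. A small sketch, assuming the package is importable as `rsserver.nodes`, of running an item-style info dictionary (values made up for illustration) through `dateobj` and `genedate`:

```python
# Illustrative only: the info dict mirrors the fields an item's info.json
# is documented to contain; the concrete values are made up.
from rsserver.nodes import dateobj, genedate

info = {
    "title": "Example post",
    "date": "2020-05-17",
    "time": "09:30:00",
    "timezone": "+0200",
    "categories": ["example"],
}

print(dateobj(info["date"], info["time"], info["timezone"]))
# 2020-05-17 09:30:00+02:00
print(genedate(info))
# Sun, 17 May 2020 09:30:00 +0200
```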
{
"source": "jmmelis/DipteraTrack",
"score": 2
} |
#### File: jmmelis/DipteraTrack/diptera_track_ui.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1140, 683)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setMinimumSize(QtCore.QSize(1124, 674))
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.tabs = QtWidgets.QTabWidget(self.centralwidget)
self.tabs.setObjectName("tabs")
self.ses_par_tab = QtWidgets.QWidget()
self.ses_par_tab.setObjectName("ses_par_tab")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.ses_par_tab)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.widget = QtWidgets.QWidget(self.ses_par_tab)
self.widget.setMinimumSize(QtCore.QSize(0, 551))
self.widget.setObjectName("widget")
self.folder_select_tree = QtWidgets.QTreeView(self.widget)
self.folder_select_tree.setGeometry(QtCore.QRect(9, 30, 571, 321))
self.folder_select_tree.setMinimumSize(QtCore.QSize(451, 0))
self.folder_select_tree.setObjectName("folder_select_tree")
self.label = QtWidgets.QLabel(self.widget)
self.label.setGeometry(QtCore.QRect(9, 9, 128, 16))
self.label.setObjectName("label")
self.label_3 = QtWidgets.QLabel(self.widget)
self.label_3.setGeometry(QtCore.QRect(600, 0, 124, 16))
self.label_3.setObjectName("label_3")
self.line = QtWidgets.QFrame(self.widget)
self.line.setGeometry(QtCore.QRect(590, 20, 511, 20))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.label_2 = QtWidgets.QLabel(self.widget)
self.label_2.setGeometry(QtCore.QRect(600, 30, 91, 16))
self.label_2.setObjectName("label_2")
self.ses_folder_label = QtWidgets.QLabel(self.widget)
self.ses_folder_label.setGeometry(QtCore.QRect(620, 50, 471, 20))
self.ses_folder_label.setObjectName("ses_folder_label")
self.label_5 = QtWidgets.QLabel(self.widget)
self.label_5.setGeometry(QtCore.QRect(600, 90, 141, 16))
self.label_5.setObjectName("label_5")
self.bckg_folder_label = QtWidgets.QLabel(self.widget)
self.bckg_folder_label.setGeometry(QtCore.QRect(620, 110, 281, 20))
self.bckg_folder_label.setObjectName("bckg_folder_label")
self.line_2 = QtWidgets.QFrame(self.widget)
self.line_2.setGeometry(QtCore.QRect(580, 30, 20, 561))
self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.label_7 = QtWidgets.QLabel(self.widget)
self.label_7.setGeometry(QtCore.QRect(600, 130, 291, 16))
self.label_7.setObjectName("label_7")
self.cal_folder_label = QtWidgets.QLabel(self.widget)
self.cal_folder_label.setGeometry(QtCore.QRect(620, 150, 381, 20))
self.cal_folder_label.setObjectName("cal_folder_label")
self.label_9 = QtWidgets.QLabel(self.widget)
self.label_9.setGeometry(QtCore.QRect(600, 200, 291, 16))
self.label_9.setObjectName("label_9")
self.mov_folder1_label = QtWidgets.QLabel(self.widget)
self.mov_folder1_label.setGeometry(QtCore.QRect(620, 220, 371, 20))
self.mov_folder1_label.setObjectName("mov_folder1_label")
self.mov_folder2_label = QtWidgets.QLabel(self.widget)
self.mov_folder2_label.setGeometry(QtCore.QRect(620, 240, 371, 20))
self.mov_folder2_label.setObjectName("mov_folder2_label")
self.mov_folder3_label = QtWidgets.QLabel(self.widget)
self.mov_folder3_label.setGeometry(QtCore.QRect(620, 260, 371, 20))
self.mov_folder3_label.setObjectName("mov_folder3_label")
self.mov_folder4_label = QtWidgets.QLabel(self.widget)
self.mov_folder4_label.setGeometry(QtCore.QRect(620, 280, 371, 20))
self.mov_folder4_label.setObjectName("mov_folder4_label")
self.mov_folder5_label = QtWidgets.QLabel(self.widget)
self.mov_folder5_label.setGeometry(QtCore.QRect(620, 300, 371, 20))
self.mov_folder5_label.setObjectName("mov_folder5_label")
self.mov_folder6_label = QtWidgets.QLabel(self.widget)
self.mov_folder6_label.setGeometry(QtCore.QRect(620, 320, 371, 20))
self.mov_folder6_label.setObjectName("mov_folder6_label")
self.mov_folder7_label = QtWidgets.QLabel(self.widget)
self.mov_folder7_label.setGeometry(QtCore.QRect(620, 340, 371, 20))
self.mov_folder7_label.setObjectName("mov_folder7_label")
self.mov_folder8_label = QtWidgets.QLabel(self.widget)
self.mov_folder8_label.setGeometry(QtCore.QRect(620, 360, 371, 20))
self.mov_folder8_label.setObjectName("mov_folder8_label")
self.label_18 = QtWidgets.QLabel(self.widget)
self.label_18.setGeometry(QtCore.QRect(600, 390, 301, 20))
self.label_18.setObjectName("label_18")
self.cam_folder1_label = QtWidgets.QLabel(self.widget)
self.cam_folder1_label.setGeometry(QtCore.QRect(620, 410, 371, 20))
self.cam_folder1_label.setObjectName("cam_folder1_label")
self.cam_folder2_label = QtWidgets.QLabel(self.widget)
self.cam_folder2_label.setGeometry(QtCore.QRect(620, 430, 371, 20))
self.cam_folder2_label.setObjectName("cam_folder2_label")
self.cam_folder3_label = QtWidgets.QLabel(self.widget)
self.cam_folder3_label.setGeometry(QtCore.QRect(620, 450, 371, 20))
self.cam_folder3_label.setObjectName("cam_folder3_label")
self.cam_folder4_label = QtWidgets.QLabel(self.widget)
self.cam_folder4_label.setGeometry(QtCore.QRect(620, 470, 371, 20))
self.cam_folder4_label.setObjectName("cam_folder4_label")
self.ses_folder_rbtn = QtWidgets.QRadioButton(self.widget)
self.ses_folder_rbtn.setGeometry(QtCore.QRect(600, 50, 21, 21))
self.ses_folder_rbtn.setObjectName("ses_folder_rbtn")
self.bckg_folder_rbtn = QtWidgets.QRadioButton(self.widget)
self.bckg_folder_rbtn.setGeometry(QtCore.QRect(600, 110, 21, 21))
self.bckg_folder_rbtn.setObjectName("bckg_folder_rbtn")
self.cal_folder_rbtn = QtWidgets.QRadioButton(self.widget)
self.cal_folder_rbtn.setGeometry(QtCore.QRect(600, 150, 21, 21))
self.cal_folder_rbtn.setObjectName("cal_folder_rbtn")
self.mov_folder1_rbtn = QtWidgets.QRadioButton(self.widget)
self.mov_folder1_rbtn.setGeometry(QtCore.QRect(600, 220, 21, 21))
self.mov_folder1_rbtn.setObjectName("mov_folder1_rbtn")
self.mov_folder2_rbtn = QtWidgets.QRadioButton(self.widget)
self.mov_folder2_rbtn.setGeometry(QtCore.QRect(600, 240, 21, 21))
self.mov_folder2_rbtn.setObjectName("mov_folder2_rbtn")
self.mov_folder3_rbtn = QtWidgets.QRadioButton(self.widget)
self.mov_folder3_rbtn.setGeometry(QtCore.QRect(600, 260, 21, 21))
self.mov_folder3_rbtn.setObjectName("mov_folder3_rbtn")
self.mov_folder4_rbtn = QtWidgets.QRadioButton(self.widget)
self.mov_folder4_rbtn.setGeometry(QtCore.QRect(600, 280, 21, 21))
self.mov_folder4_rbtn.setObjectName("mov_folder4_rbtn")
self.mov_folder5_rbtn = QtWidgets.QRadioButton(self.widget)
self.mov_folder5_rbtn.setGeometry(QtCore.QRect(600, 300, 21, 21))
self.mov_folder5_rbtn.setObjectName("mov_folder5_rbtn")
self.mov_folder6_rbtn = QtWidgets.QRadioButton(self.widget)
self.mov_folder6_rbtn.setGeometry(QtCore.QRect(600, 320, 21, 21))
self.mov_folder6_rbtn.setObjectName("mov_folder6_rbtn")
self.mov_folder7_rbtn = QtWidgets.QRadioButton(self.widget)
self.mov_folder7_rbtn.setGeometry(QtCore.QRect(600, 340, 21, 21))
self.mov_folder7_rbtn.setObjectName("mov_folder7_rbtn")
self.mov_folder8_rbtn = QtWidgets.QRadioButton(self.widget)
self.mov_folder8_rbtn.setGeometry(QtCore.QRect(600, 360, 21, 21))
self.mov_folder8_rbtn.setObjectName("mov_folder8_rbtn")
self.cam_folder1_rbtn = QtWidgets.QRadioButton(self.widget)
self.cam_folder1_rbtn.setGeometry(QtCore.QRect(600, 410, 21, 21))
self.cam_folder1_rbtn.setObjectName("cam_folder1_rbtn")
self.cam_folder2_rbtn = QtWidgets.QRadioButton(self.widget)
self.cam_folder2_rbtn.setGeometry(QtCore.QRect(600, 430, 21, 21))
self.cam_folder2_rbtn.setObjectName("cam_folder2_rbtn")
self.cam_folder3_rbtn = QtWidgets.QRadioButton(self.widget)
self.cam_folder3_rbtn.setGeometry(QtCore.QRect(600, 450, 21, 21))
self.cam_folder3_rbtn.setObjectName("cam_folder3_rbtn")
self.cam_folder4_rbtn = QtWidgets.QRadioButton(self.widget)
self.cam_folder4_rbtn.setGeometry(QtCore.QRect(600, 470, 21, 21))
self.cam_folder4_rbtn.setObjectName("cam_folder4_rbtn")
self.cam_folder5_rbtn = QtWidgets.QRadioButton(self.widget)
self.cam_folder5_rbtn.setGeometry(QtCore.QRect(600, 490, 21, 21))
self.cam_folder5_rbtn.setObjectName("cam_folder5_rbtn")
self.cam_folder6_rbtn = QtWidgets.QRadioButton(self.widget)
self.cam_folder6_rbtn.setGeometry(QtCore.QRect(600, 510, 21, 21))
self.cam_folder6_rbtn.setObjectName("cam_folder6_rbtn")
self.cam_folder5_label = QtWidgets.QLabel(self.widget)
self.cam_folder5_label.setGeometry(QtCore.QRect(620, 490, 371, 20))
self.cam_folder5_label.setObjectName("cam_folder5_label")
self.cam_folder6_label = QtWidgets.QLabel(self.widget)
self.cam_folder6_label.setGeometry(QtCore.QRect(620, 510, 371, 20))
self.cam_folder6_label.setObjectName("cam_folder6_label")
self.label_25 = QtWidgets.QLabel(self.widget)
self.label_25.setGeometry(QtCore.QRect(600, 540, 201, 16))
self.label_25.setObjectName("label_25")
self.frame_name_rbtn = QtWidgets.QRadioButton(self.widget)
self.frame_name_rbtn.setGeometry(QtCore.QRect(600, 560, 21, 21))
self.frame_name_rbtn.setObjectName("frame_name_rbtn")
self.frame_name_label = QtWidgets.QLabel(self.widget)
self.frame_name_label.setGeometry(QtCore.QRect(620, 560, 391, 20))
self.frame_name_label.setObjectName("frame_name_label")
self.label_27 = QtWidgets.QLabel(self.widget)
self.label_27.setGeometry(QtCore.QRect(930, 80, 161, 20))
self.label_27.setObjectName("label_27")
self.bck_img_fmt_box = QtWidgets.QComboBox(self.widget)
self.bck_img_fmt_box.setGeometry(QtCore.QRect(1020, 100, 79, 23))
self.bck_img_fmt_box.setObjectName("bck_img_fmt_box")
self.label_28 = QtWidgets.QLabel(self.widget)
self.label_28.setGeometry(QtCore.QRect(930, 130, 161, 20))
self.label_28.setObjectName("label_28")
self.cal_img_fmt_box = QtWidgets.QComboBox(self.widget)
self.cal_img_fmt_box.setGeometry(QtCore.QRect(1020, 150, 79, 23))
self.cal_img_fmt_box.setObjectName("cal_img_fmt_box")
self.label_29 = QtWidgets.QLabel(self.widget)
self.label_29.setGeometry(QtCore.QRect(970, 540, 131, 20))
self.label_29.setObjectName("label_29")
self.frame_img_fmt_box = QtWidgets.QComboBox(self.widget)
self.frame_img_fmt_box.setGeometry(QtCore.QRect(1020, 560, 79, 23))
self.frame_img_fmt_box.setObjectName("frame_img_fmt_box")
self.line_4 = QtWidgets.QFrame(self.widget)
self.line_4.setGeometry(QtCore.QRect(10, 460, 571, 16))
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.line_5 = QtWidgets.QFrame(self.widget)
self.line_5.setGeometry(QtCore.QRect(10, 580, 1091, 20))
self.line_5.setFrameShape(QtWidgets.QFrame.HLine)
self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_5.setObjectName("line_5")
self.label_30 = QtWidgets.QLabel(self.widget)
self.label_30.setGeometry(QtCore.QRect(250, 470, 151, 16))
self.label_30.setObjectName("label_30")
self.start_frame_spin = QtWidgets.QSpinBox(self.widget)
self.start_frame_spin.setGeometry(QtCore.QRect(120, 490, 91, 24))
self.start_frame_spin.setObjectName("start_frame_spin")
self.label_31 = QtWidgets.QLabel(self.widget)
self.label_31.setGeometry(QtCore.QRect(10, 490, 101, 16))
self.label_31.setObjectName("label_31")
self.label_32 = QtWidgets.QLabel(self.widget)
self.label_32.setGeometry(QtCore.QRect(10, 520, 101, 16))
self.label_32.setObjectName("label_32")
self.trig_frame_spin = QtWidgets.QSpinBox(self.widget)
self.trig_frame_spin.setGeometry(QtCore.QRect(120, 520, 91, 24))
self.trig_frame_spin.setObjectName("trig_frame_spin")
self.label_33 = QtWidgets.QLabel(self.widget)
self.label_33.setGeometry(QtCore.QRect(10, 550, 101, 16))
self.label_33.setObjectName("label_33")
self.end_frame_spin = QtWidgets.QSpinBox(self.widget)
self.end_frame_spin.setGeometry(QtCore.QRect(120, 550, 91, 24))
self.end_frame_spin.setObjectName("end_frame_spin")
self.label_34 = QtWidgets.QLabel(self.widget)
self.label_34.setGeometry(QtCore.QRect(250, 490, 91, 16))
self.label_34.setObjectName("label_34")
self.trig_mode_box = QtWidgets.QComboBox(self.widget)
self.trig_mode_box.setGeometry(QtCore.QRect(350, 490, 111, 23))
self.trig_mode_box.setObjectName("trig_mode_box")
self.line_3 = QtWidgets.QFrame(self.widget)
self.line_3.setGeometry(QtCore.QRect(10, 350, 571, 16))
self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.label_4 = QtWidgets.QLabel(self.widget)
self.label_4.setGeometry(QtCore.QRect(250, 360, 101, 16))
self.label_4.setObjectName("label_4")
self.mdl_loc_rbtn = QtWidgets.QRadioButton(self.widget)
self.mdl_loc_rbtn.setGeometry(QtCore.QRect(10, 400, 21, 21))
self.mdl_loc_rbtn.setObjectName("mdl_loc_rbtn")
self.mdl_loc_label = QtWidgets.QLabel(self.widget)
self.mdl_loc_label.setGeometry(QtCore.QRect(40, 400, 541, 21))
self.mdl_loc_label.setObjectName("mdl_loc_label")
self.label_10 = QtWidgets.QLabel(self.widget)
self.label_10.setGeometry(QtCore.QRect(10, 380, 171, 16))
self.label_10.setObjectName("label_10")
self.label_11 = QtWidgets.QLabel(self.widget)
self.label_11.setGeometry(QtCore.QRect(10, 420, 141, 16))
self.label_11.setObjectName("label_11")
self.mdl_name_rbtn = QtWidgets.QRadioButton(self.widget)
self.mdl_name_rbtn.setGeometry(QtCore.QRect(10, 440, 21, 21))
self.mdl_name_rbtn.setObjectName("mdl_name_rbtn")
self.mdl_name_label = QtWidgets.QLabel(self.widget)
self.mdl_name_label.setGeometry(QtCore.QRect(40, 440, 541, 21))
self.mdl_name_label.setObjectName("mdl_name_label")
self.label_6 = QtWidgets.QLabel(self.widget)
self.label_6.setGeometry(QtCore.QRect(600, 170, 101, 16))
self.label_6.setObjectName("label_6")
self.cal_file_label = QtWidgets.QLabel(self.widget)
self.cal_file_label.setGeometry(QtCore.QRect(710, 170, 291, 20))
self.cal_file_label.setObjectName("cal_file_label")
self.label_8 = QtWidgets.QLabel(self.widget)
self.label_8.setGeometry(QtCore.QRect(600, 70, 101, 16))
self.label_8.setObjectName("label_8")
self.ses_name_label = QtWidgets.QLabel(self.widget)
self.ses_name_label.setGeometry(QtCore.QRect(700, 70, 381, 20))
self.ses_name_label.setObjectName("ses_name_label")
self.reset_selection_push_btn = QtWidgets.QPushButton(self.widget)
self.reset_selection_push_btn.setGeometry(QtCore.QRect(470, 0, 101, 23))
self.reset_selection_push_btn.setObjectName("reset_selection_push_btn")
self.start_session_push_btn = QtWidgets.QPushButton(self.widget)
self.start_session_push_btn.setGeometry(QtCore.QRect(1010, 600, 85, 23))
self.start_session_push_btn.setObjectName("start_session_push_btn")
self.save_settings_push_btn = QtWidgets.QPushButton(self.widget)
self.save_settings_push_btn.setGeometry(QtCore.QRect(870, 600, 131, 23))
self.save_settings_push_btn.setObjectName("save_settings_push_btn")
self.load_settings_file_label = QtWidgets.QLabel(self.widget)
self.load_settings_file_label.setGeometry(QtCore.QRect(40, 600, 671, 21))
self.load_settings_file_label.setObjectName("load_settings_file_label")
self.load_settings_push_btn = QtWidgets.QPushButton(self.widget)
self.load_settings_push_btn.setGeometry(QtCore.QRect(720, 600, 141, 23))
self.load_settings_push_btn.setObjectName("load_settings_push_btn")
self.load_settings_rbtn = QtWidgets.QRadioButton(self.widget)
self.load_settings_rbtn.setGeometry(QtCore.QRect(10, 600, 21, 21))
self.load_settings_rbtn.setObjectName("load_settings_rbtn")
self.verticalLayout_4.addWidget(self.widget)
self.tabs.addTab(self.ses_par_tab, "")
self.focal_grid_tab = QtWidgets.QWidget()
self.focal_grid_tab.setObjectName("focal_grid_tab")
self.gridLayout_2 = QtWidgets.QGridLayout(self.focal_grid_tab)
self.gridLayout_2.setObjectName("gridLayout_2")
self.widget_2 = QtWidgets.QWidget(self.focal_grid_tab)
self.widget_2.setObjectName("widget_2")
self.gridLayout_7 = QtWidgets.QGridLayout(self.widget_2)
self.gridLayout_7.setObjectName("gridLayout_7")
self.line_9 = QtWidgets.QFrame(self.widget_2)
self.line_9.setFrameShape(QtWidgets.QFrame.HLine)
self.line_9.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_9.setObjectName("line_9")
self.gridLayout_7.addWidget(self.line_9, 0, 0, 2, 8)
self.label_16 = QtWidgets.QLabel(self.widget_2)
self.label_16.setObjectName("label_16")
self.gridLayout_7.addWidget(self.label_16, 1, 2, 2, 3)
self.line_6 = QtWidgets.QFrame(self.widget_2)
self.line_6.setFrameShape(QtWidgets.QFrame.HLine)
self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_6.setObjectName("line_6")
self.gridLayout_7.addWidget(self.line_6, 2, 0, 1, 2)
self.label_12 = QtWidgets.QLabel(self.widget_2)
self.label_12.setObjectName("label_12")
self.gridLayout_7.addWidget(self.label_12, 3, 2, 1, 1)
self.nx_spin = QtWidgets.QSpinBox(self.widget_2)
self.nx_spin.setObjectName("nx_spin")
self.gridLayout_7.addWidget(self.nx_spin, 3, 3, 1, 1)
spacerItem = QtWidgets.QSpacerItem(928, 213, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_7.addItem(spacerItem, 3, 4, 7, 4)
self.label_13 = QtWidgets.QLabel(self.widget_2)
self.label_13.setObjectName("label_13")
self.gridLayout_7.addWidget(self.label_13, 4, 2, 1, 1)
self.ny_spin = QtWidgets.QSpinBox(self.widget_2)
self.ny_spin.setObjectName("ny_spin")
self.gridLayout_7.addWidget(self.ny_spin, 4, 3, 1, 1)
self.label_14 = QtWidgets.QLabel(self.widget_2)
self.label_14.setObjectName("label_14")
self.gridLayout_7.addWidget(self.label_14, 5, 2, 1, 1)
self.nz_spin = QtWidgets.QSpinBox(self.widget_2)
self.nz_spin.setObjectName("nz_spin")
self.gridLayout_7.addWidget(self.nz_spin, 5, 3, 1, 1)
self.label_15 = QtWidgets.QLabel(self.widget_2)
self.label_15.setObjectName("label_15")
self.gridLayout_7.addWidget(self.label_15, 6, 2, 1, 1)
self.ds_spin = QtWidgets.QDoubleSpinBox(self.widget_2)
self.ds_spin.setObjectName("ds_spin")
self.gridLayout_7.addWidget(self.ds_spin, 6, 3, 1, 1)
self.label_17 = QtWidgets.QLabel(self.widget_2)
self.label_17.setObjectName("label_17")
self.gridLayout_7.addWidget(self.label_17, 7, 2, 1, 1)
self.x0_spin = QtWidgets.QDoubleSpinBox(self.widget_2)
self.x0_spin.setObjectName("x0_spin")
self.gridLayout_7.addWidget(self.x0_spin, 7, 3, 1, 1)
self.label_19 = QtWidgets.QLabel(self.widget_2)
self.label_19.setObjectName("label_19")
self.gridLayout_7.addWidget(self.label_19, 8, 2, 1, 1)
self.y0_spin = QtWidgets.QDoubleSpinBox(self.widget_2)
self.y0_spin.setObjectName("y0_spin")
self.gridLayout_7.addWidget(self.y0_spin, 8, 3, 1, 1)
self.label_20 = QtWidgets.QLabel(self.widget_2)
self.label_20.setObjectName("label_20")
self.gridLayout_7.addWidget(self.label_20, 9, 2, 1, 1)
self.z0_spin = QtWidgets.QDoubleSpinBox(self.widget_2)
self.z0_spin.setObjectName("z0_spin")
self.gridLayout_7.addWidget(self.z0_spin, 9, 3, 1, 1)
self.line_7 = QtWidgets.QFrame(self.widget_2)
self.line_7.setFrameShape(QtWidgets.QFrame.HLine)
self.line_7.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_7.setObjectName("line_7")
self.gridLayout_7.addWidget(self.line_7, 10, 0, 1, 7)
spacerItem1 = QtWidgets.QSpacerItem(696, 48, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_7.addItem(spacerItem1, 10, 7, 2, 1)
self.calc_vox_btn = QtWidgets.QPushButton(self.widget_2)
self.calc_vox_btn.setObjectName("calc_vox_btn")
self.gridLayout_7.addWidget(self.calc_vox_btn, 11, 0, 1, 4)
self.vox_progress_bar = QtWidgets.QProgressBar(self.widget_2)
self.vox_progress_bar.setMinimumSize(QtCore.QSize(211, 0))
self.vox_progress_bar.setProperty("value", 24)
self.vox_progress_bar.setObjectName("vox_progress_bar")
self.gridLayout_7.addWidget(self.vox_progress_bar, 11, 5, 1, 2)
self.line_8 = QtWidgets.QFrame(self.widget_2)
self.line_8.setFrameShape(QtWidgets.QFrame.HLine)
self.line_8.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_8.setObjectName("line_8")
self.gridLayout_7.addWidget(self.line_8, 12, 0, 2, 8)
self.label_49 = QtWidgets.QLabel(self.widget_2)
self.label_49.setObjectName("label_49")
self.gridLayout_7.addWidget(self.label_49, 13, 1, 2, 6)
spacerItem2 = QtWidgets.QSpacerItem(804, 48, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_7.addItem(spacerItem2, 14, 6, 2, 2)
self.label_50 = QtWidgets.QLabel(self.widget_2)
self.label_50.setObjectName("label_50")
self.gridLayout_7.addWidget(self.label_50, 15, 1, 1, 3)
self.pixel_size_spin = QtWidgets.QDoubleSpinBox(self.widget_2)
self.pixel_size_spin.setObjectName("pixel_size_spin")
self.gridLayout_7.addWidget(self.pixel_size_spin, 15, 4, 1, 2)
spacerItem3 = QtWidgets.QSpacerItem(1079, 267, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_7.addItem(spacerItem3, 16, 0, 1, 8)
self.gridLayout_2.addWidget(self.widget_2, 0, 0, 1, 1)
self.tabs.addTab(self.focal_grid_tab, "")
self.model_scale_tab = QtWidgets.QWidget()
self.model_scale_tab.setObjectName("model_scale_tab")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.model_scale_tab)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.widget_3 = QtWidgets.QWidget(self.model_scale_tab)
self.widget_3.setObjectName("widget_3")
self.gridLayout_3 = QtWidgets.QGridLayout(self.widget_3)
self.gridLayout_3.setObjectName("gridLayout_3")
self.rawFrameView = ScaleModelWidget(self.widget_3)
self.rawFrameView.setMinimumSize(QtCore.QSize(1091, 511))
self.rawFrameView.setObjectName("rawFrameView")
self.gridLayout_3.addWidget(self.rawFrameView, 0, 0, 1, 1)
self.widget_4 = QtWidgets.QWidget(self.widget_3)
self.widget_4.setMinimumSize(QtCore.QSize(1091, 0))
self.widget_4.setMaximumSize(QtCore.QSize(16777215, 101))
self.widget_4.setObjectName("widget_4")
self.gridLayout = QtWidgets.QGridLayout(self.widget_4)
self.gridLayout.setObjectName("gridLayout")
self.label_22 = QtWidgets.QLabel(self.widget_4)
self.label_22.setObjectName("label_22")
self.gridLayout.addWidget(self.label_22, 0, 0, 1, 1)
self.scaleTable = QtWidgets.QTableWidget(self.widget_4)
self.scaleTable.setMinimumSize(QtCore.QSize(411, 81))
self.scaleTable.setObjectName("scaleTable")
self.scaleTable.setColumnCount(0)
self.scaleTable.setRowCount(0)
self.gridLayout.addWidget(self.scaleTable, 0, 1, 4, 1)
spacerItem4 = QtWidgets.QSpacerItem(248, 78, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem4, 0, 2, 4, 1)
self.raw_mov_spin = QtWidgets.QSpinBox(self.widget_4)
self.raw_mov_spin.setObjectName("raw_mov_spin")
self.gridLayout.addWidget(self.raw_mov_spin, 1, 0, 1, 1)
self.load_scale_btn = QtWidgets.QPushButton(self.widget_4)
self.load_scale_btn.setObjectName("load_scale_btn")
self.gridLayout.addWidget(self.load_scale_btn, 1, 3, 2, 1)
self.save_scale_btn = QtWidgets.QPushButton(self.widget_4)
self.save_scale_btn.setObjectName("save_scale_btn")
self.gridLayout.addWidget(self.save_scale_btn, 1, 4, 2, 1)
self.raw_frame_spin = QtWidgets.QSpinBox(self.widget_4)
self.raw_frame_spin.setObjectName("raw_frame_spin")
self.gridLayout.addWidget(self.raw_frame_spin, 3, 0, 1, 1)
self.set_model_btn = QtWidgets.QPushButton(self.widget_4)
self.set_model_btn.setObjectName("set_model_btn")
self.gridLayout.addWidget(self.set_model_btn, 1, 5, 2, 1)
self.label_21 = QtWidgets.QLabel(self.widget_4)
self.label_21.setObjectName("label_21")
self.gridLayout.addWidget(self.label_21, 2, 0, 1, 1)
self.gridLayout_3.addWidget(self.widget_4, 1, 0, 1, 1)
self.verticalLayout_2.addWidget(self.widget_3)
self.tabs.addTab(self.model_scale_tab, "")
self.model_view_tab = QtWidgets.QWidget()
self.model_view_tab.setObjectName("model_view_tab")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.model_view_tab)
self.horizontalLayout.setObjectName("horizontalLayout")
self.model_param_disp = QtWidgets.QWidget(self.model_view_tab)
self.model_param_disp.setObjectName("model_param_disp")
self.gridLayout_4 = QtWidgets.QGridLayout(self.model_param_disp)
self.gridLayout_4.setObjectName("gridLayout_4")
self.label_23 = QtWidgets.QLabel(self.model_param_disp)
self.label_23.setMinimumSize(QtCore.QSize(114, 621))
self.label_23.setObjectName("label_23")
self.gridLayout_4.addWidget(self.label_23, 0, 0, 1, 1)
self.horizontalLayout.addWidget(self.model_param_disp)
self.model_view_window = ModelViewWidget(self.model_view_tab)
self.model_view_window.setMinimumSize(QtCore.QSize(971, 631))
self.model_view_window.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.model_view_window.setFrameShadow(QtWidgets.QFrame.Raised)
self.model_view_window.setObjectName("model_view_window")
self.horizontalLayout.addWidget(self.model_view_window)
self.tabs.addTab(self.model_view_tab, "")
self.segment_tab = QtWidgets.QWidget()
self.segment_tab.setObjectName("segment_tab")
self.verticalLayout = QtWidgets.QVBoxLayout(self.segment_tab)
self.verticalLayout.setObjectName("verticalLayout")
self.seg_view = ImageSegmentWidget(self.segment_tab)
self.seg_view.setMinimumSize(QtCore.QSize(1101, 481))
self.seg_view.setObjectName("seg_view")
self.verticalLayout.addWidget(self.seg_view)
self.seg_widget = QtWidgets.QWidget(self.segment_tab)
self.seg_widget.setMinimumSize(QtCore.QSize(1122, 90))
self.seg_widget.setMaximumSize(QtCore.QSize(16777215, 141))
self.seg_widget.setObjectName("seg_widget")
self.gridLayout_5 = QtWidgets.QGridLayout(self.seg_widget)
self.gridLayout_5.setObjectName("gridLayout_5")
self.label_40 = QtWidgets.QLabel(self.seg_widget)
self.label_40.setObjectName("label_40")
self.gridLayout_5.addWidget(self.label_40, 0, 0, 1, 1)
self.label_24 = QtWidgets.QLabel(self.seg_widget)
self.label_24.setObjectName("label_24")
self.gridLayout_5.addWidget(self.label_24, 0, 1, 1, 1)
self.label_26 = QtWidgets.QLabel(self.seg_widget)
self.label_26.setObjectName("label_26")
self.gridLayout_5.addWidget(self.label_26, 0, 2, 1, 1)
self.label_35 = QtWidgets.QLabel(self.seg_widget)
self.label_35.setObjectName("label_35")
self.gridLayout_5.addWidget(self.label_35, 0, 3, 1, 1)
self.label_36 = QtWidgets.QLabel(self.seg_widget)
self.label_36.setObjectName("label_36")
self.gridLayout_5.addWidget(self.label_36, 0, 4, 1, 1)
self.label_37 = QtWidgets.QLabel(self.seg_widget)
self.label_37.setObjectName("label_37")
self.gridLayout_5.addWidget(self.label_37, 0, 5, 1, 1)
self.label_38 = QtWidgets.QLabel(self.seg_widget)
self.label_38.setObjectName("label_38")
self.gridLayout_5.addWidget(self.label_38, 0, 6, 1, 1)
self.label_39 = QtWidgets.QLabel(self.seg_widget)
self.label_39.setObjectName("label_39")
self.gridLayout_5.addWidget(self.label_39, 0, 7, 1, 1)
self.line_10 = QtWidgets.QFrame(self.seg_widget)
self.line_10.setFrameShape(QtWidgets.QFrame.VLine)
self.line_10.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_10.setObjectName("line_10")
self.gridLayout_5.addWidget(self.line_10, 0, 8, 4, 1)
self.label_43 = QtWidgets.QLabel(self.seg_widget)
self.label_43.setObjectName("label_43")
self.gridLayout_5.addWidget(self.label_43, 0, 9, 1, 2)
self.line_11 = QtWidgets.QFrame(self.seg_widget)
self.line_11.setFrameShape(QtWidgets.QFrame.VLine)
self.line_11.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_11.setObjectName("line_11")
self.gridLayout_5.addWidget(self.line_11, 0, 12, 4, 1)
spacerItem5 = QtWidgets.QSpacerItem(176, 110, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_5.addItem(spacerItem5, 0, 13, 4, 1)
spacerItem6 = QtWidgets.QSpacerItem(88, 81, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_5.addItem(spacerItem6, 0, 14, 3, 1)
self.seg_mov_spin = QtWidgets.QSpinBox(self.seg_widget)
self.seg_mov_spin.setObjectName("seg_mov_spin")
self.gridLayout_5.addWidget(self.seg_mov_spin, 1, 0, 1, 1)
self.seg_frame_spin = QtWidgets.QSpinBox(self.seg_widget)
self.seg_frame_spin.setObjectName("seg_frame_spin")
self.gridLayout_5.addWidget(self.seg_frame_spin, 1, 1, 1, 1)
self.body_thresh_spin = QtWidgets.QSpinBox(self.seg_widget)
self.body_thresh_spin.setObjectName("body_thresh_spin")
self.gridLayout_5.addWidget(self.body_thresh_spin, 1, 2, 1, 1)
self.wing_thresh_spin = QtWidgets.QSpinBox(self.seg_widget)
self.wing_thresh_spin.setObjectName("wing_thresh_spin")
self.gridLayout_5.addWidget(self.wing_thresh_spin, 1, 3, 1, 1)
self.sigma_spin = QtWidgets.QDoubleSpinBox(self.seg_widget)
self.sigma_spin.setObjectName("sigma_spin")
self.gridLayout_5.addWidget(self.sigma_spin, 1, 4, 1, 1)
self.K_spin = QtWidgets.QSpinBox(self.seg_widget)
self.K_spin.setObjectName("K_spin")
self.gridLayout_5.addWidget(self.K_spin, 1, 5, 1, 1)
self.min_body_spin = QtWidgets.QSpinBox(self.seg_widget)
self.min_body_spin.setObjectName("min_body_spin")
self.gridLayout_5.addWidget(self.min_body_spin, 1, 6, 1, 1)
self.min_wing_spin = QtWidgets.QSpinBox(self.seg_widget)
self.min_wing_spin.setObjectName("min_wing_spin")
self.gridLayout_5.addWidget(self.min_wing_spin, 1, 7, 1, 1)
self.label_44 = QtWidgets.QLabel(self.seg_widget)
self.label_44.setObjectName("label_44")
self.gridLayout_5.addWidget(self.label_44, 1, 9, 1, 1)
self.mask_cam_nr_spin = QtWidgets.QSpinBox(self.seg_widget)
self.mask_cam_nr_spin.setObjectName("mask_cam_nr_spin")
self.gridLayout_5.addWidget(self.mask_cam_nr_spin, 1, 10, 1, 2)
self.label_45 = QtWidgets.QLabel(self.seg_widget)
self.label_45.setObjectName("label_45")
self.gridLayout_5.addWidget(self.label_45, 2, 9, 1, 1)
self.mask_seg_nr_spin = QtWidgets.QSpinBox(self.seg_widget)
self.mask_seg_nr_spin.setObjectName("mask_seg_nr_spin")
self.gridLayout_5.addWidget(self.mask_seg_nr_spin, 2, 10, 1, 2)
self.seg_update_btn = QtWidgets.QPushButton(self.seg_widget)
self.seg_update_btn.setObjectName("seg_update_btn")
self.gridLayout_5.addWidget(self.seg_update_btn, 3, 0, 1, 2)
self.add_mask_btn = QtWidgets.QPushButton(self.seg_widget)
self.add_mask_btn.setObjectName("add_mask_btn")
self.gridLayout_5.addWidget(self.add_mask_btn, 3, 9, 1, 1)
self.reset_mask_btn = QtWidgets.QPushButton(self.seg_widget)
self.reset_mask_btn.setObjectName("reset_mask_btn")
self.gridLayout_5.addWidget(self.reset_mask_btn, 3, 11, 1, 1)
self.continue_btn = QtWidgets.QPushButton(self.seg_widget)
self.continue_btn.setObjectName("continue_btn")
self.gridLayout_5.addWidget(self.continue_btn, 3, 14, 1, 1)
self.verticalLayout.addWidget(self.seg_widget)
self.tabs.addTab(self.segment_tab, "")
self.pcl_view_tab = QtWidgets.QWidget()
self.pcl_view_tab.setObjectName("pcl_view_tab")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.pcl_view_tab)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.pcl_view = BBoxWidget(self.pcl_view_tab)
self.pcl_view.setMinimumSize(QtCore.QSize(1121, 521))
self.pcl_view.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.pcl_view.setFrameShadow(QtWidgets.QFrame.Raised)
self.pcl_view.setObjectName("pcl_view")
self.verticalLayout_6.addWidget(self.pcl_view)
self.widget_5 = QtWidgets.QWidget(self.pcl_view_tab)
self.widget_5.setMinimumSize(QtCore.QSize(1101, 111))
self.widget_5.setObjectName("widget_5")
self.gridLayout_8 = QtWidgets.QGridLayout(self.widget_5)
self.gridLayout_8.setObjectName("gridLayout_8")
self.label_41 = QtWidgets.QLabel(self.widget_5)
self.label_41.setObjectName("label_41")
self.gridLayout_8.addWidget(self.label_41, 0, 0, 1, 1)
self.flight_select_btn_group = QtWidgets.QGroupBox(self.widget_5)
self.flight_select_btn_group.setObjectName("flight_select_btn_group")
self.gridLayout_6 = QtWidgets.QGridLayout(self.flight_select_btn_group)
self.gridLayout_6.setObjectName("gridLayout_6")
self.tethered_radio_btn = QtWidgets.QRadioButton(self.flight_select_btn_group)
self.tethered_radio_btn.setObjectName("tethered_radio_btn")
self.gridLayout_6.addWidget(self.tethered_radio_btn, 0, 0, 1, 1)
self.free_radio_btn = QtWidgets.QRadioButton(self.flight_select_btn_group)
self.free_radio_btn.setObjectName("free_radio_btn")
self.gridLayout_6.addWidget(self.free_radio_btn, 1, 0, 1, 1)
self.gridLayout_8.addWidget(self.flight_select_btn_group, 0, 1, 4, 1)
self.label_47 = QtWidgets.QLabel(self.widget_5)
self.label_47.setObjectName("label_47")
self.gridLayout_8.addWidget(self.label_47, 0, 2, 1, 1)
self.label_51 = QtWidgets.QLabel(self.widget_5)
self.label_51.setObjectName("label_51")
self.gridLayout_8.addWidget(self.label_51, 0, 3, 1, 1)
spacerItem7 = QtWidgets.QSpacerItem(456, 90, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_8.addItem(spacerItem7, 0, 4, 4, 1)
self.view_select_group = QtWidgets.QGroupBox(self.widget_5)
self.view_select_group.setObjectName("view_select_group")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.view_select_group)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.pcl_view_btn = QtWidgets.QRadioButton(self.view_select_group)
self.pcl_view_btn.setObjectName("pcl_view_btn")
self.verticalLayout_3.addWidget(self.pcl_view_btn)
self.bbox_view_btn = QtWidgets.QRadioButton(self.view_select_group)
self.bbox_view_btn.setObjectName("bbox_view_btn")
self.verticalLayout_3.addWidget(self.bbox_view_btn)
self.model_view_btn = QtWidgets.QRadioButton(self.view_select_group)
self.model_view_btn.setObjectName("model_view_btn")
self.verticalLayout_3.addWidget(self.model_view_btn)
self.gridLayout_8.addWidget(self.view_select_group, 0, 5, 4, 1)
self.pcl_mov_spin = QtWidgets.QSpinBox(self.widget_5)
self.pcl_mov_spin.setObjectName("pcl_mov_spin")
self.gridLayout_8.addWidget(self.pcl_mov_spin, 1, 0, 1, 1)
self.stroke_bound_spin = QtWidgets.QSpinBox(self.widget_5)
self.stroke_bound_spin.setObjectName("stroke_bound_spin")
self.gridLayout_8.addWidget(self.stroke_bound_spin, 1, 2, 1, 1)
self.wing_pitch_bound_spin = QtWidgets.QSpinBox(self.widget_5)
self.wing_pitch_bound_spin.setObjectName("wing_pitch_bound_spin")
self.gridLayout_8.addWidget(self.wing_pitch_bound_spin, 1, 3, 1, 1)
self.label_42 = QtWidgets.QLabel(self.widget_5)
self.label_42.setObjectName("label_42")
self.gridLayout_8.addWidget(self.label_42, 2, 0, 1, 1)
self.label_48 = QtWidgets.QLabel(self.widget_5)
self.label_48.setObjectName("label_48")
self.gridLayout_8.addWidget(self.label_48, 2, 2, 1, 1)
self.label_46 = QtWidgets.QLabel(self.widget_5)
self.label_46.setObjectName("label_46")
self.gridLayout_8.addWidget(self.label_46, 2, 3, 1, 1)
self.pcl_frame_spin = QtWidgets.QSpinBox(self.widget_5)
self.pcl_frame_spin.setObjectName("pcl_frame_spin")
self.gridLayout_8.addWidget(self.pcl_frame_spin, 3, 0, 1, 1)
self.dev_bound_spin = QtWidgets.QSpinBox(self.widget_5)
self.dev_bound_spin.setObjectName("dev_bound_spin")
self.gridLayout_8.addWidget(self.dev_bound_spin, 3, 2, 1, 1)
self.sphere_radius_spin = QtWidgets.QDoubleSpinBox(self.widget_5)
self.sphere_radius_spin.setObjectName("sphere_radius_spin")
self.gridLayout_8.addWidget(self.sphere_radius_spin, 3, 3, 1, 1)
self.verticalLayout_6.addWidget(self.widget_5)
self.tabs.addTab(self.pcl_view_tab, "")
self.opt_tab = QtWidgets.QWidget()
self.opt_tab.setObjectName("opt_tab")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.opt_tab)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.opt_widget = QtWidgets.QWidget(self.opt_tab)
self.opt_widget.setObjectName("opt_widget")
self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.opt_widget)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.contour_view = ContourViewWidget(self.opt_widget)
self.contour_view.setObjectName("contour_view")
self.verticalLayout_8.addWidget(self.contour_view)
self.opt_settings_widget = QtWidgets.QWidget(self.opt_widget)
self.opt_settings_widget.setMinimumSize(QtCore.QSize(0, 120))
self.opt_settings_widget.setObjectName("opt_settings_widget")
self.gridLayout_9 = QtWidgets.QGridLayout(self.opt_settings_widget)
self.gridLayout_9.setObjectName("gridLayout_9")
self.label_52 = QtWidgets.QLabel(self.opt_settings_widget)
self.label_52.setObjectName("label_52")
self.gridLayout_9.addWidget(self.label_52, 0, 0, 1, 1)
self.label_54 = QtWidgets.QLabel(self.opt_settings_widget)
self.label_54.setObjectName("label_54")
self.gridLayout_9.addWidget(self.label_54, 0, 1, 1, 1)
spacerItem8 = QtWidgets.QSpacerItem(849, 78, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_9.addItem(spacerItem8, 0, 2, 3, 1)
self.init_view_check = QtWidgets.QCheckBox(self.opt_settings_widget)
self.init_view_check.setObjectName("init_view_check")
self.gridLayout_9.addWidget(self.init_view_check, 0, 3, 1, 1)
self.opt_mov_spin = QtWidgets.QSpinBox(self.opt_settings_widget)
self.opt_mov_spin.setObjectName("opt_mov_spin")
self.gridLayout_9.addWidget(self.opt_mov_spin, 1, 0, 1, 1)
self.alpha_spin = QtWidgets.QDoubleSpinBox(self.opt_settings_widget)
self.alpha_spin.setObjectName("alpha_spin")
self.gridLayout_9.addWidget(self.alpha_spin, 1, 1, 1, 1)
self.dest_view_check = QtWidgets.QCheckBox(self.opt_settings_widget)
self.dest_view_check.setObjectName("dest_view_check")
self.gridLayout_9.addWidget(self.dest_view_check, 1, 3, 2, 1)
self.label_53 = QtWidgets.QLabel(self.opt_settings_widget)
self.label_53.setObjectName("label_53")
self.gridLayout_9.addWidget(self.label_53, 2, 0, 1, 1)
self.opt_frame_spin = QtWidgets.QSpinBox(self.opt_settings_widget)
self.opt_frame_spin.setObjectName("opt_frame_spin")
self.gridLayout_9.addWidget(self.opt_frame_spin, 3, 0, 1, 1)
self.src_view_check = QtWidgets.QCheckBox(self.opt_settings_widget)
self.src_view_check.setObjectName("src_view_check")
self.gridLayout_9.addWidget(self.src_view_check, 3, 3, 1, 1)
self.verticalLayout_8.addWidget(self.opt_settings_widget)
self.verticalLayout_7.addWidget(self.opt_widget)
self.tabs.addTab(self.opt_tab, "")
self.verticalLayout_5.addWidget(self.tabs)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.tabs.setCurrentIndex(6)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "DipteraTrack"))
self.label.setText(_translate("MainWindow", "Select session folder:"))
self.label_3.setText(_translate("MainWindow", "Session parameters"))
self.label_2.setText(_translate("MainWindow", "Session folder:"))
self.ses_folder_label.setText(_translate("MainWindow", "..."))
self.label_5.setText(_translate("MainWindow", "Background folder:"))
self.bckg_folder_label.setText(_translate("MainWindow", "..."))
self.label_7.setText(_translate("MainWindow", "Calibration folder:"))
self.cal_folder_label.setText(_translate("MainWindow", "..."))
self.label_9.setText(_translate("MainWindow", "Movie folders:"))
self.mov_folder1_label.setText(_translate("MainWindow", "..."))
self.mov_folder2_label.setText(_translate("MainWindow", "..."))
self.mov_folder3_label.setText(_translate("MainWindow", "..."))
self.mov_folder4_label.setText(_translate("MainWindow", "..."))
self.mov_folder5_label.setText(_translate("MainWindow", "..."))
self.mov_folder6_label.setText(_translate("MainWindow", "..."))
self.mov_folder7_label.setText(_translate("MainWindow", "..."))
self.mov_folder8_label.setText(_translate("MainWindow", "..."))
self.label_18.setText(_translate("MainWindow", "Camera folders:"))
self.cam_folder1_label.setText(_translate("MainWindow", "..."))
self.cam_folder2_label.setText(_translate("MainWindow", "..."))
self.cam_folder3_label.setText(_translate("MainWindow", "..."))
self.cam_folder4_label.setText(_translate("MainWindow", "..."))
self.ses_folder_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.bckg_folder_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.cal_folder_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.mov_folder1_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.mov_folder2_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.mov_folder3_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.mov_folder4_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.mov_folder5_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.mov_folder6_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.mov_folder7_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.mov_folder8_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.cam_folder1_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.cam_folder2_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.cam_folder3_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.cam_folder4_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.cam_folder5_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.cam_folder6_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.cam_folder5_label.setText(_translate("MainWindow", "..."))
self.cam_folder6_label.setText(_translate("MainWindow", "..."))
self.label_25.setText(_translate("MainWindow", "Frame name:"))
self.frame_name_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.frame_name_label.setText(_translate("MainWindow", "..."))
self.label_27.setText(_translate("MainWindow", "Background image format:"))
self.label_28.setText(_translate("MainWindow", "Calibration image format:"))
self.label_29.setText(_translate("MainWindow", "Frame image format:"))
self.label_30.setText(_translate("MainWindow", "Trigger settings"))
self.label_31.setText(_translate("MainWindow", "start frame nr:"))
self.label_32.setText(_translate("MainWindow", "trigger frame nr:"))
self.label_33.setText(_translate("MainWindow", "end frame nr:"))
self.label_34.setText(_translate("MainWindow", "Trigger mode:"))
self.label_4.setText(_translate("MainWindow", "Model settings"))
self.mdl_loc_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.mdl_loc_label.setText(_translate("MainWindow", "..."))
self.label_10.setText(_translate("MainWindow", "Model location:"))
self.label_11.setText(_translate("MainWindow", "Model name:"))
self.mdl_name_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.mdl_name_label.setText(_translate("MainWindow", "..."))
self.label_6.setText(_translate("MainWindow", "Calibration file:"))
self.cal_file_label.setText(_translate("MainWindow", "..."))
self.label_8.setText(_translate("MainWindow", "Session name:"))
self.ses_name_label.setText(_translate("MainWindow", "..."))
self.reset_selection_push_btn.setText(_translate("MainWindow", "reset selection"))
self.start_session_push_btn.setText(_translate("MainWindow", "start session"))
self.save_settings_push_btn.setText(_translate("MainWindow", "save parameter file"))
self.load_settings_file_label.setText(_translate("MainWindow", "..."))
self.load_settings_push_btn.setText(_translate("MainWindow", "load parameter file"))
self.load_settings_rbtn.setText(_translate("MainWindow", "RadioButton"))
self.tabs.setTabText(self.tabs.indexOf(self.ses_par_tab), _translate("MainWindow", "Movie selection"))
self.label_16.setText(_translate("MainWindow", "Voxel grid parameters:"))
self.label_12.setText(_translate("MainWindow", "Nx:"))
self.label_13.setText(_translate("MainWindow", "Ny:"))
self.label_14.setText(_translate("MainWindow", "Nz:"))
self.label_15.setText(_translate("MainWindow", "ds:"))
self.label_17.setText(_translate("MainWindow", "x0:"))
self.label_19.setText(_translate("MainWindow", "y0:"))
self.label_20.setText(_translate("MainWindow", "z0:"))
self.calc_vox_btn.setText(_translate("MainWindow", "calculate voxel grid"))
self.label_49.setText(_translate("MainWindow", "Camera parameters:"))
self.label_50.setText(_translate("MainWindow", "pixel size (mm):"))
self.tabs.setTabText(self.tabs.indexOf(self.focal_grid_tab), _translate("MainWindow", "Voxel grid"))
self.label_22.setText(_translate("MainWindow", "Movie nr:"))
self.load_scale_btn.setText(_translate("MainWindow", "load model scale"))
self.save_scale_btn.setText(_translate("MainWindow", "save model scale"))
self.set_model_btn.setText(_translate("MainWindow", "set model scale"))
self.label_21.setText(_translate("MainWindow", "Frame:"))
self.tabs.setTabText(self.tabs.indexOf(self.model_scale_tab), _translate("MainWindow", "Scale model"))
self.label_23.setText(_translate("MainWindow", "Model parameters:"))
self.tabs.setTabText(self.tabs.indexOf(self.model_view_tab), _translate("MainWindow", "Model view"))
self.label_40.setText(_translate("MainWindow", "movie nr:"))
self.label_24.setText(_translate("MainWindow", "frame:"))
self.label_26.setText(_translate("MainWindow", "body threshold"))
self.label_35.setText(_translate("MainWindow", "wing threshold"))
self.label_36.setText(_translate("MainWindow", "sigma"))
self.label_37.setText(_translate("MainWindow", "K"))
self.label_38.setText(_translate("MainWindow", "min body area"))
self.label_39.setText(_translate("MainWindow", "min wing area"))
self.label_43.setText(_translate("MainWindow", "Set image mask:"))
self.label_44.setText(_translate("MainWindow", "cam nr:"))
self.label_45.setText(_translate("MainWindow", "segment nr:"))
self.seg_update_btn.setText(_translate("MainWindow", "update"))
self.add_mask_btn.setText(_translate("MainWindow", "add to mask"))
self.reset_mask_btn.setText(_translate("MainWindow", "reset"))
self.continue_btn.setText(_translate("MainWindow", "continue"))
self.tabs.setTabText(self.tabs.indexOf(self.segment_tab), _translate("MainWindow", "Segmentation"))
self.label_41.setText(_translate("MainWindow", "movie nr:"))
self.tethered_radio_btn.setText(_translate("MainWindow", "tethered flight"))
self.free_radio_btn.setText(_translate("MainWindow", "free flight"))
self.label_47.setText(_translate("MainWindow", "stroke angle bound:"))
self.label_51.setText(_translate("MainWindow", "wing pitch angle bound:"))
self.pcl_view_btn.setText(_translate("MainWindow", "pcl view"))
self.bbox_view_btn.setText(_translate("MainWindow", "bbox view"))
self.model_view_btn.setText(_translate("MainWindow", "model view"))
self.label_42.setText(_translate("MainWindow", "frame nr:"))
self.label_48.setText(_translate("MainWindow", "deviation angle bound:"))
self.label_46.setText(_translate("MainWindow", "sphere radius:"))
self.tabs.setTabText(self.tabs.indexOf(self.pcl_view_tab), _translate("MainWindow", "Pointcloud view"))
self.label_52.setText(_translate("MainWindow", "movie nr:"))
self.label_54.setText(_translate("MainWindow", "alpha:"))
self.init_view_check.setText(_translate("MainWindow", "initial state"))
self.dest_view_check.setText(_translate("MainWindow", "destination contour"))
self.label_53.setText(_translate("MainWindow", "frame nr:"))
self.src_view_check.setText(_translate("MainWindow", "source contour"))
self.tabs.setTabText(self.tabs.indexOf(self.opt_tab), _translate("MainWindow", "Contour optimization"))
from BoundingBoxWidget import BBoxWidget
from ContourViewWidget import ContourViewWidget
from ImageSegmentWidget import ImageSegmentWidget
from ModelViewWidget import ModelViewWidget
from ScaleModelWidget import ScaleModelWidget
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
``` |
{
"source": "jmmelis/FlyMatch",
"score": 2
} |
#### File: jmmelis/FlyMatch/FlyMatch.py
```python
from __future__ import print_function
import sys
#import vtk
from PyQt5 import Qt
from PyQt5.QtCore import QObject, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QAction, QTreeView, QFileSystemModel, QTableWidget, QTableWidgetItem, QVBoxLayout, QFileDialog
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import math
#import pickle
import os
import os.path
import math
import copy
import time
#from vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from session_select import Ui_Session_Dialog
from fly_match_ui import Ui_MainWindow
class CheckableDirModel(QtGui.QDirModel):
def __init__(self, parent=None):
QtGui.QDirModel.__init__(self, None)
self.checks = {}
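        # Maps QModelIndex -> Qt.CheckState; QDirModel has no checkbox column of its
        # own, so the check state is tracked here and served through data()/setData().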
def data(self, index, role=QtCore.Qt.DisplayRole):
if role != QtCore.Qt.CheckStateRole:
return QtGui.QDirModel.data(self, index, role)
else:
if index.column() == 0:
return self.checkState(index)
def flags(self, index):
return QtGui.QDirModel.flags(self, index) | QtCore.Qt.ItemIsUserCheckable
def checkState(self, index):
if index in self.checks:
return self.checks[index]
else:
return QtCore.Qt.Unchecked
def setData(self, index, value, role):
if (role == QtCore.Qt.CheckStateRole and index.column() == 0):
self.checks[index] = value
            self.dataChanged.emit(index, index)  # PyQt5-style emit; old-style SIGNAL() emits were removed in PyQt5
return True
return QtGui.QDirModel.setData(self, index, value, role)
# QDialog clasess:
class SelectFolderWindow(QtGui.QDialog, Ui_Session_Dialog):
def __init__(self, directory, parent=None):
super(SelectFolderWindow,self).__init__(parent)
self.setupUi(self)
self.folder_name = None
self.folder_path = None
self.file_model = QFileSystemModel()
self.directory = directory
self.file_model.setRootPath(directory)
self.folder_tree.setModel(self.file_model)
self.folder_tree.setRootIndex(self.file_model.index(self.directory));
self.folder_tree.clicked.connect(self.set_session_folder)
def update_file_model(self,new_dir):
self.directory = new_dir
self.file_model.setRootPath(new_dir)
def set_session_folder(self, index):
indexItem = self.file_model.index(index.row(), 0, index.parent())
self.folder_name = self.file_model.fileName(indexItem)
self.folder_path = self.file_model.filePath(indexItem)
self.selected_session.setText(self.folder_path)
# Main GUI class:
class FlyMatch(QtWidgets.QMainWindow, Ui_MainWindow, QObject):
def __init__(self, parent=None):
super(FlyMatch,self).__init__(parent)
self.setupUi(self)
self.N_mov = 1
self.N_cam = 3
self.start_frame = 0
self.end_frame = 16375
self.session_path = ''
self.session_folder = ''
self.mov_folder = ''
self.output_path = ''
self.output_folder = ''
self.select_seq_window = SelectFolderWindow('/media/flyami/flyami_hdd_1')
self.select_seq_window.setWindowTitle("Select sequence folder")
self.select_mov_window = SelectFolderWindow('/media/flyami/flyami_hdd_1')
self.select_mov_window.setWindowTitle("Select movie folder")
self.select_movie()
self.select_frame()
def select_movie(self):
# Select session folder
self.seq_select_btn.clicked.connect(self.select_seq_callback)
# Select movie folder
self.mov_select_btn.clicked.connect(self.select_mov_callback)
def select_frame(self):
self.load_frames_btn.clicked.connect(self.load_frames)
def load_frames(self):
self.model_fit()
def select_seq_callback(self):
self.select_seq_window.exec_()
self.session_path = str(self.select_seq_window.folder_path)
print(self.session_path)
self.session_folder = str(self.select_seq_window.folder_name)
self.seq_disp.setText(self.session_folder)
self.select_mov_window.update_file_model(self.session_path)
#self.select_out_window.update_file_model(self.session_path)
def select_mov_callback(self):
self.select_mov_window.exec_()
self.mov_folder = str(self.select_mov_window.folder_name)
print(self.mov_folder)
self.mov_disp.setText(self.mov_folder)
def model_fit(self):
self.frame_viewer.set_session_folder(self.session_path)
self.frame_viewer.set_mov_folder(self.mov_folder)
# Set calibration folder:
self.cal_file = self.session_path + '/calibration/cam_calib.txt'
self.frame_viewer.set_calibration_loc(self.cal_file,0.040,0.5,175.0)
# Create manual tracking directory:
#self.frame_viewer.create_manual_track_dir()
# Load model:
#self.frame_viewer.load_model(1)
# Frame spin:
self.frame_spin.setMinimum(self.start_frame)
self.frame_spin.setMaximum(self.end_frame)
self.frame_spin.setValue(self.start_frame)
self.frame_viewer.add_frame(self.start_frame)
self.frame_spin.valueChanged.connect(self.frame_viewer.update_frame)
# Add state displays:
self.frame_viewer.set_display_state_L(self.state_L_table)
self.frame_viewer.set_display_state_R(self.state_R_table)
# Add graphs
self.frame_viewer.add_graphs()
self.frame_viewer.setMouseCallbacks()
# uv_correction:
self.frame_viewer.set_u_cam_1_spin(self.u_cam1_spin)
self.frame_viewer.set_v_cam_1_spin(self.v_cam1_spin)
self.frame_viewer.set_u_cam_2_spin(self.u_cam2_spin)
self.frame_viewer.set_v_cam_2_spin(self.v_cam2_spin)
self.frame_viewer.set_u_cam_3_spin(self.u_cam3_spin)
self.frame_viewer.set_v_cam_3_spin(self.v_cam3_spin)
self.save_uv_shift_btn.clicked.connect(self.frame_viewer.save_uv_shift)
#
self.frame_viewer.set_scale_L_spin(self.L_scale_spin)
self.frame_viewer.set_ry_L_spin(self.ry_L_spin)
self.frame_viewer.set_b1_L_spin(self.b1_L_spin)
self.reset_L_btn.clicked.connect(self.frame_viewer.reset_L)
#
self.frame_viewer.set_scale_R_spin(self.R_scale_spin)
self.frame_viewer.set_ry_R_spin(self.ry_R_spin)
self.frame_viewer.set_b1_R_spin(self.b1_R_spin)
self.reset_R_btn.clicked.connect(self.frame_viewer.reset_R)
# Save label:
self.save_lbl_btn.clicked.connect(self.frame_viewer.save_frame)
# -------------------------------------------------------------------------------------------------
def appMain():
app = QtWidgets.QApplication(sys.argv)
mainWindow = FlyMatch()
mainWindow.show()
app.exec_()
# -------------------------------------------------------------------------------------------------
if __name__ == '__main__':
appMain()
``` |
{
"source": "jmmelis/FlyTrackApp",
"score": 2
} |
#### File: FlyTrackApp/FlyTrackApp/flight_tracker_vis_class.py
```python
import vtk
import sys
import os
import time
import numpy as np
# flight tracker visualization class
class FlightTrackerVisualization:
def __init__(self):
# window parameters
self.window_name = "Model"
self.background = (0.1,0.2,0.4)
self.window_sz = (600, 600)
# stl model parameters
self.model_name = ""
self.stl_list = []
self.model_loc = ""
self.stl_src = []
self.stl_actors = []
# point parameters
self.pointcloud_list = []
# Create the Renderer, RenderWindow, and RenderWindowInteractor
self.ren = vtk.vtkRenderer()
self.ren_win = vtk.vtkRenderWindow()
self.ren_win.AddRenderer(self.ren)
self.iren = vtk.vtkRenderWindowInteractor()
self.iren.SetRenderWindow(self.ren_win)
# Set the background color and window size
self.ren_win.SetWindowName(self.window_name)
self.ren.SetBackground(*self.background)
self.ren_win.SetSize(*self.window_sz)
# Render
self.iren.Initialize()
self.ren.ResetCameraClippingRange()
self.ren_win.Render()
def load_model(self,model_name,model_loc,stl_list):
self.model_name = model_name
self.stl_list = stl_list
self.model_loc = model_loc + '/' + model_name
self.ren_win.SetWindowName(model_name)
os.chdir(self.model_loc)
for stl_file in stl_list:
sr = vtk.vtkSTLReader()
sr.SetFileName(stl_file)
self.stl_src.append(sr)
stl_mapper = vtk.vtkPolyDataMapper()
stl_mapper.ScalarVisibilityOff()
stl_mapper.SetInputConnection(sr.GetOutputPort())
stl_actor = vtk.vtkActor()
stl_actor.SetMapper(stl_mapper)
self.stl_actors.append(stl_actor)
stl_props = stl_actor.GetProperty()
stl_actor.SetPosition(0,0,0)
stl_props.SetInterpolationToGouraud()
stl_mapper.Update()
self.ren.AddActor(stl_actor)
self.ren_win.Render()
def set_state_model(self,state,parents,scale):
for i in range(state.shape[1]):
old_val = -1
j = 0
transformation = vtk.vtkTransform()
while parents[i,j] > old_val:
ind = parents[i,j]
elem_mat = vtk.vtkMatrix4x4()
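                # The upper-left 3x3 block is the rotation matrix built from the
                # quaternion (w, x, y, z) in state[0:4, ind]; the last column is the
                # translation state[4:7, ind] scaled by that segment's scale factor.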
elem_mat.SetElement(0,0,(2.0*state[0,ind]**2-1.0+2.0*state[1,ind]**2))
elem_mat.SetElement(0,1,(2.0*state[1,ind]*state[2,ind]+2.0*state[0,ind]*state[3,ind]))
elem_mat.SetElement(0,2,(2.0*state[1,ind]*state[3,ind]-2.0*state[0,ind]*state[2,ind]))
elem_mat.SetElement(1,0,(2.0*state[1,ind]*state[2,ind]-2.0*state[0,ind]*state[3,ind]))
elem_mat.SetElement(1,1,(2.0*state[0,ind]**2-1.0+2.0*state[2,ind]**2))
elem_mat.SetElement(1,2,(2.0*state[2,ind]*state[3,ind]+2.0*state[0,ind]*state[1,ind]))
elem_mat.SetElement(2,0,(2.0*state[1,ind]*state[3,ind]+2.0*state[0,ind]*state[2,ind]))
elem_mat.SetElement(2,1,(2.0*state[2,ind]*state[3,ind]-2.0*state[0,ind]*state[1,ind]))
elem_mat.SetElement(2,2,(2.0*state[0,ind]**2-1.0+2.0*state[3,ind]**2))
elem_mat.SetElement(0,3,state[4,ind]*scale[ind])
elem_mat.SetElement(1,3,state[5,ind]*scale[ind])
elem_mat.SetElement(2,3,state[6,ind]*scale[ind])
transformation.Concatenate(elem_mat)
old_val = parents[i,j]
j+=1
self.stl_actors[i].SetUserMatrix(transformation.GetMatrix())
self.ren_win.Render()
def add_pointcloud(self,pcl_in):
N = pcl_in.shape[1]
#points = vtk.vtkPointSource()
points = vtk.vtkPoints()
points.SetNumberOfPoints(N)
polydata = vtk.vtkPolyData()
for i in range(N):
            # Fill the pre-allocated slots; InsertNextPoint would append after them
            # and leave the first N points uninitialized.
            points.SetPoint(i, pcl_in[:, i])
#points.SetRadius(0.005)
polydata.SetPoints(points)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(polydata)
#mapper.SetInputData(points)
#mapper.SetInputConnection(points.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
self.ren.AddActor(actor)
#for i in range(N_points):
# #point = vtk.vtkSphereSource()
# #point.SetCenter(pcl_in[:,i])
# #point.SetRadius(0.005)
# point = vtk.vtkPointSource()
# point.SetCenter(pcl_in[:,i])
# point.SetNumberOfPoints(1);
# mapper = vtk.vtkPolyDataMapper()
# mapper.ScalarVisibilityOff()
# mapper.SetInputConnection(point.GetOutputPort())
# actor = vtk.vtkActor()
# actor.SetMapper(mapper)
# props = actor.GetProperty()
# self.ren.AddActor(actor)
def start_interaction_window(self):
self.ren_win.Render()
self.iren.Start()
def kill_interaction_window(self):
del self.ren_win, self.iren
def load_pointcloud(self,pointCloud,pcl_in):
for k in range(pcl_in.shape[1]):
point = pcl_in[:,k]
pointCloud.addPoint(point)
return pointCloud
def show_pointcloud(self,pcl_in):
pointCloud = self.VtkPointCloud(np.amax(pcl_in[3,:]))
pointCloud = self.load_pointcloud(pointCloud,pcl_in)
self.ren.AddActor(pointCloud.vtkActor)
def show_bboxes(self,corner_points):
N_boxes = corner_points.shape[1]
for i in range(N_boxes):
corner_mat = np.empty([8,3])
for j in range(8):
corner_mat[j,0] = corner_points[j*3,i]
corner_mat[j,1] = corner_points[j*3+1,i]
corner_mat[j,2] = corner_points[j*3+2,i]
box = self.BoundingBox()
box.addBox(corner_mat)
self.ren.AddActor(box.vtkActor)
class VtkPointCloud:
def __init__(self,scalar_range):
self.vtkPolyData = vtk.vtkPolyData()
self.clearPoints()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(self.vtkPolyData)
mapper.SetColorModeToDefault()
mapper.SetScalarRange(0.0,scalar_range)
mapper.SetScalarVisibility(1)
self.vtkActor = vtk.vtkActor()
self.vtkActor.SetMapper(mapper)
def addPoint(self,point):
pointID = self.vtkPoints.InsertNextPoint(point[0:3])
self.vtkDepth.InsertNextValue(point[3])
self.vtkCells.InsertNextCell(1)
self.vtkCells.InsertCellPoint(pointID)
self.vtkCells.Modified()
self.vtkPoints.Modified()
self.vtkDepth.Modified()
def clearPoints(self):
self.vtkPoints = vtk.vtkPoints()
self.vtkCells = vtk.vtkCellArray()
self.vtkDepth = vtk.vtkDoubleArray()
self.vtkDepth.SetName('DepthArray')
self.vtkPolyData.SetPoints(self.vtkPoints)
self.vtkPolyData.SetVerts(self.vtkCells)
self.vtkPolyData.GetPointData().SetScalars(self.vtkDepth)
self.vtkPolyData.GetPointData().SetActiveScalars('DepthArray')
class BoundingBox:
def __init__(self):
self.mapper = vtk.vtkPolyDataMapper()
self.vtkActor = vtk.vtkActor()
self.vtkActor.SetMapper(self.mapper)
def addBox(self,corner_points):
# Add a bounding box
points = vtk.vtkPoints()
points.SetNumberOfPoints(8)
points.SetPoint(0,corner_points[0,0],corner_points[0,1],corner_points[0,2])
points.SetPoint(1,corner_points[1,0],corner_points[1,1],corner_points[1,2])
points.SetPoint(2,corner_points[2,0],corner_points[2,1],corner_points[2,2])
points.SetPoint(3,corner_points[3,0],corner_points[3,1],corner_points[3,2])
points.SetPoint(4,corner_points[4,0],corner_points[4,1],corner_points[4,2])
points.SetPoint(5,corner_points[5,0],corner_points[5,1],corner_points[5,2])
points.SetPoint(6,corner_points[6,0],corner_points[6,1],corner_points[6,2])
points.SetPoint(7,corner_points[7,0],corner_points[7,1],corner_points[7,2])
lines = vtk.vtkCellArray()
lines.InsertNextCell(5)
lines.InsertCellPoint(0)
lines.InsertCellPoint(1)
lines.InsertCellPoint(2)
lines.InsertCellPoint(3)
lines.InsertCellPoint(0)
lines.InsertNextCell(5)
lines.InsertCellPoint(4)
lines.InsertCellPoint(5)
lines.InsertCellPoint(6)
lines.InsertCellPoint(7)
lines.InsertCellPoint(4)
lines.InsertNextCell(5)
lines.InsertCellPoint(0)
lines.InsertCellPoint(4)
lines.InsertCellPoint(7)
lines.InsertCellPoint(3)
lines.InsertCellPoint(0)
lines.InsertNextCell(5)
lines.InsertCellPoint(1)
lines.InsertCellPoint(5)
lines.InsertCellPoint(6)
lines.InsertCellPoint(2)
lines.InsertCellPoint(1)
lines.InsertNextCell(5)
lines.InsertCellPoint(0)
lines.InsertCellPoint(1)
lines.InsertCellPoint(5)
lines.InsertCellPoint(4)
lines.InsertCellPoint(0)
lines.InsertNextCell(5)
lines.InsertCellPoint(3)
lines.InsertCellPoint(2)
lines.InsertCellPoint(6)
lines.InsertCellPoint(7)
lines.InsertCellPoint(3)
polygon = vtk.vtkPolyData()
polygon.SetPoints(points)
polygon.SetLines(lines)
self.mapper.SetInputData(polygon)
self.mapper.Update()
``` |
{
"source": "jmmeneguel/sd-maskrcnn",
"score": 2
} |
#### File: sd_maskrcnn/envs/physics_engine.py
```python
import abc
import os
import time
import trimesh
import pybullet
import numpy as np
import shutil
import pkg_resources
from autolab_core import RigidTransform, Logger
from pyrender import Scene, Viewer, Mesh, Node, PerspectiveCamera
from .constants import GRAVITY_ACCEL
class PhysicsEngine(metaclass=abc.ABCMeta):
""" Abstract Physics Engine class """
def __init__(self):
# set up logger
self._logger = Logger.get_logger(self.__class__.__name__)
@abc.abstractmethod
def reset(self):
pass
@abc.abstractmethod
def step(self):
pass
@abc.abstractmethod
def start(self):
pass
@abc.abstractmethod
def stop(self):
pass
class PybulletPhysicsEngine(PhysicsEngine):
""" Wrapper for pybullet physics engine that is tied to a single ID """
def __init__(self, urdf_cache_dir, debug=False):
PhysicsEngine.__init__(self)
self._physics_client = None
self._debug = debug
self._urdf_cache_dir = urdf_cache_dir
if not os.path.isabs(self._urdf_cache_dir):
self._urdf_cache_dir = os.path.join(os.getcwd(), self._urdf_cache_dir)
if not os.path.exists(os.path.join(self._urdf_cache_dir, 'plane')):
os.makedirs(os.path.join(self._urdf_cache_dir, 'plane'))
shutil.copy(pkg_resources.resource_filename('sd_maskrcnn', 'data/plane/plane.urdf'),
os.path.join(self._urdf_cache_dir, 'plane', 'plane.urdf'))
shutil.copy(pkg_resources.resource_filename('sd_maskrcnn', 'data/plane/plane_convex_piece_0.obj'),
os.path.join(self._urdf_cache_dir, 'plane', 'plane_convex_piece_0.obj'))
def add(self, obj, static=False):
# create URDF
urdf_filename = os.path.join(self._urdf_cache_dir, obj.key, '{}.urdf'.format(obj.key))
urdf_dir = os.path.dirname(urdf_filename)
if not os.path.exists(urdf_filename):
try:
os.makedirs(urdf_dir)
except:
self._logger.warning('Failed to create dir %s. The object may have been created simultaneously by another process' %(urdf_dir))
self._logger.info('Exporting URDF for object %s' %(obj.key))
# Fix center of mass (for rendering) and density and export
geometry = obj.mesh.copy()
geometry.apply_translation(-obj.mesh.center_mass)
trimesh.exchange.export.export_urdf(geometry, urdf_dir)
com = obj.mesh.center_mass
pose = self._convert_pose(obj.pose, com)
obj_t = pose.translation
obj_q_wxyz = pose.quaternion
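        # pybullet expects (x, y, z, w) quaternions, while RigidTransform stores (w, x, y, z)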
obj_q_xyzw = np.roll(obj_q_wxyz, -1)
try:
obj_id = pybullet.loadURDF(urdf_filename,
obj_t,
obj_q_xyzw,
useFixedBase=static,
physicsClientId=self._physics_client)
except:
raise Exception('Failed to load %s' %(urdf_filename))
if self._debug:
self._add_to_scene(obj)
self._key_to_id[obj.key] = obj_id
self._key_to_com[obj.key] = com
def get_velocity(self, key):
obj_id = self._key_to_id[key]
return pybullet.getBaseVelocity(obj_id, physicsClientId=self._physics_client)
def get_pose(self, key):
obj_id = self._key_to_id[key]
obj_t, obj_q_xyzw = pybullet.getBasePositionAndOrientation(obj_id, physicsClientId=self._physics_client)
obj_q_wxyz = np.roll(obj_q_xyzw, 1)
pose = RigidTransform(rotation=obj_q_wxyz,
translation=obj_t,
from_frame='obj',
to_frame='world')
pose = self._deconvert_pose(pose, self._key_to_com[key])
return pose
def remove(self, key):
obj_id = self._key_to_id[key]
pybullet.removeBody(obj_id, physicsClientId=self._physics_client)
self._key_to_id.pop(key)
self._key_to_com.pop(key)
if self._debug:
self._remove_from_scene(key)
def step(self):
pybullet.stepSimulation(physicsClientId=self._physics_client)
if self._debug:
time.sleep(0.04)
self._update_scene()
def reset(self):
if self._physics_client is not None:
self.stop()
self.start()
def start(self):
if self._physics_client is None:
self._physics_client = pybullet.connect(pybullet.DIRECT)
pybullet.setGravity(0, 0, -GRAVITY_ACCEL, physicsClientId=self._physics_client)
self._key_to_id = {}
self._key_to_com = {}
if self._debug:
self._create_scene()
self._viewer = Viewer(self._scene, use_raymond_lighting=True, run_in_thread=True)
def stop(self):
if self._physics_client is not None:
pybullet.disconnect(self._physics_client)
self._physics_client = None
if self._debug:
self._scene = None
self._viewer.close_external()
while self._viewer.is_active:
pass
def __del__(self):
self.stop()
del self
def _convert_pose(self, pose, com):
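        # The URDF is exported with its origin at the center of mass, so poses passed
        # to pybullet must be COM poses: shift the translation by the rotated COM offset.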
new_pose = pose.copy()
new_pose.translation = pose.rotation.dot(com) + pose.translation
return new_pose
def _deconvert_pose(self, pose, com):
new_pose = pose.copy()
new_pose.translation = pose.rotation.dot(-com) + pose.translation
return new_pose
def _create_scene(self):
self._scene = Scene()
camera = PerspectiveCamera(yfov=0.833, znear=0.05,
zfar=3.0, aspectRatio=1.0)
cn = Node()
cn.camera = camera
pose_m = np.array([[0.0,1.0,0.0,0.0],
[1.0,0.0,0.0,0.0],
[0.0,0.0,-1.0,0.88],
[0.0,0.0,0.0,1.0]])
pose_m[:,1:3] *= -1.0
cn.matrix = pose_m
self._scene.add_node(cn)
self._scene.main_camera_node = cn
def _add_to_scene(self, obj):
self._viewer.render_lock.acquire()
n = Node(mesh=Mesh.from_trimesh(obj.mesh), matrix=obj.pose.matrix, name=obj.key)
self._scene.add_node(n)
self._viewer.render_lock.release()
def _remove_from_scene(self, key):
self._viewer.render_lock.acquire()
if self._scene.get_nodes(name=key):
self._scene.remove_node(next(iter(self._scene.get_nodes(name=key))))
self._viewer.render_lock.release()
def _update_scene(self):
self._viewer.render_lock.acquire()
for key in self._key_to_id.keys():
obj_pose = self.get_pose(key).matrix
if self._scene.get_nodes(name=key):
next(iter(self._scene.get_nodes(name=key))).matrix = obj_pose
self._viewer.render_lock.release()
``` |
{
"source": "jmmerrell/jmmerrell.github.io",
"score": 3
} |
#### File: jmmerrell.github.io/movie_random_forest/movie_project.py
```python
from __future__ import print_function
import pandas as pd
import random
import math
from scipy.linalg import toeplitz
import statsmodels.api as sm
from statsmodels.formula.api import ols
from datetime import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.iolib.table import (SimpleTable, default_txt_fmt)
df= pd.read_csv('C:\\Users\\merre\\Desktop\\jmmerrell.github.io\\movie_random_forest\\movie_data.txt', sep="|", header=0)
df.columns = ["date","movie","studio","genre","basedon","actionanim","factfict","budget","thrcount","threngag","fisrtweeknd","domgross","infdomgross","intgross","totgross","direct","compose","act1","act2","act3","act4","act5","act6","act7","act8","act9","act10","act11","act12","act13","act14","act15","act16","act17","act18","act19","act20","rating","franch"]
df['date'] = pd.to_datetime(df['date'])
cols = df.columns[list(range(7,15))]
df[cols] = df[cols].apply(pd.to_numeric, errors='coerce', axis=1)
###order data by date
df = df.sort_values(by=['date'], ascending=True)
###Drop all movies with no inflation adjusted gross and without actors
df= df.loc[(df['infdomgross'] > 0) & (df['act1'].isnull() == False)]
###Create inflation adjusted budget
df['infbudget'] = df['infdomgross']/df['domgross']*df['budget']
####Create a new dataframe with only new movies
df2 = df.loc[df['date']>='2000-01-01']
print(df2)
# df['Date'] = pd.to_datetime(df['Date'])
# df['month'] = df['Date'].dt.month
# df['winter'] = np.where(df['month'].isin([12,1,2,3]) , 1, 0)
# df['summer']= np.where(df['month'].isin([7,8,9,10]), 1, 0)
# plt.plot(df.Date, df.PowerBill)
# plt.show()
# temps = np.array([32,31,36,44.5,52,61,69.5,77.5,76,66.5,53.5,41.5])
# temps = abs(temps-65)
# temps = [temps]*4
# temps = np.concatenate(temps)
# temps = temps.tolist()
# temps2 = temps[:3]
# temps = temps+temps2
# df['temp'] = temps
# df['solar_winter'] = np.where((df['Solar']=='Y')&(df['winter']==1) , 1, 0)
# df['solar_summer'] = np.where((df['Solar']=='Y')&(df['summer']==1) , 1, 0)
# df['summer_temp'] = df['temp']*df['summer']
# df['winter_temp'] = df['temp']*df['winter']
# nsims=100
# out = [0.0]*(nsims*53)
# out = np.reshape(out,(nsims,53))
#
# for i in range(0,nsims):
# rowz = np.random.choice(df.shape[0], 5, replace=False)
# train = df.ix[set(range(1, df.shape[0])).difference(rowz)]
# test = df.ix[rowz]
# ols_resid = sm.OLS.from_formula('PowerBill ~ C(Solar) + C(solar_winter) + C(solar_summer) + summer_temp + winter_temp', data=df).fit().resid
# resid_fit = sm.OLS(endog=list(ols_resid[1:]), exog=sm.add_constant(ols_resid[:-1])).fit()
# rho = resid_fit.params[1]
# toeplitz(range(5))
# order = toeplitz(range(train.shape[0]))
# sigma = rho**order
# gls_model = sm.GLS.from_formula('PowerBill ~ C(Solar) + C(solar_winter) + C(solar_summer) + summer_temp + winter_temp', data=train, sigma=sigma)
# gls_results = gls_model.fit()
# preds=gls_results.predict(test)
# out[i][0]=np.mean(test['PowerBill']-preds)
# out[i][1]=math.sqrt(np.mean((test['PowerBill']-preds)**2))
# out[i][(rowz+1)]=preds
#
# def column(matrix, i):
# return [row[i] for row in matrix]
# print(np.mean(column(out,0)))
# print(np.mean(column(out,1)))
``` |
{
"source": "jmmey/arrismodemmon",
"score": 3
} |
#### File: jmmey/arrismodemmon/main.py
```python
import os
import re
import time
import json
import requests
from bs4 import BeautifulSoup as bs
from influxdb import InfluxDBClient
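# Matches the leading numeric reading in a status-table cell, e.g. "3.5" (or "-3.5") from "3.5 dBmV"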
MEASURE_RE = r'\W*\d*\.\d*'
TRUST_SSL = True
"""
Influx DB Account
Set environment variables for "INFLUX_USER" and "INFLUX_PASS",
but if you do not want to set them, add your account information
in the variables below.
"""
DB_USERNAME = ''
DB_PASSWORD = ''
DB_SERVER_NAME = ''
DB_INFLUX_NAME = ''
"""
If environment variables exist, use those;
if not, use the above variable settings.
"""
DB_USER = os.getenv("INFLUX_USER") if os.getenv("INFLUX_USER") else DB_USERNAME
DB_PASS = os.getenv("INFLUX_PASS") if os.getenv("INFLUX_PASS") else DB_PASSWORD
DB_HOST = os.getenv("INFLUX_HOST") if os.getenv("INFLUX_HOST") else DB_SERVER_NAME
DB_NAME = os.getenv("INFLUX_DB_NAME") if os.getenv("INFLUX_DB_NAME") else DB_INFLUX_NAME
def modem_url_request(url='http://192.168.100.1'):
"""
Makes http request to Arris modem
web page. Returns page content
"""
try:
r = requests.get(url).content
except:
r = 'failed'
if r == 'failed':
return 'failed'
else:
return r
def parse_html(content):
soup = bs(content, 'html.parser')
return soup
def modem_status_table(table):
status_table = table.find_all('table', class_='simpleTable')
return status_table
def modem_ds_table_rows(data):
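    # The downstream channel table is the second "simpleTable" on the status page;
    # the first two rows (headers) are skipped.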
ds = data[1]
ds = ds.find_all('tr')[2:]
return ds
def modem_us_table_rows(data):
us = data[-1].find_all('tr')[2:]
return us
def strip_table_row_tags(data):
channel_data = []
for i in data:
row = [td for td in i.stripped_strings]
channel_data.append(row)
return channel_data
def prep_influx_json(ds, us):
modem_data = []
DATA_TIME = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
# Downstream Data
for row in ds:
channel = row[0]
power = re.match(MEASURE_RE, row[5]).group(0)
snr = re.match(MEASURE_RE, row[6]).group(0)
ds_data_power = {
'measurement': 'modem_rf_stats',
'tags': {'direction': 'downstream', 'channel': channel, 'measure': 'power'},
'time': DATA_TIME,
'fields': {'power': power}
}
ds_data_snr = {
'measurement': 'modem_rf_stats',
'tags': {'direction': 'downstream', 'channel': channel, 'measure': 'snr'},
'time': DATA_TIME,
'fields': {'snr': snr}
}
modem_data.append(ds_data_power)
modem_data.append(ds_data_snr)
# Upstream Data
for row in us:
channel = row[0]
power = re.match(MEASURE_RE, row[-1]).group(0)
us_data = {
'measurement': 'modem_rf_stats',
'tags': {'direction': 'upstream', 'channel': channel, 'measure': 'power'},
'time': DATA_TIME,
'fields': {'power': power}
}
modem_data.append(us_data)
json_body = json.dumps(modem_data)
return json_body
def write_influxdb_data(data):
client = InfluxDBClient(
host=DB_HOST,
port=8086,
username=DB_USER,
        password=DB_PASS,
ssl=True,
verify_ssl=TRUST_SSL
)
db_write = client.write_points(
data,
time_precision=None,
database=DB_NAME,
protocol='json'
)
if db_write == True:
return True
else:
return "Error"
def main():
"""
main program
"""
req = modem_url_request()
if req == 'failed':
pass
else:
html = parse_html(req)
data_tables = modem_status_table(html)
ds_rows = modem_ds_table_rows(data_tables)
us_rows = modem_us_table_rows(data_tables)
ds_rows_clean = strip_table_row_tags(ds_rows)
us_rows_clean = strip_table_row_tags(us_rows)
json_body = prep_influx_json(ds_rows_clean, us_rows_clean)
json_body = json.loads(json_body)
write_influxdb_data(json_body)
if __name__ == '__main__':
while True:
main()
time.sleep(300)
``` |
{
"source": "jmmiddour/DSPT7-Twitoff",
"score": 3
} |
#### File: jmmiddour/DSPT7-Twitoff/hello.py
```python
from flask import Flask
app = Flask(__name__)
@app.route('/') # The website for the app.
def hello_world():
return 'Hello, World!'
@app.route('/new_page') # The website for the app.
def new_page():
return 'This is another page'
# If you add this, you can run the flask app as:
# `python hello.py` in the terminal
# This only works well if you have a one file app.
# Not useful for multi file being ran as a package.
if __name__ == '__main__':
app.run(debug=True)
# If debug=True it will restart the Flask app automatically
# when you save the .py file.
```
#### File: DSPT7-Twitoff/twitoff/app.py
```python
from os import getenv
from flask import Flask, render_template, request
from .db_model import DB, User
from .twitter import add_user_tweepy, add_user_history, update_all_users
from .predict import predict_user
def create_app():
'''
Create and configure an instance of my Flask application
'''
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = getenv('DATABASE_URL')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
DB.init_app(app) # Connect FLask app to the SQLAlchemy database
@app.route('/') # The home page for the app.
def root(): # This is the defining the root directory/home page of the app.
return render_template('base.html', title='Welcome to TwitOff', users=User.query.all())
@app.route('/user', methods=['POST']) # Adds data from a form to Database
@app.route('/user/<name>', methods=['GET']) # Displays data from Database
def add_or_update_user(name=None, message=''):
name = name or request.values['user_name']
try:
if request.method == 'POST':
add_user_tweepy(name)
message = f'User {name} has successfully been added to the Database!'
            # This will pull the tweet attributes for the user just specified:
tweets = User.query.filter(User.username == name).one().tweet
except Exception as e:
print(f'Error adding {name}: ERROR = {e}')
tweets = []
return render_template('user.html', title=f'User: {name}', tweets=tweets,
message=message)
@app.route('/compare', methods=['POST'])
def compare(message=''):
user1 = request.values['user1']
user2 = request.values['user2']
tweet_text = request.values['tweet_text']
if user1 == user2:
message = "Can not compare a user to themselves."
else:
prediction = predict_user(user1, user2, tweet_text)
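            # predict_user returns a truthy value when user1 is the more likely author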
message = f"""
@{user1 if prediction else user2}
            is more likely to tweet this than
@{user2 if prediction else user1}.
"""
return render_template('predict.html', title='Your Prediction',
users=User.query.all(), **locals())
@app.route('/reset_my_DB')
def reset():
DB.drop_all()
DB.create_all()
return render_template('base.html', title='Reset Database',
message=f'The database has been reset.',
users=User.query.all())
@app.route('/update')
def update():
update_all_users()
return render_template('base.html', title='Updated Tweets',
message='All Tweets have been Updated!',
users=User.query.all())
return app
## The following code was from module 1 and is no longer needed:
# @app.route('/add_user/<username>/<followers>')
# # ^-- Will allow to enter information to the route and commit it to the DB.
# def add_user(username, followers):
# user = User(username=username, followers=followers)
# DB.session.add(user) # Adds data to "staging area" to be saved to DB.
# DB.session.commit() # Saves new data to the database.
# return f'{username} has been added to the Database!'
# @app.route('/add_tweet/<user_id>/<tweet>')
# def tweet(user_id, tweet):
# new_tweet = Tweet(user_id=user_id, tweet=tweet)
# DB.session.add(new_tweet)
# DB.session.commit()
# return f'{tweet} has been added to the DB!'
# return app
# The code to run this app:
# export FLASK_APP=twitoff:APP
# ^-- twitoff=directory, APP=defined function in the __init__.py file
# flask run
``` |
{
"source": "jmmiddour/DS-Unit-3-Sprint-2-SQL-and-Databases",
"score": 4
} |
#### File: DS-Unit-3-Sprint-2-SQL-and-Databases/module1-introduction-to-sql/rpg_queries.py
```python
import pandas as pd
import sqlite3
# Create a connection to the Database:
conn = sqlite3.connect('rpg_db.sqlite3')
# Look at the connection to verify it is connected:
conn
# Create a function to get data from a SQLite Data Base:
def get_data(query, conn):
'''
    This function will retrieve the data from a SQLite database and
    turn it into a pandas DataFrame for easier readability and to
make it easier to work with.
'''
    # Instantiate the cursor object and get the results:
cursor = conn.cursor()
results = cursor.execute(query).fetchall()
# Get the columns from the cursor object:
cols = list(map(lambda x: x[0], cursor.description))
# Assign it to a pandas DataFrame:
df = pd.DataFrame(data=results, columns=cols)
return df
# Q1: How many total Characters are there?
q1 = get_data('''SELECT
COUNT(character_id) as Total_Observations,
COUNT(DISTINCT character_id) as Total_Number_of_Charaters
FROM charactercreator_character''', conn)
print('--> How many total Characters are there?\n', q1, '\n')
# Q2: How many of each specific subclass?
q2 = get_data('''SELECT
COUNT(DISTINCT charact.character_id) AS Total_Characters,
COUNT(DISTINCT cleric.character_ptr_id) AS Cleric_Type,
COUNT(DISTINCT fighter.character_ptr_id) AS Fighter_Type,
COUNT(DISTINCT mage.character_ptr_id) AS Mage_Type,
COUNT(DISTINCT thief.character_ptr_id) AS Thief_Type
FROM charactercreator_character AS charact
LEFT JOIN charactercreator_cleric AS cleric
ON cleric.character_ptr_id = charact.character_id
LEFT JOIN charactercreator_fighter AS fighter
ON fighter.character_ptr_id = charact.character_id
LEFT JOIN charactercreator_mage AS mage
ON mage.character_ptr_id = charact.character_id
LEFT JOIN charactercreator_thief AS thief
ON thief.character_ptr_id = charact.character_id
''', conn)
print('--> How many of each specific subclass?\n', q2, '\n')
# Q3: How many total Items?
q3 = get_data('''SELECT
COUNT(item_id) as Total_Observations,
COUNT(DISTINCT item_id) as Total_Number_of_Items
FROM armory_item''', conn)
print('--> How many total Items?\n', q3, '\n')
# Q4: How many of the Items are weapons? How many are not?
q4 = get_data('''SELECT
COUNT(arm.item_id) AS Total_Number_of_Items,
COUNT(DISTINCT wep.item_ptr_id) AS Total_Weapons,
(COUNT(arm.item_id) - COUNT(DISTINCT wep.item_ptr_id))
AS Total_Non_Weapons
FROM armory_item AS arm
LEFT JOIN armory_weapon AS wep
ON arm.item_id = wep.item_ptr_id''', conn)
print('--> How many of the Items are weapons? How many are not?\n', q4, '\n')
# Q5: How many Items does each character have? (Return first 20 rows)
q5 = get_data('''SELECT
ch.character_id AS Character_ID,
ch.name AS Character_Name,
COUNT(DISTINCT inv.item_id) AS Total_Items
FROM charactercreator_character AS ch
LEFT JOIN charactercreator_character_inventory AS inv
ON inv.character_id = ch.character_id
GROUP BY Character_Name
LIMIT 20''', conn)
print('--> How many Items does each\
character have? (Return first 20 rows)\n', q5, '\n')
# Q6: How many Weapons does each character have? (Return first 20 rows)
q6 = get_data('''SELECT
ch.character_id AS Character_ID,
ch.name AS Character_Name,
COUNT(DISTINCT inv.item_id) AS Total_Items,
COUNT(DISTINCT wep.item_ptr_id) AS Total_Weapons
FROM charactercreator_character AS ch
LEFT JOIN charactercreator_character_inventory AS inv
ON inv.character_id = ch.character_id
                LEFT JOIN armory_item AS arm ON arm.item_id = inv.item_id
LEFT JOIN armory_weapon AS wep ON wep.item_ptr_id = arm.item_id
GROUP BY Character_Name
LIMIT 20''', conn)
print('--> How many Weapons does each\
character have? (Return first 20 rows)\n', q6, '\n')
# Q7: On average, how many Items does each Character have?
q7 = get_data('''
SELECT AVG(Total_Items) AS Average_Items_Per_Character
FROM (SELECT
ch.character_id AS Character_ID,
ch.name AS Character_Name,
COUNT(DISTINCT inv.item_id) AS Total_Items
FROM charactercreator_character AS ch
LEFT JOIN charactercreator_character_inventory AS inv
ON inv.character_id = ch.character_id
GROUP BY Character_Name);''', conn)
print('--> On average, how many Items does each\
Character have?\n', q7, '\n')
# Q8: On average, how many Weapons does each character have?
q8 = get_data('''
SELECT AVG(Total_Weapons) AS Avgerage_Weapons_Per_Character
FROM (SELECT
ch.character_id AS Character_ID,
ch.name AS Character_Name,
COUNT(DISTINCT inv.item_id) AS Total_Items,
COUNT(DISTINCT wep.item_ptr_id) AS Total_Weapons
FROM charactercreator_character AS ch
LEFT JOIN charactercreator_character_inventory AS inv
ON inv.character_id = ch.character_id
                LEFT JOIN armory_item AS arm ON arm.item_id = inv.item_id
LEFT JOIN armory_weapon AS wep ON wep.item_ptr_id = arm.item_id
GROUP BY Character_Name);''', conn)
print('--> On average, how many Weapons does each\
character have?\n', q8)
``` |
{
"source": "jmmiddour/Lambdata-jmmiddour",
"score": 4
} |
#### File: Lambdata-jmmiddour/lambdata_jmmiddour/dspt7_utilities.py
```python
def get_business_info(business, city, state):
'''
    This function scrapes the yellowpages.com search results and returns
    the name, phone number, street address, city, state, and zip code
    for the matching businesses.
Parameters:
-----------
business : type or name of business
city : name of the city where the business is located
    state : the 2 character abbreviation for the state in which the
business is located.
Returns:
--------
DataFrame with information scraped from the yellowpages website,
based on the parameters entered into the function.
'''
# Import libraries needed:
import pandas as pd
import requests
from bs4 import BeautifulSoup
# Set the url to pull the data from:
url = f'https://www.yellowpages.com/search?search_terms={business}&geo_location_terms={city}%2C+{state}&s=distance'
# Create a get request:
response = requests.get(url)
# Check the status code to verify it is 200. This lets you know if there is
# an error reaching the website based on the code:
if response.status_code == 200:
# Use beautiful soup to parse everything:
soup = BeautifulSoup(response.content, 'html.parser')
# Get the data from the location within the webpage:
information = soup.find_all('div', class_="info")
data = {'Name': [], 'Phone_No': [], 'Street': [], 'City_State_Zip': []}
for info in information:
# Get all the attribrutes we need:
name = info.find('a', class_="business-name").span
name = name.text if name else None
phone = info.find('div', class_='phones phone primary')
phone = phone.text if phone else None
street = info.find('div', class_='street-address')
street = street.text if street else None
area = info.find('div', class_='locality')
area = area.text if area else None
# Store the values in a data object:
data['Name'].append(name)
data['Phone_No'].append(phone)
data['Street'].append(street)
data['City_State_Zip'].append(area)
else:
print('There is an error, the website can not be reached.')
# Turn data collected into a pandas dataframe:
business_info = pd.DataFrame(data, columns=['Name', 'Phone_No', 'Street',
'City_State_Zip'])
return business_info
# Function #2:
def address_split(df, col1, col2):
'''
    This function takes an address column containing the street number and
    street name and splits it into two separate columns. It also splits a
    column with city, state, and zip code into 3 separate columns.
Parameters:
-----------
df : The name of your DataFrame
col1 : The column with the address you want to split.
col2 : The column with the city, state, and zip code.
Returns:
--------
    DataFrame with 3 more columns: 5 address columns in total (street number,
    street name, city, state, zip code). Once the columns are split, the two
    original columns are removed.
'''
    # Split the address column into 2 separate columns:
df[['Address_No', 'Street_Name']] = df[col1].str.split(n=1, expand=True)
    # Split the city, state, and zip into 3 separate columns:
df[['City', 'State', 'Zip_Code']] = df[col2].str.rsplit(n=2, expand=True)
# Remove , from city column:
df['City'] = df['City'].str.replace(',', '')
    # Remove the original columns to create a clean dataframe:
df.drop(columns=[col1, col2], inplace=True)
return df
if __name__ == "__main__":
df = get_business_info('fast food', 'jacksonville', 'fl')
print(f'DataFrame after running function 1:\n', df.head(), '\n')
df2 = address_split(df, 'Street', 'City_State_Zip')
print(f'DataFrame after running function 2:\n', df2.head())
``` |
{
"source": "jmmille/Cardinal",
"score": 2
} |
#### File: bin/scout-cli/scout_ssid.py
```python
import scout_auth
import scout_env
import time
# SCOUT SSID COMMAND FUNCTIONS
def scoutCreateSsid24(ip, username, password, ssid, wpa2Pass, vlan, bridgeGroup, radioSub, gigaSub):
"""Function that deploys a 2.4GHz SSID to an AP
using user provided arguments.
"""
scoutSsh = scout_auth.sshInfo(ip=ip, username=username, password=password)
jinjaEnv = scout_env.scoutJinjaEnv()
commandDebug = scout_env.scoutEnv()
ssid = ssid
wpa2Pass = wpa2Pass
vlan = vlan
bridgeGroup = bridgeGroup
radioSub = radioSub
gigaSub = gigaSub
cmdTemplate = jinjaEnv.get_template("scout_create_ssid_24")
cmds = cmdTemplate.render(password=password,ssid=ssid,wpa2Pass=wpa2Pass,vlan=vlan,bridgeGroup=bridgeGroup,radioSub=radioSub,gigaSub=gigaSub)
scoutCommands = cmds.splitlines()
channel = scoutSsh.invoke_shell()
print("INFO: Deploying 2.4GHz SSID {0} to {1}...".format(ssid,ip))
for command in scoutCommands:
channel.send('{}\n'.format(command))
if commandDebug == "on":
commands = channel.recv(65535)
print(commands)
time.sleep(.10)
scoutSsh.close()
def scoutCreateSsid5(ip, username, password, ssid, wpa2Pass, vlan, bridgeGroup, radioSub, gigaSub):
"""Function that deploys a 5GHz SSID to an AP
using user provided arguments.
"""
scoutSsh = scout_auth.sshInfo(ip=ip, username=username, password=password)
jinjaEnv = scout_env.scoutJinjaEnv()
commandDebug = scout_env.scoutEnv()
ssid = ssid
wpa2Pass = wpa2Pass
vlan = vlan
bridgeGroup = bridgeGroup
radioSub = radioSub
gigaSub = gigaSub
cmdTemplate = jinjaEnv.get_template("scout_create_ssid_5")
cmds = cmdTemplate.render(password=password,ssid=ssid,wpa2Pass=wpa2Pass,vlan=vlan,bridgeGroup=bridgeGroup,radioSub=radioSub,gigaSub=gigaSub)
scoutCommands = cmds.splitlines()
channel = scoutSsh.invoke_shell()
print("INFO: Deploying 5GHz SSID {0} to {1}...".format(ssid,ip))
for command in scoutCommands:
channel.send('{}\n'.format(command))
if commandDebug == "on":
commands = channel.recv(65535)
print(commands)
time.sleep(.10)
scoutSsh.close()
def scoutCreateSsid24Radius(ip, username, password, ssid, vlan, bridgeGroup, radioSub, gigaSub, radiusIp, sharedSecret, authPort, acctPort, radiusTimeout, radiusGroup, methodList):
"""Function that deploys a 2.4GHz 802.1x SSID to an
AP using user provided arguments.
"""
scoutSsh = scout_auth.sshInfo(ip=ip, username=username, password=password)
jinjaEnv = scout_env.scoutJinjaEnv()
commandDebug = scout_env.scoutEnv()
ssid = ssid
vlan = vlan
bridgeGroup = bridgeGroup
radioSub = radioSub
gigaSub = gigaSub
radiusIp = radiusIp
sharedSecret = sharedSecret
authPort = authPort
acctPort = acctPort
radiusTimeout = radiusTimeout
radiusGroup = radiusGroup
methodList = methodList
cmdTemplate = jinjaEnv.get_template("scout_create_radius_ssid_24")
cmds = cmdTemplate.render(password=password,ssid=ssid,vlan=vlan,bridgeGroup=bridgeGroup,radioSub=radioSub,gigaSub=gigaSub,radiusIp=radiusIp,sharedSecret=sharedSecret,authPort=authPort,acctPort=acctPort,radiusTimeout=radiusTimeout,radiusGroup=radiusGroup,methodList=methodList)
scoutCommands = cmds.splitlines()
channel = scoutSsh.invoke_shell()
print("INFO: Deploying 2.4GHz RADIUS SSID {0} to {1}...".format(ssid,ip))
for command in scoutCommands:
channel.send('{}\n'.format(command))
if commandDebug == "on":
commands = channel.recv(65535)
print(commands)
time.sleep(.10)
scoutSsh.close()
def scoutCreateSsid5Radius(ip, username, password, ssid, vlan, bridgeGroup, radioSub, gigaSub, radiusIp, sharedSecret, authPort, acctPort, radiusTimeout, radiusGroup, methodList):
"""Function that deploys a 5GHz 802.1x SSID to an
AP using user provided arguments.
"""
scoutSsh = scout_auth.sshInfo(ip=ip,username=username,password=password)
jinjaEnv = scout_env.scoutJinjaEnv()
commandDebug = scout_env.scoutEnv()
ssid = ssid
vlan = vlan
bridgeGroup = bridgeGroup
radioSub = radioSub
gigaSub = gigaSub
radiusIp = radiusIp
sharedSecret = sharedSecret
authPort = authPort
acctPort = acctPort
radiusTimeout = radiusTimeout
radiusGroup = radiusGroup
methodList = methodList
cmdTemplate = jinjaEnv.get_template("scout_create_radius_ssid_5")
cmds = cmdTemplate.render(password=password,ssid=ssid,vlan=vlan,bridgeGroup=bridgeGroup,radioSub=radioSub,gigaSub=gigaSub,radiusIp=radiusIp,sharedSecret=sharedSecret,authPort=authPort,acctPort=acctPort,radiusTimeout=radiusTimeout,radiusGroup=radiusGroup,methodList=methodList)
scoutCommands = cmds.splitlines()
channel = scoutSsh.invoke_shell()
print("INFO: Deploying 5GHz RADIUS SSID {0} to {1}...".format(ssid,ip))
for command in scoutCommands:
channel.send('{}\n'.format(command))
if commandDebug == "on":
commands = channel.recv(65535)
print(commands)
time.sleep(.10)
scoutSsh.close()
def scoutDeleteSsid24(ip, username, password, ssid, vlan, radioSub, gigaSub):
"""Function that deletes an existing 2.4GHz SSID from
an AP.
"""
scoutSsh = scout_auth.sshInfo(ip=ip, username=username, password=password)
jinjaEnv = scout_env.scoutJinjaEnv()
commandDebug = scout_env.scoutEnv()
ssid = ssid
vlan = vlan
radioSub = radioSub
gigaSub = gigaSub
cmdTemplate = jinjaEnv.get_template("scout_delete_ssid_24")
cmds = cmdTemplate.render(password=password,ssid=ssid,vlan=vlan,radioSub=radioSub,gigaSub=gigaSub)
scoutCommands = cmds.splitlines()
channel = scoutSsh.invoke_shell()
print("INFO: Removing 2.4GHz SSID {0} from {1}...".format(ssid,ip))
for command in scoutCommands:
channel.send('{}\n'.format(command))
if commandDebug == "on":
commands = channel.recv(65535)
print(commands)
time.sleep(.10)
scoutSsh.close()
def scoutDeleteSsid5(ip, username, password, ssid, vlan, radioSub, gigaSub):
"""Function that deletes an existing 5GHz SSID from an
AP.
"""
scoutSsh = scout_auth.sshInfo(ip=ip, username=username, password=password)
jinjaEnv = scout_env.scoutJinjaEnv()
commandDebug = scout_env.scoutEnv()
cmdTemplate = jinjaEnv.get_template("scout_delete_ssid_5")
cmds = cmdTemplate.render(password=password,ssid=ssid,vlan=vlan,radioSub=radioSub,gigaSub=gigaSub)
scoutCommands = cmds.splitlines()
channel = scoutSsh.invoke_shell()
print("INFO: Removing 5GHz SSID {0} from {1}...".format(ssid,ip))
for command in scoutCommands:
channel.send('{}\n'.format(command))
if commandDebug == "on":
commands = channel.recv(65535)
print(commands)
time.sleep(.10)
scoutSsh.close()
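# --- Hedged usage sketch (not part of the original module). Every helper above follows
# --- the same pattern: open an SSH session via scout_auth, render a Jinja template of
# --- IOS commands via scout_env, and push the commands line by line. A caller would do
# --- something like the following (IP, credentials and SSID values are placeholders):
#
#   scoutDeleteSsid24(ip="192.0.2.10", username="admin", password="secret",
#                     ssid="corp-wifi", vlan="20", radioSub="20", gigaSub="20")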
``` |
{
"source": "jmmille/pork-chop",
"score": 3
} |
#### File: pork-chop/modules/stonks.py
```python
import requests
import re
import json
#from bs4 import BeautifulSoup
def stonks_handler(message):
'''Pork Chop gives you stock info by scraping yahoo finance'''
url = 'https://finance.yahoo.com/quote/'
    parts = message.split()
    if len(parts) > 1:
        symbol = parts[1]
        url += symbol
    else:
        return '!stonks <symbol>'
response = requests.get(url)
if url != response.url:
return 'Could not get ' + symbol + ' price'
stock_html = response.text
price = re.search(r'regularMarketPrice.*?({.*?})', stock_html)
change = re.search(r'regularMarketChangePercent.*?({.*?})', stock_html)
    if price and change:
price = json.loads(price.group(1))['raw']
change = json.loads(change.group(1))['raw']
reply = '$' + str(price) + ' / ' + ('' if change < 0 else '+') + '{:.2f}'.format(change) + '%'
if change < 0:
return reply + ' 📉'
return reply + ' 📈'
return 'Could not get ' + symbol + ' price'
```
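A quick usage sketch of the handler above (the ticker is an arbitrary example; the reply text depends on what Yahoo Finance returns at call time):
```python
# Hypothetical invocation; "GME" is only an example symbol.
reply = stonks_handler("!stonks GME")
print(reply)  # e.g. "$123.45 / +1.23% 📈", or an error string if the symbol is unknown
```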
#### File: pork-chop/modules/turn.py
```python
import json
def turn_handler(message):
message = message.strip().split()
if len(message) > 1:
# list turns
if message[1] == 'list':
return turnList()
# turn a user
writeTurn(message[1:])
        return f"{' '.join(message[1:])} got turned."
return "specify a user with !turn <user> or use !turn list"
def writeTurn(user):
user = ' '.join(user).lower()
with open('data/turnHistory.json', 'r') as f:
history = json.load(f)
if user in history:
history[user] = int(history[user]) + 1
else:
        history[user] = 1
with open('data/turnHistory.json', 'w') as f:
json.dump(history, f)
def turnList():
with open('data/turnHistory.json', 'r') as f:
history = json.load(f)
return "\n".join( f"{name} {num}" for name, num in history.items())
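# --- Hedged usage sketch (not part of the module). The handler assumes that
# --- data/turnHistory.json already exists (an empty JSON object "{}" is enough):
#
#   turn_handler("!turn alice")  ->  "alice got turned."
#   turn_handler("!turn list")   ->  "alice 1"
#   turn_handler("!turn")        ->  "specify a user with !turn <user> or use !turn list"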
``` |
{
"source": "JMMirza/MovieRecomSys",
"score": 3
} |
#### File: JMMirza/MovieRecomSys/movie_recommendation.py
```python
import pandas as pd
import numpy as np
import sys
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
import seaborn as sns
movies = pd.read_csv("movies.csv")
ratings = pd.read_csv("ratings.csv")
ratings.head()
"""# movies.head()"""
final_dataset = ratings.pivot(index='movieId',columns='userId',values='rating')
final_dataset.head()
final_dataset.fillna(0,inplace=True)
final_dataset.head()
no_user_voted = ratings.groupby('movieId')['rating'].agg('count')
no_movies_voted = ratings.groupby('userId')['rating'].agg('count')
f,ax = plt.subplots(1,1,figsize=(16,4))
# ratings['rating'].plot(kind='hist')
plt.scatter(no_user_voted.index,no_user_voted,color='mediumseagreen')
plt.axhline(y=10,color='r')
plt.xlabel('MovieId')
plt.ylabel('No. of users voted')
# plt.show()
final_dataset = final_dataset.loc[no_user_voted[no_user_voted > 10].index,:]
f,ax = plt.subplots(1,1,figsize=(16,4))
plt.scatter(no_movies_voted.index,no_movies_voted,color='mediumseagreen')
plt.axhline(y=50,color='r')
plt.xlabel('UserId')
plt.ylabel('No. of votes by user')
# plt.show()
final_dataset=final_dataset.loc[:,no_movies_voted[no_movies_voted > 50].index]
final_dataset
sample = np.array([[0,0,3,0,0],[4,0,0,0,2],[0,0,0,0,1]])
sparsity = 1.0 - ( np.count_nonzero(sample) / float(sample.size) )
# print(sparsity)
csr_sample = csr_matrix(sample)
# print(csr_sample)
csr_data = csr_matrix(final_dataset.values)
final_dataset.reset_index(inplace=True)
knn = NearestNeighbors(metric='cosine', algorithm='brute', n_neighbors=20, n_jobs=-1)
knn.fit(csr_data)
# def get_movie_recommendation(movie_name):
def get_movie_recommendation(movie_name):
n_movies_to_reccomend = 10
movie_list = movies[movies['title'].str.contains(movie_name)]
if len(movie_list):
movie_idx= movie_list.iloc[0]['movieId']
movie_idx = final_dataset[final_dataset['movieId'] == movie_idx].index[0]
distances , indices = knn.kneighbors(csr_data[movie_idx],n_neighbors=n_movies_to_reccomend+1)
rec_movie_indices = sorted(list(zip(indices.squeeze().tolist(),distances.squeeze().tolist())),\
key=lambda x: x[1])[:0:-1]
recommend_frame = []
for val in rec_movie_indices:
movie_idx = final_dataset.iloc[val[0]]['movieId']
idx = movies[movies['movieId'] == movie_idx].index
recommend_frame.append({'Title':movies.iloc[idx]['title'].values[0],'Distance':val[1]})
df = pd.DataFrame(recommend_frame,index=range(1,n_movies_to_reccomend+1))
print(df)
else:
print ("No movies found. Please check your input")
get_movie_recommendation(sys.argv[1])
# get_movie_recommendation('Memento')
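# Hedged usage note (not in the original script): the recommender is driven from the
# command line, e.g. `python movie_recommendation.py Memento`, and prints a DataFrame of
# the 10 titles closest to the query by cosine distance on the movie/user rating matrix.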
``` |
{
"source": "jmmjsolutions/covid19-au-nsw",
"score": 2
} |
#### File: covid19-au-nsw/application/assets.py
```python
from flask_assets import Environment, Bundle
def compile_assets(app):
"""Configure & compile asset bundles."""
assets = Environment(app)
# ToDo
```
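Since `compile_assets` above is left as a ToDo, here is only an illustrative sketch of what a typical Flask-Assets bundle registration looks like; the bundle names, file globs and filters are assumptions rather than part of this repository (the `jsmin`, `less` and `cssmin` filters also require their own packages):
```python
from flask_assets import Environment, Bundle

def compile_assets_sketch(app):
    """Hypothetical bundle setup -- names, paths and filters are placeholders."""
    assets = Environment(app)
    js = Bundle("src/js/*.js", filters="jsmin", output="dist/main.js")
    css = Bundle("src/less/*.less", filters="less,cssmin", output="dist/main.css")
    assets.register("main_js", js)
    assets.register("main_css", css)
    if app.config.get("FLASK_ENV") == "development":
        js.build()
        css.build()
    return assets
```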
#### File: covid19-au-nsw/scripts/get_lga_geojson.py
```python
import os.path
import urllib
import json
from geojson_precision import coord_precision as process_features
NSW_LGA_GEOJSON_URL = "https://data.gov.au/geoserver/nsw-local-government-areas/wfs?request=GetFeature&typeName=ckan_f6a00643_1842_48cd_9c2f_df23a3a1dc1e&outputFormat=json"
def get_lga_geojson():
"""Load NSW Local Govt Area boundaries.
Download from data.gov.au if file does not exist.
Return geojson object."""
lga_path = "data/nsw-lga.geojson"
lga_exists = False
# Check if there is a downloaded copy of NSW LGA Geojson data
if not os.path.isfile(lga_path):
try:
            print("Download NSW LGA dataset %s..." % NSW_LGA_GEOJSON_URL)
req = urllib.request.urlopen(NSW_LGA_GEOJSON_URL)
CHUNK = 256 * 10240
with open(lga_path, "wb") as fp:
while True:
chunk = req.read(CHUNK)
if not chunk:
break
fp.write(chunk)
except Exception as e:
# Download failed, not the end of the world,
# so return an empty features list
return []
# Open LGA GeoJson file and load data
lga_path = "data/nsw-lga-6dp.geojson"
with open(lga_path) as f:
geojson = json.load(f)
return geojson["features"]
if __name__ == "__main__":
lga_geojson = get_lga_geojson()
print(
json.dumps(
{
"type": "FeatureCollection",
"features": list(process_features(lga_geojson, 6)),
}
)
)
```
#### File: jmmjsolutions/covid19-au-nsw/wsgi.py
```python
from application import create_app
app = create_app()
def main():
app.run(host='0.0.0.0', port=5100, debug=True)
if __name__ == "__main__":
main()
``` |
{
"source": "jmmjsolutions/fixerio_for_pdr",
"score": 2
} |
#### File: fixerio_for_pdr/tests/test_fixer_for_pdr_base.py
```python
import os
from datetime import datetime, timedelta
import pandas as pd
import pytest
from fixerio_for_pdr import Fixer
TEST_API_KEY = "af3f0000fffefddc5d48f5879c0fefe" # Not a real key
TEST_ENV_API_KEY = os.getenv("FIXERIO_API_KEY")
class TestFixerBase(object):
@classmethod
def setup_class(cls):
pass
@pytest.mark.skipif(
TEST_ENV_API_KEY is not None,
reason="The environment variable FIXERIO_API_KEY is set, so Fixer would use it as the api key.",
)
def test_fixer_raises_exception_on_missing_api_key(self):
"""
GIVEN the fixerio api key is not set in the environment or passed as a parameter
WHEN creating a Fixer instance
THEN the ValueError exception must be raised
"""
with pytest.raises(ValueError):
fixer = Fixer()
def test_fixer_uses_api_key_from_env(self):
"""
GIVEN the fixerio api key is set in the environment variable FIXERIO_API_KEY
WHEN creating a Fixer instance
THEN a Fixer instance will be created
"""
os.environ["FIXERIO_API_KEY"] = TEST_API_KEY
fixer = Fixer()
assert fixer.api_key == TEST_API_KEY
def test_fixer_uses_api_key_variable(self):
"""
GIVEN the fixerio api key passed as a keyword argument
WHEN creating a Fixer instance
THEN a Fixer instance will be created
"""
fixer = Fixer(api_key=TEST_API_KEY)
assert fixer.api_key == TEST_API_KEY
def test_fixer_sets_base_url(self):
"""
GIVEN the fixerio api key passed as a keyword argument
WHEN creating a Fixer instance
THEN a Fixer instance url property equals the fixer.io data api url
"""
fixer = Fixer()
assert fixer.url == "http://data.fixer.io/api/"
def test_fixer_sets_start_date_to_today(self):
"""
GIVEN no start date
WHEN creating a Fixer instance
THEN the Fixer instance start date property equals UTC today
"""
fixer = Fixer()
assert fixer.start == datetime.utcnow().date()
def test_fixer_sets_start_date_to_start_parameter(self):
"""
GIVEN a start date
WHEN creating a Fixer instance
THEN the Fixer instance start date property equals the supplied start date
"""
start_date = datetime.utcnow().date() - timedelta(days=5)
fixer = Fixer(start=start_date)
assert fixer.start == start_date
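# Hedged note (not part of the test module): the suite is intended to be run with pytest,
# e.g. `pytest tests/test_fixer_for_pdr_base.py -v`; the first test is skipped
# automatically whenever the FIXERIO_API_KEY environment variable is set.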
``` |
{
"source": "jmmk/boxes",
"score": 3
} |
#### File: jmmk/boxes/boxes.py
```python
from PySide import QtCore, QtGui
import ConfigParser
import keybinder
import signal
import sys
import os
from windows import WindowHelper
CONFIG = os.path.expanduser('~/.boxes')
DEFAULTS = {
'grid_columns': 8,
'grid_rows': 4,
'hotkey': 'Alt+R',
'background_color': 'rgba(75, 77, 81, 255)',
'box_background_color': 'rgba(100, 106, 116, 204)',
'selected_box_background_color': 'rgba(50, 53, 58, 204)'
}
class SelectionGrid(QtGui.QFrame):
reset_grid = QtCore.Signal()
highlight_selection = QtCore.Signal()
def __init__(self, desktop):
super(SelectionGrid, self).__init__()
dimensions = desktop.availableGeometry()
self.screen_width = dimensions.width()
self.screen_height = dimensions.height()
self.windows = WindowHelper()
self.load_config()
keybinder.bind(self.settings['hotkey'], self.toggle)
self.construct_grid()
self.init_grid_ui()
def construct_grid(self):
self.grid = QtGui.QGridLayout(self)
for i in range(1, self.settings['grid_rows'] + 1):
for j in range(1, self.settings['grid_columns'] + 1):
grid_box = GridBox(self, i, j)
self.grid.addWidget(grid_box, i, j)
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
grid_box = self.childAt(event.x(), event.y())
color = self.settings['selected_box_background_color']
grid_box.setStyleSheet('background-color: {color};border:none;'.format(color=color))
row, col = self.get_box_position(grid_box)
self.current_selection = {
'start_row': row,
'start_col': col
}
def mouseReleaseEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
self.windows.resize_window(*self.calculate_resize())
self.hide()
self.reset_grid.emit()
def mouseMoveEvent(self, event):
if event.buttons() & QtCore.Qt.LeftButton:
grid_box = self.childAt(event.x(), event.y())
if grid_box:
row, col = self.get_box_position(grid_box)
self.current_selection['outer_x'] = col
self.current_selection['outer_y'] = row
self.highlight_selection.emit()
def calculate_resize(self):
y1 = self.current_selection['start_row']
y2 = self.current_selection['outer_y']
x1 = self.current_selection['start_col']
x2 = self.current_selection['outer_x']
start_x, end_x = sorted((x1, x2))
start_y, end_y = sorted((y1, y2))
box_size_x = self.screen_width / self.settings['grid_columns']
box_size_y = self.screen_height / self.settings['grid_rows']
x_pos = (start_x - 1) * box_size_x
y_pos = (start_y - 1) * box_size_y
size_x = box_size_x * (end_x - start_x + 1)
size_y = box_size_y * (end_y - start_y + 1)
return (self.active_window_id, x_pos, y_pos, size_x, size_y)
def get_box_position(self, grid_box):
index = self.grid.indexOf(grid_box)
row, col, __, __ = self.grid.getItemPosition(index)
return (row, col)
def init_grid_ui(self):
w = self.settings['grid_window_width']
h = self.settings['grid_window_height']
x = (self.screen_width - w) / 2
y = (self.screen_height - h) / 2
self.setGeometry(x, y, w, h)
self.setWindowTitle('boxes')
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
color = self.settings['background_color']
self.setStyleSheet(
'background-color: {color};'
'border-radius: 5px;'
.format(color=color)
)
def show_grid(self):
self.active_window_id = self.windows.get_active_window()
self.show()
def toggle(self):
if self.isVisible():
self.hide()
else:
self.show_grid()
def load_config(self):
self.settings = DEFAULTS
self.settings['grid_window_width'] = self.screen_width / 3.5
self.settings['grid_window_height'] = self.screen_height / 3.5
if os.path.isfile(CONFIG):
cfg = ConfigParser.ConfigParser()
with open(CONFIG) as f:
cfg.readfp(f)
for option in self.settings:
method = 'getint' if type(self.settings[option]) == int else 'get'
try:
self.settings[option] = getattr(cfg, method)('Grid', option)
except:
continue
class GridBox(QtGui.QFrame):
def __init__(self, parent, row, col):
super(GridBox, self).__init__(parent)
parent.reset_grid.connect(self.reset_defaults)
parent.highlight_selection.connect(self.on_selection_update)
self.row = row
self.col = col
self.bg_color = parent.settings['box_background_color']
self.selected_bg_color = parent.settings['selected_box_background_color']
self.parent = parent
self.setFrameShape(QtGui.QFrame.StyledPanel)
self.setStyleSheet('background-color: {color};border:none;'.format(color=self.bg_color))
def reset_defaults(self):
self.setStyleSheet('background-color: {color};border:none;'.format(color=self.bg_color))
def on_selection_update(self):
start_x = self.parent.current_selection['start_col']
start_y = self.parent.current_selection['start_row']
outer_x = self.parent.current_selection['outer_x']
outer_y = self.parent.current_selection['outer_y']
x_bounds = sorted((start_x, outer_x))
y_bounds = sorted((start_y, outer_y))
selected_x = x_bounds[0] <= self.col <= x_bounds[1]
selected_y = y_bounds[0] <= self.row <= y_bounds[1]
if selected_x and selected_y:
self.setStyleSheet('background-color: {color};border:none;'.format(color=self.selected_bg_color))
else:
self.reset_defaults()
def main():
signal.signal(signal.SIGINT, signal.SIG_DFL)
app = QtGui.QApplication([])
desktop = app.desktop()
grid = SelectionGrid(desktop)
sys.exit(app.exec_())
if __name__ == '__main__':
main()
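# Hedged worked example of calculate_resize (screen size is illustrative): on a
# 1920x1080 screen with the default 8x4 grid, each grid box maps to 240x270 px of screen.
# Dragging from box (row=1, col=1) to (row=2, col=4) therefore yields
#   x_pos = 0, y_pos = 0, size_x = 240 * 4 = 960, size_y = 270 * 2 = 540
# i.e. the active window is snapped to the top-left 960x540 region.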
``` |
{
"source": "jmmL/misc",
"score": 4
} |
#### File: jmmL/misc/credit_card_validator.py
```python
import unittest
class MyCreditCardTests(unittest.TestCase):
def test_bad_sum(self):
self.assertFalse(credit_card_check("2768 3424 2345 2358"))
def test_invalid_format(self):
self.assertFalse(credit_card_check("0000000000000000"))
self.assertFalse(credit_card_check("1876 0954 325009182"))
self.assertFalse(credit_card_check(" 5555 5555 5555 5555"))
self.assertFalse(credit_card_check("0000 0000 0000 000"))
self.assertFalse(credit_card_check(""))
self.assertFalse(credit_card_check("0000 0000"))
self.assertFalse(credit_card_check("0123 4567 89AB EFGH"))
self.assertFalse(credit_card_check("0000 000000000000"))
self.assertFalse(credit_card_check(9384349532970121))
def test_true_cards(self):
self.assertTrue(credit_card_check("9384 3495 3297 0121"))
self.assertTrue(credit_card_check("0123 4567 8902 4568"))
def check_length(card_number):
""" Checks that the length of the str card_number is 19 exactly
:param card_number: str
:return: bool
"""
if len(card_number) == 19:
return True
else:
return False
def check_digits(card_number):
""" Checks that the first 4 chars of the str card_number are digits followed by the 5th char being a space,
and so on. Returns True if these conditions are met, and otherwise returns False
:param card_number: str
:return: bool
"""
for i in range(0, len(card_number)):
# The first 4 chars must be digits (and so on)
if (i + 1) % 5 > 0 and not card_number[i].isdigit():
print("Char wasn't a digit")
return False
# Every 5th char must be a space
elif (i + 1) % 5 == 0 and not card_number[i].isspace():
print("Char wasn't a space")
return False
    # Only report success once every character has been checked
    return True
def check_sum(card_number):
""" This function will return true if the sum of all digits in the string card_number is evenly divisible by
a magic_number, 10
:param card_number: str
:return: bool
"""
total = 0
magic_number = 10
for i in range(0, len(card_number)):
if card_number[i].isdigit():
total += int(card_number[i])
# If the sum of all digits is evenly divisible by 10 (the magic number), then return True, else return False
if total % magic_number == 0:
return True
else:
print("Sum was not evenly divisible by %i (total: %s)" % (magic_number, total))
return False
def credit_card_check(card_number):
""" Validates card numbers to ensure they are of the form "#### #### #### ####" where each # is a digit and the
sum of all digits is divisible evenly by 10
:param card_number: str
:return: bool
"""
# Check that we've been given a card number in the right type (a string)
if type(card_number) is not str:
return False
else:
return check_length(card_number) and check_digits(card_number) and check_sum(card_number)
def main():
print(credit_card_check("9384 3495 3297 0121"))
if __name__ == "__main__":
main()
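# Hedged worked example: for "9384 3495 3297 0121" the format checks pass (19 chars,
# four groups of four digits separated by single spaces) and the digit sum is
# 9+3+8+4+3+4+9+5+3+2+9+7+0+1+2+1 = 70, which is evenly divisible by 10, so
# credit_card_check returns True -- consistent with test_true_cards above.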
``` |
{
"source": "jmm-montiel/Auto-PyTorch",
"score": 2
} |
#### File: components/lr_scheduler/lr_schedulers.py
```python
from autoPyTorch.utils.config_space_hyperparameter import add_hyperparameter, get_hyperparameter
import numpy as np
import math
import torch
import torch.optim.lr_scheduler as lr_scheduler
from torch.optim import Optimizer
import ConfigSpace as CS
import ConfigSpace.hyperparameters as CSH
__author__ = "<NAME>, <NAME> and <NAME>"
__version__ = "0.0.1"
__license__ = "BSD"
class AutoNetLearningRateSchedulerBase(object):
def __new__(cls, optimizer, config):
"""Get a new instance of the scheduler
Arguments:
cls {class} -- Type of scheduler
            optimizer {Optimizer} -- A PyTorch Optimizer
config {dict} -- Sampled lr_scheduler config
Returns:
AutoNetLearningRateSchedulerBase -- The learning rate scheduler object
"""
scheduler = cls._get_scheduler(cls, optimizer, config)
if not hasattr(scheduler, "allows_early_stopping"):
scheduler.allows_early_stopping = True
if not hasattr(scheduler, "snapshot_before_restart"):
scheduler.snapshot_before_restart = False
return scheduler
def _get_scheduler(self, optimizer, config):
raise ValueError('Override the method _get_scheduler and do not call the base class implementation')
@staticmethod
def get_config_space():
return CS.ConfigurationSpace()
class SchedulerNone(AutoNetLearningRateSchedulerBase):
def _get_scheduler(self, optimizer, config):
return NoScheduling(optimizer=optimizer)
class SchedulerStepLR(AutoNetLearningRateSchedulerBase):
"""
Step learning rate scheduler
"""
def _get_scheduler(self, optimizer, config):
return lr_scheduler.StepLR(optimizer=optimizer, step_size=config['step_size'], gamma=config['gamma'], last_epoch=-1)
@staticmethod
def get_config_space(
step_size=(1, 10),
gamma=(0.001, 0.9)
):
cs = CS.ConfigurationSpace()
add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'step_size', step_size)
add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'gamma', gamma)
return cs
class SchedulerExponentialLR(AutoNetLearningRateSchedulerBase):
"""
Exponential learning rate scheduler
"""
def _get_scheduler(self, optimizer, config):
return lr_scheduler.ExponentialLR(optimizer=optimizer, gamma=config['gamma'], last_epoch=-1)
@staticmethod
def get_config_space(
gamma=(0.8, 0.9999)
):
cs = CS.ConfigurationSpace()
add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'gamma', gamma)
return cs
class SchedulerReduceLROnPlateau(AutoNetLearningRateSchedulerBase):
"""
Reduce LR on plateau learning rate scheduler
"""
def _get_scheduler(self, optimizer, config):
return lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
factor=config['factor'],
patience=config['patience'])
@staticmethod
def get_config_space(
factor=(0.05, 0.5),
patience=(3, 10)
):
cs = CS.ConfigurationSpace()
add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'factor', factor)
add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'patience', patience)
return cs
class SchedulerAdaptiveLR(AutoNetLearningRateSchedulerBase):
"""
Adaptive cosine learning rate scheduler
"""
def _get_scheduler(self, optimizer, config):
return AdaptiveLR(optimizer=optimizer,
T_max=config['T_max'],
T_mul=config['T_mult'],
patience=config['patience'],
threshold=config['threshold'])
@staticmethod
def get_config_space(
T_max=(300,1000),
patience=(2,5),
T_mult=(1.0,2.0),
threshold=(0.001, 0.5)
):
cs = CS.ConfigurationSpace()
add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'T_max', T_max)
add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'patience', patience)
add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'T_mult', T_mult)
add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'threshold', threshold)
return cs
class AdaptiveLR(object):
def __init__(self, optimizer, mode='min', T_max=30, T_mul=2.0, eta_min=0, patience=3, threshold=0.1, min_lr=0, eps=1e-8, last_epoch=-1):
if not isinstance(optimizer, Optimizer):
raise TypeError('{} is not an Optimizer'.format(
type(optimizer).__name__))
self.optimizer = optimizer
if last_epoch == -1:
for group in optimizer.param_groups:
group.setdefault('initial_lr', group['lr'])
else:
for i, group in enumerate(optimizer.param_groups):
if 'initial_lr' not in group:
raise KeyError("param 'initial_lr' is not specified "
"in param_groups[{}] when resuming an optimizer".format(i))
self.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))
self.last_epoch = last_epoch
if isinstance(min_lr, list) or isinstance(min_lr, tuple):
if len(min_lr) != len(optimizer.param_groups):
raise ValueError("expected {} min_lrs, got {}".format(
len(optimizer.param_groups), len(min_lr)))
self.min_lrs = list(min_lr)
else:
self.min_lrs = [min_lr] * len(optimizer.param_groups)
self.T_max = T_max
self.T_mul = T_mul
self.eta_min = eta_min
self.current_base_lrs = self.base_lrs
self.metric_values = []
self.threshold = threshold
self.patience = patience
self.steps = 0
def step(self, metrics, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
self.metric_values.append(metrics)
if len(self.metric_values) > self.patience:
self.metric_values = self.metric_values[1:]
if max(self.metric_values) - metrics > self.threshold:
self.current_base_lrs = self.get_lr()
self.steps = 0
else:
self.steps += 1
self.last_metric_value = metrics
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr
def get_lr(self):
        '''
        Overrides get_lr() of the parent scheduler class.
        '''
if self.steps >= self.T_max:
self.T_max = self.T_max * self.T_mul
self.current_base_lrs = self.base_lrs
self.metric_values = []
self.steps = 0
return [self.eta_min + (base_lr - self.eta_min) *
(1 + math.cos(math.pi * self.steps / self.T_max)) / 2
for base_lr in self.current_base_lrs]
class SchedulerCyclicLR(AutoNetLearningRateSchedulerBase):
"""
Cyclic learning rate scheduler
"""
def _get_scheduler(self, optimizer, config):
maf = config['max_factor']
mif = config['min_factor']
cl = config['cycle_length']
r = maf - mif
def l(epoch):
if int(epoch//cl) % 2 == 1:
lr = mif + (r * (float(epoch % cl)/float(cl)))
else:
lr = maf - (r * (float(epoch % cl)/float(cl)))
return lr
return lr_scheduler.LambdaLR(optimizer=optimizer, lr_lambda=l, last_epoch=-1)
@staticmethod
def get_config_space(
max_factor=(1.0, 2),
min_factor=(0.001, 1.0),
cycle_length=(3, 10)
):
cs = CS.ConfigurationSpace()
add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'max_factor', max_factor)
add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'min_factor', min_factor)
add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'cycle_length', cycle_length)
return cs
class SchedulerCosineAnnealingWithRestartsLR(AutoNetLearningRateSchedulerBase):
"""
Cosine annealing learning rate scheduler with warm restarts
"""
def _get_scheduler(self, optimizer, config):
scheduler = CosineAnnealingWithRestartsLR(optimizer, T_max=config['T_max'], T_mult=config['T_mult'],last_epoch=-1)
scheduler.allows_early_stopping = False
scheduler.snapshot_before_restart = True
return scheduler
@staticmethod
def get_config_space(
T_max=(1, 20),
T_mult=(1.0, 2.0)
):
cs = CS.ConfigurationSpace()
add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'T_max', T_max)
add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'T_mult', T_mult)
return cs
class NoScheduling():
def __init__(self, optimizer):
self.optimizer = optimizer
def step(self, epoch):
return
def get_lr(self):
try:
return [self.optimizer.defaults["lr"]]
except:
return [None]
class CosineAnnealingWithRestartsLR(torch.optim.lr_scheduler._LRScheduler):
r"""Copyright: pytorch
Set the learning rate of each parameter group using a cosine annealing
schedule, where :math:`\eta_{max}` is set to the initial lr and
:math:`T_{cur}` is the number of epochs since the last restart in SGDR:
.. math::
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})(1 +
\cos(\frac{T_{cur}}{T_{max}}\pi))
When last_epoch=-1, sets initial lr as lr.
It has been proposed in
`SGDR: Stochastic Gradient Descent with Warm Restarts`_. This implements
the cosine annealing part of SGDR, the restarts and number of iterations multiplier.
Args:
optimizer (Optimizer): Wrapped optimizer.
T_max (int): Maximum number of iterations.
T_mult (float): Multiply T_max by this number after each restart. Default: 1.
eta_min (float): Minimum learning rate. Default: 0.
last_epoch (int): The index of last epoch. Default: -1.
.. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
https://arxiv.org/abs/1608.03983
"""
def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1, T_mult=1):
self.T_max = T_max
self.T_mult = T_mult
self.restart_every = T_max
self.eta_min = eta_min
self.restarts = 0
self.restarted_at = 0
super().__init__(optimizer, last_epoch)
def restart(self):
self.restart_every *= self.T_mult
self.restarted_at = self.last_epoch
def cosine(self, base_lr):
return self.eta_min + (base_lr - self.eta_min) * (1 + math.cos(math.pi * self.step_n / self.restart_every)) / 2
@property
def step_n(self):
return self.last_epoch - self.restarted_at
def get_lr(self):
if self.step_n >= self.restart_every:
self.restart()
return [self.cosine(base_lr) for base_lr in self.base_lrs]
def needs_checkpoint(self):
return self.step_n + 1 >= self.restart_every
class SchedulerAlternatingCosineLR(AutoNetLearningRateSchedulerBase):
"""
Alternating cosine learning rate scheduler
"""
def _get_scheduler(self, optimizer, config):
scheduler = AlternatingCosineLR(optimizer, T_max=config['T_max'], T_mul=config['T_mult'], amplitude_reduction=config['amp_reduction'], last_epoch=-1)
return scheduler
@staticmethod
def get_config_space(
T_max=(1, 20),
T_mult=(1.0, 2.0),
amp_reduction=(0.1,1)
):
cs = CS.ConfigurationSpace()
add_hyperparameter(cs, CSH.UniformIntegerHyperparameter, 'T_max', T_max)
add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'T_mult', T_mult)
add_hyperparameter(cs, CSH.UniformFloatHyperparameter, 'amp_reduction', amp_reduction)
return cs
class AlternatingCosineLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(self, optimizer, T_max, T_mul=1, amplitude_reduction=0.9, eta_min=0, last_epoch=-1):
'''
Here last_epoch actually means last_step since the
learning rate is decayed after each batch step.
'''
self.T_max = T_max
self.T_mul = T_mul
self.eta_min = eta_min
self.cumulative_time = 0
self.amplitude_mult = amplitude_reduction
self.base_lr_mult = 1
self.frequency_mult = 1
self.time_offset = 0
self.last_step = 0
super(AlternatingCosineLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
        '''
        Overrides get_lr() of the parent scheduler class.
        '''
if self.last_epoch >= self.T_max:
self.T_max = self.T_max * self.T_mul
self.time_offset = self.T_max / 2
self.last_epoch = 0
self.base_lr_mult *= self.amplitude_mult
self.frequency_mult = 2
self.cumulative_time = 0
return [self.eta_min + (base_lr * self.base_lr_mult - self.eta_min) *
(1 + math.cos(math.pi * (self.time_offset + self.cumulative_time) / self.T_max * self.frequency_mult)) / 2
for base_lr in self.base_lrs]
```
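A minimal usage sketch for the wrapper classes above (assuming they are in scope, e.g. imported from this module); the model, optimizer and the sampled hyperparameters are illustrative and not taken from the repository:
```python
import torch
from torch.optim import SGD

model = torch.nn.Linear(10, 2)
optimizer = SGD(model.parameters(), lr=0.1)

# Sample a hyperparameter configuration from the scheduler's own config space
config = SchedulerStepLR.get_config_space().sample_configuration()

# The wrapper's __new__ returns a plain torch.optim.lr_scheduler.StepLR instance,
# annotated with the allows_early_stopping / snapshot_before_restart flags
scheduler = SchedulerStepLR(optimizer, config)

for epoch in range(10):
    optimizer.step()   # training step omitted; only the scheduler wiring is shown
    scheduler.step()
```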
#### File: pipeline/nodes/ensemble.py
```python
__author__ = "<NAME>, <NAME> and <NAME>"
__version__ = "0.0.1"
__license__ = "BSD"
import os
from autoPyTorch.pipeline.base.pipeline_node import PipelineNode
from autoPyTorch.utils.config.config_option import ConfigOption
from autoPyTorch.pipeline.nodes.metric_selector import MetricSelector, AutoNetMetric, no_transform
from autoPyTorch.pipeline.nodes import OneHotEncoding, OptimizationAlgorithm
from autoPyTorch.pipeline.nodes.metric_selector import AutoNetMetric
from autoPyTorch.utils.ensemble import build_ensemble, read_ensemble_prediction_file, combine_predictions, combine_test_predictions, \
ensemble_logger, start_server
from hpbandster.core.result import logged_results_to_HBS_result
import json
import asyncio
from hpbandster.core.nameserver import nic_name_to_host
import time
def predictions_for_ensemble(y_true, y_pred):
return y_pred
class EnableComputePredictionsForEnsemble(PipelineNode):
"""Put this Node in the training pipeline after the metric selector node"""
def fit(self, pipeline_config, additional_metrics, refit, loss_penalty):
if refit or pipeline_config["ensemble_size"] == 0 or loss_penalty > 0:
return dict()
return {'additional_metrics': additional_metrics + [
AutoNetMetric(name="predictions_for_ensemble",
metric=predictions_for_ensemble,
loss_transform=no_transform,
ohe_transform=no_transform)]}
class SavePredictionsForEnsemble(PipelineNode):
"""Put this Node in the training pipeline after the training node"""
def fit(self, pipeline_config, loss, info, refit, loss_penalty):
if refit or pipeline_config["ensemble_size"] == 0 or loss_penalty > 0:
return {"loss": loss, "info": info}
if "val_predictions_for_ensemble" in info:
predictions = info["val_predictions_for_ensemble"]
del info["val_predictions_for_ensemble"]
else:
raise ValueError("You need to specify some kind of validation for ensemble building")
del info["train_predictions_for_ensemble"]
combinator = {
"combinator": combine_predictions,
"data": predictions
}
if not "test_predictions_for_ensemble" in info:
return {"loss": loss, "info": info, "predictions_for_ensemble": combinator}
test_combinator = {
"combinator": combine_test_predictions,
"data": info["test_predictions_for_ensemble"]
}
del info["test_predictions_for_ensemble"]
return {"loss": loss, "info": info, "predictions_for_ensemble": combinator, "test_predictions_for_ensemble": test_combinator}
def predict(self, Y):
return {"Y": Y}
def get_pipeline_config_options(self):
options = [
ConfigOption("ensemble_server_credentials", default=None)
]
return options
class BuildEnsemble(PipelineNode):
"""Put this node after the optimization algorithm node"""
def fit(self, pipeline_config, optimized_hyperparameter_config, budget, loss, info, refit=None):
if refit or pipeline_config["ensemble_size"] == 0 or pipeline_config["task_id"] not in [-1, 1]:
return {"optimized_hyperparameter_config": optimized_hyperparameter_config, "budget": budget}
filename = os.path.join(pipeline_config["result_logger_dir"], 'predictions_for_ensemble.npy')
optimize_metric = self.pipeline[MetricSelector.get_name()].metrics[pipeline_config["optimize_metric"]]
y_transform = self.pipeline[OneHotEncoding.get_name()].complete_y_tranformation
result = logged_results_to_HBS_result(pipeline_config["result_logger_dir"])
all_predictions, labels, model_identifiers, _ = read_ensemble_prediction_file(filename=filename, y_transform=y_transform)
ensemble_selection, ensemble_configs = build_ensemble(result=result,
optimize_metric=optimize_metric, ensemble_size=pipeline_config["ensemble_size"],
all_predictions=all_predictions, labels=labels, model_identifiers=model_identifiers,
only_consider_n_best=pipeline_config["ensemble_only_consider_n_best"],
sorted_initialization_n_best=pipeline_config["ensemble_sorted_initialization_n_best"])
return {"optimized_hyperparameter_config": optimized_hyperparameter_config, "budget": budget,
"ensemble": ensemble_selection,
"ensemble_configs": ensemble_configs,
"loss": loss,
"info": info
}
def predict(self, Y):
return {"Y": Y}
def get_pipeline_config_options(self):
options = [
ConfigOption("ensemble_size", default=3, type=int, info="Build a ensemble of well performing autonet configurations. 0 to disable."),
ConfigOption("ensemble_only_consider_n_best", default=0, type=int, info="Only consider the n best models for ensemble building."),
ConfigOption("ensemble_sorted_initialization_n_best", default=0, type=int, info="Initialize ensemble with n best models.")
]
return options
class EnsembleServer(PipelineNode):
"""Put this node in front of the optimization algorithm node"""
def fit(self, pipeline_config, result_loggers, shutdownables, refit=False):
if refit or pipeline_config["ensemble_size"] == 0:
return dict()
es_credentials_file = os.path.join(pipeline_config["working_dir"], "es_credentials_%s.json" % pipeline_config["run_id"])
# start server
if pipeline_config["task_id"] != 1 or pipeline_config["run_worker_on_master_node"]:
host = nic_name_to_host(OptimizationAlgorithm.get_nic_name(pipeline_config))
host, port, process = start_server(host)
pipeline_config["ensemble_server_credentials"] = (host, port)
shutdownables = shutdownables + [process]
result_loggers = [ensemble_logger(directory=pipeline_config["result_logger_dir"], overwrite=True)] + result_loggers
return {"result_loggers": result_loggers, "shutdownables": shutdownables}
```
#### File: pipeline/nodes/optimization_algorithm.py
```python
import numpy as np
import os
import time
import shutil
import netifaces
import traceback
import logging
from hpbandster.core.nameserver import NameServer, nic_name_to_host
from hpbandster.core.result import logged_results_to_HBS_result
from autoPyTorch.pipeline.base.sub_pipeline_node import SubPipelineNode
from autoPyTorch.pipeline.base.pipeline import Pipeline
from autoPyTorch.pipeline.nodes import MetricSelector
from autoPyTorch.utils.config.config_option import ConfigOption, to_bool
from autoPyTorch.utils.config.config_condition import ConfigCondition
from autoPyTorch.core.hpbandster_extensions.bohb_ext import BOHBExt
from autoPyTorch.core.hpbandster_extensions.hyperband_ext import HyperBandExt
from autoPyTorch.core.worker import AutoNetWorker
from autoPyTorch.components.training.budget_types import BudgetTypeTime, BudgetTypeEpochs, BudgetTypeTrainingTime
import copy
class OptimizationAlgorithm(SubPipelineNode):
def __init__(self, optimization_pipeline_nodes):
"""OptimizationAlgorithm pipeline node.
It will run either the optimization algorithm (BOHB, Hyperband - defined in config) or start workers
Each worker will run the provided optimization_pipeline and will return the output
of the pipeline_result_node to the optimization algorithm
Train:
The optimization_pipeline will get the following inputs:
{hyperparameter_config, pipeline_config, X_train, Y_train, X_valid, Y_valid, budget, budget_type}
The pipeline_result_node has to provide the following outputs:
- 'loss': the optimization value (minimize)
- 'info': dict containing info for the respective training process
Predict:
The optimization_pipeline will get the following inputs:
{pipeline_config, X}
The pipeline_result_node has to provide the following outputs:
- 'Y': result of prediction for 'X'
Note: predict will not call the optimization algorithm
Arguments:
            optimization_pipeline {Pipeline} -- pipeline that will be optimized (hyperparameter)
            pipeline_result_node {PipelineNode} -- pipeline node that provides the results of the optimization_pipeline
"""
super(OptimizationAlgorithm, self).__init__(optimization_pipeline_nodes)
self.algorithms = {"bohb": BOHBExt,
"hyperband": HyperBandExt}
self.budget_types = dict()
self.budget_types["time"] = BudgetTypeTime
self.budget_types["epochs"] = BudgetTypeEpochs
self.budget_types["training_time"] = BudgetTypeTrainingTime
def fit(self, pipeline_config, X_train, Y_train, X_valid, Y_valid, result_loggers, dataset_info, shutdownables, refit=None):
"""Run the optimization algorithm.
Arguments:
pipeline_config {dict} -- The configuration of the pipeline.
X_train {array} -- The data
Y_train {array} -- The data
X_valid {array} -- The data
Y_valid {array} -- The data
result_loggers {list} -- List of loggers that log the result
dataset_info {DatasetInfo} -- Object with information about the dataset
shutdownables {list} -- List of objects that need to shutdown when optimization is finished.
Keyword Arguments:
refit {dict} -- dict containing information for refitting. None if optimization run should be started. (default: {None})
Returns:
dict -- Summary of optimization run.
"""
logger = logging.getLogger('autonet')
res = None
run_id, task_id = pipeline_config['run_id'], pipeline_config['task_id']
# Use tensorboard logger
if pipeline_config['use_tensorboard_logger'] and not refit:
import tensorboard_logger as tl
directory = os.path.join(pipeline_config['result_logger_dir'], "worker_logs_" + str(task_id))
os.makedirs(directory, exist_ok=True)
tl.configure(directory, flush_secs=5)
# Only do refitting
if (refit is not None):
logger.info("Start Refitting")
loss_info_dict = self.sub_pipeline.fit_pipeline(
hyperparameter_config=refit["hyperparameter_config"], pipeline_config=pipeline_config,
X_train=X_train, Y_train=Y_train, X_valid=X_valid, Y_valid=Y_valid,
budget=refit["budget"], rescore=refit["rescore"], budget_type=self.budget_types[pipeline_config['budget_type']],
optimize_start_time=time.time(), refit=True, hyperparameter_config_id=None, dataset_info=dataset_info)
logger.info("Done Refitting")
return {'optimized_hyperparameter_config': refit["hyperparameter_config"],
'budget': refit['budget'],
'loss': loss_info_dict['loss'],
'info': loss_info_dict['info']}
# Start Optimization Algorithm
try:
ns_credentials_dir, tmp_models_dir, network_interface_name = self.prepare_environment(pipeline_config)
# start nameserver if not on cluster or on master node in cluster
if task_id in [1, -1]:
NS = self.get_nameserver(run_id, task_id, ns_credentials_dir, network_interface_name)
ns_host, ns_port = NS.start()
if task_id != 1 or pipeline_config["run_worker_on_master_node"]:
self.run_worker(pipeline_config=pipeline_config, run_id=run_id, task_id=task_id, ns_credentials_dir=ns_credentials_dir,
network_interface_name=network_interface_name, X_train=X_train, Y_train=Y_train, X_valid=X_valid, Y_valid=Y_valid,
dataset_info=dataset_info, shutdownables=shutdownables)
# start BOHB if not on cluster or on master node in cluster
res = None
if task_id in [1, -1]:
self.run_optimization_algorithm(pipeline_config=pipeline_config, run_id=run_id, ns_host=ns_host,
ns_port=ns_port, nameserver=NS, task_id=task_id, result_loggers=result_loggers,
dataset_info=dataset_info, logger=logger)
res = self.parse_results(pipeline_config)
except Exception as e:
print(e)
traceback.print_exc()
finally:
self.clean_up(pipeline_config, ns_credentials_dir, tmp_models_dir)
if res:
return res
return {'optimized_hyperparameter_config': dict(), 'budget': 0, 'loss': float('inf'), 'info': dict()}
def predict(self, pipeline_config, X):
"""Run the predict pipeline.
Arguments:
pipeline_config {dict} -- The configuration of the pipeline
X {array} -- The data
Returns:
dict -- The predicted values in a dictionary
"""
result = self.sub_pipeline.predict_pipeline(pipeline_config=pipeline_config, X=X)
return {'Y': result['Y']}
# OVERRIDE
def get_pipeline_config_options(self):
options = [
ConfigOption("run_id", default="0", type=str, info="Unique id for each run."),
ConfigOption("task_id", default=-1, type=int, info="ID for each worker, if you run AutoNet on a cluster. Set to -1, if you run it locally."),
ConfigOption("algorithm", default="bohb", type=str, choices=list(self.algorithms.keys()), info="Algorithm to use for config sampling."),
ConfigOption("budget_type", default="time", type=str, choices=list(self.budget_types.keys())),
ConfigOption("min_budget", default=lambda c: self.budget_types[c["budget_type"]].default_min_budget, type=float, depends=True, info="Min budget for fitting configurations."),
ConfigOption("max_budget", default=lambda c: self.budget_types[c["budget_type"]].default_max_budget, type=float, depends=True, info="Max budget for fitting configurations."),
ConfigOption("max_runtime",
default=lambda c: ((-int(np.log(c["min_budget"] / c["max_budget"]) / np.log(c["eta"])) + 1) * c["max_budget"])
if c["budget_type"] == "time" else float("inf"),
type=float, depends=True, info="Total time for the run."),
ConfigOption("num_iterations",
default=lambda c: (-int(np.log(c["min_budget"] / c["max_budget"]) / np.log(c["eta"])) + 1)
if c["budget_type"] == "epochs" else float("inf"),
type=float, depends=True, info="Number of successive halving iterations."),
ConfigOption("eta", default=3, type=float, info='eta parameter of Hyperband.'),
ConfigOption("min_workers", default=1, type=int),
ConfigOption("working_dir", default=".", type="directory"),
ConfigOption("network_interface_name", default=self.get_default_network_interface_name(), type=str),
ConfigOption("memory_limit_mb", default=1000000, type=int),
ConfigOption("use_tensorboard_logger", default=False, type=to_bool),
ConfigOption("run_worker_on_master_node", default=True, type=to_bool),
ConfigOption("use_pynisher", default=True, type=to_bool)
]
return options
# OVERRIDE
def get_pipeline_config_conditions(self):
def check_runtime(pipeline_config):
return pipeline_config["budget_type"] != "time" or pipeline_config["max_runtime"] >= pipeline_config["max_budget"]
return [
ConfigCondition.get_larger_equals_condition("max budget must be greater than or equal to min budget", "max_budget", "min_budget"),
ConfigCondition("When time is used as budget, the max_runtime must be larger than the max_budget", check_runtime)
]
def get_default_network_interface_name(self):
"""Get the default network interface name
Returns:
str -- The default network interface name
"""
try:
return netifaces.gateways()['default'][netifaces.AF_INET][1]
except:
return 'lo'
def prepare_environment(self, pipeline_config):
"""Create necessary folders and get network interface name
Arguments:
pipeline_config {dict} -- The configuration of the pipeline
Returns:
            tuple -- path to created directories and network interface name
"""
if not os.path.exists(pipeline_config["working_dir"]) and pipeline_config['task_id'] in [1, -1]:
try:
os.mkdir(pipeline_config["working_dir"])
except:
pass
tmp_models_dir = os.path.join(pipeline_config["working_dir"], "tmp_models_" + str(pipeline_config['run_id']))
ns_credentials_dir = os.path.abspath(os.path.join(pipeline_config["working_dir"], "ns_credentials_" + str(pipeline_config['run_id'])))
network_interface_name = self.get_nic_name(pipeline_config)
if os.path.exists(tmp_models_dir) and pipeline_config['task_id'] in [1, -1]:
shutil.rmtree(tmp_models_dir) # not used right now
if os.path.exists(ns_credentials_dir) and pipeline_config['task_id'] in [1, -1]:
shutil.rmtree(ns_credentials_dir)
return ns_credentials_dir, tmp_models_dir, network_interface_name
def clean_up(self, pipeline_config, tmp_models_dir, ns_credentials_dir):
"""Remove created folders
Arguments:
pipeline_config {dict} -- The pipeline config
tmp_models_dir {[type]} -- The path to the temporary models (not used right now)
ns_credentials_dir {[type]} -- The path to the nameserver credentials
"""
if pipeline_config['task_id'] in [1, -1]:
# Delete temporary files
if os.path.exists(tmp_models_dir):
shutil.rmtree(tmp_models_dir)
if os.path.exists(ns_credentials_dir):
shutil.rmtree(ns_credentials_dir)
def get_nameserver(self, run_id, task_id, ns_credentials_dir, network_interface_name):
"""Get the namesever object
Arguments:
run_id {str} -- The id of the run
task_id {int} -- An id for the worker
ns_credentials_dir {str} -- Path to ns credentials
network_interface_name {str} -- The network interface name
Returns:
NameServer -- The NameServer object
"""
if not os.path.isdir(ns_credentials_dir):
try:
os.mkdir(ns_credentials_dir)
except:
pass
return NameServer(run_id=run_id, nic_name=network_interface_name, working_directory=ns_credentials_dir)
def get_optimization_algorithm_instance(self, config_space, run_id, pipeline_config, ns_host, ns_port, loggers, previous_result=None):
"""Get an instance of the optimization algorithm
Arguments:
config_space {ConfigurationSpace} -- The config space to optimize.
run_id {str} -- An Id for the current run.
pipeline_config {dict} -- The configuration of the pipeline.
ns_host {str} -- Nameserver host.
ns_port {int} -- Nameserver port.
loggers {list} -- Loggers to log the results.
Keyword Arguments:
previous_result {Result} -- A previous result to warmstart the search (default: {None})
Returns:
Master -- An optimization algorithm.
"""
optimization_algorithm = self.algorithms[pipeline_config["algorithm"]]
kwargs = {"configspace": config_space, "run_id": run_id,
"eta": pipeline_config["eta"], "min_budget": pipeline_config["min_budget"], "max_budget": pipeline_config["max_budget"],
"host": ns_host, "nameserver": ns_host, "nameserver_port": ns_port,
"result_logger": combined_logger(*loggers),
"ping_interval": 10**6,
"working_directory": pipeline_config["working_dir"],
"previous_result": previous_result}
hb = optimization_algorithm(**kwargs)
return hb
def parse_results(self, pipeline_config):
"""Parse the results of the optimization run
Arguments:
pipeline_config {dict} -- The configuration of the pipeline.
Raises:
RuntimeError: An Error occurred when parsing the results.
Returns:
dict -- Dictionary summarizing the results
"""
try:
res = logged_results_to_HBS_result(pipeline_config["result_logger_dir"])
id2config = res.get_id2config_mapping()
incumbent_trajectory = res.get_incumbent_trajectory(bigger_is_better=False, non_decreasing_budget=False)
except Exception as e:
raise RuntimeError("Error parsing results. Check results.json and output for more details. An empty results.json is usually caused by a misconfiguration of AutoNet.")
if (len(incumbent_trajectory['config_ids']) == 0):
return dict()
final_config_id = incumbent_trajectory['config_ids'][-1]
final_budget = incumbent_trajectory['budgets'][-1]
best_run = [r for r in res.get_runs_by_id(final_config_id) if r.budget == final_budget][0]
return {'optimized_hyperparameter_config': id2config[final_config_id]['config'],
'budget': final_budget,
'loss': best_run.loss,
'info': best_run.info}
def run_worker(self, pipeline_config, run_id, task_id, ns_credentials_dir, network_interface_name,
X_train, Y_train, X_valid, Y_valid, dataset_info, shutdownables):
""" Run the AutoNetWorker
Arguments:
pipeline_config {dict} -- The configuration of the pipeline
run_id {str} -- An id for the run
task_id {int} -- An id for the worker
ns_credentials_dir {str} -- path to nameserver credentials
network_interface_name {str} -- the name of the network interface
X_train {array} -- The data
Y_train {array} -- The data
X_valid {array} -- The data
Y_valid {array} -- The data
dataset_info {DatasetInfo} -- Object describing the dataset
shutdownables {list} -- A list of objects that need to shutdown when the optimization is finished
"""
if not task_id == -1:
time.sleep(5)
while not os.path.isdir(ns_credentials_dir):
time.sleep(5)
host = nic_name_to_host(network_interface_name)
worker = AutoNetWorker(pipeline=self.sub_pipeline, pipeline_config=pipeline_config,
X_train=X_train, Y_train=Y_train, X_valid=X_valid, Y_valid=Y_valid, dataset_info=dataset_info,
budget_type=self.budget_types[pipeline_config['budget_type']],
max_budget=pipeline_config["max_budget"],
host=host, run_id=run_id,
id=task_id, shutdownables=shutdownables,
use_pynisher=pipeline_config["use_pynisher"])
worker.load_nameserver_credentials(ns_credentials_dir)
# run in background if not on cluster
worker.run(background=(task_id <= 1))
def run_optimization_algorithm(self, pipeline_config, run_id, ns_host, ns_port, nameserver, task_id, result_loggers,
dataset_info, logger):
"""
Arguments:
pipeline_config {dict} -- The configuration of the pipeline
run_id {str} -- An id for the run
ns_host {str} -- Nameserver host.
ns_port {int} -- Nameserver port.
            nameserver {NameServer} -- The nameserver object.
            task_id {int} -- An id for the worker
            result_loggers {list} -- Loggers that log the results.
dataset_info {DatasetInfo} -- Object describing the dataset
logger {list} -- Loggers to log the results.
"""
config_space = self.pipeline.get_hyperparameter_search_space(dataset_info=dataset_info, **pipeline_config)
logger.info("[AutoNet] Start " + pipeline_config["algorithm"])
# initialize optimization algorithm
if pipeline_config['use_tensorboard_logger']:
result_loggers.append(tensorboard_logger())
HB = self.get_optimization_algorithm_instance(config_space=config_space, run_id=run_id,
pipeline_config=pipeline_config, ns_host=ns_host, ns_port=ns_port, loggers=result_loggers)
# start algorithm
min_num_workers = pipeline_config["min_workers"] if task_id != -1 else 1
reduce_runtime = pipeline_config["max_budget"] if pipeline_config["budget_type"] == "time" else 0
HB.run_until(runtime=(pipeline_config["max_runtime"] - reduce_runtime),
n_iterations=pipeline_config["num_iterations"],
min_n_workers=min_num_workers)
HB.shutdown(shutdown_workers=True)
nameserver.shutdown()
@staticmethod
def get_nic_name(pipeline_config):
"""Get the nic name from the pipeline config"""
return pipeline_config["network_interface_name"] or (netifaces.interfaces()[1] if len(netifaces.interfaces()) > 1 else "lo")
def clean_fit_data(self):
super(OptimizationAlgorithm, self).clean_fit_data()
self.sub_pipeline.root.clean_fit_data()
class tensorboard_logger(object):
def __init__(self):
self.start_time = time.time()
self.incumbent = float('inf')
def new_config(self, config_id, config, config_info):
pass
def __call__(self, job):
import tensorboard_logger as tl
# id = job.id
budget = job.kwargs['budget']
# config = job.kwargs['config']
timestamps = job.timestamps
result = job.result
exception = job.exception
time_step = int(timestamps['finished'] - self.start_time)
if result is not None:
tl.log_value('BOHB/all_results', result['loss'] * -1, time_step)
if result['loss'] < self.incumbent:
self.incumbent = result['loss']
tl.log_value('BOHB/incumbent_results', self.incumbent * -1, time_step)
class combined_logger(object):
def __init__(self, *loggers):
self.loggers = loggers
def new_config(self, config_id, config, config_info):
for logger in self.loggers:
logger.new_config(config_id, config, config_info)
def __call__(self, job):
for logger in self.loggers:
logger(job)
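# Hedged sketch (not from the repository): combined_logger simply fans a finished job out
# to several hpbandster-style result loggers, e.g.
#
#   from hpbandster.core.result import json_result_logger
#   logger = combined_logger(json_result_logger(directory="logs", overwrite=True),
#                            tensorboard_logger())
#
# and can then be passed wherever BOHB/Hyperband expects a single result_logger.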
``` |
{
"source": "jmmnn/MiniMonitor",
"score": 3
} |
#### File: jmmnn/MiniMonitor/miniMonitor.py
```python
import pandas as pd
import smtplib
import time
#### Mailer Config####
server = smtplib.SMTP('smtp.gmail.com', 587) #can use 'localhost' without port or authentication
server.starttls()
server.login("<EMAIL>", "YourPassword") #enter your gmail credentials
##### Monitoring task
def myMonitor (csvLogFile):
try:
df = pd.read_csv(csvLogFile, sep='\t', encoding = "ISO-8859-1") #csv to dataframe
    except:
        print("Error reading the file")
        return
errors = df[df['Status'] == "FinishedFail"] ###For testing: #FinishedSuccess #FinishedFail #randomMessage
#print(df[df['Status'] == "FinishedFail"])
if len(errors.index) > 0:
print ('these are the # of errors: ' , len(errors.index))
messageBody = str(errors.TaskName)
try:
server.sendmail("<EMAIL>", "<EMAIL>", messageBody)
server.quit()
print('Message sent!')
except:
print('failure to connect to mail server')
else:
print('No errors found, no message sent.')
#### Execute the monitor every 60 seconds.
while True:
myMonitor('NYVM0571_TaskExecution_Scheduler.txt')
time.sleep(60)
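# Hedged note on the expected input (inferred from the code, not documented upstream):
# the scheduler log is a tab-separated file with at least a 'Status' and a 'TaskName'
# column; every row whose Status is "FinishedFail" triggers one e-mail listing the
# failed task names.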
``` |
{
"source": "jmmogollon/Orbital",
"score": 2
} |
#### File: jmmogollon/Orbital/solarsys.py
```python
from controls import *
def main():
solarsystem = solarSystem()
# set what is displayed by default
solarsystem.setDefaultFeatures(INNERPLANET|ORBITS|SATELLITE|LABELS|OUTERPLANET|LIT_SCENE)
solarsystem.addTo(makeEcliptic(solarsystem, color.white))
solarsystem.addTo(planet(solarsystem, 'mercury', color.green, INNERPLANET, INNERPLANET, PLANET_SZ_CORRECTION))
solarsystem.addTo(planet(solarsystem, 'venus', color.yellow, INNERPLANET, INNERPLANET, PLANET_SZ_CORRECTION))
earth = planet(solarsystem, 'earth', color.cyan, INNERPLANET, INNERPLANET, PLANET_SZ_CORRECTION)
solarsystem.addTo(earth)
mars = planet(solarsystem, 'mars', color.red, INNERPLANET, INNERPLANET, PLANET_SZ_CORRECTION)
solarsystem.addTo(mars)
solarsystem.addTo(planet(solarsystem, 'jupiter', color.magenta, OUTERPLANET, GASGIANT, PLANET_SZ_CORRECTION))
solarsystem.addTo(planet(solarsystem, 'saturn', color.cyan, OUTERPLANET, GASGIANT, PLANET_SZ_CORRECTION))
solarsystem.addTo(planet(solarsystem, 'uranus', color.yellow, OUTERPLANET, GASGIANT, PLANET_SZ_CORRECTION))
solarsystem.addTo(planet(solarsystem, 'neptune', color.orange, OUTERPLANET, GASGIANT, PLANET_SZ_CORRECTION))
pluto = planet(solarsystem, 'pluto', color.green, DWARFPLANET, DWARFPLANET, DWARFPLANET_SZ_CORRECTION) #OUTERPLANET, DWARFPLANET)
solarsystem.addTo(pluto)
solarsystem.setRings(solarsystem, "saturn", [((0.8,0.8,0.8), 0.9), ((0.5,0.5,0.5), 0.2)]) #[color.gray(0.7), (0.5,0.5,0.5)])
solarsystem.setRings(solarsystem, "uranus", [((0.1,0.1,0.8), 0.1), ((0.2,0.2,0.7), 0.3)])
# generate DWARF planets
solarsystem.addTo(dwarfPlanet(solarsystem, 'eris', color.yellow))
solarsystem.addTo(dwarfPlanet(solarsystem, 'makemake', color.magenta))
solarsystem.addTo(dwarfPlanet(solarsystem, 'sedna', color.orange))
solarsystem.addTo(dwarfPlanet(solarsystem, 'haumea', color.white))
# generate satellites
solarsystem.addTo(satellite(solarsystem, 'moon', color.white, earth))
#solarsystem.addTo(satellite(solarsystem, 'phobos', color.red, mars))
#solarsystem.addTo(satellite(solarsystem, 'deimos', color.white, mars))
#solarsystem.addTo(satellite(solarsystem, 'charon', color.white, pluto))
# generate Belts
solarsystem.addTo(makeBelt(solarsystem, 'kuiper', 'Kuiper Belt', KUIPER_BELT, color.cyan, 2, 4))
solarsystem.addTo(makeBelt(solarsystem, 'asteroid', 'Asteroid Belt', ASTEROID_BELT, color.white, 2, 2))
solarsystem.addTo(makeBelt(solarsystem, 'inneroort', 'Inner Oort Cloud', INNER_OORT_CLOUD, color.white, 2, 5))
solarsystem.addJTrojans(makeJtrojan(solarsystem, 'jupiterTrojan', 'Jupiter Trojans', JTROJANS, color.green, 2, 5, 'jupiter'))
MAX_OBJECTS = 1000
loadBodies(solarsystem, PHA, "200m+PHA_orbital_elements.txt", MAX_OBJECTS)
loadBodies(solarsystem, BIG_ASTEROID,"200km+asteroids_orbital_elements.txt", MAX_OBJECTS)
loadBodies(solarsystem, COMET, "200m+comets_orbital_elements.txt", MAX_OBJECTS)
loadBodies(solarsystem, TRANS_NEPT, "transNeptunian_objects.txt", MAX_OBJECTS)
#loadBodies(solarsystem, SATELLITE, "satellites.txt", MAX_OBJECTS)
solarsystem.drawAllBodiesTrajectory()
glbRefresh(solarsystem, False)
# Start control window
print wx.version()
#print julian(1, 1, 2000)
ex = wx.App(False)
cw = controlWindow(solarsystem)
cw.Show()
while True:
sleep(2)
if __name__ == '__main__' :
main()
``` |
{
"source": "jmmshn/api",
"score": 2
} |
#### File: mp_api/charge_density/client.py
```python
from mp_api.core.client import BaseRester, MPRestError
from mp_api.tasks.client import TaskRester
class ChargeDensityRester(BaseRester):
suffix = "charge_density"
def get_charge_density_from_material_id(self, material_id: str):
"""
Get charge density data for a given Materials Project ID.
Arguments:
material_id (str): Materials project ID
Returns:
chgcar (dict): Pymatgen CHGCAR object.
"""
result = self._make_request(
"{}/?fields=data&all_fields=false".format(material_id)
)
if result.get("data", None) is not None:
return result["data"]
else:
raise MPRestError("No document found")
def get_calculation_details(self, material_id: str):
"""
Get charge density calculations details for a given Materials Project ID.
Arguments:
material_id (str): Materials project ID
Returns:
calc_details (dict): Dictionary containing INCAR, POSCAR, and KPOINTS data for the DFT calculation.
"""
base_endpoint = "/".join(self.endpoint.split("/")[0:3])
task_rester = TaskRester(api_key=self.api_key, endpoint=base_endpoint)
result = task_rester.get_task_from_material_id(
material_id, fields=["task_id", "orig_inputs"]
).get("data")[0]
task_rester.session.close()
return result
```
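A hedged usage sketch of the rester above; the API key, endpoint URL and material id (`mp-149`) are illustrative placeholders, not values taken from the repository:
```python
rester = ChargeDensityRester(api_key="your-api-key",
                             endpoint="https://api.materialsproject.org/")

# CHGCAR-like data for one material, plus the INCAR/POSCAR/KPOINTS of the calculation
chgcar = rester.get_charge_density_from_material_id("mp-149")
calc_details = rester.get_calculation_details("mp-149")
```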
#### File: mp_api/dielectric/client.py
```python
from typing import List, Optional, Tuple
from collections import defaultdict
from mp_api.core.client import BaseRester, MPRestError
import warnings
class DielectricRester(BaseRester):
suffix = "dielectric"
def get_dielectric_from_material_id(self, material_id: str):
"""
Get dielectric data for a given Materials Project ID.
Arguments:
material_id (str): Materials project ID
Returns:
results (Dict): Dictionary containing dielectric data.
"""
result = self._make_request("{}/?all_fields=true".format(material_id))
if len(result.get("data", [])) > 0:
return result
else:
raise MPRestError("No document found")
def search_dielectric_docs(
self,
e_total: Optional[Tuple[float, float]] = None,
e_ionic: Optional[Tuple[float, float]] = None,
e_static: Optional[Tuple[float, float]] = None,
n: Optional[Tuple[float, float]] = None,
num_chunks: Optional[int] = None,
chunk_size: int = 100,
fields: Optional[List[str]] = None,
):
"""
Query dielectric docs using a variety of search criteria.
Arguments:
e_total (Tuple[float,float]): Minimum and maximum total dielectric constant to consider.
e_ionic (Tuple[float,float]): Minimum and maximum ionic dielectric constant to consider.
e_static (Tuple[float,float]): Minimum and maximum electronic dielectric constant to consider.
n (Tuple[float,float]): Minimum and maximum refractive index to consider.
num_chunks (int): Maximum number of chunks of data to yield. None will yield all possible.
chunk_size (int): Number of data entries per chunk.
fields (List[str]): List of fields in DielectricDoc to return data for.
Default is material_id only.
Yields:
([dict]) List of dictionaries containing data for entries defined in 'fields'.
Defaults to Materials Project IDs only.
"""
query_params = defaultdict(dict) # type: dict
if chunk_size <= 0 or chunk_size > 100:
warnings.warn("Improper chunk size given. Setting value to 100.")
chunk_size = 100
if e_total:
query_params.update({"e_total_min": e_total[0], "e_total_max": e_total[1]})
if e_ionic:
query_params.update({"e_ionic_min": e_ionic[0], "e_ionic_max": e_ionic[1]})
if e_static:
query_params.update(
{"e_static_min": e_static[0], "e_static_max": e_static[1]}
)
if n:
query_params.update({"n_min": n[0], "n_max": n[1]})
if fields:
query_params.update({"fields": ",".join(fields)})
query_params = {
entry: query_params[entry]
for entry in query_params
if query_params[entry] is not None
}
query_params.update({"limit": chunk_size, "skip": 0})
count = 0
while True:
query_params["skip"] = count * chunk_size
results = self.query(query_params).get("data", [])
if not any(results) or (num_chunks is not None and count == num_chunks):
break
count += 1
yield results
```
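A short sketch of the chunked generator interface above; the constructor argument and search windows are placeholders.
```python
# Hedged sketch: iterate over chunks of dielectric documents.
from mp_api.dielectric.client import DielectricRester

rester = DielectricRester(api_key="your_api_key")

for chunk in rester.search_dielectric_docs(
    e_total=(1.0, 50.0),      # total dielectric constant window
    n=(1.0, 3.0),             # refractive index window
    chunk_size=50,
    num_chunks=2,             # stop after two chunks
    fields=["task_id"],
):
    for doc in chunk:
        print(doc["task_id"])
```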
#### File: mp_api/dielectric/resources.py
```python
from mp_api.core.resource import Resource
from mp_api.dielectric.models import DielectricDoc
from mp_api.core.query_operator import PaginationQuery, SortQuery, SparseFieldsQuery
from mp_api.dielectric.query_operators import DielectricQuery
def dielectric_resource(dielectric_store):
resource = Resource(
dielectric_store,
DielectricDoc,
query_operators=[
DielectricQuery(),
SortQuery(),
PaginationQuery(),
SparseFieldsQuery(
DielectricDoc, default_fields=["task_id", "last_updated"]
),
],
tags=["Dielectric"],
)
return resource
```
#### File: mp_api/dois/client.py
```python
from mp_api.core.client import BaseRester, MPRestError
class DOIRester(BaseRester):
suffix = "doi"
def get_eos_from_material_id(self, material_id: str):
"""
Get DOI and reference data for a given Materials Project ID.
Arguments:
material_id (str): Materials project ID
Returns:
results (Dict): Dictionary containing DOI and reference data.
"""
result = self._make_request("{}/?all_fields=true".format(material_id))
if len(result.get("data", [])) > 0:
return result
else:
raise MPRestError("No document found")
```
#### File: mp_api/electrodes/models.py
```python
from monty.json import MontyDecoder
from pymatgen.core.periodic_table import ElementBase
from typing import Dict, List
from datetime import datetime
from pydantic import BaseModel, Field, validator
from mp_api.materials.models import Structure
class WorkingIon(ElementBase):
Li = "Li"
Ca = "Ca"
Mg = "Mg"
class VoltageStep(BaseModel):
"""
Data for individual voltage steps.
Note: Each voltage step is represented as a sub_electrode (ConversionElectrode/InsertionElectrode)
object to gain access to some basic statistics about the voltage step
"""
max_delta_volume: str = Field(
None,
description="Volume changes in % for a particular voltage step using: "
"max(charge, discharge) / min(charge, discharge) - 1",
)
average_voltage: float = Field(
None, description="The average voltage in V for a particular voltage step.",
)
min_voltage: float = Field(
None, description="The min voltage in V for a particular voltage step.",
)
capacity_grav: float = Field(None, description="Gravimetric capacity in mAh/g.")
capacity_vol: float = Field(None, description="Volumetric capacity in mAh/cc.")
energy_grav: float = Field(
None, description="Gravimetric energy (Specific energy) in Wh/kg."
)
energy_vol: float = Field(
None, description="Volumetric energy (Energy Density) in Wh/l."
)
fracA_charge: float = Field(
None, description="Atomic fraction of the working ion in the charged state."
)
fracA_discharge: float = Field(
None, description="Atomic fraction of the working ion in the discharged state."
)
class InsertionVoltageStep(VoltageStep):
"""
Features specific to insertion electrode
"""
stability_charge: float = Field(
None, description="The energy above hull of the charged material."
)
stability_discharge: float = Field(
None, description="The energy above hull of the discharged material."
)
class InsertionElectrodeDoc(InsertionVoltageStep):
task_id: str = Field(None, description="The id for this battery document.")
host_structure: Structure = Field(
None, description="Host structure (structure without the working ion)",
)
voltage_pairs: List[InsertionVoltageStep] = Field(
None, description="Returns all the Voltage Steps",
)
working_ion: WorkingIon = Field(
None, description="The working ion as an Element object",
)
num_steps: float = Field(
None,
description="The number of distinct voltage steps in from fully charge to "
"discharge based on the stable intermediate states",
)
max_voltage_step: float = Field(
None, description="Maximum absolute difference in adjacent voltage steps"
)
last_updated: datetime = Field(
None,
description="Timestamp for the most recent calculation for this Material document",
)
# Make sure that the datetime field is properly formatted
@validator("last_updated", pre=True)
def last_updated_dict_ok(cls, v):
return MontyDecoder().process_decoded(v)
class ConversionVoltageStep(VoltageStep):
"""
Features specific to conversion electrode
"""
reactions: Dict = Field(
None, description="The reaction the characterizes that particular voltage step."
)
class ConversionElectrode(ConversionVoltageStep):
task_id: str = Field(None, description="The id for this battery document.")
adj_pairs: List[ConversionVoltageStep] = Field(
None, description="Returns all the adjacent Voltage Steps",
)
working_ion: WorkingIon = Field(
None, description="The working ion as an Element object",
)
num_steps: float = Field(
None,
description="The number of distinct voltage steps in from fully charge to "
"discharge based on the stable intermediate states",
)
max_voltage_step: float = Field(
None, description="Maximum absolute difference in adjacent voltage steps"
)
last_updated: datetime = Field(
None,
description="Timestamp for the most recent calculation for this Material document",
)
# Make sure that the datetime field is properly formatted
@validator("last_updated", pre=True)
def last_updated_dict_ok(cls, v):
return MontyDecoder().process_decoded(v)
```
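Since every field above is optional, a document can be built directly from keyword arguments; a minimal sketch with made-up values:
```python
# Hedged sketch: instantiate and serialize an insertion electrode document.
from mp_api.electrodes.models import InsertionElectrodeDoc

doc = InsertionElectrodeDoc(
    task_id="mp-000000",       # placeholder battery id
    average_voltage=3.4,
    capacity_grav=150.0,
    energy_grav=510.0,
    stability_charge=0.01,
    stability_discharge=0.02,
)
print(doc.dict(exclude_none=True))  # pydantic v1-style serialization
```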
#### File: mp_api/gb/client.py
```python
from typing import List, Optional, Tuple
from collections import defaultdict
import warnings
from mp_api.core.client import BaseRester
from mp_api.gb.models import GBTypeEnum
class GBRester(BaseRester):
suffix = "gb"
def search_gb_docs(
self,
task_ids: Optional[List[str]] = None,
gb_energy: Optional[Tuple[float, float]] = None,
separation_energy: Optional[Tuple[float, float]] = None,
rotation_angle: Optional[Tuple[float, float]] = None,
sigma: Optional[int] = None,
type: Optional[GBTypeEnum] = None,
chemsys: Optional[str] = None,
num_chunks: Optional[int] = None,
chunk_size: int = 100,
fields: Optional[List[str]] = None,
):
"""
Query grain boundary docs using a variety of search criteria.
Arguments:
task_ids (List[str]): List of Materials Project IDs to query with.
gb_energy (Tuple[float,float]): Minimum and maximum grain boundary energy in J/m² to consider.
separation_energy (Tuple[float,float]): Minimum and maximum work of separation energy in J/m² to consider.
rotation_angle (Tuple[float,float]): Minimum and maximum rotation angle in degrees to consider.
sigma (int): Sigma value of grain boundary.
type (GBTypeEnum): Grain boundary type.
chemsys (str): Dash-delimited string of elements in the material.
num_chunks (int): Maximum number of chunks of data to yield. None will yield all possible.
chunk_size (int): Number of data entries per chunk.
fields (List[str]): List of fields in GBDoc to return data for.
Default is material_id only.
Yields:
([dict]) List of dictionaries containing data for entries defined in 'fields'.
Defaults to Materials Project IDs and last updated data.
"""
query_params = defaultdict(dict) # type: dict
if chunk_size <= 0 or chunk_size > 100:
warnings.warn("Improper chunk size given. Setting value to 100.")
chunk_size = 100
if task_ids:
query_params.update({"task_ids": ",".join(task_ids)})
if gb_energy:
query_params.update(
{"gb_energy_min": gb_energy[0], "gb_energy_max": gb_energy[1]}
)
if separation_energy:
query_params.update(
{
"w_sep_energy_min": separation_energy[0],
"w_sep_energy_max": separation_energy[1],
}
)
if rotation_angle:
query_params.update(
{
"rotation_angle_min": rotation_angle[0],
"rotation_angle_max": rotation_angle[1],
}
)
if sigma:
query_params.update({"sigma": sigma})
if type:
query_params.update({"type": type})
if chemsys:
query_params.update({"chemsys": chemsys})
if fields:
query_params.update({"fields": ",".join(fields)})
query_params = {
entry: query_params[entry]
for entry in query_params
if query_params[entry] is not None
}
query_params.update({"limit": chunk_size, "skip": 0})
count = 0
while True:
query_params["skip"] = count * chunk_size
results = self.query(query_params).get("data", [])
if not any(results) or (num_chunks is not None and count == num_chunks):
break
count += 1
yield results
```
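A minimal usage sketch; the API key and search windows are placeholders, and the requested fields mirror those defined in the GBDoc model below.
```python
# Hedged sketch: stream grain boundary documents in chunks.
from mp_api.gb.client import GBRester

rester = GBRester(api_key="your_api_key")

for chunk in rester.search_gb_docs(
    gb_energy=(0.0, 1.5),      # J/m^2
    sigma=5,
    chemsys="Fe",
    chunk_size=25,
    num_chunks=1,
    fields=["task_id", "gb_energy", "rotation_angle"],
):
    for doc in chunk:
        print(doc["task_id"], doc["gb_energy"])
```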
#### File: mp_api/gb/models.py
```python
from typing import List, Optional
from pydantic import BaseModel, Field, validator
from enum import Enum
from datetime import datetime
from monty.json import MontyDecoder
from mp_api.materials.models.core import Lattice, PeriodicSite, Structure
class GBTypeEnum(Enum):
"""
Grain boundary types
"""
tilt = "tilt"
twist = "twist"
class GrainBoundary(BaseModel):
"""
Model for a pymatgen grain boundary object
"""
charge: Optional[float] = Field(None, description="Total charge")
lattice: Lattice = Field(None, description="Lattice for this structure")
sites: List[PeriodicSite] = Field(
None, description="List of sites in this structure"
)
init_cell: Structure = Field(
None, description="Initial bulk structure to form the GB"
)
rotation_axis: List[int] = Field(None, description="Rotation axis")
rotation_angle: float = Field(None, description="Rotation angle in degrees")
gb_plane: List[int] = Field(None, description="Grain boundary plane")
join_plane: List[int] = Field(
None, description="Joining plane of the second grain",
)
vacuum_thickness: float = Field(
None,
description="The thickness of vacuum inserted between two grains of the GB",
)
ab_shift: List[float] = Field(
None, description="The relative shift along a, b vectors"
)
oriented_unit_cell: Structure = Field(
None, description="Oriented unit cell of the bulk init_cell"
)
class Config:
extra = "allow"
class GBDoc(BaseModel):
"""
Model for a document containing grain boundary data
"""
task_id: str = Field(
None,
description="The Materials Project ID of the material. This comes in the form: mp-******",
)
sigma: int = Field(
None, description="Sigma value of the boundary",
)
type: GBTypeEnum = Field(
None, description="Grain boundary type",
)
rotation_axis: List[int] = Field(
None, description="Rotation axis",
)
gb_plane: List[int] = Field(
None, description="Grain boundary plane",
)
rotation_angle: float = Field(
None, description="Rotation angle in degrees",
)
gb_energy: float = Field(
None, description="Grain boundary energy in J/m^2",
)
initial_structure: GrainBoundary = Field(
None, description="Initial grain boundary structure"
)
final_structure: GrainBoundary = Field(
None, description="Final grain boundary structure"
)
pretty_formula: str = Field(None, description="Reduced formula of the material")
w_sep: float = Field(None, description="Work of separation in J/m^2")
cif: str = Field(None, description="CIF file of the structure")
chemsys: str = Field(
None, description="Dash-delimited string of elements in the material"
)
last_updated: datetime = Field(
None,
description="Timestamp for the most recent calculation for this Material document",
)
# Make sure that the datetime field is properly formatted
@validator("last_updated", pre=True)
def last_updated_dict_ok(cls, v):
return MontyDecoder().process_decoded(v)
```
#### File: mp_api/materials/query_operators.py
```python
from typing import Optional
from fastapi import Query
from mp_api.core.query_operator import STORE_PARAMS, QueryOperator
from mp_api.materials.utils import formula_to_criteria
from mp_api.materials.models.core import CrystalSystem
from pymatgen.core.periodic_table import Element
from collections import defaultdict
class FormulaQuery(QueryOperator):
"""
Factory method to generate a dependency for querying by formula with wild cards
"""
def query(
self,
formula: Optional[str] = Query(
None,
description="Query by formula including anonymized formula or by including wild cards",
),
elements: Optional[str] = Query(
None,
description="Query by elements in the material composition as a comma-separated list",
),
) -> STORE_PARAMS:
crit = {}
if formula:
crit.update(formula_to_criteria(formula))
if elements:
element_list = [Element(e) for e in elements.strip().split(",")]
crit["elements"] = {"$all": [str(el) for el in element_list]}
return {"criteria": crit}
class DeprecationQuery(QueryOperator):
"""
Method to generate a deprecation state query
"""
def query(
self,
deprecated: Optional[bool] = Query(
None,
description="Whether the material is marked as deprecated",
),
) -> STORE_PARAMS:
crit = {}
if deprecated:
crit.update({"deprecated": deprecated})
return {"criteria": crit}
class MinMaxQuery(QueryOperator):
"""
Method to generate a query for quantities with a definable min and max
"""
def query(
self,
nsites_max: Optional[int] = Query(
None,
description="Maximum value for the number of sites",
),
nsites_min: Optional[int] = Query(
None,
description="Minimum value for the number of sites",
),
volume_max: Optional[float] = Query(
None,
description="Maximum value for the cell volume",
),
volume_min: Optional[float] = Query(
None,
description="Minimum value for the cell volume",
),
density_max: Optional[float] = Query(
None,
description="Maximum value for the density",
),
density_min: Optional[float] = Query(
None,
description="Minimum value for the density",
),
) -> STORE_PARAMS:
crit = defaultdict(dict) # type: dict
entries = {
"nsites": [nsites_min, nsites_max],
"volume": [volume_min, volume_max],
"density": [density_min, density_max],
} # type: dict
for entry in entries:
if entries[entry][0]:
crit[entry]["$gte"] = entries[entry][0]
if entries[entry][1]:
crit[entry]["$lte"] = entries[entry][1]
return {"criteria": crit}
class SymmetryQuery(QueryOperator):
"""
Method to generate a query on symmetry information
"""
def query(
self,
crystal_system: Optional[CrystalSystem] = Query(
None,
description="Crystal system of the material",
),
spacegroup_number: Optional[int] = Query(
None,
description="Space group number of the material",
),
spacegroup_symbol: Optional[str] = Query(
None,
description="Space group symbol of the material",
),
) -> STORE_PARAMS:
crit = {} # type: dict
if crystal_system:
crit.update({"symmetry.crystal_system": str(crystal_system.value)})
if spacegroup_number:
crit.update({"symmetry.number": spacegroup_number})
if spacegroup_symbol:
crit.update({"symmetry.symbol": spacegroup_symbol})
return {"criteria": crit}
class MultiTaskIDQuery(QueryOperator):
"""
Method to generate a query for different task_ids
"""
def query(
self,
task_ids: Optional[str] = Query(
None, description="Comma-separated list of task_ids to query on"
),
) -> STORE_PARAMS:
crit = {}
if task_ids:
crit.update({"task_ids": {"$in": task_ids.split(",")}})
return {"criteria": crit}
```
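The operators above only assemble MongoDB-style criteria, so they can be exercised directly. A sketch, assuming the operators take no constructor arguments; note that FastAPI normally injects the parameters, so unused ones are passed explicitly as `None` here to keep the `Query(...)` default objects from being treated as values.
```python
# Hedged sketch: build criteria dicts by calling the operators by hand.
from mp_api.materials.query_operators import MinMaxQuery, SymmetryQuery

crit = MinMaxQuery().query(
    nsites_min=1, nsites_max=20,
    volume_min=None, volume_max=100.0,
    density_min=None, density_max=None,
)
print(crit)
# criteria -> {"nsites": {"$gte": 1, "$lte": 20}, "volume": {"$lte": 100.0}}

sym = SymmetryQuery().query(
    crystal_system=None, spacegroup_number=225, spacegroup_symbol=None,
)
print(sym)  # -> {"criteria": {"symmetry.number": 225}}
```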
#### File: mp_api/molecules/resources.py
```python
from mp_api.core.resource import Resource
from mp_api.molecules.models import MoleculesDoc
from mp_api.core.query_operator import PaginationQuery, SortQuery, SparseFieldsQuery
from mp_api.molecules.query_operators import (
MoleculeBaseQuery,
MoleculeElementsQuery,
MoleculeFormulaQuery,
)
from mp_api.search.query_operators import SearchTaskIDsQuery
def molecules_resource(molecules_store):
resource = Resource(
molecules_store,
MoleculesDoc,
query_operators=[
MoleculeBaseQuery(),
MoleculeElementsQuery(),
MoleculeFormulaQuery(),
SearchTaskIDsQuery(),
SortQuery(),
PaginationQuery(),
SparseFieldsQuery(MoleculesDoc, default_fields=["task_id"]),
],
tags=["Molecules"],
)
return resource
```
#### File: mp_api/robocrys/resources.py
```python
from fastapi import Query
from mp_api.core.resource import Resource
from mp_api.robocrys.models import RobocrysDoc
def robo_resource(robo_store):
def custom_robo_prep(self):
async def query_robo_text(
keywords: str = Query(
...,
description="Comma delimited string keywords to search robocrystallographer description text with",
),
skip: int = Query(0, description="Number of entries to skip in the search"),
limit: int = Query(
100,
description="Max number of entries to return in a single query. Limited to 100",
),
):
pipeline = [
{
"$search": {
"index": "description",
"regex": {
"query": [word + ".*" for word in keywords.split(",")],
"path": "description",
"allowAnalyzedField": True,
},
}
},
{
"$project": {
"_id": 0,
"task_id": 1,
"description": 1,
"condensed_structure": 1,
"last_updates": 1,
"search_score": {"$meta": "searchScore"},
}
},
{"$sort": {"search_score": -1}},
{"$skip": skip},
{"$limit": limit},
]
self.store.connect()
data = list(self.store._collection.aggregate(pipeline, allowDiskUse=True))
response = {"data": data}
return response
self.router.get(
"/text_search/",
response_model=self.response_model,
response_model_exclude_unset=True,
response_description="Find robocrystallographer documents through text search.",
tags=self.tags,
)(query_robo_text)
resource = Resource(
robo_store,
RobocrysDoc,
tags=["Robocrystallographer"],
custom_endpoint_funcs=[custom_robo_prep],
enable_default_search=False,
)
return resource
```
#### File: mp_api/substrates/client.py
```python
from typing import List, Optional, Tuple
from collections import defaultdict
import warnings
from mp_api.core.client import BaseRester
class SubstratesRester(BaseRester):
suffix = "substrates"
def search_substrates_docs(
self,
film_id: Optional[str] = None,
substrate_id: Optional[str] = None,
substrate_formula: Optional[str] = None,
film_orientation: Optional[List[int]] = None,
substrate_orientation: Optional[List[int]] = None,
area: Optional[Tuple[float, float]] = None,
energy: Optional[Tuple[float, float]] = None,
num_chunks: Optional[int] = None,
chunk_size: int = 100,
fields: Optional[List[str]] = None,
):
"""
Query substrate docs using a variety of search criteria.
Arguments:
film_id (str): Materials Project ID of the film material.
substrate_id (str): Materials Project ID of the substrate material.
substrate_formula (str): Reduced formula of the substrate material.
film_orientation (List[int]): Vector indicating the surface orientation of the film material.
substrate_orientation (List[int]): Vector indicating the surface orientation of the substrate material.
area (Tuple[float,float]): Minimum and maximum area in Å² to consider for the minimum coincident
interface area range.
energy (Tuple[float,float]): Minimum and maximum energy in meV to consider for the elastic energy range.
num_chunks (int): Maximum number of chunks of data to yield. None will yield all possible.
chunk_size (int): Number of data entries per chunk.
fields (List[str]): List of fields in SubstratesDoc to return data for.
Default is the film_id and substrate_id only.
Yields:
([dict]) List of dictionaries containing data for entries defined in 'fields'.
Defaults to Materials Project IDs for the film and substrate only.
"""
query_params = defaultdict(dict) # type: dict
if chunk_size <= 0 or chunk_size > 100:
warnings.warn("Improper chunk size given. Setting value to 100.")
chunk_size = 100
if film_id:
query_params.update({"film_id": film_id})
if substrate_id:
query_params.update({"substrate_id": substrate_id})
if substrate_formula:
query_params.update({"substrate_formula": substrate_formula})
if film_orientation:
query_params.update(
{"film_orientation": ",".join([str(i) for i in film_orientation])}
)
if substrate_orientation:
query_params.update(
{
"substrate_orientation": ",".join(
[str(i) for i in substrate_orientation]
)
}
)
if area:
query_params.update({"area_min": area[0], "area_max": area[1]})
if energy:
query_params.update({"energy_min": energy[0], "energy_max": energy[1]})
if fields:
query_params.update({"fields": ",".join(fields)})
query_params = {
entry: query_params[entry]
for entry in query_params
if query_params[entry] is not None
}
query_params.update({"limit": chunk_size, "skip": 0})
count = 0
while True:
query_params["skip"] = count * chunk_size
results = self.query(query_params).get("data", [])
if not any(results) or (num_chunks is not None and count == num_chunks):
break
count += 1
yield results
```
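A short usage sketch with placeholder arguments, relying on the default fields (film and substrate IDs):
```python
# Hedged sketch: stream substrate matches for a hypothetical film material.
from mp_api.substrates.client import SubstratesRester

rester = SubstratesRester(api_key="your_api_key")

for chunk in rester.search_substrates_docs(
    film_id="mp-149",          # hypothetical film material
    area=(0.0, 400.0),         # coincident interface area window
    energy=(0.0, 50.0),        # elastic energy window in meV
    chunk_size=50,
    num_chunks=1,
):
    for doc in chunk:
        print(doc)
```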
#### File: mp_api/surface_properties/client.py
```python
from typing import List, Optional, Tuple
from collections import defaultdict
import warnings
from mp_api.core.client import BaseRester, MPRestError
class SurfacePropertiesRester(BaseRester):
suffix = "surface_properties"
def get_surface_properties_from_material_id(self, material_id: str):
"""
Get surface properties data for a given Materials Project ID.
Arguments:
material_id (str): Materials project ID
Returns:
results (Dict): Dictionary containing surface properties data.
"""
result = self._make_request("{}/?all_fields=true".format(material_id))
if len(result.get("data", [])) > 0:
return result
else:
raise MPRestError("No document found")
def search_surface_properties_docs(
self,
weighted_surface_energy: Optional[Tuple[float, float]] = None,
weighted_work_function: Optional[Tuple[float, float]] = None,
surface_energy_anisotropy: Optional[Tuple[float, float]] = None,
shape_factor: Optional[Tuple[float, float]] = None,
has_reconstructed: Optional[bool] = None,
num_chunks: Optional[int] = None,
chunk_size: int = 100,
fields: Optional[List[str]] = None,
):
"""
Query surface properties docs using a variety of search criteria.
Arguments:
weighted_surface_energy (Tuple[float,float]): Minimum and maximum weighted surface energy in J/m² to
consider.
weighted_work_function (Tuple[float,float]): Minimum and maximum weighted work function in eV to consider.
surface_energy_anisotropy (Tuple[float,float]): Minimum and maximum surface energy anisotropy values to
consider.
shape_factor (Tuple[float,float]): Minimum and maximum shape factor values to consider.
has_reconstructed (bool): Whether the entry has any reconstructed surfaces.
num_chunks (int): Maximum number of chunks of data to yield. None will yield all possible.
chunk_size (int): Number of data entries per chunk.
fields (List[str]): List of fields in the surface properties document to return data for.
Default is material_id only.
Yields:
([dict]) List of dictionaries containing data for entries defined in 'fields'.
Defaults to Materials Project IDs only.
"""
query_params = defaultdict(dict) # type: dict
if chunk_size <= 0 or chunk_size > 100:
warnings.warn("Improper chunk size given. Setting value to 100.")
chunk_size = 100
if weighted_surface_energy:
query_params.update(
{
"weighted_surface_energy_min": weighted_surface_energy[0],
"weighted_surface_energy_max": weighted_surface_energy[1],
}
)
if weighted_work_function:
query_params.update(
{
"weighted_work_function_min": weighted_work_function[0],
"weighted_work_function_max": weighted_work_function[1],
}
)
if surface_energy_anisotropy:
query_params.update(
{
"surface_anisotropy_min": surface_energy_anisotropy[0],
"surface_anisotropy_max": surface_energy_anisotropy[1],
}
)
if shape_factor:
query_params.update(
{
"shape_factor_min": shape_factor[0],
"shape_factor_max": shape_factor[1],
}
)
if has_reconstructed:
query_params.update({"has_reconstructed": has_reconstructed})
if fields:
query_params.update({"fields": ",".join(fields)})
query_params = {
entry: query_params[entry]
for entry in query_params
if query_params[entry] is not None
}
query_params.update({"limit": chunk_size, "skip": 0})
count = 0
while True:
query_params["skip"] = count * chunk_size
results = self.query(query_params).get("data", [])
if not any(results) or (num_chunks is not None and count == num_chunks):
break
count += 1
yield results
```
#### File: mp_api/synthesis/query_operators.py
```python
from typing import Optional
from fastapi import Query
from mp_api.core.query_operator import STORE_PARAMS, QueryOperator
from pymatgen.core import Composition
from collections import defaultdict
class SynthFormulaQuery(QueryOperator):
"""
Method to generate a query for synthesis data using a chemical formula
"""
def query(
self,
formula: Optional[str] = Query(
None,
description="Chemical formula of the material.",
),
) -> STORE_PARAMS:
crit = defaultdict(dict) # type: dict
if formula:
reduced_formula = Composition(formula).get_reduced_formula_and_factor()[0]
crit["formula"] = reduced_formula
return {"criteria": crit}
```
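The operator reduces whatever formula it receives via pymatgen before querying; a quick sketch of that behaviour:
```python
# Hedged sketch: formula reduction used by SynthFormulaQuery.
from pymatgen.core import Composition
from mp_api.synthesis.query_operators import SynthFormulaQuery

print(Composition("Fe4O6").get_reduced_formula_and_factor())  # ('Fe2O3', 2.0)
print(SynthFormulaQuery().query(formula="Fe4O6"))
# -> {'criteria': defaultdict(<class 'dict'>, {'formula': 'Fe2O3'})}
```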
#### File: mp_api/wulff/resources.py
```python
from mp_api.core.resource import Resource
from mp_api.wulff.models import WulffDoc
from mp_api.core.query_operator import PaginationQuery, SparseFieldsQuery
def wulff_resource(wulff_store):
resource = Resource(
wulff_store,
WulffDoc,
query_operators=[
PaginationQuery(),
SparseFieldsQuery(WulffDoc, default_fields=["task_id"]),
],
tags=["Surface Properties"],
enable_default_search=False,
)
return resource
```
#### File: mp_api/xas/client.py
```python
from typing import List, Optional
from pymatgen.core.periodic_table import Element
from mp_api.core.client import BaseRester, MPRestError
from mp_api.xas.models import Edge, XASType
class XASRester(BaseRester):
suffix = "xas"
def get_available_elements(
self,
edge: Optional[Edge] = None,
spectrum_type: Optional[XASType] = None,
absorbing_element: Optional[Element] = None,
required_elements: Optional[List[Element]] = None,
):
return [str(e) for e in Element]
def get_xas_doc(self, xas_id: str):
# TODO do some checking here for sub-components
query_params = {"all_fields": True}
result = self._query_resource(query_params, suburl=xas_id)
if len(result.get("data", [])) > 0:
return result["data"][0]
else:
raise MPRestError("No document found")
def search_xas_docs(
self,
edge: Optional[Edge] = None,
absorbing_element: Optional[Element] = None,
required_elements: Optional[List[Element]] = None,
formula: Optional[str] = None,
task_ids: Optional[List[str]] = None,
num_chunks: Optional[int] = None,
chunk_size: int = 100,
fields: Optional[List[str]] = None,
):
query_params = {
"edge": str(edge.value) if edge else None,
"absorbing_element": str(absorbing_element) if absorbing_element else None,
"formula": formula,
} # type: dict
if task_ids is not None:
query_params["task_ids"] = ",".join(task_ids)
if required_elements:
query_params["elements"] = ",".join([str(el) for el in required_elements])
if fields is not None:
query_params["fields"] = ",".join(fields)
query_params.update({"limit": chunk_size, "skip": 0})
count = 0
while True:
query_params["skip"] = count * chunk_size
results = self._query_resource(query_params).get("data", [])
if not any(results) or (num_chunks is not None and count == num_chunks):
break
count += 1
yield results
def count_xas_docs(
self,
edge: Optional[Edge] = None,
absorbing_element: Optional[Element] = None,
required_elements: Optional[List[Element]] = None,
formula: Optional[str] = None,
):
query_params = {
"edge": str(edge.value) if edge else None,
"absorbing_element": str(absorbing_element) if absorbing_element else None,
"formula": formula,
}
if required_elements:
query_params["elements"] = ",".join([str(el) for el in required_elements])
query_params["limit"] = "1"
result = self._query_resource(query_params)
return result.get("meta", {}).get("total", 0)
```
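A minimal usage sketch; the API key and formula are placeholders, and the requested field names mirror the defaults in the XAS resource definition.
```python
# Hedged sketch: count, then stream, XAS documents for a formula.
from mp_api.xas.client import XASRester

rester = XASRester(api_key="your_api_key")

print(rester.count_xas_docs(formula="Fe2O3"))

for chunk in rester.search_xas_docs(
    formula="Fe2O3", chunk_size=50, fields=["xas_id", "edge"]
):
    for doc in chunk:
        print(doc["xas_id"], doc["edge"])
```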
#### File: mp_api/xas/resources.py
```python
from mp_api.core.resource import Resource
from mp_api.xas.models import XASDoc
from mp_api.core.query_operator import PaginationQuery, SortQuery, SparseFieldsQuery
from mp_api.materials.query_operators import FormulaQuery
from mp_api.xas.query_operator import XASQuery, XASTaskIDQuery
def xas_resource(xas_store):
resource = Resource(
xas_store,
XASDoc,
query_operators=[
FormulaQuery(),
XASQuery(),
XASTaskIDQuery(),
SortQuery(),
PaginationQuery(),
SparseFieldsQuery(
XASDoc,
default_fields=[
"xas_id",
"task_id",
"edge",
"absorbing_element",
"formula_pretty",
"spectrum_type",
"last_updated",
],
),
],
tags=["XAS"],
)
return resource
```
#### File: tests/core/test_xray_middleware.py
```python
import asyncio
from aws_xray_sdk import global_sdk_config
import pytest
from starlette.applications import Starlette
from starlette.requests import Request
from starlette.responses import PlainTextResponse
from starlette.exceptions import HTTPException
from starlette.testclient import TestClient
from starlette.routing import Route
from starlette.middleware import Middleware
from aws_xray_sdk.core.emitters.udp_emitter import UDPEmitter
from aws_xray_sdk.core.async_context import AsyncContext
from aws_xray_sdk.core.models import http
from .xray_utils import get_new_stubbed_recorder
from mp_api.core.xray_middleware import XRayMiddleware
class CustomStubbedEmitter(UDPEmitter):
"""
Custom stubbed emitter which stores all segments instead of the last one
"""
def __init__(self, daemon_address="127.0.0.1:2000"):
super(CustomStubbedEmitter, self).__init__(daemon_address)
self.local = []
def send_entity(self, entity):
self.local.append(entity)
def pop(self):
try:
return self.local.pop(0)
except IndexError:
return None
class ServerTest(object):
async def handle_ok(self, request: Request) -> PlainTextResponse:
"""
Handle / request
"""
if "content_length" in request.query_params:
headers = {"Content-Length": request.query_params["content_length"]}
else:
headers = None
return PlainTextResponse(content="ok", headers=headers)
async def handle_error(self, request: Request) -> PlainTextResponse:
"""
Handle /error which returns a 404
"""
return PlainTextResponse(content="not found", status_code=404)
async def handle_unauthorized(self, request: Request) -> PlainTextResponse:
"""
Handle /unauthorized which returns a 401
"""
raise HTTPException(status_code=401)
async def handle_exception(self, request: Request) -> PlainTextResponse:
"""
Handle /exception which raises a KeyError
"""
return {}["key"]
async def handle_delay(self, request: Request) -> PlainTextResponse:
"""
Handle /delay request
"""
await asyncio.sleep(0.3)
return PlainTextResponse(content="ok")
@pytest.fixture
def recorder():
"""
Clean up context storage before and after each test run
"""
loop = asyncio.get_event_loop()
xray_recorder = get_new_stubbed_recorder()
xray_recorder.configure(
service="test", sampling=False, context=AsyncContext(loop=loop)
)
xray_recorder.clear_trace_entities()
yield xray_recorder
global_sdk_config.set_sdk_enabled(True)
xray_recorder.clear_trace_entities()
@pytest.fixture
def client(recorder):
test_object = ServerTest()
routes = [
Route("/", test_object.handle_ok),
Route("/error", test_object.handle_error),
Route("/exception", test_object.handle_exception),
Route("/unauthorized", test_object.handle_unauthorized),
Route("/delay", test_object.handle_delay),
]
app = Starlette(
routes=routes, middleware=[Middleware(XRayMiddleware, recorder=recorder)]
)
return TestClient(app=app)
def test_ok_reg(client, recorder):
"""
Test a normal response
:param client: AioHttp test client fixture
:param loop: Eventloop fixture
:param recorder: X-Ray recorder fixture
"""
resp = client.get("/")
assert resp.status_code == 200
segment = recorder.emitter.pop()
assert not segment.in_progress
request = segment.http["request"]
response = segment.http["response"]
assert request["method"] == "GET"
assert request["url"] == "http://testserver/"
assert response["status"] == 200
def test_ok_x_forwarded_for(client, recorder):
"""
Test a normal response with x_forwarded_for headers
:param test_client: AioHttp test client fixture
:param loop: Eventloop fixture
:param recorder: X-Ray recorder fixture
"""
resp = client.get("/", headers={"X-Forwarded-For": "foo"})
assert resp.status_code == 200
segment = recorder.emitter.pop()
assert segment.http["request"]["client_ip"] == "foo"
assert segment.http["request"]["x_forwarded_for"]
def test_ok_content_length(client, recorder):
"""
Test a normal response with content length as response header
:param test_client: AioHttp test client fixture
:param loop: Eventloop fixture
:param recorder: X-Ray recorder fixture
"""
resp = client.get("/?content_length=100")
assert resp.status_code == 200
segment = recorder.emitter.pop()
assert segment.http["response"]["content_length"] == 100
def test_error(client, recorder):
"""
Test a 4XX response
:param test_client: AioHttp test client fixture
:param loop: Eventloop fixture
:param recorder: X-Ray recorder fixture
"""
resp = client.get("/error")
assert resp.status_code == 404
segment = recorder.emitter.pop()
assert not segment.in_progress
assert segment.error
request = segment.http["request"]
response = segment.http["response"]
assert request["method"] == "GET"
assert request["url"] == "http://testserver/error"
assert request["client_ip"] == "testclient:50000"
assert response["status"] == 404
def test_exception(client, recorder):
"""
Test handling an exception
:param test_client: AioHttp test client fixture
:param loop: Eventloop fixture
:param recorder: X-Ray recorder fixture
"""
exc = None
try:
client.get("/exception")
except Exception as e:
exc = e
assert exc is not None
segment = recorder.emitter.pop()
assert not segment.in_progress
assert segment.fault
request = segment.http["request"]
response = segment.http["response"]
exception = segment.cause["exceptions"][0]
assert request["method"] == "GET"
assert request["url"] == "http://testserver/exception"
assert request["client_ip"] == "testclient:50000"
assert response["status"] == 500
assert exception.type == "KeyError"
def test_unauthorized(client, recorder):
"""
Test a 401 response
:param test_client: AioHttp test client fixture
:param loop: Eventloop fixture
:param recorder: X-Ray recorder fixture
"""
resp = client.get("/unauthorized")
assert resp.status_code == 401
segment = recorder.emitter.pop()
assert not segment.in_progress
assert segment.error
request = segment.http["request"]
response = segment.http["response"]
assert request["method"] == "GET"
assert request["url"] == "http://testserver/unauthorized"
assert request["client_ip"] == "testclient:50000"
assert response["status"] == 401
def test_response_trace_header(client, recorder):
resp = client.get("/")
xray_header = resp.headers[http.XRAY_HEADER]
segment = recorder.emitter.pop()
expected = "Root=%s" % segment.trace_id
assert expected in xray_header
@pytest.mark.asyncio
async def test_concurrent(client, recorder):
"""
Test multiple concurrent requests
:param test_client: AioHttp test client fixture
:param loop: Eventloop fixture
:param recorder: X-Ray recorder fixture
"""
recorder.emitter = CustomStubbedEmitter()
async def get_delay():
resp = await client.get("/delay")
assert resp.status_code == 200
await asyncio.wait(
[
get_delay(),
get_delay(),
get_delay(),
get_delay(),
get_delay(),
get_delay(),
get_delay(),
get_delay(),
get_delay(),
]
)
# Ensure all ID's are different
ids = [item.id for item in recorder.emitter.local]
assert len(ids) == len(set(ids))
def test_disabled_sdk(client, recorder):
"""
Test a normal response when the SDK is disabled.
:param test_client: AioHttp test client fixture
:param loop: Eventloop fixture
:param recorder: X-Ray recorder fixture
"""
global_sdk_config.set_sdk_enabled(False)
resp = client.get("/")
assert resp.status_code == 200
segment = recorder.emitter.pop()
assert not segment
``` |
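Outside of the test suite, the middleware is attached the same way the `client` fixture does it; a minimal sketch for a regular app, with a placeholder service name and sampling disabled:
```python
# Hedged sketch: wiring XRayMiddleware into a plain Starlette app.
from aws_xray_sdk.core import xray_recorder
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.responses import PlainTextResponse
from starlette.routing import Route

from mp_api.core.xray_middleware import XRayMiddleware

xray_recorder.configure(service="mp_api", sampling=False)

async def homepage(request):
    return PlainTextResponse("ok")

app = Starlette(
    routes=[Route("/", homepage)],
    middleware=[Middleware(XRayMiddleware, recorder=xray_recorder)],
)
# Serve with e.g. `uvicorn module_name:app` (uvicorn assumed to be installed).
```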
{
"source": "jmmshn/custodian",
"score": 2
} |
#### File: custodian/cp2k/interpreter.py
```python
from pymatgen.io.cp2k.inputs import Cp2kInput
from custodian.ansible.actions import FileActions, DictActions
from custodian.ansible.interpreter import Modder
from custodian.cp2k.utils import cleanup_input
__author__ = "<NAME>"
__version__ = "1.0"
__email__ = "<EMAIL>"
__date__ = "October 2021"
class Cp2kModder(Modder):
"""
Cp2kModder is a lightweight class for applying modifications to cp2k input files. It
also supports modifications that are file operations (e.g. copying).
"""
def __init__(self, filename="cp2k.inp", actions=None, strict=True, ci=None):
"""
Initializes a Modder for Cp2kInput sets
Args:
filename (str): name of cp2k input file to modify. This file will be overwritten
if actions are applied.
actions ([Action]): A sequence of supported actions. See
:mod:`custodian.ansible.actions`. Default is None,
which means DictActions and FileActions are supported.
strict (bool): Indicating whether to use strict mode. In non-strict
mode, unsupported actions are simply ignored without any
errors raised. In strict mode, if an unsupported action is
supplied, a ValueError is raised. Defaults to True.
ci (Cp2kInput): A Cp2kInput object from the current directory.
Initialized automatically if not passed (but passing it will
avoid having to reparse the directory).
"""
self.ci = ci or Cp2kInput.from_file(filename)
self.filename = filename
actions = actions or [FileActions, DictActions]
super().__init__(actions, strict)
def apply_actions(self, actions):
"""
Applies a list of actions to the CP2K Input Set and rewrites modified
files.
Args:
actions [dict]: A list of actions of the form {'file': filename,
'action': moddermodification} or {'dict': cp2k_key,
'action': moddermodification}
"""
modified = []
for a in actions:
if "dict" in a:
k = a["dict"]
modified.append(k)
Cp2kModder._modify(a["action"], self.ci)
elif "file" in a:
self.modify(a["action"], a["file"])
self.ci = Cp2kInput.from_file(self.filename)
else:
raise ValueError(f"Unrecognized format: {a}")
cleanup_input(self.ci)
self.ci.write_file(self.filename)
@staticmethod
def _modify(modification, obj):
"""
Note that modify makes actual in-place modifications. It does not
return a copy.
Args:
modification (dict): Modification must be {action_keyword :
settings}. E.g., {'_set': {'Hello':'Universe', 'Bye': 'World'}}
obj (dict/str/object): Object to modify depending on actions. For
example, for DictActions, obj will be a dict to be modified.
For FileActions, obj will be a string with a full pathname to a
file.
"""
modification = list(modification.items()) if isinstance(modification, dict) else modification
for action, settings in modification:
try:
getattr(obj, action[1:])(settings)
except KeyError:
continue
```
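A minimal sketch of the documented action format, assuming a `cp2k.inp` file already exists in the working directory; the same `{"dict": ..., "action": ...}` form is used by the jobs and utils modules below.
```python
# Hedged sketch: apply a dict-style modification and rewrite cp2k.inp in place.
from custodian.cp2k.interpreter import Cp2kModder

modder = Cp2kModder(filename="cp2k.inp")
modder.apply_actions([
    {"dict": "cp2k.inp",
     "action": {"_set": {"GLOBAL": {"RUN_TYPE": "ENERGY_FORCE"}}}},
])
```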
#### File: custodian/cp2k/jobs.py
```python
from __future__ import unicode_literals, division
import subprocess
import os
import shutil
import logging
from monty.shutil import decompress_dir
from monty.os.path import zpath
from pymatgen.io.cp2k.inputs import Cp2kInput, Keyword
from custodian.custodian import Job
from custodian.cp2k.interpreter import Cp2kModder
from custodian.cp2k.utils import restart, cleanup_input
logger = logging.getLogger(__name__)
__author__ = "<NAME>"
__version__ = "1.0"
CP2K_INPUT_FILES = ["cp2k.inp"]
CP2K_OUTPUT_FILES = ["cp2k.out"]
class Cp2kJob(Job):
"""
A basic cp2k job. Just runs whatever is in the directory. But conceivably
can be a complex processing of inputs etc. with initialization.
"""
def __init__(
self,
cp2k_cmd,
input_file="cp2k.inp",
output_file="cp2k.out",
stderr_file="std_err.txt",
suffix="",
final=True,
backup=True,
settings_override=None,
restart=False,
):
"""
This constructor is necessarily complex due to the need for
flexibility. For standard kinds of runs, it's often better to use one
of the static constructors. The defaults are usually fine too.
Args:
cp2k_cmd (list): Command to run cp2k as a list of args. For example,
if you are using mpirun, it can be something like
["mpirun", "cp2k.popt"]
input_file (str): Name of the file to use as input to CP2K
executable. Defaults to "cp2k.inp"
output_file (str): Name of file to direct standard out to.
Defaults to "cp2k.out".
stderr_file (str): Name of file to direct standard error to.
Defaults to "std_err.txt".
suffix (str): A suffix to be appended to the final output. E.g.,
to rename all CP2K output from say cp2k.out to
cp2k.out.relax1, provide ".relax1" as the suffix.
final (bool): Indicating whether this is the final cp2k job in a
series. Defaults to True.
backup (bool): Whether to backup the initial input files. If True,
the input file will be copied with a
".orig" appended. Defaults to True.
settings_override ([actions]): A list of actions. See the Cp2kModder
in interpreter.py
restart (bool): Whether to run in restart mode, i.e. this a continuation of
a previous calculation. Default is False.
"""
self.cp2k_cmd = cp2k_cmd
self.input_file = input_file
self.ci = None
self.output_file = output_file
self.stderr_file = stderr_file
self.final = final
self.backup = backup
self.suffix = suffix
self.settings_override = settings_override if settings_override else []
self.restart = restart
def setup(self):
"""
Performs initial setup for Cp2k in three stages. First, if custodian is running in restart mode, then
the restart function will copy the restart file to self.input_file, and remove any previous WFN initialization
if present. Second, any additional user specified settings will be applied. Lastly, a backup of the input
file will be made for reference.
"""
decompress_dir(".")
self.ci = Cp2kInput.from_file(zpath(self.input_file))
cleanup_input(self.ci)
if self.restart:
restart(
actions=self.settings_override,
output_file=self.output_file,
input_file=self.input_file,
no_actions_needed=True,
)
if self.settings_override or self.restart:
modder = Cp2kModder(filename=self.input_file, actions=[], ci=self.ci)
modder.apply_actions(self.settings_override)
if self.backup:
shutil.copy(self.input_file, f"{self.input_file}.orig")
def run(self):
"""
Perform the actual CP2K run.
Returns:
(subprocess.Popen) Used for monitoring.
"""
# TODO: cp2k has bizarre in/out streams. Some errors that should go to std_err are not sent anywhere...
cmd = list(self.cp2k_cmd)
cmd.extend(["-i", self.input_file])
cmdstring = " ".join(cmd)
logger.info(f"Running {cmdstring}")
with open(self.output_file, "w") as f_std, open(self.stderr_file, "w", buffering=1) as f_err:
# use line buffering for stderr
return subprocess.Popen(cmd, stdout=f_std, stderr=f_err, shell=False)
# TODO double jobs, file manipulations, etc. should be done in atomate in the future
# and custodian should only run the job itself
def postprocess(self):
"""
Postprocessing includes renaming and gzipping where necessary.
"""
fs = os.listdir(".")
if os.path.exists(self.output_file):
if self.suffix != "":
os.mkdir(f"run{self.suffix}")
for f in fs:
if "json" in f:
continue
if not os.path.isdir(f):
if self.final:
shutil.move(f, f"run{self.suffix}/{f}")
else:
shutil.copy(f, f"run{self.suffix}/{f}")
# Remove continuation so if a subsequent job is run in
# the same directory, will not restart this job.
if os.path.exists("continue.json"):
os.remove("continue.json")
def terminate(self):
"""
Terminate cp2k
"""
for k in self.cp2k_cmd:
if "cp2k" in k:
try:
os.system(f"killall {k}")
except Exception:
pass
@classmethod
def gga_static_to_hybrid(
cls,
cp2k_cmd,
input_file="cp2k.inp",
output_file="cp2k.out",
stderr_file="std_err.txt",
backup=True,
settings_override_gga=None,
settings_override_hybrid=None,
):
"""
A bare GGA-to-hybrid calculation sequence. Removes all unnecessary features
from the GGA run, reducing it to a plain ENERGY/ENERGY_FORCE calculation
depending on the run type of the hybrid step.
"""
job1_settings_override = [
{
"dict": input_file,
"action": {
"_unset": {"FORCE_EVAL": {"DFT": "XC"}},
"_set": {"GLOBAL": {"PROJECT_NAME": "GGA", "RUN_TYPE": "ENERGY_FORCE"}},
},
},
{
"dict": input_file,
"action": {"_set": {"FORCE_EVAL": {"DFT": {"XC": {"XC_FUNCTIONAL": {"PBE": {}}}}}}},
},
]
job1 = Cp2kJob(
cp2k_cmd,
input_file=input_file,
output_file=output_file,
backup=backup,
stderr_file=stderr_file,
final=False,
suffix="1",
settings_override=job1_settings_override,
)
ci = Cp2kInput.from_file(zpath(input_file))
r = ci["global"].get("run_type", Keyword("RUN_TYPE", "ENERGY_FORCE")).values[0]
if r in ["ENERGY", "WAVEFUNCTION_OPTIMIZATION", "WFN_OPT", "ENERGY_FORCE"]: # no need for double job
return [job1]
job2_settings_override = [
{
"dict": input_file,
"action": {
"_set": {
"FORCE_EVAL": {
"DFT": {
"XC": {
"HF": {
"SCREENING": {
"SCREEN_ON_INITIAL_P": True,
"SCREEN_P_FORCES": True,
}
}
},
"WFN_RESTART_FILE_NAME": "GGA-RESTART.wfn",
}
},
"GLOBAL": {"RUN_TYPE": r},
},
},
}
]
job2 = Cp2kJob(
cp2k_cmd,
input_file=input_file,
output_file=output_file,
backup=backup,
stderr_file=stderr_file,
final=True,
suffix="2",
restart=False,
settings_override=job2_settings_override,
)
return [job1, job2]
@classmethod
def double_job(
cls, cp2k_cmd, input_file="cp2k.inp", output_file="cp2k.out", stderr_file="std_err.txt", backup=True
):
"""
This creates a sequence of two jobs. The first of which is an "initialization" of the
wfn. Using this, the "restart" function can be exploited to determine if a diagonalization
job can/would benefit from switching to OT scheme. If not, then the second job remains a
diagonalization job, and there is minimal overhead from restarting.
"""
job1 = Cp2kJob(
cp2k_cmd,
input_file=input_file,
output_file=output_file,
backup=backup,
stderr_file=stderr_file,
final=False,
suffix="1",
settings_override={},
)
ci = Cp2kInput.from_file(zpath(input_file))
r = ci["global"].get("run_type", Keyword("RUN_TYPE", "ENERGY_FORCE")).values[0]
if r not in ["ENERGY", "WAVEFUNCTION_OPTIMIZATION", "WFN_OPT"]:
job1.settings_override = [
{"dict": input_file, "action": {"_set": {"GLOBAL": {"RUN_TYPE": "ENERGY_FORCE"}}}}
]
job2 = Cp2kJob(
cp2k_cmd,
input_file=input_file,
output_file=output_file,
backup=backup,
stderr_file=stderr_file,
final=True,
suffix="2",
restart=True,
)
job2.settings_override = [{"dict": input_file, "action": {"_set": {"GLOBAL": {"RUN_TYPE": r}}}}]
return [job1, job2]
@classmethod
def pre_screen_hybrid(
cls, cp2k_cmd, input_file="cp2k.inp", output_file="cp2k.out", stderr_file="std_err.txt", backup=True
):
"""
Build a job where the first job is an unscreened hybrid static calculation, then the second one
uses the wfn from the first job as a restart to do a screened calculation.
"""
job1_settings_override = [
{
"dict": input_file,
"action": {
"_set": {
"FORCE_EVAL": {
"DFT": {
"XC": {
"HF": {
"SCREENING": {
"SCREEN_ON_INITIAL_P": False,
"SCREEN_P_FORCES": False,
}
}
}
}
},
"GLOBAL": {"RUN_TYPE": "ENERGY_FORCE"},
}
},
}
]
job1 = Cp2kJob(
cp2k_cmd,
input_file=input_file,
output_file=output_file,
backup=backup,
stderr_file=stderr_file,
final=False,
suffix="1",
settings_override=job1_settings_override,
)
ci = Cp2kInput.from_file(zpath(input_file))
r = ci["global"].get("run_type", Keyword("RUN_TYPE", "ENERGY_FORCE")).values[0]
if r in ["ENERGY", "WAVEFUNCTION_OPTIMIZATION", "WFN_OPT", "ENERGY_FORCE"]: # no need for double job
return [job1]
job2_settings_override = [
{
"dict": input_file,
"action": {
"_set": {
"FORCE_EVAL": {
"DFT": {
"XC": {
"HF": {
"SCREENING": {
"SCREEN_ON_INITIAL_P": True,
"SCREEN_P_FORCES": True,
}
}
},
"WFN_RESTART_FILE_NAME": "UNSCREENED_HYBRID-RESTART.wfn",
}
},
"GLOBAL": {"RUN_TYPE": r},
},
},
}
]
job2 = Cp2kJob(
cp2k_cmd,
input_file=input_file,
output_file=output_file,
backup=backup,
stderr_file=stderr_file,
final=True,
suffix="2",
restart=False,
settings_override=job2_settings_override,
)
return [job1, job2]
```
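A sketch of running the factory above under Custodian; the cp2k command is a placeholder, no error handlers are attached, and `cp2k.inp` must already exist since the factory parses it when building the job list.
```python
# Hedged sketch: build the GGA -> hybrid job sequence and hand it to Custodian.
from custodian.custodian import Custodian
from custodian.cp2k.jobs import Cp2kJob

jobs = Cp2kJob.gga_static_to_hybrid(
    cp2k_cmd=["mpirun", "-np", "8", "cp2k.popt"],
    input_file="cp2k.inp",
    output_file="cp2k.out",
)

c = Custodian(handlers=[], jobs=jobs)  # cp2k-specific handlers would go here
c.run()
```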
#### File: custodian/cp2k/utils.py
```python
import os
import itertools
from collections import deque
from pymatgen.io.cp2k.inputs import Cp2kInput
from pymatgen.io.cp2k.outputs import Cp2kOutput
def restart(actions, output_file, input_file, no_actions_needed=False):
"""
Helper function. To discard old restart if convergence is already good, and copy
the restart file to the input file. Restart also supports switching back and forth
between OT and diagonalization as needed based on convergence behavior. If OT is not
being used and a band gap exists, then OT will be activated.
Args:
actions (list): list of actions that the handler is going to return to custodian. If
no actions are present, then non are added by this function
output_file (str): the cp2k output file name.
input_file (str): the cp2k input file name.
"""
if actions or no_actions_needed:
o = Cp2kOutput(output_file)
ci = Cp2kInput.from_file(input_file)
restart_file = o.filenames.get("restart")
restart_file = restart_file[-1] if restart_file else None
if ci.check("force_eval/dft"):
wfn_restart = ci["force_eval"]["dft"].get("wfn_restart_file_name")
else:
wfn_restart = None
# If convergence is already pretty good, or we have moved to a new ionic step,
# discard the old WFN
if wfn_restart:
conv = get_conv(output_file)
if (conv and conv[-1] <= 1e-5) or restart_file:
actions.append(
{"dict": input_file, "action": {"_unset": {"FORCE_EVAL": {"DFT": "WFN_RESTART_FILE_NAME"}}}}
)
# If issues arose after some ionic steps and corrections are possible
# then switch the restart file to the input file.
if restart_file:
actions.insert(
0,
{
"file": os.path.abspath(restart_file),
"action": {"_file_copy": {"dest": os.path.abspath(input_file)}},
},
)
# TODO Not sure I like this solution
def cleanup_input(ci):
"""
Intention is to use this to remove problematic parts of the input file.
(1) The "POTENTIAL" section within KIND cannot be empty, but the number
sequences used inside do not play nice with the input parser
"""
if not hasattr(ci, "subsections") or not ci.subsections:
return
if any(k.upper() == "POTENTIAL" for k in ci.subsections):
ci.subsections.pop("POTENTIAL")
for k, v in ci.subsections.items():
cleanup_input(v)
def activate_ot(actions, ci):
"""
Activate OT scheme.
actions (list):
list of actions that are being applied. Will be modified in-place
ci (Cp2kInput):
Cp2kInput object, used to coordinate settings
"""
eps_scf = ci["force_eval"]["dft"]["scf"]["eps_scf"]
ot_actions = [
{
"dict": "cp2k.inp",
"action": [
(
"_unset",
{"FORCE_EVAL": {"DFT": "SCF"}},
)
],
},
{
"dict": "cp2k.inp",
"action": [
(
"_set",
{
"FORCE_EVAL": {
"DFT": {
"SCF": {
"MAX_SCF": 20,
"OT": {
"ENERGY_GAP": 0.01,
"ALGORITHM": "STRICT",
"PRECONDITIONER": "FULL_ALL",
"MINIMIZER": "DIIS",
"LINESEARCH": "2PNT",
},
"OUTER_SCF": {"MAX_SCF": 20, "EPS_SCF": eps_scf},
}
}
}
},
)
],
},
]
actions.extend(ot_actions)
def activate_diag(actions):
"""
Activate diagonalization
actions (list):
list of actions that are being applied. Will be modified in-place
"""
diag_actions = [
{"dict": "cp2k.inp", "action": ("_unset", {"FORCE_EVAL": {"DFT": {"SCF": "OT"}}})},
{"dict": "cp2k.inp", "action": ("_unset", {"FORCE_EVAL": {"DFT": {"SCF": "OUTER_SCF"}}})},
{
"dict": "cp2k.inp",
"action": (
"_set",
{
"FORCE_EVAL": {
"DFT": {
"SCF": {
"MAX_SCF": 200,
"ADDED_MOS": 100, # TODO needs to be dynamic value
"MAX_DIIS": 15,
"DIAGONALIZATION": {},
"MIXING": {"ALPHA": 0.05},
"SMEAR": {"ELEC_TEMP": 300, "METHOD": "FERMI_DIRAC"},
}
}
}
},
),
},
]
actions.extend(diag_actions)
def can_use_ot(output, ci, minimum_band_gap=0.1):
"""
Check whether OT can be used:
OT should not already be activated
The output should show that the system has a band gap that is greater than minimum_band_gap
Args:
output (Cp2kOutput): cp2k output object for determining band gap
ci (Cp2kInput): cp2k input object for determining if OT is already active
minimum_band_gap (float): the minimum band gap for OT
"""
output.parse_dos()
if (
not ci.check("FORCE_EVAL/DFT/SCF/OT")
and not ci.check("FORCE_EVAL/DFT/KPOINTS")
and output.band_gap
and output.band_gap > minimum_band_gap
):
return True
return False
def tail(filename, n=10):
"""
Returns the last n lines of a file as a list (including empty lines)
"""
with open(filename) as f:
t = deque(f, n)
if t:
return t
return [""] * n
def get_conv(outfile):
"""
Helper function to get the convergence info from SCF loops
Args:
outfile (str): output file to parse
Returns:
returns convergence info (change in energy between SCF steps) as a
single list (flattened across outer scf loops).
"""
out = Cp2kOutput(outfile, auto_load=False, verbose=False)
out.parse_scf_opt()
return list(itertools.chain.from_iterable(out.data["convergence"]))
``` |
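A quick sketch of the two inspection helpers, assuming a parseable `cp2k.out` is present:
```python
# Hedged sketch: peek at the output file and its SCF convergence history.
from custodian.cp2k.utils import get_conv, tail

print(list(tail("cp2k.out", n=3)))   # last three lines of the output

deltas = get_conv("cp2k.out")        # flattened SCF energy changes
if deltas:
    print("final SCF energy change:", deltas[-1])
```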
{
"source": "jmmshn/emmet",
"score": 3
} |
#### File: emmet/core/stubs.py
```python
from typing import Dict
import pymatgen.core.structure
from pydantic import BaseModel
from pymatgen.core.periodic_table import Element
"""
The stub names are kept in sync with the actual classes so they
show up correctly in the JSON Schema. They are imported here
in as Stubbed classes to prevent name clashing
"""
class StubComposition(BaseModel):
"""A dictionary mapping element to total quantity"""
__root__: Dict[Element, float]
@classmethod # type: ignore
def get_validators(cls):
yield validate_composition
def validate_composition(cls, v):
if isinstance(v, pymatgen.core.structure.Composition):
return v
return pymatgen.core.structure.Composition(**v)
pymatgen.core.structure.Composition.__pydantic_model__ = StubComposition
pymatgen.core.structure.Composition.__get_validators__ = get_validators
```
#### File: tests/emmet-builders/test_vasp.py
```python
import pytest
from maggma.stores import JSONStore, MemoryStore
from emmet.builders.vasp.task_validator import TaskValidator
intermediate_stores = ["validation"]
@pytest.fixture(scope="session")
def tasks_store(test_dir):
return JSONStore(test_dir / "test_si_tasks.json.gz")
@pytest.fixture(scope="session")
def validation_store():
return MemoryStore()
def test_validator(tasks_store, validation_store):
builder = TaskValidator(tasks=tasks_store, task_validation=validation_store)
builder.run()
assert validation_store.count() == tasks_store.count()
assert validation_store.count({"valid": True}) == tasks_store.count()
```
#### File: tests/emmet-core/test_xrd.py
```python
import pytest
from pymatgen.analysis.diffraction.xrd import WAVELENGTHS
from pymatgen.core import Element, Lattice, Structure
from emmet.core.spectrum import SpectrumDoc
from emmet.core.structure import StructureMetadata
from emmet.core.symmetry import CrystalSystem, SymmetryData
from emmet.core.xrd import Edge, XRDDoc
@pytest.fixture
def structure():
test_latt = Lattice.cubic(3.0)
test_struc = Structure(lattice=test_latt, species=["Fe"], coords=[[0, 0, 0]])
return test_struc
@pytest.mark.parametrize("target", list(WAVELENGTHS.keys()))
def test_target_detection(structure, target):
doc = XRDDoc.from_structure(
structure=structure,
spectrum_id="test-1",
material_id="test-1",
wavelength=WAVELENGTHS[target],
)
target_element = Element(target[:2])
target_edge = Edge(target[2:])
assert doc.target == target_element
assert doc.edge == target_edge
@pytest.mark.parametrize("target", list(WAVELENGTHS.keys()))
def test_from_target(structure, target):
target_element = Element(target[:2])
target_edge = Edge(target[2:])
doc = XRDDoc.from_target(
structure=structure,
material_id="test-1",
target=target_element,
edge=target_edge,
)
assert doc.target == target_element
assert doc.edge == target_edge
def test_schema():
XRDDoc.schema()
```
#### File: emmet-core/vasp/test_materials.py
```python
import json
import pytest
from monty.io import zopen
from emmet.core.vasp.calc_types import TaskType
from emmet.core.vasp.material import MaterialsDoc
from emmet.core.vasp.task import TaskDocument
@pytest.fixture
def test_tasks(test_dir):
with zopen(test_dir / "test_si_tasks.json.gz") as f:
tasks = json.load(f)
tasks = [TaskDocument(**t) for t in tasks]
return tasks
def test_make_mat(test_tasks):
material = MaterialsDoc.from_tasks(test_tasks)
assert material.formula_pretty == "Si"
assert len(material.task_ids) == 4
assert len(material.entries) == 1
bad_task_group = [
task for task in test_tasks if task.task_type != TaskType.Structure_Optimization
]
with pytest.raises(Exception):
MaterialsDoc.from_tasks(bad_task_group)
def test_schema():
MaterialsDoc.schema()
``` |
{
"source": "jmmshn/fireworks",
"score": 3
} |
#### File: features/tests/test_introspect.py
```python
import unittest
from fireworks.features.introspect import flatten_to_keys, separator_str
__author__ = "<NAME> <<EMAIL>>"
class IntrospectTest(unittest.TestCase):
def test_flatten_dict(self):
self.assertEqual(
set(flatten_to_keys({"d": {"e": {"f": 4}, "f": 10}}, max_recurs=1)), {f"d{separator_str}<TRUNCATED_OBJECT>"}
)
self.assertEqual(
set(flatten_to_keys({"d": {"e": {"f": 4}, "f": 10}}, max_recurs=2)),
{f"d.e{separator_str}<TRUNCATED_OBJECT>", f"d.f{separator_str}10"},
)
self.assertEqual(
set(flatten_to_keys({"d": {"e": {"f": 4}, "f": 10}}, max_recurs=3)),
{f"d.e.f{separator_str}4", f"d.f{separator_str}10"},
)
self.assertEqual(
set(flatten_to_keys({"d": [[0, 1], [2, 3]]}, max_recurs=5)), {f"d{separator_str}<TRUNCATED_OBJECT>"}
)
self.assertEqual(
set(flatten_to_keys({"d": [1, 2, 3]}, max_recurs=2)),
{f"d{separator_str}1", f"d{separator_str}2", f"d{separator_str}3"},
)
self.assertEqual(
set(flatten_to_keys({"d": {"e": [0, 1]}}, max_recurs=2)), {f"d.e{separator_str}0", f"d.e{separator_str}1"}
)
```
#### File: scripts/tests/test_mlaunch_run.py
```python
import pytest
from fireworks.scripts.mlaunch_run import mlaunch
__author__ = "<NAME> <<EMAIL>>"
@pytest.mark.parametrize("arg", ["-v", "--version"])
def test_mlaunch_report_version(capsys, arg):
"""Test mlaunch CLI version flag."""
with pytest.raises(SystemExit):
ret_code = mlaunch([arg])
assert ret_code == 0
stdout, stderr = capsys.readouterr()
assert stdout.startswith("mlaunch v")
assert stderr == ""
def test_mlaunch_config_file_flags():
"""Test mlaunch CLI throws errors on missing config file flags."""
num_jobs = "1"
with pytest.raises(FileNotFoundError, match="launchpad_file '' does not exist!"):
mlaunch([num_jobs, "-l", ""])
with pytest.raises(FileNotFoundError, match="fworker_file 'missing_file' does not exist!"):
mlaunch([num_jobs, "-w", "missing_file"])
```
#### File: scripts/tests/test_rlaunch_run.py
```python
import pytest
from fireworks.scripts.rlaunch_run import rlaunch
__author__ = "<NAME> <<EMAIL>>"
@pytest.mark.parametrize("arg", ["-v", "--version"])
def test_rlaunch_report_version(capsys, arg):
"""Test rlaunch CLI version flag."""
with pytest.raises(SystemExit):
ret_code = rlaunch([arg])
assert ret_code == 0
stdout, stderr = capsys.readouterr()
assert stdout.startswith("rlaunch v")
assert stderr == ""
def test_rlaunch_config_file_flags():
"""Test rlaunch CLI throws errors on missing config file flags."""
with pytest.raises(FileNotFoundError, match="launchpad_file '' does not exist!"):
rlaunch(["-l", ""])
with pytest.raises(FileNotFoundError, match="fworker_file 'missing_file' does not exist!"):
rlaunch(["-w", "missing_file"])
```
#### File: fireworks/tests/master_tests.py
```python
from fireworks.user_objects.queue_adapters.common_adapter import CommonAdapter
from fireworks.utilities.fw_serializers import load_object
"""
Master tests for FireWorks - generally used to ensure that installation was \
completed properly.
"""
from fireworks import Firework, FWAction
from fireworks.core.firework import Workflow
from fireworks.user_objects.firetasks.script_task import ScriptTask
__author__ = "<NAME>"
__copyright__ = "Copyright 2013, The Materials Project"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Jan 9, 2013"
import unittest
class TestImports(unittest.TestCase):
"""
Make sure that required external libraries can be imported
"""
def test_imports(self):
pass
# test that MongoClient is available (newer pymongo)
class BasicTests(unittest.TestCase):
"""
Make sure that required external libraries can be imported
"""
def test_fwconnector(self):
fw1 = Firework(ScriptTask.from_str('echo "1"'))
fw2 = Firework(ScriptTask.from_str('echo "1"'))
wf1 = Workflow([fw1, fw2], {fw1.fw_id: fw2.fw_id})
self.assertEqual(wf1.links, {fw1.fw_id: [fw2.fw_id], fw2.fw_id: []})
wf2 = Workflow([fw1, fw2], {fw1: fw2})
self.assertEqual(wf2.links, {fw1.fw_id: [fw2.fw_id], fw2.fw_id: []})
wf3 = Workflow([fw1, fw2])
self.assertEqual(wf3.links, {fw1.fw_id: [], fw2.fw_id: []})
def test_parentconnector(self):
fw1 = Firework(ScriptTask.from_str('echo "1"'))
fw2 = Firework(ScriptTask.from_str('echo "1"'), parents=fw1)
fw3 = Firework(ScriptTask.from_str('echo "1"'), parents=[fw1, fw2])
self.assertEqual(
Workflow([fw1, fw2, fw3]).links, {fw1.fw_id: [fw2.fw_id, fw3.fw_id], fw2.fw_id: [fw3.fw_id], fw3.fw_id: []}
)
self.assertRaises(ValueError, Workflow, [fw1, fw3]) # can't make this
class SerializationTests(unittest.TestCase):
@staticmethod
def get_data(obj_dict):
modname = "fireworks.user_objects.queue_adapters.common_adapter"
classname = "CommonAdapter"
mod = __import__(modname, globals(), locals(), [classname], 0)
if hasattr(mod, classname):
cls_ = getattr(mod, classname)
return cls_.from_dict(obj_dict)
def test_serialization_details(self):
# This detects a weird bug found in early version of serializers
pbs = CommonAdapter("PBS")
self.assertTrue(isinstance(pbs, CommonAdapter))
self.assertTrue(isinstance(self.get_data(pbs.to_dict()), CommonAdapter))
self.assertTrue(isinstance(load_object(pbs.to_dict()), CommonAdapter))
self.assertTrue(isinstance(self.get_data(pbs.to_dict()), CommonAdapter)) # repeated test on purpose!
def test_recursive_deserialize(self):
my_dict = {
"update_spec": {},
"mod_spec": [],
"stored_data": {},
"exit": False,
"detours": [],
"additions": [
{
"updated_on": "2014-10-14T00:56:27.758673",
"fw_id": -2,
"spec": {"_tasks": [{"use_shell": True, "_fw_name": "ScriptTask", "script": ['echo "1"']}]},
"created_on": "2014-10-14T00:56:27.758669",
"name": "Unnamed FW",
}
],
"defuse_children": False,
}
FWAction.from_dict(my_dict)
if __name__ == "__main__":
unittest.main()
```
#### File: user_objects/queue_adapters/common_adapter.py
```python
import copy
"""
This module implements a CommonAdaptor that supports standard PBS and SGE
queues.
"""
import getpass
import os
import re
import stat
import subprocess
from fireworks.queue.queue_adapter import Command, QueueAdapterBase
from fireworks.utilities.fw_serializers import serialize_fw
from fireworks.utilities.fw_utilities import log_exception, log_fancy
__author__ = "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "Dec 12, 2012"
class CommonAdapter(QueueAdapterBase):
"""
An adapter that works on most PBS (including derivatives such as
TORQUE), SGE, and SLURM queues.
"""
_fw_name = "CommonAdapter"
default_q_commands = {
"PBS": {"submit_cmd": "qsub", "status_cmd": "qstat"},
"SGE": {"submit_cmd": "qsub", "status_cmd": "qstat"},
"Cobalt": {"submit_cmd": "qsub", "status_cmd": "qstat"},
"SLURM": {"submit_cmd": "sbatch", "status_cmd": "squeue"},
"LoadLeveler": {"submit_cmd": "llsubmit", "status_cmd": "llq"},
"LoadSharingFacility": {"submit_cmd": "bsub", "status_cmd": "bjobs"},
"MOAB": {"submit_cmd": "msub", "status_cmd": "showq"},
}
def __init__(self, q_type, q_name=None, template_file=None, timeout=None, **kwargs):
"""
:param q_type: The type of queue. Right now it should be either PBS,
SGE, SLURM, Cobalt or LoadLeveler.
:param q_name: A name for the queue. Can be any string.
:param template_file: The path to the template file. Leave it as
None (the default) to use Fireworks' built-in
templates for PBS and SGE, which should work
on most queues.
:param timeout: The amount of seconds to wait before raising an error when
checking the number of jobs in the queue. Default 5 seconds.
:param **kwargs: Series of keyword args for queue parameters.
"""
if q_type not in CommonAdapter.default_q_commands:
raise ValueError(
f"{q_type} is not a supported queue type. CommonAdaptor supports {list(self.default_q_commands.keys())}"
)
self.q_type = q_type
self.template_file = (
os.path.abspath(template_file)
if template_file is not None
else CommonAdapter._get_default_template_file(q_type)
)
self.q_name = q_name or q_type
self.timeout = timeout or 5
self.update(dict(kwargs))
self.q_commands = copy.deepcopy(CommonAdapter.default_q_commands)
if "_q_commands_override" in self:
self.q_commands[self.q_type].update(self["_q_commands_override"])
def _parse_jobid(self, output_str):
if self.q_type == "SLURM":
# The line can contain more text after the id.
# Match after the standard "Submitted batch job" string
re_string = r"Submitted batch job\s+(\d+)"
elif self.q_type == "LoadLeveler":
# Load Leveler: "llsubmit: The job "abc.123" has been submitted"
re_string = r"The job \"(.*?)\" has been submitted"
elif self.q_type == "Cobalt":
# 99% of the time you just get:
# Cobalt: "199768"
# but there's a version that also includes project and queue
# information on preceding lines and both of those might
# contain a number in any position.
re_string = r"(\b\d+\b)"
else:
# PBS: "1234.whatever",
# SGE: "Your job 44275 ("jobname") has been submitted"
# Cobalt: "199768"
re_string = r"(\d+)"
m = re.search(re_string, output_str)
if m:
return m.group(1)
raise RuntimeError("Unable to parse jobid")
def _get_status_cmd(self, username):
status_cmd = [self.q_commands[self.q_type]["status_cmd"]]
if self.q_type == "SLURM":
# by default, squeue lists pending and running jobs
# -p: filter queue (partition)
# -h: no header line
# -o: reduce output to user only (shorter string to parse)
status_cmd.extend(['-o "%u"', "-u", username, "-h"])
if self.get("queue"):
status_cmd.extend(["-p", self["queue"]])
elif self.q_type == "LoadSharingFacility":
# use no header and the wide format so that there is one line per job, and display only running and
# pending jobs
status_cmd.extend(["-p", "-r", "-o", "jobID user queue", "-noheader", "-u", username])
elif self.q_type == "Cobalt":
header = "JobId:User:Queue:Jobname:Nodes:Procs:Mode:WallTime:State:RunTime:Project:Location"
status_cmd.extend(["--header", header, "-u", username])
elif self.q_type == "SGE":
status_cmd.extend(["-u", username])
if self.get("queue"):
status_cmd.extend(["-q", self["queue"]])
elif self.q_type == "MOAB":
status_cmd.extend(["-w", f"user={username}"])
# no queue restriction command known for QUEST supercomputer, i.e., -p option doesn't work
else:
status_cmd.extend(["-u", username])
return status_cmd
def _parse_njobs(self, output_str, username):
# TODO: what if username is too long for the output and is cut off?
# WRS: I may come back to this after confirming that Cobalt
# strictly follows the PBS standard and replace the splitting
# with a regex that would solve length issues
if self.q_type == "SLURM":
# subtract one due to trailing '\n' and split behavior
return len(output_str.split("\n")) - 1
if self.q_type == "LoadLeveler":
if "There is currently no job status to report" in output_str:
return 0
else:
# last line is: "1 job step(s) in query, 0 waiting, ..."
return int(output_str.split("\n")[-2].split()[0])
if self.q_type == "LoadSharingFacility":
# Count the number of lines which pertain to the queue
cnt = 0
for line in output_str.split("\n"):
if line.endswith(self["queue"]):
cnt += 1
return cnt
if self.q_type == "SGE":
# want only lines that include username;
# this will exclude e.g. header lines
return len([l for l in output_str.split("\n") if username in l])
if self.q_type == "MOAB":
# want only lines that include username;
# this will exclude e.g. header lines
return len([l for l in output_str.split("\n") if username in l])
count = 0
for l in output_str.split("\n"):
if l.lower().startswith("job"):
if self.q_type == "Cobalt":
                    # Cobalt capitalizes headers
l = l.lower()
header = l.split()
if self.q_type == "PBS":
# PBS has a ridiculous two word "Job ID" in header
state_index = header.index("S") - 1
queue_index = header.index("Queue") - 1
else:
state_index = header.index("state")
queue_index = header.index("queue")
if username in l:
toks = l.split()
if toks[state_index] != "C":
# note: the entire queue name might be cutoff from the output if long queue name
# so we are only ensuring that our queue matches up until cutoff point
if "queue" in self and self["queue"][0 : len(toks[queue_index])] in toks[queue_index]:
count += 1
return count
def submit_to_queue(self, script_file):
"""
submits the job to the queue and returns the job id
:param script_file: (str) name of the script file to use (String)
:return: (int) job_id
"""
if not os.path.exists(script_file):
raise ValueError(f"Cannot find script file located at: {script_file}")
queue_logger = self.get_qlogger(f"qadapter.{self.q_name}")
submit_cmd = self.q_commands[self.q_type]["submit_cmd"]
# submit the job
try:
if self.q_type == "Cobalt":
# Cobalt requires scripts to be executable
os.chmod(script_file, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP)
cmd = [submit_cmd, script_file]
# For most of the queues handled by common_adapter, it's best to simply submit the file name
# as an argument. LoadSharingFacility doesn't handle the header section (queue name, nodes, etc)
# when taking file arguments, so the file needs to be passed as stdin to make it work correctly.
if self.q_type == "LoadSharingFacility":
with open(script_file) as inputFile:
p = subprocess.Popen([submit_cmd], stdin=inputFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
# retrieve the returncode. PBS returns 0 if the job was successful
if p.returncode == 0:
try:
job_id = self._parse_jobid(p.stdout.read().decode())
queue_logger.info(f"Job submission was successful and job_id is {job_id}")
return job_id
except Exception as ex:
# probably error parsing job code
log_exception(
queue_logger, f"Could not parse job id following {submit_cmd} due to error {str(ex)}..."
)
else:
# some qsub error, e.g. maybe wrong queue specified, don't have permission to submit, etc...
msgs = [
f"Error in job submission with {self.q_name} file {script_file} and cmd {cmd}",
f"The error response reads: {p.stderr.read()}",
]
log_fancy(queue_logger, msgs, "error")
except Exception:
# random error, e.g. no qsub on machine!
log_exception(queue_logger, f"Running the command: {submit_cmd} caused an error...")
def get_njobs_in_queue(self, username=None):
"""
returns the number of jobs currently in the queue for the user
:param username: (str) the username of the jobs to count (default is to autodetect)
:return: (int) number of jobs in the queue
"""
queue_logger = self.get_qlogger(f"qadapter.{self.q_name}")
# initialize username
if username is None:
username = getpass.getuser()
# run qstat
qstat = Command(self._get_status_cmd(username))
p = qstat.run(timeout=self.timeout)
# parse the result
if p[0] == 0:
njobs = self._parse_njobs(p[1], username)
queue_logger.info(f"The number of jobs currently in the queue is: {njobs}")
return njobs
# there's a problem talking to qstat server?
msgs = ["Error trying to get the number of jobs in the queue", f"The error response reads: {p[2]}"]
log_fancy(queue_logger, msgs, "error")
return None
@staticmethod
def _get_default_template_file(q_type):
return os.path.join(os.path.dirname(__file__), f"{q_type}_template.txt")
@serialize_fw
def to_dict(self):
d = dict(self)
# _fw_* names are used for the specific instance variables.
d["_fw_q_type"] = self.q_type
if self.q_name != self.q_type:
d["_fw_q_name"] = self.q_name
if self.template_file != CommonAdapter._get_default_template_file(self.q_type):
d["_fw_template_file"] = self.template_file
d["_fw_timeout"] = self.timeout
return d
@classmethod
def from_dict(cls, m_dict):
return cls(
q_type=m_dict["_fw_q_type"],
q_name=m_dict.get("_fw_q_name"),
template_file=m_dict.get("_fw_template_file"),
timeout=m_dict.get("_fw_timeout"),
**{k: v for k, v in m_dict.items() if not k.startswith("_fw")},
)
```
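A small sketch (not from the repository) of the SLURM branch of `_parse_jobid` above: the adapter pulls the numeric job id out of the scheduler's "Submitted batch job" line with a regex.
```python
import re

output = "Submitted batch job 123456"
match = re.search(r"Submitted batch job\s+(\d+)", output)
assert match is not None and match.group(1) == "123456"
```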
#### File: fireworks/utilities/visualize.py
```python
from typing import Any, Dict
from monty.dev import requires
from fireworks import Firework, Workflow
from fireworks.features.fw_report import state_to_color
try:
from graphviz import Digraph
except ImportError:
Digraph = None
def plot_wf(
wf,
depth_factor=1.0,
breadth_factor=2.0,
labels_on=True,
numerical_label=False,
text_loc_factor=1.0,
save_as=None,
style="rD--",
markersize=10,
markerfacecolor="blue",
fontsize=12,
):
"""
Generate a visual representation of the workflow. Useful for checking whether the firework
connections are in order before launching the workflow.
Args:
wf (Workflow): workflow object.
depth_factor (float): adjust this to stretch the plot in y direction.
breadth_factor (float): adjust this to stretch the plot in x direction.
labels_on (bool): whether to label the nodes or not. The default is to label the nodes
using the firework names.
numerical_label (bool): set this to label the nodes using the firework ids.
text_loc_factor (float): adjust the label location.
save_as (str): save the figure to the given name.
style (str): marker style.
markersize (int): marker size.
markerfacecolor (str): marker face color.
fontsize (int): font size for the node label.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise SystemExit("Install matplotlib. Exiting.")
keys = sorted(wf.links.keys(), reverse=True)
n_root_nodes = len(wf.root_fw_ids)
# set (x,y) coordinates for each node in the workflow links
points_map = {}
# root nodes
for i, k in enumerate(wf.root_fw_ids):
points_map.update({k: ((-0.5 * n_root_nodes + i) * breadth_factor, (keys[0] + 1) * depth_factor)})
# the rest
for k in keys:
for i, j in enumerate(wf.links[k]):
if not points_map.get(j, None):
points_map[j] = ((i - len(wf.links[k]) / 2.0) * breadth_factor, k * depth_factor)
# connect the dots
for k in keys:
for i in wf.links[k]:
plt.plot(
[points_map[k][0], points_map[i][0]],
[points_map[k][1], points_map[i][1]],
style,
markersize=markersize,
markerfacecolor=markerfacecolor,
)
if labels_on:
label1 = wf.id_fw[k].name
label2 = wf.id_fw[i].name
if numerical_label:
label1 = str(k)
label2 = str(i)
plt.text(
points_map[k][0] * text_loc_factor, points_map[k][1] * text_loc_factor, label1, fontsize=fontsize
)
plt.text(
points_map[i][0] * text_loc_factor, points_map[i][1] * text_loc_factor, label2, fontsize=fontsize
)
plt.axis("scaled")
plt.axis("off")
if save_as:
plt.savefig(save_as)
@requires(
Digraph is not None,
"graphviz package required for wf_to_graph.\n"
"Follow the installation instructions here: https://github.com/xflr6/graphviz",
)
def wf_to_graph(wf: Workflow, dag_kwargs: Dict[str, Any] = {}, wf_show_tasks: bool = True) -> Digraph:
"""Renders a graph representation of a workflow or firework. Workflows are rendered as the
control flow of the firework, while Fireworks are rendered as a sequence of Firetasks.
Copied from https://git.io/JO6L8.
Args:
        wf (Workflow | Firework): Workflow or Firework to be rendered.
        dag_kwargs (dict[str, Any]): Arguments passed to Digraph.attr(). Defaults to {}.
        wf_show_tasks (bool): When rendering a Workflow, whether to show each Firetask in the graph. Defaults to True.
Returns:
Digraph: The Workflow or Firework directed acyclic graph.
"""
if not isinstance(wf, (Workflow, Firework)):
raise ValueError(f"expected instance of Workflow or Firework, got {wf}")
if isinstance(wf, Workflow) and not wf_show_tasks:
# if we're rendering a Workflow and not showing tasks, we render the graph from left to right
# by default
dag_kwargs["rankdir"] = dag_kwargs.get("rankdir", "LR")
# Directed Acyclic Graph
dag = Digraph(comment=wf.name)
dag.attr(**dag_kwargs)
dag.node_attr.update(shape="box")
dag.graph_attr["fontname"] = "helvetica"
dag.node_attr["fontname"] = "helvetica"
dag.edge_attr["fontname"] = "helvetica"
if isinstance(wf, Workflow):
for fw in wf:
color = state_to_color[fw.state]
dag.node(name=str(fw.fw_id), label=fw.name, color=color, fontname="helvetica")
if not wf_show_tasks:
continue
subgraph = Digraph(name=f"tasks_{fw.fw_id}")
subgraph.attr(color=color)
for idx, task in enumerate(fw.tasks):
# Clean up names
name = task.fw_name.replace("{", "").replace("}", "")
name = name.split(".")[-1]
node_id = f"{fw.fw_id}-{idx}"
subgraph.node(name=node_id, label=name, style="dashed")
if idx == 0:
subgraph.edge(str(fw.fw_id), node_id)
else:
subgraph.edge(f"{fw.fw_id}-{idx-1}", node_id)
dag.subgraph(subgraph)
for start, targets in wf.links.items():
for target in targets:
dag.edge(str(start), str(target))
elif isinstance(wf, Firework):
for idx, task in enumerate(wf.tasks):
# Clean up names
name = task.fw_name.replace("{", "").replace("}", "")
name = name.split(".")[-1]
dag.node(str(idx), label=name)
if idx >= 1:
dag.edge(str(idx - 1), str(idx))
return dag
if __name__ == "__main__":
import atomate.vasp.workflows as wf_mod
from pymatgen.core import Lattice, Structure
struct = Structure(Lattice.cubic(3), ["S"], [[0, 0, 0]])
wf = wf_mod.wf_bandstructure_hse(struct)
for item in dir(wf_mod):
if item.startswith("wf_"):
try:
wf = getattr(wf_mod, item)(struct)
except TypeError:
continue
dag = wf_to_graph(wf)
# add wf name as plot title above the graph
dag.attr(label=item, fontsize="20", labelloc="t")
dag.view()
# dag.format = "png" # default format is PDF
# dag.render(f"docs_rst/_static/wf_graphs/{item}")
``` |
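A hypothetical usage sketch (not part of the module): build a two-Firework workflow and render it with `wf_to_graph`. It assumes the optional `graphviz` package and the Graphviz system binaries are installed; the firework names and output filename are made up for illustration.
```python
from fireworks import Firework, Workflow
from fireworks.user_objects.firetasks.script_task import ScriptTask
from fireworks.utilities.visualize import wf_to_graph

fw1 = Firework(ScriptTask.from_str('echo "first"'), name="first")
fw2 = Firework(ScriptTask.from_str('echo "second"'), name="second", parents=fw1)

dag = wf_to_graph(Workflow([fw1, fw2]))
dag.render("workflow_graph", format="png")  # writes workflow_graph.png
```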
{
"source": "jmmshn/jobflow",
"score": 2
} |
#### File: src/jobflow/settings.py
```python
from pathlib import Path
from maggma.stores import MemoryStore
from pydantic import BaseSettings, Field, root_validator
from jobflow import JobStore
DEFAULT_CONFIG_FILE_PATH = Path("~/.jobflow.yaml").expanduser().as_posix()
__all__ = ["JobflowSettings"]
class JobflowSettings(BaseSettings):
"""
Settings for jobflow.
The default way to modify these is to modify ~/.jobflow.yaml. Alternatively,
the environment variable ``JOBFLOW_CONFIG_FILE`` can be set to point to a yaml file
with jobflow settings.
    Lastly, the variables can be modified directly through environment variables by
    using the "JOBFLOW" prefix. E.g., ``JOBFLOW_JOB_STORE=path/to/jobstore.file``.
**Allowed JOB_STORE formats**
If the store is not supplied, a ``MemoryStore`` will be used. Can be specified in
multiple formats.
The simplest format is the yaml dumped version of the store, generated using:
>>> import yaml
>>> yaml.dump(store.as_dict())
Alternatively, the store can be specified as the keys docs_store, additional_stores
and any other keyword arguments supported by the :obj:`JobStore` constructor. The
docs_store and additional stores are specified by the ``type`` key which must match
a Maggma ``Store`` subclass, and the remaining keys are passed to the store
constructor. For example, the following file would create a :obj:`JobStore` with a
``MongoStore`` for docs and a ``GridFSStore`` or ``S3Store`` as an additional store
for data.
GridFSStore example:
.. code-block:: yaml
docs_store:
type: MongoStore
database: jobflow_unittest
collection_name: outputs
host: localhost
port: 27017
additional_stores:
data:
type: GridFSStore
database: jobflow_unittest
collection_name: outputs_blobs
host: localhost
port: 27017
S3Store example (Note: the ``key`` field must be set to ``blob_uuid``):
.. code-block:: yaml
docs_store:
type: MongoStore
database: jobflow_unittest
collection_name: outputs
host: localhost
port: 27017
additional_stores:
data:
type: S3Store
bucket: output_blobs
key: blob_uuid
index:
type: MongoStore
database: jobflow_unittest
collection_name: output_blobs_index
host: localhost
port: 27017
key: blob_uuid
Lastly, the store can be specified as a file name that points to a file containing
the credentials in any format supported by :obj:`.JobStore.from_file`.
"""
CONFIG_FILE: str = Field(
DEFAULT_CONFIG_FILE_PATH, description="File to load alternative defaults from."
)
# general settings
JOB_STORE: JobStore = Field(
default_factory=lambda: JobStore(MemoryStore()),
description="Default JobStore to use when running locally or using FireWorks. "
"See the :obj:`JobflowSettings` docstring for more details on the "
"accepted formats.",
)
DIRECTORY_FORMAT: str = Field(
"%Y-%m-%d-%H-%M-%S-%f",
description="Date stamp format used to create directories",
)
class Config:
"""Pydantic config settings."""
env_prefix = "jobflow_"
@root_validator(pre=True)
def load_default_settings(cls, values):
"""
Load settings from file or environment variables.
Loads settings from a root file if available and uses that as defaults in
place of built in defaults.
This allows setting of the config file path through environment variables.
"""
from monty.serialization import loadfn
config_file_path: str = values.get("CONFIG_FILE", DEFAULT_CONFIG_FILE_PATH)
new_values = {}
if Path(config_file_path).exists():
new_values.update(loadfn(config_file_path))
store = new_values.get("JOB_STORE")
if isinstance(store, str):
new_values["JOB_STORE"] = JobStore.from_file(store)
elif isinstance(store, dict) and store.get("@class") == "JobStore":
new_values["JOB_STORE"] = JobStore.from_dict(store)
elif isinstance(store, dict):
new_values["JOB_STORE"] = JobStore.from_dict_spec(store)
new_values.update(values)
return new_values
``` |
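A hypothetical usage sketch (not part of the module): with no `~/.jobflow.yaml` present and no `JOBFLOW_*` environment variables set, instantiating the settings falls back to the defaults shown above, including a `JobStore` backed by an in-memory store.
```python
from jobflow.settings import JobflowSettings

settings = JobflowSettings()
print(settings.DIRECTORY_FORMAT)           # "%Y-%m-%d-%H-%M-%S-%f"
print(type(settings.JOB_STORE).__name__)   # "JobStore"
```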
{
"source": "jmmshn/mp_dash_boards",
"score": 2
} |
#### File: dashboards/soap_explorer/soap_cluster.py
```python
from typing import List
from monty.serialization import loadfn
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from dash_mp_components import Simple3DScene
from pymatgen import Site
import crystal_toolkit # noqa: F401
from pymatgen.analysis.graphs import MoleculeGraph
from pymatgen.core.structure import Molecule
import os
import pandas as pd
import plotly.express as px
dir_path = os.path.dirname(os.path.realpath(__file__))
DUMMY_SPECIES = "Si"
df_res = pd.read_pickle('df_res.pkl')
cluster_fig = fig = px.scatter(df_res, x="x", y='y', width=1000, height=900,
color='DBSCAN_lab', hover_name='index', title="Clusters of Similar Sites")
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
def get_dbs(db_names: List[str], db_file: str = dir_path + "/./db_info.pub.json") -> List:
"""Read the db_file and get the databases corresponding to <<db_name>>
Args:
db_name (List[str]): A list of names of the database we want
db_file (str): The db_file we are reading from
Returns:
MongograntStore: the store we need to access
"""
db_dict = loadfn(db_file)
stores = []
for j_name in db_names:
if j_name not in db_dict:
raise ValueError(
f"The store named {j_name} is missing from the db_file")
stores.append(db_dict[j_name])
return stores
soap_site_db, = get_dbs(["soap_site_descriptors"])
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# App layout
app.layout = html.Div(
[
dcc.Graph(id="cluster-plot", figure=fig),
html.Pre(id="debug", children=""),
Simple3DScene(
id='site',
sceneSize=400,
settings={'extractAxis': True},
axisView='SW',
data={}
),
]
)
@app.callback(Output('debug', 'children'), [Input('cluster-plot', 'clickData')])
def debug(data):
if data is None:
return 'NONE'
return data["points"][0]["hovertext"]
@app.callback(Output('site', 'data'), [Input('cluster-plot', 'clickData')])
def get_sites_scene(data):
if data is None:
return {}
task_id, n = data["points"][0]["hovertext"].split("+")
with soap_site_db as db:
doc = db.query_one({'task_id': task_id})
scene = get_m_graph_from_site_data(doc['site_data'][int(n)]).get_scene()
scene.name = "site"
return scene
def get_m_graph_from_site_data(s_data):
mol = Molecule.from_sites([Site.from_dict(isite)
for isite in s_data['local_graph']['sites']])
mg = MoleculeGraph.with_empty_graph(mol)
for i in range(1, len(mg)):
mg.add_edge(0, i)
return mg
if __name__ == "__main__":
app.run_server(debug=True)
# %%
``` |
{
"source": "JMMull/personalWebsite",
"score": 3
} |
#### File: personalWeb/core/SPARQL.py
```python
from urllib2 import urlopen
from SPARQLWrapper import SPARQLWrapper, JSON, RDF, XML, Wrapper
class Results():
def __init__(self, results):
self.variables = results['head']['vars']
resultlst = []
for result in results['results']['bindings']:
dct = {}
for var in results['head']['vars']:
dct[var] = result[var]['value']
resultlst.append(dct)
self.results = resultlst
def __str__(self):
string = ''
for result in self.results:
for var in self.variables:
string += '{} : {}\t'.format(var, result[var])
string += '\n'
return string
class Endpoint():
def __init__(self, url, allgraphs, guri):
self.url = url
self.allgraphs = allgraphs
self.graphuri = guri
def __str__(self):
return 'SPARQL Endpoint: {}'.format(self.url)
def query(self, query_string):
'''
Derived from http://stackoverflow.com/questions/28455850/sparql-query-json-error-from-bncf-endpoint
'''
if self.allgraphs:
sparql = SPARQLWrapper(self.url)
else:
sparql = SPARQLWrapper(self.url, defaultGraph=self.graphuri)
sparql.setReturnFormat(JSON)
sparql.setQuery(query_string)
request = sparql._createRequest()
request.add_header('Accept', 'application/sparql-results+json')
response = urlopen(request)
res = Wrapper.QueryResult((response, sparql.returnFormat))
results = res.convert()
return Results(results).results
```
#### File: personalWeb/core/virtuoso.py
```python
import os
import sys
import urllib
import subprocess as _sp
import personal_web.core.SPARQL as query
sys.path.append(os.path.abspath('../..'))
class Repository():
def __init__(self, user, password, store, graphuri, endpoint, allgraphs):
self.usr = user
self.pwd = password
self.store = store
self.gpuri = graphuri
self.endpoint = endpoint
self.allgraphs = allgraphs
def __str__(self):
return 'User: {}\nPass: {}\nStore: {}\nGraph URI: {}'.format(
self.usr,
self.pwd,
self.store,
self.gpuri)
def update_graph(self, new_graph):
self.gpuri = new_graph
def add_data(self, infile, overwrite=False):
if overwrite == False:
rqmethod = 'POST'
elif overwrite == True:
rqmethod = 'PUT'
else:
raise Exception('Invalid argument given for keyword "overwrite".')
command = [
'curl', '--digest', '--user',
'{}:{}'.format(self.usr, self.pwd), '--verbose', '--url',
'{}?graph-uri={}'.format(self.store, self.gpuri), '-X',
rqmethod, '-T', '{}'.format(infile)
]
_sp.call(command)
def query(self):
command = [
'curl', '--digest', '--user',
'{}:{}'.format(self.usr, self.pwd), '--verbose', '--url',
'{}?graph-uri={}'.format(self.store, self.gpuri)
]
print command
_sp.call(command)
def delete(self):
command = [
'curl', '--digest', '--user',
'{}:{}'.format(self.usr, self.pwd), '--verbose', '--url',
'{}?graph-uri={}'.format(self.store, self.gpuri), '-X'
'DELETE']
_sp.call(command)
def sparql_query(self, q):
print self.allgraphs
endpoint = query.Endpoint(self.endpoint, self.allgraphs, self.gpuri)
lst = []
start = 0
while True:
run = False
while run == False:
try:
qrun = q + '\n OFFSET {}'.format(start)
results = endpoint.query(qrun)
lst += results
run = True
except Exception as E:
if str(E) == 'HTTP Error 500: SPARQL Request Failed':
pass
else:
raise Exception()
if len(results) < 10000:
break
start += 10000
return lst
``` |
{
"source": "jmmv/markdown2social",
"score": 2
} |
#### File: markdown2social/markdown2social/__init__.py
```python
import logging
import os
import sys
# Program name to use for log messages.
PROGRAM_NAME = os.path.basename(sys.argv[0])
def _build_logger():
"""Instantiates a global logger for the program.
Returns:
Logger. The logger instance to use for the application.
"""
handler = logging.StreamHandler()
formatter = logging.Formatter(PROGRAM_NAME + ': %(levelname)s: %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger(PROGRAM_NAME)
logger.addHandler(handler)
logger.setLevel(logging.WARNING)
return logger
# Global logger instance for the application.
LOGGER = _build_logger()
``` |
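A hypothetical usage sketch (not part of the package): other modules import the shared `LOGGER` so that messages carry the program-name prefix configured above and go to stderr.
```python
from markdown2social import LOGGER

# Prints something like "markdown2social: WARNING: could not read post.md; skipping"
LOGGER.warning('could not read %s; skipping', 'post.md')
```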
{
"source": "jmnatzaganian/mHTM",
"score": 2
} |
#### File: car_evaluation/encoder/errors.py
```python
__docformat__ = 'epytext'
# Native imports
import textwrap
def wrap_error(msg):
"""
Wraps an error message such that it will get displayed properly.
@param msg: The error to display.
@return: A string containing the formatted message.
"""
return '\n ' + '\n '.join(textwrap.wrap(msg, 77))
class BaseException(Exception):
"""
Base class for exception handling in this program.
"""
def __str__(self):
"""
Allows for the exception to throw the message even if it wasn't caught.
@return: The error message.
"""
return self.msg
class InvalidSequence(BaseException):
"""
Exception raised if an item is not a supported sequence.
"""
def __init__(self, seq, method):
"""
Initialize this class.
@param seq: The failed sequence object.
@param method: The method that was missing.
"""
self.msg = wrap_error('The object, {0}, is not a supported sequence. '
'The object must have a "{1}" method.'.format(seq, method))
class UnsupportedFunction(BaseException):
"""
Exception raised if an unsupported function is called, i.e. a user tried to
do something he / she shouldn't be doing with that specific object.
"""
def __init__(self, class_name, function_name):
"""
Initialize this class.
@param class_name: The class name of the caller.
@param function_name: The function name of the caller.
"""
self.msg = wrap_error('The object, {0}, does not support the function '
'{1}. Please check your usage and try again.'.format(class_name,
function_name))
class BitMissMatch(BaseException):
"""
Exception raised if the expected number of bits does not equal the supplied
number of bits.
"""
def __init__(self, expected, acutal):
"""
Initialize this class.
@param expected: The number of expected bits.
@param actual: The actual number of bits.
"""
self.msg = wrap_error('The encoder expected {0} bit(s), but {1} bit(s)'
' were supplied. Please ensure that you are passing the correct'
' number of bits.'.format(expected, acutal))
```
#### File: car_evaluation/encoder/__init__.py
```python
__docformat__ = 'epytext'
# Program imports
from base import Encoder
from scalar import Scalar
from multi import Multi
from category import Category
from errors import BaseException, wrap_error
###############################################################################
########## Exception Handling
###############################################################################
class UnsupportedEncoder(BaseException):
"""
Exception if the specified encoder is not supported.
"""
def __init__(self, name):
"""
Initialize this class.
@param name: The name of the encoder.
"""
self.msg = wrap_error('The desired encoder, {0}, is not supported. '
'Please request a valid encoder.'.format(name))
###############################################################################
########## Functions
###############################################################################
def get_encoder(type, **kargs):
"""
Creates an encoder of the appropriate type and returns an instance of it.
@param type: The type of encoder to create. The supported types are:
"unity", "threshold", "scalar", "category", and "multi".
@param kargs: Any keyword arguments to pass to the encoder.
@return: An encoder instance.
@raise UnsupportedEncoder: Raised if the requested encoder is not
supported.
"""
t = type.lower()
if t == 'unity':
return Unity(**kargs)
elif t == 'threshold':
return Threshold(**kargs)
elif t == 'scalar':
return Scalar(**kargs)
elif t == 'multi':
return Multi(**kargs)
elif t == 'category':
return Category(**kargs)
else:
raise UnsupportedEncoder(t)
def is_finite(encoder):
"""
Determine if the encoder has a finite number of bins. Technically all
encoders do, but this refers to those that have a feasibly finite number
of bins. For example, a scalar encoder with 100 bins has a small number of
bins (100), thus it is finite. A unity encoder with only 10 bits has a
large number of bins (10!), thus it is not finite.
@param encoder: An encoder instance.
"""
t = type(encoder).__name__.lower()
if t == 'scalar' or t == 'category':
return True
return False
```
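A hypothetical usage sketch (not part of the package), assuming the `encoder` package under `car_evaluation` is importable: requesting an unknown encoder type raises `UnsupportedEncoder` with a wrapped, human-readable message.
```python
from encoder import get_encoder, UnsupportedEncoder

try:
    get_encoder('fourier')   # 'fourier' is not a supported type
except UnsupportedEncoder as e:
    print(e)                 # "The desired encoder, fourier, is not supported. ..."
```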
#### File: dev/mnist_novelty_detection/OneVsRest.py
```python
import os
import numpy as np
from sklearn.svm import OneClassSVM, LinearSVC
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
from joblib import Parallel, delayed
from mHTM.datasets.loader import load_mnist, MNISTCV
from mHTM.metrics import SPMetrics
from mHTM.region import SPRegion
def main():
"""
Use a linear SVM for multi-class classification.
One vs the rest : 77.61%
Default : 77.61%
One vs one : 85.07%
"""
seed = 123456789
np.random.seed(seed)
ntrain, ntest = 800, 200
(tr_x, tr_y), (te_x, te_y) = load_mnist()
x, y = np.vstack((tr_x, te_x)), np.hstack((tr_y, te_y))
cv = MNISTCV(tr_y, te_y, ntrain, ntest, 1, seed)
for tr, te in cv:
clf = OneVsRestClassifier(LinearSVC(random_state=seed), -1)
clf.fit(x[tr], y[tr])
print clf.score(x[te], y[te])
clf = LinearSVC(random_state=seed)
clf.fit(x[tr], y[tr])
print clf.score(x[te], y[te])
clf = OneVsOneClassifier(LinearSVC(random_state=seed), -1)
clf.fit(x[tr], y[tr])
print clf.score(x[te], y[te])
def main2():
"""
Use one class SVM for multi-class classification
Accuracy = 71.45%
"""
# Initializations
seed = 123456789
np.random.seed(seed)
ntrain, ntest = 800, 200
(tr_x, tr_y), (te_x, te_y) = load_mnist()
tr, te = [], []
for i in xrange(10):
tr.append(np.random.permutation(tr_x[tr_y == i])[:ntrain])
te.append(np.random.permutation(te_x[te_y == i])[:ntest])
# Train the classifiers and get their results
clfs = []
for i in xrange(10):
clf = OneClassSVM(kernel='linear', nu=0.1, random_state=seed)
clf.fit(tr[i])
clfs.append(clf)
# Test the classifiers
te_x = np.vstack(te)
te_y = np.hstack([np.array([i] * ntest) for i in xrange(10)])
results = np.zeros((10, len(te_y)))
for i in xrange(10):
results[i] = clfs[i].decision_function(te_x).flatten() + \
np.random.uniform(0.1, 0.2, len(te_y))
print np.sum(np.argmax(results, 0) == te_y) / float(len(te_y))
def _main3(params, x):
"""
Used by main3 to do the SP training in parallel.
@param params: The configuration parameters for the SP.
@param x: The data to train the SP on.
@return: The SP instance, as well as its predictions on the training data.
"""
clf = SPRegion(**params)
clf.fit(x)
y = np.mean(clf.predict(x), 0)
y[y >= 0.5] = 1
y[y < 1] = 0
return clf, y
def _main3_2(clf, x, base_result, seed):
"""
Used by main3 to do the SP testing in parallel.
@param clf: An instance of the classifier
@param x: The data to test the SP on.
@param base_result: The SP's base result.
@param seed: Seed for random number generator
@return: The SP's overlap results.
"""
np.random.seed(seed)
metrics = SPMetrics()
y = clf.predict(x)
result = np.zeros(len(y))
for i, yi in enumerate(y):
yt = np.vstack((base_result, yi))
result[i] = metrics.compute_overlap(yt)
# Tie-breaker
result += np.random.uniform(0.001, 0.002, len(y))
return result
def main3(log_dir):
"""
Use one class SP for multi-class classification
Accuracy = 49.8%
"""
# Initializations
seed = 123456789
np.random.seed(seed)
ntrain, ntest = 800, 200
(tr_x, tr_y), (te_x, te_y) = load_mnist()
tr, te = [], []
for i in xrange(10):
tr.append(np.random.permutation(tr_x[tr_y == i])[:ntrain])
te.append(np.random.permutation(te_x[te_y == i])[:ntest])
params = {
'ninputs': 784,
'trim': 1e-4,
'disable_boost': True,
'seed': seed,
'pct_active': None,
'random_permanence': True,
'pwindow': 0.5,
'global_inhibition': True,
'ncolumns': 784,
'nactive': 78,
'nsynapses': 100,
'seg_th': 0,
'syn_th': 0.5,
'pinc': 0.001,
'pdec': 0.001,
'nepochs': 10,
'log_dir': log_dir
}
metrics = SPMetrics()
# Train the classifiers
clfs = []
base_results = []
for clf, y in Parallel(n_jobs=-1)(delayed(_main3)(params, tr[i])
for i in xrange(10)):
clfs.append(clf)
base_results.append(y)
# Test the classifiers
te_x = np.vstack(te)
te_y = np.hstack([np.array([i] * ntest) for i in xrange(10)])
results = np.array(Parallel(n_jobs=-1)(delayed(_main3_2)(clfs[i], te_x,
base_results[i], seed) for i in xrange(10)))
print np.sum(np.argmax(results, 0) == te_y) / float(len(te_y))
if __name__ == '__main__':
# main()
# main2()
main3(os.path.join(os.path.expanduser('~'), 'scratch',
'mnist_novelty_classification', 'r1'))
```
#### File: dev/parameter_exploration/parser.py
```python
__docformat__ = 'epytext'
# Native imports
import csv, json, os, re, cPickle
# Third party imports
import numpy as np
import matplotlib.pyplot as plt
# Program imports
from mHTM.plot import plot_error, compute_err
def natural_sort(items):
"""
Sort a set of strings in the format that a human would.
@param items: The list of items to sort.
@return: A new list with the sorted items.
"""
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key : [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(items, key = alphanum_key)
def get_sorted_dirs(paths):
"""
Get all of the directories, sorted from the provided set of paths.
@param paths: The paths to sort.
@return: The sorted directories.
"""
return natural_sort([path for path in paths if os.path.isdir(path)])
def get_results(base_path, config_keys, x=None, y=None):
"""
Get the results for the experiment.
@param base_path: The full path to the directory containing the runs.
@param config_keys: The keys in the config to read.
@param x: If not None add the data to this structure.
@param y: If not None add the data to this structure.
@return: The results and the base paths
"""
# Store the independent variables
if x is None: x = [[] for _ in config_keys]
# Store the dependent variables
# -- fit_time, learn_fit_time, pred_fit_time, input_uniqueness,
# input_overlap, input_correlation, sp_uniqueness, sp_overlap,
# sp_correlation
if y is None: y = [[],[],[],[],[],[],[],[],[]]
# Get the data
prev_param_iteration = None
for path in sorted(os.listdir(base_path)):
# Only work with valid runs
try:
param_iteration, _ = [int(item) for item in path.split('-')]
except ValueError:
continue
#####
# Independent variables
#####
# Get the JSON config
with open(os.path.join(base_path, path, 'config.json'), 'rb') as f:
config = json.load(f)
# Get the data
for i, key in enumerate(config_keys):
x[i].append(config[key])
#####
# Dependent variables
#####
# Read in the results
data = []
with open(os.path.join(base_path, path, 'stats.csv'), 'rb') as f:
reader = csv.reader(f)
for row in reader: data.append(float(row[1]))
# Add to data structure
if prev_param_iteration == param_iteration:
for i, d in enumerate(data): y[i][-1].append(d)
else:
prev_param_iteration = param_iteration
for i, d in enumerate(data): y[i].append([d])
return x, y
def main(root_dir):
"""
Parse out the experiment data into a user-friendly format.
@param root_dir: The root of the directory tree.
CAUTION: Known bug - If only one folder exists, this code will not produce
the output.
"""
# Experiment map
# -- Folder name, parameter names, experiment name
experiment_map = {
'nactive': [['nactive'], 'nactive'],
'ncols1': [['ncolumns'], 'ncols'],
'ncols2': [['ncolumns'], 'ncols'],
'ncols3': [['ncolumns'], 'ncols'],
'ncols4': [['ncolumns'], 'ncols'],
'ncols5': [['ncolumns'], 'ncols'],
'ncols6': [['ncolumns'], 'ncols'],
'ncols7': [['ncolumns'], 'ncols'],
'ncols8': [['ncolumns'], 'ncols'],
'ncols9': [['ncolumns'], 'ncols'],
'ncols10': [['ncolumns'], 'ncols'],
'nepochs': [['nepochs'], 'nepochs'],
'nsynapses': [['nsynapses'], 'nsynapses'],
'pct_active': [['pct_active'], 'pct_active'],
'pdec': [['pdec'], 'pdec'],
'pinc': [['pinc'], 'pinc'],
'pwindow': [['pwindow'], 'pwindow'],
'seg_th': [['seg_th'], 'seg_th']
}
# Initial name of base experiment
prev_name = None
x = y = None
# Process all of the items
for dir in get_sorted_dirs([os.path.join(root_dir, p) for p in
os.listdir(root_dir)]):
parameter_names, experiment_name = experiment_map[
os.path.basename(dir)]
# Get the data
if experiment_name == prev_name:
x, y = get_results(dir, parameter_names, x, y)
else:
# Save the results
if not ((x is None) and (y is None)):
with open(os.path.join(root_dir, '{0}.pkl'.format(prev_name)), 'wb') as f:
cPickle.dump((x, y), f, cPickle.HIGHEST_PROTOCOL)
# Get the new data
x, y = get_results(dir, parameter_names)
prev_name = experiment_name
def main2(base_path):
"""
@param base_path: Full path to pickle file to work with.
"""
# Mapping of independent variables to indexes
data_index = {
'fit_time':0,
'learn_fit_time':1,
'pred_fit_time':2,
'input_uniqueness':3,
'input_overlap':4,
'input_correlation':5,
'sp_uniqueness':6,
'sp_overlap':7,
'sp_correlation':8
}
# Get the data
with open(base_path, 'rb') as f:
x, y = cPickle.load(f)
x = sorted(set(x[-1])) # For now work with 1D
# Pull out data for this plot
y1 = (y[data_index['input_uniqueness']], y[data_index['sp_uniqueness']])
y2 = (y[data_index['input_overlap']], y[data_index['sp_overlap']])
y3 = (y[data_index['input_correlation']], y[data_index['sp_correlation']])
# Refactor the data
x_series = (x, x, x)
med = lambda y: np.median(y, axis=1) * 100
err = lambda y: compute_err(y, axis=1) * 100
y1_series = map(med, y1)
y1_errs = map(err, y1)
y2_series = map(med, y2)
y2_errs = map(err, y2)
y3_series = map(med, y3)
y3_errs = map(err, y3)
# Make the main plot
fig = plt.figure(figsize=(21, 20), facecolor='white')
ax = fig.add_subplot(111)
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor='w', top='off', bottom='off', left='off',
right='off')
# Make subplots
ax1 = fig.add_subplot(311)
plot_error(show=False, legend=False, ax=ax1, title='Uniqueness',
x_series=x_series, y_series=y1_series, y_errs=y1_errs, ylim=(-5, 105))
ax2 = fig.add_subplot(312, sharex=ax1, sharey=ax1)
plot_error(show=False, legend=False, ax=ax2, title='Overlap',
x_series=x_series, y_series=y2_series, y_errs=y2_errs, ylim=(-5, 105))
ax3 = fig.add_subplot(313, sharex=ax1, sharey=ax1)
plot_error(show=False, legend=False, ax=ax3, title='Correlation',
x_series=x_series, y_series=y3_series, y_errs=y3_errs, ylim=(-5, 105))
plt.tight_layout(h_pad=2)
plt.show()
if __name__ == '__main__':
####
# Parse
####
# results_dir = os.path.join(os.path.expanduser('~'), 'results')
# experiment_name = 'first_order'
# inhibition_types = ('global', 'local')
# for inhibition_type in inhibition_types:
# root_dir = os.path.join(results_dir, experiment_name, inhibition_type)
# main(root_dir)
####
# Plot
####
results_dir = os.path.join(os.path.expanduser('~'), 'scratch')
experiment_name = 'first_order'
inhibition_type = 'global'
root_dir = os.path.join(results_dir, experiment_name, inhibition_type)
for experiment in (os.listdir(root_dir)):
print experiment
base_path = os.path.join(root_dir, experiment)
main2(base_path)
```
#### File: src/examples/sp_simple.py
```python
__docformat__ = 'epytext'
# Native imports
import os
# Third party imports
import numpy as np
# Program imports
from mHTM.region import SPRegion
from mHTM.datasets.loader import SPDataset
from mHTM.metrics import SPMetrics
from mHTM.plot import plot_line
def main():
"""
Program entry.
Build an SP using SPDataset and see how it performs.
"""
# Params
nsamples, nbits, pct_active = 500, 100, 0.4
seed = 123456789
base_path = os.path.join(os.path.expanduser('~'), 'scratch', 'sp_simple')
kargs = {
'ninputs': nbits,
'ncolumns': 200,
'nactive': 50,
'global_inhibition': True,
'trim': 1e-4,
'disable_boost': True,
'seed': seed,
'nsynapses': 75,
'seg_th': 15,
'syn_th': 0.5,
'pinc': 0.001,
'pdec': 0.001,
'pwindow': 0.5,
'random_permanence': True,
'nepochs': 10,
'log_dir': os.path.join(base_path, '1-1')
}
# Build items to store results
npoints = 25
pct_noises = np.linspace(0, 1, npoints, False)
uniqueness_sp, uniqueness_data = np.zeros(npoints), np.zeros(npoints)
similarity_sp, similarity_data = np.zeros(npoints), np.zeros(npoints)
similarity_sp1, similarity_data1 = np.zeros(npoints), np.zeros(npoints)
similarity_sp0, similarity_data0 = np.zeros(npoints), np.zeros(npoints)
dissimilarity_sp, dissimilarity_data = np.zeros(npoints), np.zeros(npoints)
overlap_sp, overlap_data = np.zeros(npoints), np.zeros(npoints)
correlation_sp, correlation_data = np.zeros(npoints), np.zeros(npoints)
# Metrics
metrics = SPMetrics()
# Vary input noise
for i, pct_noise in enumerate(pct_noises):
print 'Iteration {0} of {1}'.format(i + 1, npoints)
# Build the dataset
ds = SPDataset(nsamples=nsamples, nbits=nbits, pct_active=pct_active,
pct_noise=pct_noise, seed=seed)
# Get the dataset stats
uniqueness_data[i] = metrics.compute_uniqueness(ds.data)
similarity_data[i] = metrics.compute_total_similarity(ds.data,
confidence_interval=0.9)
similarity_data1[i] = metrics.compute_one_similarity(ds.data,
confidence_interval=0.9)
similarity_data0[i] = metrics.compute_zero_similarity(ds.data,
confidence_interval=0.9)
dissimilarity_data[i] = metrics.compute_dissimilarity(ds.data,
confidence_interval=0.9)
overlap_data[i] = metrics.compute_overlap(ds.data)
correlation_data[i] = 1 - metrics.compute_distance(ds.data)
# Build the SP
sp = SPRegion(**kargs)
# Train the region
sp.fit(ds.data)
# Get the SP's output SDRs
sp_output = sp.predict(ds.data)
# Get the stats
uniqueness_sp[i] = metrics.compute_uniqueness(sp_output)
similarity_sp[i] = metrics.compute_total_similarity(sp_output,
confidence_interval=0.9)
similarity_sp1[i] = metrics.compute_one_similarity(sp_output,
confidence_interval=0.9)
similarity_sp0[i] = metrics.compute_zero_similarity(sp_output,
confidence_interval=0.9)
dissimilarity_sp[i] = metrics.compute_dissimilarity(sp_output,
confidence_interval=0.9)
overlap_sp[i] = metrics.compute_overlap(sp_output)
correlation_sp[i] = 1 - metrics.compute_distance(sp_output)
# Make some plots
print 'Showing uniqueness - 0% is ideal'
plot_line([pct_noises * 100, pct_noises * 100], [uniqueness_data * 100,
uniqueness_sp * 100], series_names=('Raw Data', 'SP Output'),
x_label='% Noise', y_label='Uniqueness [%]', xlim=False,
ylim=(-5, 105), out_path=os.path.join(base_path, 'uniqueness.png'),
show=True)
print 'Showing total similarity - 100% is ideal'
plot_line([pct_noises * 100, pct_noises * 100], [similarity_data * 100,
similarity_sp * 100], series_names=('Raw Data', 'SP Output'),
x_label='% Noise', y_label='Total similarity [%]', xlim=False,
ylim=(-5, 105), out_path=os.path.join(base_path, 'similarity.png'),
show=True)
print 'Showing similarity of "1" bits - 100% is ideal'
plot_line([pct_noises * 100, pct_noises * 100], [similarity_data1 * 100,
similarity_sp1 * 100], series_names=('Raw Data', 'SP Output'),
x_label='% Noise', y_label="Similarity of '1's [%]", xlim=False,
ylim=(-5, 105), out_path=os.path.join(base_path, 'one_similarity.png'),
show=True)
print 'Showing similarity of "0" bits - 100% is ideal'
plot_line([pct_noises * 100, pct_noises * 100], [similarity_data0 * 100,
similarity_sp0 * 100], series_names=('Raw Data', 'SP Output'),
x_label='% Noise', y_label="Similarity of '0's [%]", xlim=False,
ylim=(-5, 105), out_path=os.path.join(base_path, 'zero_similarity.png'),
show=True)
print 'Showing dissimilarity - 0% is ideal'
plot_line([pct_noises * 100, pct_noises * 100], [dissimilarity_data * 100,
dissimilarity_sp * 100], series_names=('Raw Data', 'SP Output'),
x_label='% Noise', y_label="Dissimilarity [%]", xlim=False,
ylim=(-5, 105), out_path=os.path.join(base_path, 'dissimilarity.png'),
show=True)
print 'Showing average normalized overlap - 100% is ideal'
plot_line([pct_noises * 100, pct_noises * 100], [overlap_data * 100,
overlap_sp * 100], series_names=('Raw Data', 'SP Output'),
x_label='% Noise', y_label="% Normalized Overlap", xlim=False,
ylim=(-5, 105), out_path=os.path.join(base_path, 'overlap.png'),
show=True)
print 'Showing % average sample correlation coefficient - 100% is ideal'
plot_line([pct_noises * 100, pct_noises * 100], [correlation_data * 100,
correlation_sp * 100], series_names=('Raw Data', 'SP Output'),
x_label='% Noise', y_label="% Correlation", xlim=False,
ylim=(-5, 105), out_path=os.path.join(base_path, 'correlation.png'),
show=True)
print '*** All data saved in "{0}" ***'.format(base_path)
if __name__ == '__main__':
main()
``` |
{
"source": "jmnel/bactrian",
"score": 3
} |
#### File: bactrian/bactrian/expression.py
```python
def bractify(arg):
from .numeric_types import Float, Integer
if isinstance(arg, float):
return Float(arg)
if isinstance(arg, int):
return Integer(arg)
else:
return arg
class Expression:
def __init__(self):
pass
def __mul__(self, rhs):
return Mul(self, rhs)
def __rmul__(self, lhs):
return Mul(lhs, self)
def __add__(self, rhs):
return Add(self, rhs)
def __radd__(self, lhs):
        return Add(lhs, self)
def __sub__(self, rhs):
return Add(self, Mul(-1, rhs))
def __rsub__(self, lhs):
return Add(Mul(-1, lhs), self)
def __neg__(self):
return Mul(-1, self)
def __lt__(self, rhs):
from .relation import LessThanStrict
return LessThanStrict(self, rhs)
def __gt__(self, rhs):
from .relation import GreaterThanStrict
return GreaterThanStrict(self, rhs)
def __le__(self, rhs):
from .relation import LessThan
return LessThan(self, rhs)
def __ge__(self, rhs):
from .relation import GreaterThan
return GreaterThan(self, rhs)
def __eq__(self, rhs):
from .relation import EqualTo
return EqualTo(self, rhs)
def __ne__(self, rhs):
from .relation import NotEqualTo
return NotEqualTo(self, rhs)
def parse_symbol_times_scalar(expr):
from .symbol import Symbol
if (not isinstance(expr, Mul)) and (not isinstance(expr, Symbol)):
raise TypeError('Failed to parse expression: \"%s\" must be type Mul or Symbol'
% str(expr))
if isinstance(expr, Symbol):
return (1, expr)
    from .numeric_types import Numeric
    is_symbol = list(map(lambda a: isinstance(a, Symbol), expr.args))
    is_numeric = list(map(lambda a: isinstance(a, Numeric), expr.args))
    symbol_count = is_symbol.count(True)
    numeric_count = is_numeric.count(True)
if symbol_count != 1:
# Should these 2 exceptions be TypeError instead?
        raise ValueError('Failed to parse expression: \"%s\" must contain exactly one of type Symbol'
% str(expr))
if numeric_count != len(expr.args)-1:
raise ValueError('Failed to parse expression: \"%s\" must contain n-1 of type Numeric'
% str(expr))
coef, symb = 1, None
for i, arg in enumerate(expr.args):
if is_symbol[i]:
assert (not is_numeric[i]), ('not is_numeric[%d]' % i)
symb = arg
else:
            assert is_numeric[i], ('is_numeric[%d]' % i)
coef *= arg.value
return (coef, symb)
def parse_linear_combination(expr):
from .symbol import Symbol
if isinstance(expr, Symbol):
return [(1, expr)]
if isinstance(expr, Mul):
return [parse_symbol_times_scalar(expr)]
elif isinstance(expr, Add):
return [parse_symbol_times_scalar(t) for t in expr.args]
else:
raise TypeError('Failed to parse linear combination: \"%s\" is not type Add or Mul'
% str(expr))
from .add import Add
from .mul import Mul
```
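A hypothetical usage sketch (not part of the module). It assumes `Symbol("x")` constructs a named symbol and that `Add`/`Mul` wrap plain numbers via `bractify`, neither of which is shown in this file.
```python
from bactrian.symbol import Symbol
from bactrian.expression import parse_linear_combination

x, y = Symbol('x'), Symbol('y')
expr = 2 * x + 3 * y                    # operator overloads build Add(Mul(2, x), Mul(3, y))
terms = parse_linear_combination(expr)  # expected: [(2, x), (3, y)] coefficient/symbol pairs
```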
#### File: bactrian/bactrian/simplify.py
```python
from .expression import Expression, bractify
def simplify(expr):
from .add import Add
from .mul import Mul
from .numeric_types import Numeric
if isinstance(expr, Mul):
non_numerics = list()
coef = 1
for a in expr.args:
if isinstance(a, Numeric):
coef *= a.value
else:
non_numerics.append(a)
lhs = (bractify(coef), )
rhs = tuple(non_numerics)
return Mul(lhs + rhs)
elif isinstance(expr, Add):
non_numerics = list()
coef = 0
for a in expr.args:
if isinstance(a, Numeric):
coef += a.value
else:
non_numerics.append(a)
lhs = (bractify(coef), )
rhs = tuple(non_numerics)
return Add(lhs + rhs)
else:
return expr
``` |
{
"source": "jmnel/binance-client",
"score": 2
} |
#### File: binance-client/src/client.py
```python
import sqlite3
from pprint import pprint
import json
from pathlib import Path
from time import perf_counter, sleep
import multiprocessing as mp
#from multiprocessing import Pipe, Process
#from multiprocessing.connection import Connection
from multiprocessing.connection import Connection
from enum import Enum
from collections import namedtuple
import matplotlib
matplotlib.use('module://matplotlib-backend-kitty')
import matplotlib.pyplot as plt
import settings
from api import get_aggregate_trades
N = settings.NUM_WORKERS
DB_PATH = settings.DATA_DIRECTORY / settings.DATABASE_NAME
class MessageType(Enum):
STOP = 0
WAIT_UPDATE = 1
AGG_TRADES_REQUEST = 2
AGG_TRADES_RESULT = 3
Message = namedtuple('Message', 'type args')
def prep_database():
if DB_PATH.exists():
DB_PATH.unlink()
db = sqlite3.connect(DB_PATH)
db.execute('''
CREATE TABLE trades_BTCUSDT(
id INTEGER PRIMARY KEY,
timestamp UNSIGNED INT,
price FLOAT,
quantity FLOAT);''')
db.close()
def write_buffer(buff):
buff_2 = list()
if len(buff) > 0:
for b in buff:
if b is not None:
if len(b) > 0:
if len(buff_2) > 0:
assert buff_2[-1][0] + 1 == b[0][0]
buff_2.extend(b)
if len(buff_2) > 0:
db = sqlite3.connect(DB_PATH)
db.executemany('INSERT INTO trades_BTCUSDT(id, timestamp, price, quantity) VALUES(?, ?, ?, ?)',
buff_2)
db.commit()
db.close()
# print(f'wrote {len(buff_2)} rows')
def proc(worker_idx: int, child_conn: Connection):
weight = 0.
prev_time = perf_counter()
while True:
while not child_conn.poll():
sleep(1)
msg = child_conn.recv()
if msg.type == MessageType.STOP:
# print(f'recevied stop')
break
elif msg.type == MessageType.AGG_TRADES_REQUEST:
while True:
wave, symbol, from_id = msg.args
# print(f'w={wave}, s={symbol}, idx={from_id}')
t_start = perf_counter()
try:
result = get_aggregate_trades(symbol, from_id)
except:
print(f'{worker_idx} : Request timed out.')
sleep(2)
continue
                print(f'request took {perf_counter() - t_start}')
if result.status == 200:
true_weight = result.used_weight
data = result.data
if len(data) == 0:
last_id = -1
else:
last_id = data[-1][0]
child_conn.send(Message(MessageType.AGG_TRADES_RESULT, (wave, true_weight, data, from_id, last_id)))
break
else:
print(f'failed: {result.status}')
sleep(10)
elif msg.type == MessageType.WAIT_UPDATE:
weight = msg.args[0]
# print(f'{worker_idx} : new weight={weight}')
new_time = perf_counter()
elapsed = new_time - prev_time
prev_time = new_time
weight = weight - settings.WEIGHT_DECAY * elapsed
# print('worker done')
def main():
prep_database()
parent_conn, child_conn = tuple(zip(*(mp.Pipe() for _ in range(settings.NUM_WORKERS))))
workers = tuple(mp.Process(target=proc, args=(i, child_conn[i],)) for i in range(settings.NUM_WORKERS))
for w in workers:
w.start()
# parent_conn[2].send(Message(MessageType.WAIT_UPDATE, (12,)))
sleep(2)
wave = 0
next_idx = 0
# next_idx = 381084197 + 1002 * 500
# next_idx = int(382082680 - 1e9)
# print(next_idx)
# exit()
weight = 0.0
busy = [False, ] * N
# q = 2000
p = 0
buff = list(None for _ in range(N))
weights = list()
should_stop = False
is_stopping = False
iterations = 0
prev_wave = 0
while True:
if iterations % 100 == 0 and wave != prev_wave:
print('Wave {}: Weight: {}, Progress: {:.1f}, Next ID: {}'.format(
wave + 1, weight, 100. * (next_idx / 382116257), next_idx))
prev_wave = wave
iterations += 1
weight_updated = False
weight_i = [0, ] * N
# Check children for messages.
for i in range(N):
if parent_conn[i].poll():
msg = parent_conn[i].recv()
if msg.type == MessageType.AGG_TRADES_RESULT:
wave_i, true_weight, data, from_id, last_id = msg.args
if wave_i == wave:
weight_i[i] = true_weight
print(f'{i} : {wave_i}, {from_id} -> {last_id} : {true_weight}')
busy[i] = False
assert buff[i] is None
# assert data[0][0] == from_id
# assert data[-1][0] == last_id
if len(data) < 1002:
should_stop = True
# print('reached end')
buff[i] = data
p += 1
if all(map(lambda b: b is not None, buff)):
# print(f'writing buffer')
write_buffer(buff)
# for idx in range(N - 1):
# assert buff[idx][-1][0] + 1 == buff[idx + 1][0][0]
buff = list(None for _ in range(N))
# if p >= q:
# print('done')
# break
if not any(busy):
weight = max(weight_i)
# weight_updated = True
# wave += 1
t_0 = perf_counter()
while weight >= 1000:
# print(f'waiting {weight}')
t_1 = perf_counter()
t_d = t_1 - t_0
t_0 = t_1
weight = max(0, weight - settings.WEIGHT_DECAY * t_d)
# print(f'new weight: {weight}')
# sleep(0.002)
wave += 1
for i in range(N):
parent_conn[i].send(Message(MessageType.AGG_TRADES_REQUEST, (wave, 'BTCUSDT', next_idx)))
sleep(1)
next_idx += 1002
busy[i] = True
weights.append(weight)
# if p % N == 0:
# plt.plot(weights)
# plt.show()
if should_stop and not is_stopping:
sleep(10)
is_stopping = True
print('should stop')
continue
if is_stopping:
print('Done')
c = len(tuple(filter(lambda e: e is not None, buff)))
# print(f'{c} left in buffer')
write_buffer(buff)
break
# for i in range(NUM_WORKERS):
# parent_conn[i % 4].send(Message(MessageType.AGG_TRADES_REQUEST, ('BTCTUSD', next_idx)))
# result = parent_conn[i % 4].recv()
# args = result.args
# print(f'{next_idx} -> {args[-1]+1}')
# print(f'len: {args[-1] - next_idx}')
# print(f'true len: {len(result.args[1])}')
# t0 = args[1][0][0]
# t1 = args[1][-1][0]
# print(f'{t0} -> {t1}')
# print()
# next_idx = args[-1] + 1
sleep(2)
for idx in range(settings.NUM_WORKERS):
parent_conn[idx].send(Message(MessageType.STOP, None))
sleep(2)
# while not parent_conn.poll():
# print('waiting')
# sleep(0.5)
# x = parent_conn.recv()
# p.kill()
# print(x)
main()
``` |
{
"source": "jmnel/combinatorial-optimization",
"score": 3
} |
#### File: src/problem1_b2/adj_graph.py
```python
import numpy as np
from pathlib import Path
from graphviz import Digraph
# import ..settings
def draw_adj_graph(arcs, filename: str, output_dir: Path):
g = Digraph('G',
filename=filename,
directory=output_dir,
format='pdf')
g.attr(rankdir='LR')
for i in range(len(arcs) - 1):
for j in range(len(arcs[i])):
if arcs[i][j]:
g.edge(str(j + 1), str(i + 1))
for j in range(len(arcs[-1])):
if arcs[-1][j]:
g.edge(str(j + 1), 'Done')
g.render()
```
#### File: midterm/old/play.py
```python
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import random
a, b, c, d = -2, 2, -1, 3
def f(x, y): return -(1-x)**2 - 100*(y-x**2)**2
fig = plt.figure()
# ax = fig.gca(projection='3d')
ax = fig.gca()
x = np.arange(-2, 2, 0.1)
y = np.arange(-1, 3, 0.1)
x, y = np.meshgrid(x, y)
r = np.sqrt(x**2 + y**2)
# zline = np.linspace(100.0, 100.0, 100)
# xline = np.linspace(-2.0, 2.0, 100)
# yline = np.linspace(-2.0, 2.0, 100)
z = np.zeros(x.shape)
for i in range(x.shape[0]):
for j in range(x.shape[1]):
z[i, j] = -(1.-x[i, j])**2 - 100.*(y[i, j]-x[i, j]**2)**2
ax.contourf(x, y, -np.power(-z, 1/5), cmap='gnuplot2')
# ax.plot(xline, yline, 'blue')
ax.scatter([1.0], [1.0], c='red')
grid3 = np.array([
[f(a + i*(b-a)/99., c + j*(d-c)/99.) for j in range(100)]
for i in range(100)])
class TempScheduler:
def __init__(self, temp0):
self.temp0 = temp0
self.temp = temp0
self.k = 0
self.alpha = 4.01
self.n = 10000
def step(self):
# self.temp = self.temp0 / (1. + self.alpha + np.log(1. + self.k))
self.temp = self.temp0 * ((self.n - self.k) / self.n)**2
# self.temp = max(0, self.temp0 - 0.1 * self.k)
self.k += 1
return self.temp
def temp_final(self):
return 0.0
def simulated_annealing(grid):
# def temp_schedule(t):
# return max(0., 100. * np.exp(-0.005 * t))
temp_scheduler = TempScheduler(80.0)
n = (0, 0)
# t = 0
# te
# temp0 = 10.
history = list()
# temp = temp0
# temp_final = 0.6
while True:
temp = temp_scheduler.step()
history.append(n)
# print(f'iteration {t}')
# temp = max(0.999 * temp, 1e-12)
# alpha = 1.01
# temp = temp0 / (1. + alpha * np.log(1. + t))
print(f'temp={temp}')
e_current = grid[n]
i, j = n
if temp <= temp_scheduler.temp_final():
print('Solution found:')
print(' x* = ({}, {})'.format(i, j))
print(' f(x*) = {}'.format(e_current))
return (n, grid[n], history)
children = [(i-1, j),
(i, j-1),
(i+1, j),
(i, j+1)]
children = tuple(filter(
lambda c: c[0] >= 0 and c[1] >= 0 and c[0] < grid.shape[0] and c[1] < grid.shape[1], children))
successor = children[random.randint(0, len(children) - 1)]
# print(successor)
e_successor = grid[successor]
e_delta = e_successor - e_current
if e_delta > 0.0:
n = successor
else:
jump_prob = np.exp(e_delta / temp)
if np.random.uniform(0., 1.) < jump_prob:
n = successor
# t += 1
n_max, f_max, hist = simulated_annealing(grid3)
xline = np.array([a + h[0]*(b-a)/99. for h in hist])
yline = np.array([c + h[1]*(d-c)/99. for h in hist])
grid_max = np.amax(grid3)
grid_ij_max = np.where(grid3 == grid_max)
print(grid_max)
print(grid_ij_max)
grid_xy_max = (a + grid_ij_max[0]*(b-a)/99., c + grid_ij_max[1]*(d-c)/99.)
print(grid_xy_max)
foo_max = (a + n_max[0]*(b-a)/99., c + n_max[1]*(d-c)/99.)
ax.scatter([foo_max[0]], [foo_max[1]])
ax.plot(xline, yline, linewidth=0.5, color='gray')
plt.show()
```
#### File: test/NEL_Jacques-212588109-midterm1/dj38_solution.py
```python
import numpy as np
import matplotlib
matplotlib.use('GTK3Cairo')
import matplotlib.pyplot as plt
from dj38_loader import DJ38Loader
from tsp_solver import tsp_solve
def solve_dj38():
"""Solves TSP for 'dj38' dataset."""
print('Solving TSP with \'dj38\' dataset...')
# Download and get citiy coordinates form dataset.
cities = DJ38Loader().dataset
num_cities = len(cities)
print(f' Found {num_cities} cities.')
# Create initial tour by enumerating cities and append first element.
initial_tour = tuple(i for i in range(num_cities)) + (0,)
print(' Running 2-opt TSP solver...\n')
optim_tour, _ = tsp_solve(cities)
initial_tour_points = np.array([cities[i] for i in initial_tour])
optim_tour_points = np.array([cities[i] for i in optim_tour])
fig, ax = plt.subplots()
ax.plot(initial_tour_points[:, 0], initial_tour_points[:, 1], 'red',
linewidth=0.8, zorder=-30)
ax.scatter(cities[:, 0], cities[:, 1], c='blue')
ax.set_xlabel('x')
ax.set_ylabel('y')
plt.savefig(fname='figures/figure4-1.svg')
plt.cla()
ax.plot(optim_tour_points[:, 0], optim_tour_points[:, 1], 'red',
linewidth=0.8, zorder=-30)
ax.scatter(cities[:, 0], cities[:, 1], c='blue')
ax.set_xlabel('x')
ax.set_ylabel('y')
plt.savefig(fname='figures/figure4-2.svg')
# plt.show()
solve_dj38()
```
#### File: combinatorial-optimization/midterm/time_utils.py
```python
def time_human_readible(t: float):
if t > 60:
return '{:.1f} minutes'.format(t / 60.)
elif t >= 1.0:
return '{:.2f}s'.format(t)
elif t > 1e-3:
return '{:.0f}ms'.format(t * 1e3)
elif t > 1e-6:
return '{:.0f}μs'.format(t * 1e6)
else:
return '{}s'.format(t)
```
#### File: workshop3/src/test_minimax.py
```python
from time import perf_counter
from typing import Tuple
from random import shuffle
from minimax import minimax_search
from common.time_utils import time_hr
from common.ticktack import *
def test_minimax_random_state():
num_trials = 10
epochs = 200
for n in range(9):
t_avg = 0.
minimax_calls_avg = 0
util_fn_evals_avg = 0
for trial in range(num_trials):
init_state = list(range(9))
shuffle(init_state)
init_state = init_state[:n]
init_state = tuple(init_state)
t_start = perf_counter()
_, _, stats = minimax_search(init_state,
util_fn=score_weighted,
expand_fn=expand_basic,
randomize=True)
t_avg += perf_counter() - t_start
util_fn_evals_avg += stats['util_fn_evals']
minimax_calls_avg += stats['minimax_calls']
t_avg /= num_trials
util_fn_evals_avg /= num_trials
minimax_calls_avg /= num_trials
print(
f'For n={n} available states, avg. statistics over {num_trials} runs:')
print(f' running time: {time_hr(t_avg)}')
print(
f' minimax calls: {minimax_calls_avg}, util function calls: {util_fn_evals_avg}\n')
``` |
{
"source": "jmnel/dataapi-client",
"score": 2
} |
#### File: dataapi_client/topk/export.py
```python
import requests
import json
from pprint import pprint
import sqlite3
from typing import Sequence, Union, Optional
import datetime
from datetime import datetime
from ..api import ApiConfig
ENDPOINT_BASE = 'https://data.jmnel.com/api/v1/'
ENDPOINT_AUTH = ENDPOINT_BASE + 'topk/authenticate?api={}'
ENDPOINT_EXPORT = ENDPOINT_BASE + 'topk/import/'
def export_topk(date: Union[str, datetime.date],
symbols: Sequence[str],
api_key: Optional[str] = None,
overwrite: bool = False,
blocking: bool = True,
verbose: bool = False):
"""
Export top-k prediction to the data server.
Args:
date: Date of prediction.
symbols: List of predicted symbols.
api_key: API key for authentication.
overwrite: Overwrite existing predictions.
blocking: Wait for IB symbol lookup before returning if true.
verbose: Generate verbose output.
Returns:
"""
if api_key is None or api_key == '':
if ApiConfig.api_key is None or ApiConfig.api_key == '':
raise ValueError('dataapi-client: API key not provided')
else:
api_key = ApiConfig.api_key
if not blocking:
raise NotImplementedError('dataapi-client: non-blocking requests not implemented yet.')
session = requests.Session()
if verbose:
print('dataapi-client: session created')
try:
auth_result = session.get(ENDPOINT_AUTH.format(api_key))
if verbose:
print('dataapi-client: authenticated')
except Exception as e:
raise ConnectionError(f'dataapi-client: failed to connect to server: {e}')
if not auth_result.ok:
raise ValueError(f'dataapi-client: authentication failed; {auth_result.reason}')
if isinstance(date, str):
try:
datetime.strptime(date, '%Y-%m-%d')
except ValueError:
raise ValueError(f'{date} is not in %Y-%m-%d format')
else:
date = date.strftime('%Y-%m-%d')
json_data = {'api': api_key,
'date': date,
'overwrite': overwrite,
'symbols': symbols}
session.headers['referer'] = ENDPOINT_AUTH
data = {'csrfmiddlewaretoken': session.cookies['csrftoken'],
'json_data': json.dumps(json_data)}
if verbose:
print('dataapi-client: exporting top-k symbols')
response = session.post(ENDPOINT_EXPORT, data=data)
# print(response.content)
response_json = json.loads(response.content)
if verbose:
print('dataapi-client: response:')
pprint(response_json)
return response_json
``` |
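A hedged call sketch for `export_topk` above (not repo code): the import path is inferred from the file layout, and the API key, date, and ticker symbols are placeholders.
```python
# Placeholder credentials and tickers; a real call needs network access and a valid key.
from dataapi_client.api import ApiConfig            # assumed package layout
from dataapi_client.topk.export import export_topk

ApiConfig.api_key = 'YOUR-API-KEY'                   # assumed configuration pattern
response = export_topk(date='2021-03-01',
                       symbols=['AAPL', 'MSFT', 'GOOG'],
                       overwrite=False,
                       verbose=True)
print(response)
```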
{
"source": "jmnel/datapipeline",
"score": 2
} |
#### File: src/iex/update.py
```python
from pymongo import MongoClient
from confluent_kafka import Consumer, Producer
from nameko.extensions import Entrypoint
class Updater(Entrypoint):
name = 'service'
def __init__(self):
pass
def setup(self):
pass
def start(self):
self.container.spawn_managed_thread(self.run, identifier='Updater.run')
def run(self):
pass
def stop(self):
pass
#updater = Updater.decorator
``` |
{
"source": "jmnel/dromedary-welcome",
"score": 3
} |
#### File: dromedary-welcome/src/help_browser.py
```python
import os
from PySide2.QtWidgets import (QWidget, QPushButton, QTextBrowser, QHBoxLayout, QVBoxLayout,
QDialog)
from PySide2.QtCore import Qt, Slot
class HelpBrowser(QWidget):
instance = None # Class variable instance of help browser
documentation_path = '' # Documentation path
def __init__(self, parent=None):
# def __init__(self, path, page, parent=None):
# We don't pass parent to superclass, because we don't want help browser to be a child of
# main window. We handle closing help browser when main window closes manually.
super(HelpBrowser,self).__init__()
# Set needed widget attributes. WA_DeleteOnClose is needed so that closing main window also
# closes instance of help browser.
self.setAttribute(Qt.WA_DeleteOnClose) # Destroy widget when window is closed.
self.setAttribute(Qt.WA_GroupLeader)
# Create home, back, and close buttons.
self.home_button = QPushButton(self.tr('&Home'))
self.back_button = QPushButton(self.tr('&Back'))
self.close_button = QPushButton(self.tr('Close'))
self.close_button.setShortcut(self.tr('Esc'))
# Layout home, back, and close buttons.
self.button_layout = QHBoxLayout()
self.button_layout.addWidget(self.home_button)
self.button_layout.addWidget(self.back_button)
self.button_layout.addStretch()
self.button_layout.addWidget(self.close_button)
# Create basic layout containing QTextBrowser.
self.text_browser = QTextBrowser()
self.main_layout = QVBoxLayout()
self.main_layout.addLayout(self.button_layout)
self.main_layout.addWidget(self.text_browser)
self.setLayout(self.main_layout)
# Connect button signals
self.home_button.clicked.connect(self.text_browser.home)
self.back_button.clicked.connect(self.text_browser.backward)
self.close_button.clicked.connect(self.close)
# Calls static function to clear help browser instance reference.
self.destroyed.connect(HelpBrowser.on_close)
# Close help browser on parent is_closing signal.
parent.is_closing.connect(self.close)
# Navigates to page in documentation path.
def goto_page(self, page):
page_file_path = os.path.join(HelpBrowser.documentation_path, page)
self.text_browser.setSource(page_file_path)
# Sets documenation path.
@staticmethod
def set_documentation_path(path):
HelpBrowser.documentation_path = path
# Unsets help browser instance reference. This gets called when help browser is destroyed.
@staticmethod
def on_close():
if HelpBrowser.instance is not None:
HelpBrowser.instance = None
# Creates and shows help browser window, stores instance in class variable, and navigates to
# page in documentation path.
@staticmethod
def show_page(page, parent=None):
if HelpBrowser.instance is None:
HelpBrowser.instance = HelpBrowser(parent)
HelpBrowser.instance.resize(500,400)
HelpBrowser.instance.show()
HelpBrowser.instance.goto_page(page)
``` |
{
"source": "jmnel/math4171",
"score": 3
} |
#### File: arcplot/pythonscripts/foo.py
```python
print("loading")
def bar(a,b,c):
print("hi")
print(a)
print(b)
print(c)
return
#from mpl_toolkits.mplot3d import Axes3D
#import matplotlib.pyplot as plt
#from matplotlib import cm
#from matplotlib.ticker import LinearLocator, FormatStrFormatter
#import numpy as np
#fig = plt.figure()
#ax = fig.gca(projection='3d')
## Make data.
#X = np.arange(-5, 5, 0.05)
#Y = np.arange(-5, 5, 0.05)
#X, Y = np.meshgrid(X, Y)
#R = np.sqrt(X**2 + Y**2)
#Z = np.sin(R)
## Plot the surface.
#surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,
#linewidth=0, antialiased=True)
## Customize the z axis.
#ax.set_zlim(-1.01, 1.01)
#ax.zaxis.set_major_locator(LinearLocator(10))
#ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
## Add a color bar which maps values to colors.
##fig.colorbar(surf, shrink=0.5, aspect=5)
#plt.show()
``` |
{
"source": "jmnel/pyfintools",
"score": 3
} |
#### File: pyfintools/plot/geometry.py
```python
import numpy as np
class Point(np.ndarray):
def __new__(cls, x, y, *args, **kwargs):
return super(Point, cls).__new__(cls, *args, shape=(3,), **kwargs)
def __init__(self, x, y):
self[0:3] = [x, y, 1]
@property
def x(self):
return self[0]
@x.setter
def x(self, value):
self[0] = value
@property
def y(self):
return self[1]
@y.setter
def y(self, value):
self[1] = value
@property
def xy(self):
return self[:2]
def __str__(self):
return 'Point( x={}, y={} )'.format(*self[:2])
class Rect:
def __init__(self, position: Point, scale: Point):
self.position = position
self.scale = scale
def __str__(self):
return 'Rect( position={}, scale={} )'.format(self.position, self.scale)
def __getitem__(self, idx):
if idx > 1:
raise StopIteration
if idx == 0:
return self.position
else:
return self.scale
```
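A quick smoke test for `Point` and `Rect` (not from the repo); the import path is assumed from the file location and the coordinates are arbitrary.
```python
from pyfintools.plot.geometry import Point, Rect   # assumed module path

p = Point(1.0, 2.0)
p.x = 3.0                                           # property setter writes self[0]
r = Rect(position=Point(0.0, 0.0), scale=Point(10.0, 5.0))
print(p)                                            # Point( x=3.0, y=2.0 )
print(r)
```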
#### File: pyfintools/plot/plot.py
```python
from typing import List, Union, Tuple
import pandas as pd
from .figure import Figure
from .axes import Axes
from .ohlcv import TickVolume, TickCandles
def subplots(num_rows: int, num_cols: int):
fig = Figure()
axes = list()
for i in range(num_rows):
axes.append(list())
for j in range(num_cols):
ax = Axes()
axes[-1].append(ax)
fig.axes = axes
if num_cols == 1 and num_rows == 1:
return fig, axes[0][0]
elif num_rows == 1:
return fig, axes[0]
elif num_cols == 1:
return fig, list(a[0] for a in axes)
return fig, axes
def ohlcv_plot(data: pd.DataFrame):
fig, axes = subplots(2, 1)
axes[0].plot_series.append(TickCandles(data))
axes[1].plot_series.append(TickVolume(data))
# fig.set_title(f'{model.contract} {model.date}')
fig.layout()
fig.draw()
return fig, axes
# print(buff.getvalue().decode('utf-8'))
``` |
{
"source": "jmnel/simulated-annealing",
"score": 3
} |
#### File: jmnel/simulated-annealing/multistart.py
```python
from typing import Callable, Dict
import numpy as np
import scipy.optimize as optim
from gradient_descent import grad_descent
from zoo import Zoo
def multistart(f: Callable,
jac: Callable,
domain: np.array,
max_iterations: int = 200,
tau: float = 1e-4,
rho: float = 0.8,
eps: float = 1e-3,
tol: float = 1e-7,
ls_method: Callable = optim.minimize,
ls_kwargs: Dict = dict(),
polish: bool = True,
polish_method: Callable = optim.minimize,
polish_kwargs: Dict = dict()):
"""
Globally minimizes a function f on region S by repeatedly running LS from uniformly sampled
points in S.
Args:
f: The function to minimize
jac: Gradient of f
domain: Search region S on which to minimize f
max_iterations: Maximum number of global iterations
tau: Stopping tolerance of LS method
rho: Double-box rule search parameter
eps: Epsilon below which to consider two local minima as the same
polish: Improve final solution by one last LS
tol: Tolerance of polish
Returns:
scipy.optimize.OptimizeResult holding the solution x, f(x), and function/gradient evaluation counts
"""
if 'tol' not in ls_kwargs:
ls_kwargs['tol'] = tau
if 'tol' not in polish_kwargs:
polish_kwargs['tol'] = tol
dims = domain.shape[1]
s_min = domain[0]
s_max = domain[1]
s_size = s_max - s_min
s2_min = s_min - 0.5 * (np.power(2., 1. / dims) - 1.) * s_size
s2_max = s_max + 0.5 * (np.power(2., 1. / dims) - 1.) * s_size
s2_size = s2_max - s2_min
m_n = 0
x_minima = list()
deltas = list()
result = optim.OptimizeResult()
result.nfev = 0
result.njev = 0
x_best = None
f_best = float('inf')
for n in range(1, max_iterations):
# Generate points in S2 and discard ones not in S.
while True:
x = np.random.uniform(size=dims) * s2_size + s2_min
in_s = all([s_min[i] <= x[i] and x[i] <= s_max[i] for i in range(dims)])
m_n += 1
if in_s:
break
# Perform LS with generated initial point in S.
# x_ls=grad_descent(f, x, grad, tol = tau, max_iterations = 400)
ls_result = ls_method(f, x0=x, jac=jac, **ls_kwargs)
x_ls = ls_result.x
result.nfev += ls_result.nfev
result.njev += ls_result.njev
f_ls = f(x_ls)
result.nfev += 1
# LS results which escape S are discarded.
if any([x_ls[i] < s_min[i] or x_ls[i] > s_max[i] for i in range(dims)]):
continue
delta = n / m_n
deltas.append(delta)
sigma_2 = np.var(deltas)
if f_ls < f_best:
f_best = f_ls
x_best = x_ls
result.x = x_best
result.fun = f_best
result.jac = ls_result.jac
min_is_new = True
for i, x_min_other in enumerate(x_minima):
if np.linalg.norm(x_ls - x_min_other) <= eps:
min_is_new = False
break
if min_is_new:
sigma_2_last = sigma_2
x_minima.append(x_ls)
# Otherwise, no new local minimum was found.
else:
# Check double-box stop rule.
if sigma_2 < rho * sigma_2_last:
break
if polish:
polish_result = polish_method(f, x0=x_best, jac=jac, **polish_kwargs)
result.nfev += polish_result.nfev
result.njev += polish_result.njev
result.fun = polish_result.fun
result.jac = polish_result.jac
result.x = polish_result.x
# x=grad_descent(f, x_best, tol = tol, max_iterations = 400)
return result
```
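A usage sketch for `multistart` on a hand-rolled convex quadratic rather than a Zoo benchmark; the flat import, the bounds, and the iteration budget are assumptions made for this example.
```python
import numpy as np
from multistart import multistart      # assumed flat module import, as elsewhere in the repo

def f(x):
    return (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2

def jac(x):
    return np.array([2.0 * (x[0] - 1.0), 2.0 * (x[1] + 2.0)])

domain = np.array([[-5.0, -5.0],       # lower corner of the search box S
                   [5.0, 5.0]])        # upper corner of S

res = multistart(f, jac, domain, max_iterations=50)
print(res.x, res.fun, res.nfev)        # minimizer near (1, -2)
```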
#### File: jmnel/simulated-annealing/results1.py
```python
import numpy as np
import matplotlib
# matplotlib.use('module://matplotlib-backend-kitty')
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from zoo import Zoo
from sa import simulated_annealing
objectives = [Zoo().get('branin').make_explicit(),
Zoo().get('goldstein_price').make_explicit()]
NUM_RUNS = 4
GAMMA = 0.01
for obj in objectives:
f, grad = obj.f, obj.grad
domain, plt_domain = np.array(obj.domain), np.array(obj.domain_plot)
DOM_DIM = 2
L0 = 10
L = DOM_DIM * L0
DELTA = 0.1
EPS = 1e-4
CHI = 0.9
SMOOTHING = 0.01
T = 0.1
def callback(iteration, x, chain, c):
x_hist.extend(chain)
c_hist.append(c)
xlim = plt_domain[:, 0]
ylim = plt_domain[:, 1]
n_samples = 200
x_plt, y_plt = np.linspace(*xlim, n_samples), np.linspace(*ylim, n_samples)
mx, my = np.meshgrid(x_plt, y_plt)
z = np.power(f([mx, my]), 0.1)
fig, ax = plt.subplots(1, 1)
ax.contourf(x_plt, y_plt, z, levels=50, cmap='viridis')
colors = plt.cm.twilight(np.linspace(0, 1, NUM_RUNS + 1))
temps = list()
for idx_run in range(NUM_RUNS + 1):
c_hist = list()
x_hist = list()
res = simulated_annealing(f,
grad,
domain=domain,
l0=L0,
delta=DELTA,
stop_eps=EPS,
chi=CHI,
smoothing=SMOOTHING,
descent_affinity=T,
callback=callback)
x_smooth = res[0]
x_smooth_hist = [x_smooth, ]
temps.append(c_hist)
for x_i in reversed(x_hist):
x_smooth = GAMMA * x_i + (1. - GAMMA) * x_smooth
x_smooth_hist.append(x_smooth)
ax.scatter([res[0][0], ], [res[0][1], ], c=np.array(colors[idx_run]).reshape((1, 4)))
ax.plot(*tuple(zip(*x_smooth_hist)), color=colors[idx_run], linewidth=0.6)
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
fig.savefig(f'figures/fig51-{obj.name}.pdf', dpi=200)
fig, ax = plt.subplots(1, 1)
for c_hist in temps:
ax.plot(np.arange(len(c_hist)), c_hist)
ax.set_xlabel(r'Iteration $n$')
ax.set_ylabel(r'Temperature $c^{(n)}$')
fig.savefig(f'figures/fig52-{obj.name}.pdf', dpi=200)
```
#### File: jmnel/simulated-annealing/step_size.py
```python
from typing import Callable
import numpy as np
import scipy.optimize as optim
def armijo_step(f: Callable,
l0: float,
jac: Callable,
alpha: float,
rho: float):
"""
Calculates the maximum Armijo step size such that the Goldstein condition is still satisfied.
Args:
f: Objective value along the search direction, as a function of the step size t.
jac: Derivative of f with respect to t.
l0: Initial base step size.
alpha: Armijo parameter.
rho: Growth factor.
Returns:
OptimizeResult Armijo max step size.
"""
k0 = 0
l = l0 * np.power(rho, k0)
f0 = f(0.)
jac0 = jac(0.)
for k in range(k0, 100):
l_new = l0 * np.power(rho, k)
if f(l_new) > f0 + alpha * l_new * jac0:
result = optim.OptimizeResult(x=l,
success=True,
status=0,
message='found optimal step size',
nfev=1 + k,
njev=1,
nit=k)
return result
l = l_new
result = optim.OptimizeResult(x=l,
success=False,
status=-1,
message='max iterations exceeded',
nfev=100 + 1,
njev=1,
nit=k)
return result
def gss(f: Callable, a: float, b: float, tol: float = 1e-12):
"""
Find minimum of function with Golden-section search.
Args:
f: Function to minimize.
a: Left bracket of search interval.
b: Right bracket of search interval.
tol: Desired tolerance.
Returns:
float Minimizer of f.
"""
INV_PHI = 0.5 * (np.sqrt(5.) - 1.)
INV_PHI_2 = 0.5 * (3. - np.sqrt(5.))
a, b = min(a, b), max(a, b)
h = b - a
if h <= tol:
return optim.OptimizeResult(x=0.5 * (a + b),
success=True,
status=0,
message='found optimal value',
nfev=0,
njev=0,
nit=0)
# return 0.5 * (a + b)
n = int(np.ceil(np.log(tol / h) / np.log(INV_PHI)))
nfev = 0
c = a + INV_PHI_2 * h
d = a + INV_PHI * h
fc = f(c)
fd = f(d)
nfev += 2
for k in range(n - 1):
if fc < fd:
b = d
d = c
fd = fc
h = INV_PHI * h
c = a + INV_PHI_2 * h
fc = f(c)
nfev += 1
else:
a = c
c = d
fc = fd
h = INV_PHI * h
d = a + INV_PHI * h
fd = f(d)
nfev += 1
if fc < fd:
assert (d - a) <= tol
# x = 0.5 * (a + d)
else:
assert (b - c) <= tol
# x = 0.5 * (c + b)
return optim.OptimizeResult(x=0.5 * (a + d) if fc < fd else 0.5 * (c + b),
success=True,
status=0,
message='found optimal value',
nfev=nfev,
njev=0,
nit=n)
```
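A quick check of the golden-section search above on a one-dimensional parabola; the bracket and tolerance are chosen by hand for illustration.
```python
from step_size import gss              # assumed flat module import

res = gss(lambda t: (t - 2.0) ** 2 + 1.0, a=0.0, b=5.0, tol=1e-8)
print(res.x, res.nfev, res.nit)        # x should land very close to 2.0
```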
#### File: jmnel/simulated-annealing/utils.py
```python
import numpy as np
def grad_approx(f, x, tau=1e-14):
x1, x2 = x
return np.array([
(f([x1 + 0.5 * tau, x2]) - f([x1 - 0.5 * tau, x2])) / tau,
(f([x1, x2 + 0.5 * tau]) - f([x1, x2 - 0.5 * tau])) / tau])
```
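A sanity check for the central-difference helper above; the test function is hand-picked, and a larger step than the 1e-14 default is passed because differencing at that scale underflows in double precision.
```python
from utils import grad_approx          # assumed flat module import

f = lambda x: x[0] ** 2 + 3.0 * x[1]   # analytic gradient is [2*x1, 3]
print(grad_approx(f, [2.0, -1.0], tau=1e-6))   # roughly [4.0, 3.0]
```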
#### File: simulated-annealing/zoo/rosenbrock.py
```python
import sympy as sym
from .benchmark import Benchmark
class Rosenbrock(Benchmark):
def __init__(self):
super().__init__()
self.name = 'Rosenbrock'
self.name_short = 'RB'
self.dims = 2
x = sym.IndexedBase('x')
i = sym.Idx('i')
x1, x2 = x[1], x[2]
self.x = [x1, x2]
a, b = sym.symbols('a b')
self.params = {'a': [a, 1.],
'b': [b, sym.pi * 4]
}
self.expr = (a - x1)**2 + b * (x2 - x1**2)**2
self.xmin = [[a, a**2], ]
self.domain = [[-2., -1.], [2., 2.]]
self.domain_plot = [[-2., -2.], [3., 3.]]
```
#### File: simulated-annealing/zoo/schubert.py
```python
from pprint import pprint
import sympy as sym
sym.init_printing(use_latex=True)
import numpy as np
from .benchmark import Benchmark
class Schubert(Benchmark):
def __init__(self, case: str):
super().__init__()
if case not in {'p3', 'p8', 'p16', 'p22'}:
raise ValueError('case must be one of p3, p8, p16, or p22')
self.name = f"schubert {case}"
def u(x_i, a, k, m):
return sym.Piecewise(
(k * (x_i - a)**m, sym.Gt(x_i, a)),
(0, sym.And(sym.Ge(x_i, -a), sym.Le(x_i, a))),
(k * (-x_i - a)**m, sym.Lt(x_i, -a))
)
a, k, m = sym.symbols('a k m')
if case == 'p3':
n = 2
x = sym.IndexedBase('x')
self.x = [x[i] for i in range(0, n)]
i = sym.Idx('i')
term1 = sym.Sum(i * sym.cos((i + 1) * x[0] + 1), (i, 0, 4))
term2 = sym.Sum(i * sym.cos((i + 1) * x[1] + 1), (i, 0, 4))
self.expr = term1 * term2 + u(x[0], a, k, m) + u(x[1], a, k, m)
self.params = {'a': [a, 10.],
'k': [k, 100.],
'm': [m, 2]}
self.xmin = None
self.domain = [-10. * np.ones(n), 10. * np.ones(n)]
self.domain_plot = self.domain
elif case == 'p8':
n = 3
x = sym.IndexedBase('x')
self.x = [x[i] for i in range(0, n)]
y = sym.IndexedBase('y')
i = sym.Idx('i')
k_1, k_2 = sym.symbols('k_1 k_2')
pprint(y)
self.expr = (sym.pi / n) * (
k_1 * sym.sin(sym.pi * y[0])**2
+ sym.Sum((y[i] - k_2)**2
* (1. + k_1 * sym.sin(sym.pi * y[i + 1])**2), (i, 0, n - 2))
+ (y[n - 1] - k_2)**2) \
+ sym.Sum(u(x[i], a, k, m), (i, 0, n - 1))
y_subs = {y[i]: 1. + 0.25 * (x[i] + 1.) for i in range(n)}
self.expr = self.expr.doit().subs(y_subs)
self.params = {'a': [a, 10.],
'k': [k, 100.],
'm': [m, 4],
'k_1': [k_1, 10.],
'k_2': [k_2, 1.]}
self.xmin = [[1., 1., 1.], ]
self.domain = [-10. * np.ones(n), 10. * np.ones(n)]
self.domain_plot = None
self.dims = n
``` |
{
"source": "jmnich/UFP_Regulator_Simulations",
"score": 3
} |
#### File: UFP_Regulator_Simulations/FUZZY_V4/Field.py
```python
class Field:
"""
Store data related to one field of the reaction table.
"""
def __init__(self, index, initial_value, max_value, min_value):
self.current_value = initial_value
self.dummy_value = initial_value
self.max = max_value
self.min = min_value
self.checked_mid = False
self.checked_top = False
self.checked_bot = False
self.related_index = index
def setNewValue(self, new_value):
self.current_value = new_value
def getCurrentValue(self):
return self.current_value
def getNextValue(self):
if not self.checked_bot:
self.checked_bot = True
self.dummy_value = self.current_value - 1
if self.dummy_value < self.min:
self.dummy_value = self.min
return self.dummy_value
elif not self.checked_mid:
self.checked_mid = True
self.dummy_value = self.current_value
return self.dummy_value
elif not self.checked_top:
self.checked_top = True
self.dummy_value = self.current_value + 1
if self.dummy_value > self.max:
self.dummy_value = self.max
return self.dummy_value
def allChecked(self):
if self.checked_bot and self.checked_mid and self.checked_top:
return True
else:
return False
def resetCheckFlags(self):
self.checked_bot = False
self.checked_mid = False
self.checked_top = False
```
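A tiny illustration (not from the repo) of how a `Field` cycles through its three candidate values; the numbers and the import path are arbitrary assumptions.
```python
from Field import Field                # assumed module name from the file above

f = Field(index=0, initial_value=3, max_value=8, min_value=0)
print(f.getNextValue(), f.getNextValue(), f.getNextValue())   # 2 3 4
print(f.allChecked())                                         # True
f.resetCheckFlags()
```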
#### File: UFP_Regulator_Simulations/FUZZY_V4/ReactionZone.py
```python
class ReactionZone:
"""
Store a block of 9 reactions.
"""
def __init__(self, indexes, reactions_array):
self.reactions_array = reactions_array
self.contained_indexes = indexes
def getFields(self):
list_of_fields = []
for index in self.contained_indexes:
list_of_fields.append(self.reactions_array[index])
return list_of_fields
```
#### File: UFP_Regulator_Simulations/FUZZY_V5/FuzzyRegulator.py
```python
import MembershipFunction
class FuzzyRegulator:
def __init__(self, min_thrust, max_thrust, reactions):
self.min_thrust = min_thrust
self.max_thrust = max_thrust
self.recommended_left = 0.0
self.recommended_right = 0.0
self.lastErr = 0.0
self.recently_used_rules = []
# prepare membership functions for input parameters
self.angle_err = {}
self.angle_err[4] = MembershipFunction.MembershipFunction(False, True, 1.0 / 15.0, 60.0)
self.angle_err[3] = MembershipFunction.MembershipFunction(False, False, 1.0 / 15.0, 45.0)
self.angle_err[2] = MembershipFunction.MembershipFunction(False, False, 1.0 / 15.0, 25.0)
self.angle_err[1] = MembershipFunction.MembershipFunction(False, False, 1.0 / 10.0, 10.0)
self.angle_err[0] = MembershipFunction.MembershipFunction(False, False, 1.0 / 10.0, 0.0)
self.angle_err[-1] = MembershipFunction.MembershipFunction(False, False, 1.0 / 10.0, -10.0)
self.angle_err[-2] = MembershipFunction.MembershipFunction(False, False, 1.0 / 15.0, -25.0)
self.angle_err[-3] = MembershipFunction.MembershipFunction(False, False, 1.0 / 15.0, -45.0)
self.angle_err[-4] = MembershipFunction.MembershipFunction(True, False, 1.0 / 15.0, -60.0)
self.ang_vel = {}
self.ang_vel[4] = MembershipFunction.MembershipFunction(False, True, 1.0 / 7.5, 30.0)
self.ang_vel[3] = MembershipFunction.MembershipFunction(False, False, 1.0 / 7.5, 22.5)
self.ang_vel[2] = MembershipFunction.MembershipFunction(False, False, 1.0 / 7.5, 12.5)
self.ang_vel[1] = MembershipFunction.MembershipFunction(False, False, 1.0 / 5.0, 5.0)
self.ang_vel[0] = MembershipFunction.MembershipFunction(False, False, 1.0 / 5.0, 0.0)
self.ang_vel[-1] = MembershipFunction.MembershipFunction(False, False, 1.0 / 5.0, -5.0)
self.ang_vel[-2] = MembershipFunction.MembershipFunction(False, False, 1.0 / 7.5, -12.5)
self.ang_vel[-3] = MembershipFunction.MembershipFunction(False, False, 1.0 / 7.5, -22.5)
self.ang_vel[-4] = MembershipFunction.MembershipFunction(True, False, 1.0 / 7.5, -30.0)
# prepare membership functions for output thrust recommendations
span = max_thrust - min_thrust
factor = span / 8.0
self.thr_reco = {}
self.thr_reco[8] = MembershipFunction.MembershipFunction(False, False, factor, max_thrust)
self.thr_reco[7] = MembershipFunction.MembershipFunction(False, False, factor, min_thrust + span / 8.0 * 7)
self.thr_reco[6] = MembershipFunction.MembershipFunction(False, False, factor, min_thrust + span / 8.0 * 6)
self.thr_reco[5] = MembershipFunction.MembershipFunction(False, False, factor, min_thrust + span / 8.0 * 5)
self.thr_reco[4] = MembershipFunction.MembershipFunction(False, False, factor, min_thrust + span / 8.0 * 4)
self.thr_reco[3] = MembershipFunction.MembershipFunction(False, False, factor, min_thrust + span / 8.0 * 3)
self.thr_reco[2] = MembershipFunction.MembershipFunction(False, False, factor, min_thrust + span / 8.0 * 2)
self.thr_reco[1] = MembershipFunction.MembershipFunction(False, False, factor, min_thrust + span / 8.0 * 1)
self.thr_reco[0] = MembershipFunction.MembershipFunction(False, False, factor, min_thrust)
# prepare rulebase
self.rules = []
z = 0
for verse in range(9):
for column in range(9):
self.rules.append(Rule(verse - 4, column - 4, reactions[z]))
z += 1
print(str(z) + " rules loaded")
def calcNewThrusts(self, angle, ang_vel, desired_angle):
self.lastErr = desired_angle - angle
self.recently_used_rules.clear()
# fuzzyfication of the input
fuzzy_angle = []
for fuzzy_state in self.angle_err:
pertinence = self.angle_err[fuzzy_state].getMembershipFactor(angle)
if pertinence > 0.0: # is a member
fuzzy_angle.append(FuzzyInput(fuzzy_state, pertinence))
fuzzy_vel = []
for fuzzy_state in self.ang_vel:
pertinence = self.ang_vel[fuzzy_state].getMembershipFactor(ang_vel)
if pertinence > 0.0: # is a member
fuzzy_vel.append(FuzzyInput(fuzzy_state, pertinence))
# gather recommendations from rules and save used rules
recommendations = []
for r in range(len(self.rules)):
for ang in fuzzy_angle:
for vel in fuzzy_vel:
if self.rules[r].angle == ang.input_value and \
self.rules[r].angular_velocity == vel.input_value:
# rule is on
recommendations.append(Recommendation(self.rules[r].recommended_thrust,
min(ang.pertinence, vel.pertinence)))
self.recently_used_rules.append([r, min(ang.pertinence, vel.pertinence)])
# defuzzyfication
total_val = 0.0
total_pertinence = 0.0
for reco in recommendations:
abs_rec = abs(reco.recommended_thrust)
center = self.thr_reco[abs_rec].central_boundary
if reco.recommended_thrust >= 0: # left thruster
total_val += center * reco.pertinence
else: # right thruster
total_val -= center * reco.pertinence
total_pertinence += reco.pertinence
# center-average calculation
final_recommendation = total_val / total_pertinence
# safety check
if final_recommendation > self.max_thrust:
final_recommendation = self.max_thrust
# elif final_recommendation < self.min_thrust:
# final_recommendation = self.min_thrust # !!! bug
# save results to proper fields
if final_recommendation == 0.0: # thrusters disabled
self.recommended_left = self.min_thrust
self.recommended_right = self.min_thrust
elif final_recommendation > 0: # left thruster active
self.recommended_left = final_recommendation
self.recommended_right = self.min_thrust
elif final_recommendation < 0: # right thruster active
self.recommended_right = -final_recommendation
self.recommended_left = self.min_thrust
def getRightThrust(self):
return self.recommended_right
def getLeftThrust(self):
return self.recommended_left
def getLastErr(self):
return self.lastErr
class Recommendation:
def __init__(self, thrust_level, pertinence):
self.recommended_thrust = thrust_level
self.pertinence = pertinence
class FuzzyInput:
def __init__(self, input_value, pertinence):
self.input_value = input_value
self.pertinence = pertinence
class Rule:
def __init__(self, angle, angular_velocity, recommended_thrust):
self.angle = angle
self.angular_velocity = angular_velocity
self.recommended_thrust = recommended_thrust
```
#### File: UFP_Regulator_Simulations/PID/PIDRegulator.py
```python
class PIDRegulator:
def __init__(self, proportional, integral, derivative, min_thrust,
max_thrust):
self.p = proportional
self.d = derivative
self.i = integral
self.max_thrust = max_thrust
self.min_thrust = min_thrust
self.errSum = 0.0
self.lastErr = 0.0
self.recommended_left = 0.0
self.recommended_right = 0.0
def calcNewThrusts(self, angle, desired_angle, interval):
err = desired_angle - angle
Derr = (err - self.lastErr) / interval
self.lastErr = err
self.errSum += err * interval
r_recommendation = self.p * err + self.d * Derr + self.i * self.errSum
r_recommendation = -r_recommendation
if r_recommendation < self.min_thrust:
r_recommendation = self.min_thrust
elif r_recommendation > self.max_thrust:
r_recommendation = self.max_thrust
self.recommended_right = r_recommendation
l_recommendation = self.p * err + self.d * Derr + self.i * self.errSum
if l_recommendation < self.min_thrust:
l_recommendation = self.min_thrust
elif l_recommendation > self.max_thrust:
l_recommendation = self.max_thrust
self.recommended_left = l_recommendation
def getRightThrust(self):
return self.recommended_right
def getLeftThrust(self):
return self.recommended_left
def getLastErr(self):
return self.lastErr
``` |
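An illustrative closed-loop sketch, not part of the repo: the toy plant below treats the thrust difference as angular acceleration, an assumption made only so the regulator has something to act on.
```python
from PIDRegulator import PIDRegulator  # assumed flat module import

reg = PIDRegulator(proportional=2.0, integral=0.1, derivative=0.5,
                   min_thrust=0.0, max_thrust=10.0)
angle, ang_vel, dt = 20.0, 0.0, 0.02
for _ in range(500):
    reg.calcNewThrusts(angle, desired_angle=0.0, interval=dt)
    torque = reg.getLeftThrust() - reg.getRightThrust()   # assumed sign convention
    ang_vel += torque * dt
    angle += ang_vel * dt
print(round(angle, 2), round(reg.getLastErr(), 2))
```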
{
"source": "jmnickerson05/Udacity_ImageClassifier_CLI",
"score": 3
} |
#### File: jmnickerson05/Udacity_ImageClassifier_CLI/predict.py
```python
import torch
from torchvision import datasets, transforms, models
import PIL
import torch.nn.functional as nnf
import pandas as pd, numpy as np, argparse, json
import matplotlib.pyplot as plt
def main():
parser = argparse.ArgumentParser()
parser.add_argument('image_path')
parser.add_argument('checkpoint', default='cli_checkpoint.pth')
parser.add_argument('--top_k', type=int, default=5)
parser.add_argument('--category_names', default='cat_to_name.json')
parser.add_argument('--gpu', default=True)
parser.add_argument('--show_plots', default=False)
global args
args = parser.parse_args()
model = load_model()
if not args.show_plots:
predict(image_path=args.image_path, model=model, topk=args.top_k)
else:
predict_and_plot(model, args.image_path)
def load_model():
model = torch.load(args.checkpoint, ((lambda storage, loc: storage.cuda())
if (torch.cuda.is_available() and args.gpu is True) else 'cpu'))
model.eval()
return model
def process_image(image_path):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
return transforms.Compose([
transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
])(PIL.Image.open(image_path))
def predict(image_path, model, topk=5):
# Transform
cat_to_name = json.loads(open(args.category_names, 'r').read())
actual_category = cat_to_name[image_path.split('/')[2]]
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
inputs = process_image(image_path).unsqueeze(0)
model.eval()
model, inputs = model.to(device), inputs.to(device)
prob = nnf.softmax(model(inputs), dim=1)
top_p, top_class = prob.topk(topk, dim = 1)
pred = pd.DataFrame({'probability': top_p.cpu().detach().numpy()[0],
'category': top_class.cpu().detach().numpy()[0]})
pred = pred.sort_values(by='probability', ascending=False)[
['category', 'probability']
]
pred['name'] = pred.category.apply(
lambda x: cat_to_name[str(int(x))]
)
pred['category'] = pred['category'].astype(int)
print('\n','*'*30, f'\nACTUAL FLOWER NAME: {actual_category}\n', '*'*30)
print(pred)
return pred
def imshow(image, ax=None, title=None):
"""Imshow for Tensor."""
if ax is None:
fig, ax = plt.subplots()
# PyTorch tensors assume the color channel is the first dimension
# but matplotlib assumes is the third dimension
image = image.numpy().transpose((1, 2, 0))
# Undo preprocessing
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
# Image needs to be clipped between 0 and 1 or it looks like noise when displayed
image = np.clip(image, 0, 1)
ax.imshow(image)
return ax
def predict_and_plot(model, img_path):
cat_to_name = json.loads(open(args.category_names, 'r').read())
imshow(process_image(img_path))
pred = predict(img_path, model)
pred = pred.sort_values(by='probability', ascending=False)[
['category', 'probability']
]
pred['name'] = pred.category.apply(
lambda x: cat_to_name[str(int(x))]
)
pred['category'] = pred['category'].astype(int)
print(pred)
ax = (pred.sort_values(by='probability', ascending=False)
.plot.barh(x='name', y='probability'))
ax.invert_yaxis()
main()
```
#### File: jmnickerson05/Udacity_ImageClassifier_CLI/train.py
```python
from workspace_utils import active_session
import torch
from torch import nn, optim
from torchvision import datasets, transforms, models
import os, copy, time, json, argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument('data_directory')
parser.add_argument('--save_dir', default='.')
parser.add_argument('--learning_rate', type=float, default=0.01)
parser.add_argument('--epochs', type=int, default=25)
parser.add_argument('--gpu', default=True)
# NOPE -- Not sure I would just change models on the fly in the real world.
# parser.add_argument('--arch', default='vgg16')
# IS THIS NEEDED?
# parser.add_argument('--hidden_units', default=512)
global args
args = parser.parse_args()
model = initialize_model(num_classes=102, feature_extract=True)[0]
train_and_save(model)
# Adapted From: https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html
def set_parameter_requires_grad(model, feature_extracting):
if feature_extracting:
for param in model.parameters():
param.requires_grad = False
def initialize_model(num_classes, feature_extract, use_pretrained=True):
model_ft = None
input_size = 0
model_ft = models.vgg16_bn(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extract)
num_ftrs = model_ft.classifier[6].in_features
model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
input_size = 224
return model_ft, input_size
# Adapted From: https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False):
since = time.time()
val_acc_history = []
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
device = torch.device("cuda:0" if (torch.cuda.is_available() and args.gpu is True) else "cpu")
print(device)
model.to(device)
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
for phase in ['train', 'valid']:
if phase == 'train':
model.train()
else:
model.eval()
running_loss = 0.0
running_corrects = 0
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
if is_inception and phase == 'train':
# From https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
outputs, aux_outputs = model(inputs)
loss1 = criterion(outputs, labels)
loss2 = criterion(aux_outputs, labels)
loss = loss1 + 0.4 * loss2
else:
outputs = model(inputs)
loss = criterion(outputs, labels)
_, preds = torch.max(outputs, 1)
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / len(dataloaders[phase].dataset)
epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)
print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
if phase == 'valid' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
if phase == 'valid':
val_acc_history.append(epoch_acc)
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model, val_acc_history
# Adapted From: https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html
def train_and_save(model):
with active_session():
data_dir = args.data_directory
data_transforms = {'train': transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]),
'test': transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
]),
'valid': transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])
])
}
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'test', 'valid']}
dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x],
batch_size=8, shuffle=True,
num_workers=4) for x in ['train', 'valid']}
feature_extract = True
params_to_update = model.parameters()
print("Params to learn:")
if feature_extract:
params_to_update = []
for name, param in model.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print("\t", name)
else:
for name, param in model.named_parameters():
if param.requires_grad == True:
print("\t", name)
optimizer_ft = optim.SGD(params_to_update, lr=args.learning_rate, momentum=0.9)
criterion = nn.CrossEntropyLoss()
model, hist = train_model(model, dataloaders_dict,
num_epochs=args.epochs,
criterion=criterion,
optimizer=optimizer_ft)
# Save the trained model so predict.py can load it from save_dir
torch.save(model, f'{args.save_dir}/cli_checkpoint.pth')
main()
``` |
{
"source": "jmniederle/misgan",
"score": 3
} |
#### File: misgan/src/celeba_generator.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
def add_mask_transformer(self, temperature=.66, hard_sigmoid=(-.1, 1.1)):
"""
hard_sigmoid:
False: use sigmoid only
True: hard thresholding
(a, b): hard thresholding on rescaled sigmoid
"""
self.temperature = temperature
self.hard_sigmoid = hard_sigmoid
if hard_sigmoid is False:
self.transform = lambda x: torch.sigmoid(x / temperature)
elif hard_sigmoid is True:
self.transform = lambda x: F.hardtanh(
x / temperature, 0, 1)
else:
a, b = hard_sigmoid
self.transform = lambda x: F.hardtanh(
torch.sigmoid(x / temperature) * (b - a) + a, 0, 1)
def dconv_bn_relu(in_dim, out_dim):
return nn.Sequential(
nn.ConvTranspose2d(in_dim, out_dim, 5, 2,
padding=2, output_padding=1, bias=False),
nn.BatchNorm2d(out_dim),
nn.ReLU())
# Must sub-class ConvGenerator to provide transform()
class ConvGenerator(nn.Module):
def __init__(self, latent_size=128):
super().__init__()
dim = 64
self.l1 = nn.Sequential(
nn.Linear(latent_size, dim * 8 * 4 * 4, bias=False),
nn.BatchNorm1d(dim * 8 * 4 * 4),
nn.ReLU())
self.l2_5 = nn.Sequential(
dconv_bn_relu(dim * 8, dim * 4),
dconv_bn_relu(dim * 4, dim * 2),
dconv_bn_relu(dim * 2, dim),
nn.ConvTranspose2d(dim, self.out_channels, 5, 2,
padding=2, output_padding=1))
def forward(self, input):
net = self.l1(input)
net = net.view(net.shape[0], -1, 4, 4)
net = self.l2_5(net)
return self.transform(net)
class ConvDataGenerator(ConvGenerator):
def __init__(self, latent_size=128):
self.out_channels = 3
super().__init__(latent_size=latent_size)
self.transform = lambda x: torch.sigmoid(x)
class ConvMaskGenerator(ConvGenerator):
def __init__(self, latent_size=128, temperature=.66,
hard_sigmoid=(-.1, 1.1)):
self.out_channels = 1
super().__init__(latent_size=latent_size)
add_mask_transformer(self, temperature, hard_sigmoid)
```
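A shape-level smoke test for the two generators above; the batch size is arbitrary and this usage pattern is inferred, not taken from the repo.
```python
import torch
from celeba_generator import ConvDataGenerator, ConvMaskGenerator   # assumed flat import

data_gen = ConvDataGenerator(latent_size=128).eval()
mask_gen = ConvMaskGenerator(latent_size=128).eval()
with torch.no_grad():
    z = torch.randn(4, 128)
    images = data_gen(z)   # (4, 3, 64, 64), squashed to (0, 1) by the sigmoid transform
    masks = mask_gen(z)    # (4, 1, 64, 64), pushed toward {0, 1} by the hard sigmoid
print(images.shape, masks.shape)
```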
#### File: misgan/src/masked_celeba.py
```python
import torch
from torchvision import datasets, transforms
import numpy as np
from PIL import Image
class MaskedCelebA(datasets.ImageFolder):
def __init__(self, data_dir='celeba-data', image_size=64, random_seed=0):
transform = transforms.Compose([
transforms.CenterCrop(108),
transforms.Resize(size=image_size, interpolation=Image.BICUBIC),
transforms.ToTensor(),
# transforms.Normalize(mean=(.5, .5, .5), std=(.5, .5, .5)),
])
super().__init__(data_dir, transform)
self.rnd = np.random.RandomState(random_seed)
self.image_size = image_size
self.generate_masks()
def __getitem__(self, index):
image, label = super().__getitem__(index)
return image, self.mask[index], label, index
def __len__(self):
return super().__len__()
class BlockMaskedCelebA(MaskedCelebA):
def __init__(self, block_len=None, *args, **kwargs):
self.block_len = block_len
super().__init__(*args, **kwargs)
def generate_masks(self):
d0_len = d1_len = self.image_size
d0_min_len = 12
d0_max_len = d0_len - d0_min_len
d1_min_len = 12
d1_max_len = d1_len - d1_min_len
n_masks = len(self)
self.mask = [None] * n_masks
self.mask_info = [None] * n_masks
for i in range(n_masks):
if self.block_len is None:
d0_mask_len = self.rnd.randint(d0_min_len, d0_max_len)
d1_mask_len = self.rnd.randint(d1_min_len, d1_max_len)
else:
d0_mask_len = d1_mask_len = self.block_len
d0_start = self.rnd.randint(0, d0_len - d0_mask_len + 1)
d1_start = self.rnd.randint(0, d1_len - d1_mask_len + 1)
mask = torch.zeros((d0_len, d1_len), dtype=torch.uint8)
mask[d0_start:(d0_start + d0_mask_len),
d1_start:(d1_start + d1_mask_len)] = 1
self.mask[i] = mask
self.mask_info[i] = d0_start, d1_start, d0_mask_len, d1_mask_len
class IndepMaskedCelebA(MaskedCelebA):
def __init__(self, obs_prob=.2, obs_prob_high=None, *args, **kwargs):
self.prob = obs_prob
self.prob_high = obs_prob_high
super().__init__(*args, **kwargs)
def generate_masks(self):
imsize = self.image_size
prob = self.prob
prob_high = self.prob_high
n_masks = len(self)
self.mask = [None] * n_masks
for i in range(n_masks):
if prob_high is None:
p = prob
else:
p = self.rnd.uniform(prob, prob_high)
self.mask[i] = torch.ByteTensor(imsize, imsize).bernoulli_(p)
```
#### File: misgan/src/misgan.py
```python
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import time
import pylab as plt
import seaborn as sns
from collections import defaultdict
from plot import plot_samples
from utils import CriticUpdater, mkdir, mask_data
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
def misgan(args, data_gen, mask_gen, data_critic, mask_critic, data,
output_dir, checkpoint=None):
n_critic = args.n_critic
gp_lambda = args.gp_lambda
batch_size = args.batch_size
nz = args.n_latent
epochs = args.epoch
plot_interval = args.plot_interval
save_interval = args.save_interval
alpha = args.alpha
tau = args.tau
gen_data_dir = mkdir(output_dir / 'img')
gen_mask_dir = mkdir(output_dir / 'mask')
log_dir = mkdir(output_dir / 'log')
model_dir = mkdir(output_dir / 'model')
data_loader = DataLoader(data, batch_size=batch_size, shuffle=True,
drop_last=True)
n_batch = len(data_loader)
data_noise = torch.FloatTensor(batch_size, nz).to(device)
mask_noise = torch.FloatTensor(batch_size, nz).to(device)
# Interpolation coefficient
eps = torch.FloatTensor(batch_size, 1, 1, 1).to(device)
# For computing gradient penalty
ones = torch.ones(batch_size).to(device)
lrate = 1e-4
# lrate = 1e-5
data_gen_optimizer = optim.Adam(
data_gen.parameters(), lr=lrate, betas=(.5, .9))
mask_gen_optimizer = optim.Adam(
mask_gen.parameters(), lr=lrate, betas=(.5, .9))
data_critic_optimizer = optim.Adam(
data_critic.parameters(), lr=lrate, betas=(.5, .9))
mask_critic_optimizer = optim.Adam(
mask_critic.parameters(), lr=lrate, betas=(.5, .9))
update_data_critic = CriticUpdater(
data_critic, data_critic_optimizer, eps, ones, gp_lambda)
update_mask_critic = CriticUpdater(
mask_critic, mask_critic_optimizer, eps, ones, gp_lambda)
start_epoch = 0
critic_updates = 0
log = defaultdict(list)
if checkpoint:
data_gen.load_state_dict(checkpoint['data_gen'])
mask_gen.load_state_dict(checkpoint['mask_gen'])
data_critic.load_state_dict(checkpoint['data_critic'])
mask_critic.load_state_dict(checkpoint['mask_critic'])
data_gen_optimizer.load_state_dict(checkpoint['data_gen_opt'])
mask_gen_optimizer.load_state_dict(checkpoint['mask_gen_opt'])
data_critic_optimizer.load_state_dict(checkpoint['data_critic_opt'])
mask_critic_optimizer.load_state_dict(checkpoint['mask_critic_opt'])
start_epoch = checkpoint['epoch']
critic_updates = checkpoint['critic_updates']
log = checkpoint['log']
with (log_dir / 'gpu.txt').open('a') as f:
print(torch.cuda.device_count(), start_epoch, file=f)
def save_model(path, epoch, critic_updates=0):
torch.save({
'data_gen': data_gen.state_dict(),
'mask_gen': mask_gen.state_dict(),
'data_critic': data_critic.state_dict(),
'mask_critic': mask_critic.state_dict(),
'data_gen_opt': data_gen_optimizer.state_dict(),
'mask_gen_opt': mask_gen_optimizer.state_dict(),
'data_critic_opt': data_critic_optimizer.state_dict(),
'mask_critic_opt': mask_critic_optimizer.state_dict(),
'epoch': epoch + 1,
'critic_updates': critic_updates,
'log': log,
'args': args,
}, str(path))
sns.set()
start = time.time()
epoch_start = start
for epoch in range(start_epoch, epochs):
sum_data_loss, sum_mask_loss = 0, 0
for real_data, real_mask, _, _ in data_loader:
# Assume real_data and mask have the same number of channels.
# Could be modified to handle multi-channel images and
# single-channel masks.
real_mask = real_mask.float()[:, None]
real_data = real_data.to(device)
real_mask = real_mask.to(device)
masked_real_data = mask_data(real_data, real_mask, tau)
# Update discriminators' parameters
data_noise.normal_()
mask_noise.normal_()
fake_data = data_gen(data_noise)
fake_mask = mask_gen(mask_noise)
masked_fake_data = mask_data(fake_data, fake_mask, tau)
update_data_critic(masked_real_data, masked_fake_data)
update_mask_critic(real_mask, fake_mask)
sum_data_loss += update_data_critic.loss_value
sum_mask_loss += update_mask_critic.loss_value
critic_updates += 1
if critic_updates == n_critic:
critic_updates = 0
# Update generators' parameters
for p in data_critic.parameters():
p.requires_grad_(False)
for p in mask_critic.parameters():
p.requires_grad_(False)
data_gen.zero_grad()
mask_gen.zero_grad()
data_noise.normal_()
mask_noise.normal_()
fake_data = data_gen(data_noise)
fake_mask = mask_gen(mask_noise)
masked_fake_data = mask_data(fake_data, fake_mask, tau)
data_loss = -data_critic(masked_fake_data).mean()
data_loss.backward(retain_graph=True)
data_gen_optimizer.step()
mask_loss = -mask_critic(fake_mask).mean()
(mask_loss + data_loss * alpha).backward()
mask_gen_optimizer.step()
for p in data_critic.parameters():
p.requires_grad_(True)
for p in mask_critic.parameters():
p.requires_grad_(True)
mean_data_loss = sum_data_loss / n_batch
mean_mask_loss = sum_mask_loss / n_batch
log['data loss', 'data_loss'].append(mean_data_loss)
log['mask loss', 'mask_loss'].append(mean_mask_loss)
for (name, shortname), trace in log.items():
fig, ax = plt.subplots(figsize=(6, 4))
ax.plot(trace)
ax.set_ylabel(name)
ax.set_xlabel('epoch')
fig.savefig(str(log_dir / f'{shortname}.png'), dpi=300)
plt.close(fig)
if plot_interval > 0 and (epoch + 1) % plot_interval == 0:
print(f'[{epoch:4}] {mean_data_loss:12.4f} {mean_mask_loss:12.4f}')
filename = f'{epoch:04d}.png'
data_gen.eval()
mask_gen.eval()
with torch.no_grad():
data_noise.normal_()
mask_noise.normal_()
data_samples = data_gen(data_noise)
plot_samples(data_samples, str(gen_data_dir / filename))
mask_samples = mask_gen(mask_noise)
plot_samples(mask_samples, str(gen_mask_dir / filename))
data_gen.train()
mask_gen.train()
if save_interval > 0 and (epoch + 1) % save_interval == 0:
save_model(model_dir / f'{epoch:04d}.pth', epoch, critic_updates)
epoch_end = time.time()
time_elapsed = epoch_end - start
epoch_time = epoch_end - epoch_start
epoch_start = epoch_end
with (log_dir / 'time.txt').open('a') as f:
print(epoch, epoch_time, time_elapsed, file=f)
save_model(log_dir / 'checkpoint.pth', epoch, critic_updates)
print(output_dir)
```
#### File: misgan/src/mnist_imputer.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
# Must sub-class Imputer to provide fc1
class Imputer(nn.Module):
def __init__(self, arch=(784, 784)):
super().__init__()
# self.fc1 = nn.Linear(784, arch[0])
self.fc2 = nn.Linear(arch[0], arch[1])
self.fc3 = nn.Linear(arch[1], arch[0])
self.fc4 = nn.Linear(arch[0], 784)
self.transform = lambda x: torch.sigmoid(x).view(-1, 1, 28, 28)
def forward(self, input, data, mask):
net = input.view(input.size(0), -1)
net = F.relu(self.fc1(net))
net = F.relu(self.fc2(net))
net = F.relu(self.fc3(net))
net = self.fc4(net)
net = self.transform(net)
# return data * mask + net * (1 - mask)
# NOT replacing observed part with input data for computing
# autoencoding loss.
return net
class ComplementImputer(Imputer):
def __init__(self, arch=(784, 784)):
super().__init__(arch=arch)
self.fc1 = nn.Linear(784, arch[0])
def forward(self, input, mask, noise):
net = input * mask + noise * (1 - mask)
return super().forward(net, input, mask)
class MaskImputer(Imputer):
def __init__(self, arch=(784, 784)):
super().__init__(arch=arch)
self.fc1 = nn.Linear(784 * 2, arch[0])
def forward(self, input, mask, noise):
batch_size = input.size(0)
net = torch.cat(
[(input * mask + noise * (1 - mask)).view(batch_size, -1),
mask.view(batch_size, -1)], 1)
return super().forward(net, input, mask)
class FixedNoiseDimImputer(Imputer):
def __init__(self, arch=(784, 784)):
super().__init__(arch=arch)
self.fc1 = nn.Linear(784 * 3, arch[0])
def forward(self, input, mask, noise):
batch_size = input.size(0)
net = torch.cat([(input * mask).view(batch_size, -1),
mask.view(batch_size, -1),
noise.view(batch_size, -1)], 1)
return super().forward(net, input, mask)
```
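A shape check for the imputer above with MNIST-sized tensors; the random data, mask, and noise are stand-ins for a real batch.
```python
import torch
from mnist_imputer import ComplementImputer   # assumed flat import

imputer = ComplementImputer(arch=(784, 784))
x = torch.rand(8, 1, 28, 28)                           # observed pixels
mask = torch.randint(0, 2, (8, 1, 28, 28)).float()     # 1 = observed, 0 = missing
noise = torch.rand(8, 1, 28, 28)                       # filler for the missing entries
imputed = imputer(x, mask, noise)
print(imputed.shape)                                   # torch.Size([8, 1, 28, 28])
```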
#### File: misgan/src/unet.py
```python
import torch
import torch.nn as nn
# Code adapted from https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
#
# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
# |-- downsampling -- |submodule| -- upsampling --|
class UnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False,
norm_layer=nn.BatchNorm2d):
super().__init__()
self.outermost = outermost
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
if norm_layer is not None:
downnorm = norm_layer(inner_nc)
upnorm = norm_layer(outer_nc)
uprelu = nn.ReLU(True)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv]
if norm_layer is not None:
up.append(upnorm)
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv]
if norm_layer is not None:
down.append(downnorm)
up.append(upnorm)
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else:
return torch.cat([x, self.model(x)], 1)
``` |
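As the comment block above describes, a full U-Net is assembled by nesting these skip-connection blocks from the innermost level outward. A minimal sketch; the channel widths and the 32x32 input are illustrative choices, not the configuration used in the original project:
```python
import torch

# innermost bottleneck: 64 -> 128 channels and back up
block = UnetSkipConnectionBlock(64, 128, innermost=True)
# one intermediate level wrapping it: 32 <-> 64 channels
block = UnetSkipConnectionBlock(32, 64, submodule=block)
# outermost level maps 1 input channel back to 1 output channel
net = UnetSkipConnectionBlock(1, 32, input_nc=1, submodule=block, outermost=True)

x = torch.randn(4, 1, 32, 32)  # spatial size must be divisible by 2**depth (here 2**3 = 8)
print(net(x).shape)            # torch.Size([4, 1, 32, 32])
```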
{
"source": "jmnote/kubeyaml",
"score": 2
} |
#### File: kubeyaml/src/lib.py
```python
import yaml
def read_yaml(filepath):
f = open(filepath, "r")
y = yaml.safe_load(f)
f.close()
return y
``` |
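Usage is a one-liner; the file name and keys below are hypothetical, any YAML document on disk will do:
```python
# assumes a file named deployment.yaml exists in the working directory
doc = read_yaml("deployment.yaml")
print(doc["kind"], doc["metadata"]["name"])
```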
{
"source": "jmnybl/finnish-srl",
"score": 3
} |
#### File: jmnybl/finnish-srl/conllu2sdp.py
```python
import sys
ID,FORM,LEMMA,UPOS,POS,FEAT,HEAD,DEPREL,DEPS,MISC=range(10)
SID,SFORM,SLEMMA,SPLEMMA,SPOS,SPPOS,SFEAT,SPFEAT,SHEAD,SPHEAD,SDEPREL,SPDEPREL,PREDICATE,SENSE,ARG=range(15)
from collections import defaultdict
def read_conllu(f):
comments=[]
tokens=[]
predicates={} # key: predicate_id, value: sense
arguments=defaultdict(list) # key: argument_id (token[ID]), value: (predicate_id,role)
for line in f:
line=line.strip()
if not line: # new sentence
if tokens:
yield comments,tokens,predicates,arguments
comments,tokens,predicates,arguments=[],[],{},defaultdict(list)
elif line.startswith("#"):
comments.append(line)
else: #normal line
cols=line.split("\t")
tokens.append(cols)
# senses
if "PBSENSE" in cols[MISC]:
for m in cols[MISC].split("|"):
if m.startswith("PBSENSE"):
sense=m.split("=",1)[-1]
predicates[cols[ID]]=sense # ID (str) based indexing, not list based
# arguments
if "PBArg" in cols[DEPS]:
for a in cols[DEPS].split("|"):
if "PBArg" in a:
idx,arg=a.split(":",1) # idx is predicate token[ID]
arguments[cols[ID]].append((idx,arg))
else:
if tokens:
yield comments,tokens,predicates,arguments
def read_sdp(f):
comments=[]
tokens=[]
predicates={} # key: predicate_id, value: sense
arguments=defaultdict(list) # key: argument_id (token[ID]), value: (predicate_id,role)
tmp_preds=[]
for line in f:
line=line.strip()
if not line: # empty line
continue
if line.startswith("1\t") and len(tokens)>0: # new sentence, yield current
# turn predicated counters into real token IDs
new_arguments={}
for key in arguments:
new_arguments[key]=[]
for pred_c,role in arguments[key]:
new_arguments[key].append((tmp_preds[pred_c],role))
#
yield comments,tokens,predicates,new_arguments
comments,tokens,predicates,arguments,tmp_preds=[],[],{},defaultdict(list),[]
cols=line.split("\t")
tokens.append(cols)
# senses
if "Y" in cols[PREDICATE]: # this is predicate
tmp_preds.append(cols[ID])
sense=cols[SENSE]
predicates[cols[ID]]=sense # ID (str) based indexing, not list based
# arguments
for i in range(ARG,len(cols)):
if cols[i]!="_": # this is argument
pred_counter=i-ARG # 0-based counter of predicates
arguments[cols[ID]].append((pred_counter,cols[i])) # key is the ID of the argument word (str), value is the counter of the predicate (int) and the role
else:
if tokens:
# turn predicated counters into real token IDs
new_arguments={}
for key in arguments:
new_arguments[key]=[]
for pred_c,role in arguments[key]:
new_arguments[key].append((tmp_preds[pred_c],role))
#
yield comments,tokens,predicates,new_arguments
def print_sdp_line(i,cols,predicates,predicate_counter,arguments):
new_cols=["_"]*(14+len(predicates))
for c,u in zip([ID,FORM,LEMMA,UPOS,FEAT,HEAD,DEPREL], [SID,SFORM,SLEMMA,SPOS,SFEAT,SHEAD,SDEPREL]):
new_cols[u]=cols[c]
if cols[ID] in predicates:
new_cols[PREDICATE]="Y"
new_cols[SENSE]=predicates[cols[ID]]
if cols[ID] in arguments:
for pidx,role in arguments[cols[ID]]:
p_count=predicate_counter[pidx]
new_cols[14+p_count]=role
print("\t".join(new_cols))
def print_sdp(fname):
f=open(fname, "rt", encoding="utf-8")
for comments,tokens,predicates,arguments in read_conllu(f):
predicate_counter={}
for key, val in sorted(predicates.items(),key=lambda x: int(x[0])):
predicate_counter[key]=len(predicate_counter)
for i,token in enumerate(tokens):
print_sdp_line(i,token,predicates,predicate_counter,arguments)
print("")
def print_conllu_line(i,cols,predicates,arguments):
# TODO: do not eat deps
new_cols=["_"]*10
for c,u in zip([ID,FORM,LEMMA,UPOS,FEAT,HEAD,DEPREL], [SID,SFORM,SLEMMA,SPOS,SFEAT,SHEAD,SDEPREL]):
new_cols[c]=cols[u]
if cols[SID] in predicates:
new_cols[MISC]="PBSENSE="+predicates[cols[SID]] # sense
if cols[SID] in arguments:
args=[h+":PBArg_"+r for h,r in arguments[cols[SID]]]
new_cols[DEPS]="|".join(sorted(args))
print("\t".join(new_cols))
def print_conllu(fname):
f=open(fname, "rt", encoding="utf-8")
for comments,tokens,predicates,arguments in read_sdp(f):
for i,token in enumerate(tokens):
print_conllu_line(i,token,predicates,arguments)
print("")
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser(description='')
g=parser.add_argument_group("Reguired arguments")
g.add_argument('-f', '--file', type=str, required=True, help='File name')
g.add_argument('-r', '--reversed', action='store_true', default=False, help='Reverse the process --> read sdp and print conllu')
args = parser.parse_args()
if args.reversed:
print("Reading sdp and printing conllu",file=sys.stderr)
print_conllu(args.file)
else:
print("Reading conllu and printing sdp",file=sys.stderr)
print_sdp(args.file)
```
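A quick in-memory check of `read_conllu` and the annotation encoding it expects (`PBSENSE=<sense>` in MISC, `<head>:PBArg_<role>` in DEPS); the Finnish tokens and the `juosta.1` sense label are made up for illustration:
```python
import io

sample = "\n".join([
    "1\tMinä\tminä\tPRON\t_\t_\t2\tnsubj\t_\t_",
    "2\tjuoksen\tjuosta\tVERB\t_\t_\t0\troot\t_\tPBSENSE=juosta.1",
    "3\tkotiin\tkoti\tNOUN\t_\t_\t2\tobl\t2:PBArg_ARG4\t_",
    "",
])
for comments, tokens, predicates, arguments in read_conllu(io.StringIO(sample)):
    print(predicates)        # {'2': 'juosta.1'}
    print(dict(arguments))   # {'3': [('2', 'PBArg_ARG4')]}
```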
#### File: jmnybl/finnish-srl/model.py
```python
from keras.models import Model, model_from_json
from keras.layers import Dense, Input, Reshape, Flatten, concatenate, Bidirectional, TimeDistributed, RepeatVector
from keras.layers.recurrent import LSTM
from keras.layers.embeddings import Embedding
from keras import optimizers
import keras.backend as K
from keras.layers.core import Lambda
from keras.engine.topology import Layer
from keras.callbacks import Callback
from collections import Counter
from math import ceil
import sys
import gzip
import numpy as np
import random
import lwvlib
import json
# parameters
word_embedding_size=200
recurrent_inner=512
stack=1
def read_pretrained_vectors(vocabulary,vector_model,max_words=1000000):
print("Loading pretrained word embeddings from "+vector_model)
model=lwvlib.load(vector_model,max_words,max_words)
vectors=np.zeros((len(vocabulary),model.vectors.shape[1]),np.float)
for word,idx in vocabulary.items():
if idx==0 or idx==1 or idx==2:
continue # mask and unk and end
midx=model.get(word)
if midx is not None:
vectors[idx]=model.vectors[midx] # TODO: normalize?
return vectors
# Implements a layer which picks the predicate word from the lstm sequence
# Input shape: (minibatch,1), (minibatch, max_sequence, lstm_width)
# Output shape: (minibatch, 1, lstm_width)
#def vector_from_sequence(inputs):
# p,lstm=inputs[0],inputs[1]
# v=lstm[:,1,:]
# print(K.shape(v)[1])
# return Flatten()(v)
#is=Lambda(vector_from_sequence,vector_from_sequence_dim)([i,lstms[-1]])
#concat_lstm_sense=concatenate([lstms[-1],is],axis=-1)
#def vector_from_sequence_dim(lstm_input_shape):
# return (lstm_input_shape[1][0],30,lstm_input_shape[1][2])
def build_model(vs,max_sent_len_words):
print("Building model",file=sys.stderr)
# inputs
input_words=Input(shape=(max_sent_len_words,),name="input_words")
input_predicate=Input(shape=(max_sent_len_words,),name="predicate_vector") # vector where predicate word is 1. other words are zero
input_predicate_sense=Input(shape=(max_sent_len_words,),name="predicate_sense") # vector where predicate word is 1. other words are zero
# embeddings
pretrained=read_pretrained_vectors(vs.words,"/home/jmnybl/word_embeddings/pb34_wf_200_v2.bin") #
word_embeddings=Embedding(vs.vocab_size, word_embedding_size, name="word_embeddings", mask_zero=False, weights=[pretrained])(input_words)
binary=Embedding(3, 1, name="binary_embeddings", mask_zero=False, weights=[np.array([0.0,0.0,1.0]).reshape((3,1))], trainable=False)(input_predicate)
sense_embeddings=Embedding(len(vs.predicate_senses), word_embedding_size, name="sense_embeddings", mask_zero=False)(input_predicate_sense)
# concatenate word embeddings and predicate vector
concat_embedding=concatenate([word_embeddings,binary],axis=-1)
lstms=[concat_embedding]
# recurrent
for _ in range(stack):
if len(lstms)>1:
lstm_in=concatenate(lstms,axis=-1)
else:
lstm_in=lstms[0]
lstm_out=Bidirectional(LSTM(recurrent_inner,name="bilstm",return_sequences=True, activation="relu"), merge_mode="ave")(lstm_in)
lstms.append(lstm_out)
concat_lstm_sense=concatenate([lstms[-1],sense_embeddings],axis=-1)
# softmax
predictions=TimeDistributed(Dense(len(vs.argument_roles),activation="softmax",name="prediction_layer"))(concat_lstm_sense)
model=Model(inputs=[input_words,input_predicate,input_predicate_sense], outputs=[predictions])
# adam=optimizers.Adam(beta_2=0.9)
model.compile(optimizer="adam",loss='categorical_crossentropy',metrics=['accuracy'])
print(model.summary())
from keras.utils import plot_model
plot_model(model,to_file="model.png",show_shapes=True)
return model
def load_model(model_name):
with open(model_name+".json", "rt", encoding="utf-8") as f:
model=model_from_json(f.read())
model.load_weights(model_name+".h5")
return model
```
#### File: jmnybl/finnish-srl/predict.py
```python
from data_reader import Vocabulary, transform_data, load_vocabularies, FORM, Corpus
from model import build_model, load_model
import numpy as np
import sys
def predict(args):
# ARGUMENTS
test_file=args.data
minibatch=64
max_sent_len_words=30
print("fsdfdfsdfs",file=sys.stderr)
## VOCABULARY
vs=load_vocabularies(args.model_name+"-vocab.pickle")
corpus=Corpus(test_file,test_time=True)
sentences,x,y=transform_data(corpus,vs,max_sent_len_words,test_time=True)
print(len(sentences),flush=True,file=sys.stderr)
model=load_model(args.model_name)
predictions=model.predict(x,batch_size=64,verbose=1)
from collections import defaultdict
# reconstruct file
last_sentence=None
for (sent_id,predicate_id),pred in zip(sentences,predictions):
if last_sentence!=None and sent_id!=last_sentence.id: # new sentence
corpus.sentences[last_sentence.id]=last_sentence
last_sentence=corpus.sentences[sent_id]
last_sentence.arguments=defaultdict(list) # clear arguments to be sure there is nothing
elif last_sentence==None:
last_sentence=corpus.sentences[sent_id]
# add predicted arguments for this predicate
# predicate_id is the token[ID] of the current predicate
labels=[vs.idx2label[np.argmax(t)] for t in pred]
for i,r in enumerate(labels): # i is now token counter, r is predicted role
if r!="<MASK>" and r!="None":
last_sentence.arguments[str(i+1)].append((predicate_id,r))
corpus.save(args.output)
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser(description='')
g=parser.add_argument_group("Reguired arguments")
g.add_argument('-d', '--data', type=str, required=True, help='Training file')
g.add_argument('-m', '--model_name', type=str, required=True, help='Name of the saved model')
g.add_argument('-o', '--output', type=str, required=True, help='Output file name')
args = parser.parse_args()
predict(args)
```
#### File: jmnybl/finnish-srl/train.py
```python
from data_reader import Vocabulary, transform_data, save_vocabularies, Corpus
from model import build_model
from keras.callbacks import ModelCheckpoint
def train(args):
# ARGUMENTS
training_file=args.data
minibatch=64
max_sent_len_words=30
epochs=args.epochs
corpus=Corpus(training_file,test_time=False)
## VOCABULARY
vs=Vocabulary()
vs.build(corpus,min_count_word=args.min_count_word,min_count_sense=args.min_count_sense)
sentences,x,y=transform_data(corpus,vs,max_sent_len_words)
model=build_model(vs,max_sent_len_words)
# save model json
model_json = model.to_json()
with open(args.model_name+".json", "w") as json_file:
json_file.write(model_json)
save_vocabularies(vs,args.model_name+"-vocab.pickle")
# +".{epoch:02d}.h5"
save_cb=ModelCheckpoint(filepath=args.model_name+".h5", monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
model.fit(x,y,batch_size=minibatch,epochs=epochs,verbose=1,validation_split=0.1,callbacks=[save_cb])
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser(description='')
g=parser.add_argument_group("Reguired arguments")
g.add_argument('-d', '--data', type=str, required=True, help='Training file')
g.add_argument('-m', '--model_name', type=str, required=True, help='Name of the saved model')
g.add_argument('--min_count_word', type=int, default=2, help='Frequency threshold, how many times a word must occur to be included in the vocabulary? (default %(default)d)')
g.add_argument('--min_count_sense', type=int, default=2, help='Frequency threshold, how many times a verb sense must occur to be included in the vocabulary? (default %(default)d)')
g.add_argument('--epochs', type=int, default=10, help='Number of training epochs')
args = parser.parse_args()
train(args)
``` |
{
"source": "jmnybl/inflect-words",
"score": 2
} |
#### File: jmnybl/inflect-words/predict.py
```python
from __future__ import division
from builtins import bytes
import os
import argparse
import math
import codecs
import torch
import sys
sys.path.insert(0,os.getcwd()+"/OpenNMT-py")
import onmt
import onmt.IO
import opts
from itertools import takewhile, count
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
parser = argparse.ArgumentParser(description='translate.py')
opts.add_md_help_argument(parser)
opts.translate_opts(parser)
opt = parser.parse_args()
if opt.batch_size != 1:
print("WARNING: -batch_size isn't supported currently, "
"we set it to 1 for now!")
opt.batch_size = 1
def report_score(name, score_total, words_total):
print("%s AVG SCORE: %.4f, %s PPL: %.4f" % (
name, score_total / words_total,
name, math.exp(-score_total/words_total)))
def get_src_words(src_indices, index2str):
words = []
raw_words = (index2str[i] for i in src_indices)
words = takewhile(lambda w: w != onmt.IO.PAD_WORD, raw_words)
return " ".join(words)
def read_word_statistics(fname, min_freq=25):
words={}
for line in open(fname, "rt", encoding="utf-8"):
line=line.strip()
count,word=line.split()
if int(count)<min_freq:
return words
words[word]=int(count)
return words
def score_beam(predictions, scores, word_statistics):
rescored=[]
for p,sc in zip(predictions, scores):
count=word_statistics.get(p,0)
rescored.append((p,sc,count))
# if [p for p,s in rescored+tmp][0] != predictions[0]:
# print(src_words,"---",predictions[0],"---",[p for p,s in rescored+tmp][0])
# print(predictions)
# print([p for p,s in rescored+tmp])
# print()
return [p for p,s,c in sorted(rescored,key=lambda x:x[2], reverse=True)], [s for p,s,c in sorted(rescored,key=lambda x:x[2], reverse=True)], [c for p,s,c in sorted(rescored,key=lambda x:x[2], reverse=True)]
def main():
dummy_parser = argparse.ArgumentParser(description='train.py')
opts.model_opts(dummy_parser)
dummy_opt = dummy_parser.parse_known_args([])[0]
opt.cuda = opt.gpu > -1
if opt.cuda:
torch.cuda.set_device(opt.gpu)
translator = onmt.Translator(opt, dummy_opt.__dict__)
out_file = codecs.open(opt.output, 'w', 'utf-8')
pred_score_total, pred_words_total = 0, 0
gold_score_total, gold_words_total = 0, 0
if opt.dump_beam != "":
import json
translator.initBeamAccum()
withAttention=len(opt.save_attention) > 0
if withAttention:
if opt.n_best>1:
print("-save_attention works only with -n_best 1 right now. Attention weights are not saved!")
withAttention=False
else:
print("saving attention weights to",opt.save_attention)
attn_file=open(opt.save_attention,"wt")
data = onmt.IO.ONMTDataset(opt.src, opt.tgt, translator.fields, None)
word_statistics=read_word_statistics("/home/jmnybl/finnish_vocab")
test_data = onmt.IO.OrderedIterator(
dataset=data, device=opt.gpu,
batch_size=opt.batch_size, train=False, sort=False,
shuffle=False)
counter = count(1)
for batch in test_data:
pred_batch, gold_batch, pred_scores, gold_scores, attn, src \
= translator.translate(batch, data)
pred_score_total += sum(score[0] for score in pred_scores)
pred_words_total += sum(len(x[0]) for x in pred_batch)
if opt.tgt:
gold_score_total += sum(gold_scores)
gold_words_total += sum(len(x) for x in batch.tgt[1:])
# z_batch: an iterator over the predictions, their scores,
# the gold sentence, its score, and the source sentence for each
# sentence in the batch. It has to be zip_longest instead of
# plain-old zip because the gold_batch has length 0 if the target
# is not included.
z_batch = zip_longest(
pred_batch, gold_batch,
pred_scores, gold_scores, attn,
(sent.squeeze(1) for sent in src.split(1, dim=1)))
for pred_sents, gold_sent, pred_score, gold_score, attention, src_sent in z_batch:
n_best_preds = ["".join(pred)+"\t"+str(score) for pred,score in zip(pred_sents[:opt.n_best],pred_score[:opt.n_best])]
#n_best_preds, scores, counts = score_beam(n_best_preds, pred_score[:opt.n_best], word_statistics)
out_file.write('\n'.join(n_best_preds))
out_file.write('\n')
out_file.flush()
sent_number = next(counter)
if withAttention:
best_pred = n_best_preds[0]
best_score = pred_score[0]
words = get_src_words(
src_sent, translator.fields["src"].vocab.itos)
print(" ||| ".join([str(sent_number),best_pred,str(best_score),words,str(len(words.split(" ")))+" "+str(len(best_pred.split(" ")))]),file=attn_file)
for y in attention:
for i,v in enumerate(y.cpu().numpy()):
print(" ".join(str(x) for x in v),file=attn_file)
if opt.verbose:
words = get_src_words(
src_sent, translator.fields["src"].vocab.itos)
os.write(1, bytes('\nSENT %d: %s\n' %
(sent_number, words), 'UTF-8'))
best_pred = n_best_preds[0]
best_score = pred_score[0]
os.write(1, bytes('PRED %d: %s\n' %
(sent_number, best_pred), 'UTF-8'))
print("PRED SCORE: %.4f" % best_score)
if opt.tgt:
tgt_sent = ' '.join(gold_sent)
os.write(1, bytes('GOLD %d: %s\n' %
(sent_number, tgt_sent), 'UTF-8'))
print("GOLD SCORE: %.4f" % gold_score)
if len(n_best_preds) > 1:
print('\nBEST HYP:')
for score, sent in zip(pred_score, n_best_preds):
os.write(1, bytes("[%.4f] %s\n" % (score, sent),
'UTF-8'))
report_score('PRED', pred_score_total, pred_words_total)
if opt.tgt:
report_score('GOLD', gold_score_total, gold_words_total)
if opt.dump_beam:
json.dump(translator.beam_accum,
codecs.open(opt.dump_beam, 'w', 'utf-8'))
if withAttention:
attn_file.close()
if __name__ == "__main__":
main()
``` |
{
"source": "jmnybl/keras-models",
"score": 3
} |
#### File: jmnybl/keras-models/generate.py
```python
from __future__ import print_function
from keras.models import Model, model_from_json
from keras.layers import Dense, Activation, Dropout, Input, Embedding
from keras.layers import CuDNNLSTM as LSTM
from keras.optimizers import RMSprop, Adam
from keras.utils.data_utils import get_file
from keras.callbacks import ModelCheckpoint
import numpy as np
import random
import sys
import re
### Only needed for me, not to block the whole GPU, you don't need this stuff
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.3
set_session(tf.Session(config=config))
### ---end of weird stuff
## helper functions
from nltk.tokenize import word_tokenize # turns text into list of words
def load_vocabulary(file_name):
import json
with open(file_name, "rt", encoding="utf-8") as f:
vocab=json.load(f)
return vocab
def load_model(model_file, weight_file):
with open(model_file, "rt", encoding="utf-8") as f:
model=model_from_json(f.read())
model.load_weights(weight_file)
return model
vocab_file="generation-vocab.json"
model_file="generation-model.json"
weight_file="generation-weights.h5"
vocab, _ = load_vocabulary(vocab_file)
model = load_model(model_file, weight_file)
print("Vocabulary size:", len(vocab))
inversed_vocab = {value: key for key, value in vocab.items()}
print("Inversed vocabulary size:", len(inversed_vocab))
print(vocab, inversed_vocab)
context_size=50
embedding_size=50
batch_size=150
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
# generate
while True:
print()
print('-' * 50)
text = input("Seed for generation:").strip()
for diversity in [0.2, 0.5, 1.0, 1.2]:
print()
print('----- diversity:', diversity)
generated = []
for c in text:
generated.append(c)
print('----- Generating with seed: "' + "".join(generated) + '"')
sys.stdout.write("".join(generated))
sentence=generated
# vectorize seed
generate_X=np.zeros((1,context_size))
for i,c in enumerate(sentence):
generate_X[0,i]=vocab.get(c,vocab["<UNKNOWN>"])
for i in range(200):
# predict
preds = model.predict(generate_X, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = inversed_vocab[next_index]
generated += [next_char]
sentence=generated[len(generated)-context_size:]
# vectorize new seed
generate_X=np.zeros((1,context_size))
for i,c in enumerate(sentence):
generate_X[0,i]=vocab.get(c,vocab["<UNKNOWN>"])
sys.stdout.write(next_char)
sys.stdout.flush()
sys.stdout.write("\n")
sys.stdout.flush()
print()
```
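The `sample` helper above is standard temperature sampling. Its reweighting step is easy to inspect without the multinomial draw; the probabilities below are made up:
```python
import numpy as np

preds = np.array([0.7, 0.2, 0.1])
for t in (0.2, 1.0, 2.0):
    p = np.exp(np.log(preds) / t)        # same reweighting as in sample()
    print(t, np.round(p / p.sum(), 3))
# 0.2 -> [0.998 0.002 0.   ]  low temperature: nearly greedy
# 1.0 -> [0.7   0.2   0.1  ]  unchanged
# 2.0 -> [0.523 0.279 0.198]  high temperature: flatter, more diverse
```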
#### File: jmnybl/keras-models/skipgram-ns.py
```python
from keras.models import Model
from keras.layers import Dense, Input, Reshape, Flatten, dot, RepeatVector, concatenate
from keras.layers.embeddings import Embedding
from keras import optimizers
import sys
import gzip
import numpy as np
import keras.backend as K
from keras.layers.core import Lambda
from collections import Counter
class Vocabulary(object):
def __init__(self,data=None):
self.words=None
def build(self,training_file,min_count=5,estimate=0):
# min_count: discard words that appear less than X times
# estimate: estimate vocabulary using X words, 0 for read all
word_counter=0
c=Counter()
tmp=[]
for line in gzip.open(training_file,"rt",encoding="utf-8"):
for word in line.strip().split(" "):
word_counter+=1
tmp.append(word)
if word_counter%1000000==0:
print(word_counter,"words",file=sys.stderr)
if len(tmp)>100000:
c.update(tmp)
tmp=[]
if estimate!=0 and word_counter>=estimate:
break
if len(tmp)>0:
c.update(tmp)
words={"<MASK>":0,"<UNK>":1}
for w,count in c.most_common():
if count<min_count:
break
words[w]=len(words)
self.words=words
self.vocab_size=len(self.words)
self.inverted_words={}
for key,idx in self.words.items():
self.inverted_words[idx]=key
print("Vocabulary created with {w} words.".format(w=self.vocab_size),file=sys.stderr)
self.total_word_count=word_counter
def word_idx(self,word):
return self.words.get(word,self.words["<UNK>"])
def make_sampling_table(self):
# TODO: make a proper sampling table with probabilities and all
pass
def sample_negatives(self,current_word,negatives):
negative_samples = np.random.randint(1,self.vocab_size,negatives)
while current_word in negative_samples:
negative_samples = np.random.randint(1,self.vocab_size,negatives)
return negative_samples
def infinite_iterator(fname, vs, window, negatives, batch_size, max_iterations=10):
focus_words=[]
target_words=[]
targets=[]
iterations=0
while True:
print("Iteration:",iterations,file=sys.stderr)
if iterations==max_iterations:
if len(focus_words)>0:
yield {"focus_word":np.array(focus_words),"target_words":np.array(target_words)}, np.array(targets)
break
for line in gzip.open(fname,"rt",encoding="utf-8"):
words=line.strip().split(" ")
for i in range(0,len(words)): # i is a focus word now
focus_word=vs.word_idx(words[i])
if focus_word==vs.word_idx("<UNK>"):
if np.random.random_sample() < 0.8: # 80% change to drop <UNK> training example
continue
for j in range(max(0,i-window),min(len(words),i+window+1)):
if i==j:
continue
target_word=vs.word_idx(words[j])
negative_sample=vs.sample_negatives(focus_word,negatives)
focus_words.append(focus_word)
target_words.append([target_word]+list(negative_sample))
targets.append([1.0]+[0.0]*negatives)
if len(focus_words)==batch_size:
yield {"focus_word":np.array(focus_words),"target_words":np.array(target_words)}, np.array(targets)
focus_words=[]
target_words=[]
targets=[]
iterations+=1
def train(args):
# SETTINGS
minibatch=400
embedding_size=args.embedding_size
window_size=args.window
negative_size=args.negatives
training_file=args.data
steps_per_epoch=10000
## VOCABULARY
vs=Vocabulary()
vs.build(training_file,min_count=args.min_count,estimate=args.estimate_vocabulary)
data_iterator=infinite_iterator(training_file,vs,window_size,negative_size,minibatch)
## MODEL
# input
focus_input=Input(shape=(1,), name="focus_word")
target_input=Input(shape=(negative_size+1,), name="target_words")
# Embeddings
focus_embeddings=Embedding(vs.vocab_size, embedding_size, name="word_embeddings")(focus_input)
repeated=RepeatVector(negative_size+1)(Flatten()(focus_embeddings))
context_embeddings=Embedding(vs.vocab_size,embedding_size, name="context_embeddings")(target_input)
def my_dot(l):
return K.sum(l[0]*l[1],axis=-1,keepdims=True)
def my_dot_dim(input_shape):
return (input_shape[0][0],input_shape[0][1],1)
dot_out=Lambda(my_dot,my_dot_dim)([repeated,context_embeddings])
sigmoid_layer=Dense(1, activation='sigmoid')
s_out=Flatten()(sigmoid_layer(dot_out))
model=Model(inputs=[focus_input,target_input], outputs=[s_out])
adam=optimizers.Adam(beta_2=0.9)
model.compile(optimizer=adam,loss='binary_crossentropy')
print(model.summary())
from keras.utils import plot_model
plot_model(model,to_file=args.model_name+".png",show_shapes=True)
model.fit_generator(data_iterator,steps_per_epoch=steps_per_epoch,epochs=args.epochs,verbose=1)
def save_embeddings(model, name, vs):
# save word embeddings and vocabulary
with open(name, 'wt') as f:
embedding_w=model.get_weights()[0]
print(vs.vocab_size,embedding_size,file=f)
for i in range(0,vs.vocab_size):
print(vs.inverted_words[i]," ".join(str(x) for x in embedding_w[i]),file=f)
save_embeddings(model,args.model_name,vs)
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser(description='')
g=parser.add_argument_group("Reguired arguments")
g.add_argument('-d', '--data', type=str, required=True, help='Training data file (gzipped)')
g.add_argument('-m', '--model_name', type=str, required=True, help='Name of the saved model (in .txt format)')
g.add_argument('--min_count', type=int, default=2, help='Frequency threshold, how many times an ngram must occur to be included? (default %(default)d)')
g.add_argument('--window', type=int, default=5, help='Window size, default is 5 words to the left and 5 words to the right')
g.add_argument('--embedding_size', type=int, default=200, help='Dimensionality of the trained vectors')
g.add_argument('--negatives', type=int, default=5, help='How many negatives to sample, default=5')
g.add_argument('--epochs', type=int, default=10, help='Number of training epochs')
g.add_argument('--estimate_vocabulary', type=int, default=0, help='Estimate vocabulary using x words, default=0 (all).')
args = parser.parse_args()
train(args)
``` |
{
"source": "jmnybl/text-generation",
"score": 3
} |
#### File: jmnybl/text-generation/inp.py
```python
import sys
def is_new_doc(comments):
for c in comments:
if c.startswith("# fname"):
return True
return False
def get_documents(fname):
if fname.endswith(".gz"):
import gzip
f=gzip.open(fname, "rt")
else:
f=open(fname)
current_doc=[]
current_metadata=None
for comm, sent in get_sentences(f):
if comm and is_new_doc(comm): # new document starts
if current_doc:
yield current_doc, current_metadata
current_doc, current_metadata = [], None
current_doc.append((comm,sent))
else:
if current_doc:
yield current_doc, current_metadata
f.close()
def get_sentences(f):
"""conllu reader"""
sent=[]
comment=[]
for line in f:
line=line.strip()
if not line: # new sentence
if sent:
yield comment,sent
comment=[]
sent=[]
elif line.startswith("#"):
comment.append(line)
else: #normal line
sent.append(line.split("\t"))
else:
if sent:
yield comment, sent
if __name__=="__main__":
# test
for i, (document, doc_meta) in enumerate(get_documents("/home/ginter/text-generation/all_stt.conllu.gz")):
print("Document:",i)
for comm,sent in document:
if comm:
print(comm)
print(sent)
print()
break
``` |
{
"source": "J-Mo63/contact-register",
"score": 3
} |
#### File: contact-register/contactregister/contactregister_test.py
```python
from contextlib import redirect_stdout
from models.Contact import Contact
from display import html
import contactregister
import unittest
import helpers
import json
import glob
import csv
import os
import io
class ContactRegisterTestCase(unittest.TestCase):
def tearDown(self):
files = glob.glob('../data/*')
for f in files:
os.remove(f)
contactregister.contacts = []
class AddContact(ContactRegisterTestCase):
def test_add_single(self):
contactregister.add_contact("<NAME>", "123 Hello Rd", "+614090000")
self.assertEqual(1, len(contactregister.contacts))
def test_add_multi(self):
contactregister.add_contact("<NAME>", "125 Welcome Plc", "+614090002")
contactregister.add_contact("<NAME>", "124 Goodbye St", "+614090001")
self.assertEqual(2, len(contactregister.contacts))
class ListContacts(ContactRegisterTestCase):
def test_list_empty(self):
self.assertEqual(0, len(contactregister.get_all_contacts()))
def test_list_single(self):
contactregister.contacts.append(Contact("<NAME>", "123 Hello Rd", "+614090000"))
self.assertEqual(1, len(contactregister.get_all_contacts()))
def test_list_multi(self):
contactregister.contacts.append(Contact("<NAME>", "125 Welcome Plc", "+614090002"))
contactregister.contacts.append(Contact("<NAME>", "124 Goodbye St", "+614090001"))
self.assertEqual(2, len(contactregister.get_all_contacts()))
class SearchContacts(ContactRegisterTestCase):
def test_search_empty(self):
self.assertEqual(0, len(contactregister.search_contacts("name=Jon*")))
def test_search_one(self):
jon_jon = Contact("<NAME>", "123 Hello Rd", "+614090000")
contactregister.contacts.append(jon_jon)
matches = contactregister.search_contacts("name=Jon*")
self.assertEqual(1, len(matches))
self.assertEqual([jon_jon], matches)
self.assertEqual(0, len(contactregister.search_contacts("name=Bon*")))
self.assertEqual(1, len(contactregister.search_contacts("address=12? Hello*")))
self.assertEqual(1, len(contactregister.search_contacts("name=Jon*, address=12? Hello*")))
self.assertEqual(1, len(contactregister.search_contacts("phone=+6*")))
self.assertEqual(0, len(contactregister.search_contacts("phone=+6*, address = 124*")))
def test_search_multi(self):
jon_jon = Contact("<NAME>", "123 Hello Rd", "+614090000")
ron_ron = Contact("<NAME>", "125 Welcome Plc", "+614090002")
jon_bon = Contact("<NAME>", "124 Goodbye St", "+614090001")
contactregister.contacts.extend([jon_jon, ron_ron, jon_bon])
matches = contactregister.search_contacts("address=12*")
self.assertEqual(3, len(matches))
self.assertEqual([jon_jon, ron_ron, jon_bon], matches)
self.assertEqual(3, len(contactregister.search_contacts("name=*on*")))
self.assertEqual(2, len(contactregister.search_contacts("name=Jon*")))
self.assertEqual(1, len(contactregister.search_contacts("name=Jon*, phone = *01")))
self.assertEqual(2, len(contactregister.search_contacts("address = *el*")))
class DisplayContacts(ContactRegisterTestCase):
@staticmethod
def get_text_display_output():
f = io.StringIO()
with redirect_stdout(f):
contactregister.display_contacts("text")
return f.getvalue()
@staticmethod
def get_html_format(elements=""):
return '<html>\n\t<head>\n\t\t<title>Data Value</title>\n\t</head>\n\t<body>\n' \
f'\t\t<h1>All Contacts</h1>\n\t\t<ul>\n{elements}\n\t\t</ul>\n\t</body>\n</html>'
@staticmethod
def get_html_file_output():
with open("../data/contacts.html", 'r', newline='') as file:
return file.read()
def test_display_text_empty(self):
output = DisplayContacts.get_text_display_output()
self.assertEqual("name address phone\n"
"------ --------- -------\n", output)
def test_display_text_single(self):
contactregister.contacts.append(Contact("<NAME>", "123 Hello Rd", "+614090000"))
output = DisplayContacts.get_text_display_output()
self.assertEqual("name address phone\n"
"------- ------------ ----------\n"
"<NAME> 123 Hello Rd +614090000\n", output)
def test_display_text_multi(self):
contactregister.contacts.append(Contact("<NAME>", "125 Welcome Plc", "+614090002"))
contactregister.contacts.append(Contact("<NAME>", "124 Goodbye St", "+614090001"))
output = DisplayContacts.get_text_display_output()
self.assertEqual("name address phone\n"
"------- --------------- ----------\n"
"<NAME> 125 Welcome Plc +614090002\n"
"<NAME> 124 Goodbye St +614090001\n", output)
def test_display_html_empty(self):
contactregister.display_contacts("html")
self.assertEqual(DisplayContacts.get_html_format(), DisplayContacts.get_html_file_output())
def test_display_html_single(self):
jon_jon = Contact("<NAME>", "123 Hello Rd", "+614090000")
contactregister.contacts.append(jon_jon)
contactregister.display_contacts("html")
self.assertEqual(DisplayContacts.get_html_format(html.contacts_to_list_items([jon_jon])),
DisplayContacts.get_html_file_output())
def test_display_html_multi(self):
jon_jon = Contact("<NAME>", "123 Hello Rd", "+614090000")
ron_ron = Contact("<NAME>", "125 Welcome Plc", "+614090002")
contactregister.contacts.extend([jon_jon, ron_ron])
contactregister.display_contacts("html")
self.assertEqual(DisplayContacts.get_html_format(html.contacts_to_list_items([jon_jon, ron_ron])),
DisplayContacts.get_html_file_output())
class ExportContacts(ContactRegisterTestCase):
@staticmethod
def get_json_file_output():
with open("../data/contacts.json", 'r', newline='') as file:
return json.load(file)
@staticmethod
def get_csv_file_output():
with open("../data/contacts.csv", 'r', newline='') as file:
return file.read()
def test_export_json_empty(self):
contactregister.export_contacts("json")
self.assertEqual([], ExportContacts.get_json_file_output())
def test_export_json_single(self):
jon_jon = Contact("<NAME>", "123 Hello Rd", "+614090000")
contactregister.contacts.append(jon_jon)
contactregister.export_contacts("json")
self.assertEqual([jon_jon.to_dict()], ExportContacts.get_json_file_output())
def test_export_json_multi(self):
jon_jon = Contact("<NAME>", "123 Hello Rd", "+614090000")
ron_ron = Contact("<NAME>", "125 Welcome Plc", "+614090002")
contactregister.contacts.extend([jon_jon, ron_ron])
contactregister.export_contacts("json")
self.assertEqual([jon_jon.to_dict(), ron_ron.to_dict()], ExportContacts.get_json_file_output())
def test_export_csv_empty(self):
contactregister.export_contacts("csv")
self.assertEqual('"name","address","phone"\r\n', ExportContacts.get_csv_file_output())
def test_export_csv_single(self):
jon_jon = Contact("<NAME>", "123 Hello Rd", "+614090000")
contactregister.contacts.append(jon_jon)
contactregister.export_contacts("csv")
self.assertEqual('"name","address","phone"\r\n'
'"<NAME>","123 Hello Rd","+614090000"\r\n', ExportContacts.get_csv_file_output())
def test_export_csv_multi(self):
jon_jon = Contact("<NAME>", "124 Hello Rd", "+614090000")
ron_ron = Contact("<NAME>", "125 Welcome Plc", "+614090002")
contactregister.contacts.extend([jon_jon, ron_ron])
contactregister.export_contacts("csv")
self.assertEqual('"name","address","phone"\r\n'
'"<NAME>","124 Hello Rd","+614090000"\r\n'
'"<NAME>","125 Welcome Plc","+614090002"\r\n', ExportContacts.get_csv_file_output())
class ImportContacts(ContactRegisterTestCase):
@staticmethod
def create_json_file_with(contacts):
helpers.try_create_dir("../data/")
with open("../data/contacts.json", 'w', newline='') as file:
json.dump([contact.to_dict() for contact in contacts], file, indent=4)
@staticmethod
def create_csv_file_with(contacts):
helpers.try_create_dir("../data/")
with open("../data/contacts.csv", 'w', newline='') as file:
writer = csv.writer(file, quoting=csv.QUOTE_ALL)
writer.writerow(Contact.supported_search_fields)
[writer.writerow(contact.to_list()) for contact in contacts]
def test_import_json_empty(self):
ImportContacts.create_json_file_with([])
contactregister.import_contacts("json")
self.assertEqual(0, len(contactregister.contacts))
def test_import_json_single(self):
jon_jon = Contact("<NAME>", "123 Hello Rd", "+614090000")
ImportContacts.create_json_file_with([jon_jon])
contactregister.import_contacts("json")
self.assertEqual(1, len(contactregister.contacts))
def test_import_json_multi(self):
jon_jon = Contact("<NAME>", "124 Hello Rd", "+614090000")
ron_ron = Contact("<NAME>", "125 Welcome Plc", "+614090002")
ImportContacts.create_json_file_with([jon_jon, ron_ron])
contactregister.import_contacts("json")
self.assertEqual(2, len(contactregister.contacts))
def test_import_csv_empty(self):
ImportContacts.create_csv_file_with([])
contactregister.import_contacts("csv")
self.assertEqual(0, len(contactregister.contacts))
def test_import_csv_single(self):
jon_jon = Contact("<NAME>", "123 Hello Rd", "+614090000")
ImportContacts.create_csv_file_with([jon_jon])
contactregister.import_contacts("csv")
self.assertEqual(1, len(contactregister.contacts))
def test_import_csv_multi(self):
jon_jon = Contact("<NAME>", "124 Hello Rd", "+614090000")
ron_ron = Contact("<NAME>", "125 Welcome Plc", "+614090002")
ImportContacts.create_csv_file_with([jon_jon, ron_ron])
contactregister.import_contacts("csv")
self.assertEqual(2, len(contactregister.contacts))
if __name__ == '__main__':
unittest.main()
```
#### File: contact-register/contactregister/helpers.py
```python
import errno
import os
def get_module_files(file) -> [str]:
"""
A helper function to return the names of all non-system
.py files (not __init__, __main__, etc.) in a given module
...
Parameters
----------
file : str
the __file__ attribute of the specified module
...
Returns
-------
[str]
the list of module file names
"""
# Get the directory of the file
dir_path = os.path.dirname(os.path.realpath(file))
# Get all files in the directory and filter out Python standard module files
module_files = list(filter(lambda x: not x.startswith("__"), os.listdir(dir_path)))
# Return filenames stripped of extension
return [filename.strip('.py') for filename in module_files]
def display_command_options(options, title="Options:") -> None:
"""
A helper function to enumerate a list of options to the user
...
Parameters
----------
options : [str]
a list of options to visually enumerate for the user
title : str
a title for the command option display (default is "Options:")
"""
# Display the title, followed by a list of enumerated options
print(title)
[print(f'{i}: {options[i]}') for i in range(0, len(options))]
def get_option_selection(options, prompt="Option: ") -> str:
"""
A helper function to get integer input from the user based
on a provided list of enumerated options
...
Parameters
----------
options : [str]
a list of options that the user may select
prompt : str
a prompt to display (default is "Option: ")
...
Returns
-------
str
the option name as a string
"""
# Loop until receiving valid input
selected_option = None
while selected_option is None:
try:
# Get user input as an integer
selected_option = int(input(prompt).strip())
# Handle out of bounds case
if not (-1 < selected_option < len(options)):
selected_option = None
print("Input value is out of range, try again")
# Handle non-integer case
except ValueError:
print("Input value not a valid integer, try again")
# Return the selected option by name
return options[selected_option]
class QueryFilter:
"""A container class for query data"""
def __init__(self, field, pattern):
"""
Initialises the class with relevant parameters
...
Parameters
----------
field : str
the field on which to perform the query
pattern : str
the pattern to perform the query with
Returns
-------
QueryFilter
a new QueryFilter object
"""
self.field = field.strip()
self.pattern = pattern.strip()
def parse_query_filters(query) -> [QueryFilter]:
"""
A helper function to parse a query string as a list of
query filter objects
...
Parameters
----------
query : str
a comma-separated query string in the format of field=name
...
Returns
-------
[QueryFilter]
a list of sanitised QueryFilter objects
"""
filters = []
# Split the queries by comma into a list of lists split an equals sign
for query_filter in [query_field.split("=", 1) for query_field in query.split(",")]:
# Create a new QueryFilter object from the field and pattern on each query
filters.append(QueryFilter(query_filter[0], query_filter[1]))
return filters
def try_create_dir(directory_path) -> None:
"""
A helper function to try create a directory
...
Parameters
----------
directory_path : str
the string directory path to try create
"""
try:
# Create the specified directory
os.makedirs(directory_path)
except OSError as e:
# Handle file existing case
if e.errno != errno.EEXIST:
raise
class UnknownQueryField(Exception):
"""Raised when searched query field is unknown"""
def __init__(self, field_name):
"""
Initialises the class with relevant parameters
...
Parameters
----------
field_name : str
the name of offending field
Returns
-------
UnknownQueryField
a new UnknownQueryField object
"""
self.field = field_name
class MalformedQuery(Exception):
"""Raised when parsed query is malformed"""
def __init__(self, query):
"""
Initialises the class with relevant parameters
...
Parameters
----------
query : str
the value of the malformed query
Returns
-------
MalformedQuery
a new MalformedQuery object
"""
self.query = query
class NonexistentFile(Exception):
"""Raised when attempting to import a nonexistent file"""
def __init__(self, filepath):
"""
Initialises the class with relevant parameters
...
Parameters
----------
filepath : str
the missing filepath
Returns
-------
NonexistentFile
a new NonexistentFile object
"""
self.filepath = filepath
``` |
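For reference, the query syntax used throughout the tests above decomposes like this:
```python
filters = parse_query_filters("name=Jon*, address=12? Hello*")
print([(f.field, f.pattern) for f in filters])
# [('name', 'Jon*'), ('address', '12? Hello*')]
```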
{
"source": "J-Mo63/usdmanager",
"score": 2
} |
#### File: usdmanager/plugins/__init__.py
```python
from Qt.QtCore import QObject
class Plugin(QObject):
""" Classes in modules in the plugins directory that inherit from Plugin will be automatically initialized when the
main window loads.
"""
def __init__(self, parent, **kwargs):
""" Initialize the plugin.
:Parameters:
parent : `UsdMngrWindow`
Main window
"""
super(Plugin, self).__init__(parent, **kwargs)
``` |
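Per the class docstring, any `Plugin` subclass found in a module under the plugins directory is instantiated automatically with the main window. A hypothetical minimal plugin module:
```python
# hypothetical file, e.g. plugins/hello.py
from . import Plugin


class HelloPlugin(Plugin):
    """ Example plugin that just reports when the main window loads. """
    def __init__(self, parent, **kwargs):
        super(HelloPlugin, self).__init__(parent, **kwargs)
        print("HelloPlugin initialized for window:", parent)
```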
{
"source": "jmoch1214/gcpy",
"score": 3
} |
#### File: gcpy/tests/test_units.py
```python
import pytest
import numpy as np
from gcpy.units import *
def test_adjust_units():
for v in ['kg/m2/s', 'kgm-2s-1', 'kgm^-2s^-1']:
assert adjust_units(v) == 'kg/m2/s'
for v in ['kgC/m2/s', 'kgCm-2s-1', 'kgCm^-2s^-1',
'kgc/m2/s', 'kgcm-2s-1', 'kgcm^-2s^-1']:
assert adjust_units(v) == 'kgC/m2/s'
for v in ['molec/cm2/s', 'moleccm-2s-1', 'moleccm^-2s^-1']:
assert adjust_units(v) == 'molec/cm2/s'
for v in ['atoms C/cm2/s', 'atomsC/cm2/s']:
assert adjust_units(v) == 'atomsC/cm2/s'
```
#### File: gcpy/gcpy/units.py
```python
import numpy as np
import xarray as xr
def adjust_units(units):
'''
Creates a consistent unit string that will be used in the unit
conversion routines below.
Args:
units : str
Input unit string.
Returns:
adjusted_units: str
Output unit string, adjusted to a consistent value.
Remarks:
Unit list is incomplete -- currently is geared to units from
common model diagnostics (e.g. kg/m2/s, kg, and variants).
Examples:
>>> import gcpy
>>> print(gcpy.adjust_units('kg/m2/s'))
kg/m2/s
>>> print(gcpy.adjust_units('kg m-2 s-1'))
kg/m2/s
>>> print(gcpy.adjust_units('kg m^-2 s^-1'))
kg/m2/s
'''
# Strip all spaces in the unit string
units_squeezed = units.replace(' ', '')
if units_squeezed in ['kg/m2/s', 'kgm-2s-1', 'kgm^-2s^-1']:
unit_desc = 'kg/m2/s'
elif units_squeezed in ['kgC/m2/s', 'kgCm-2s-1', 'kgCm^-2s^-1',
'kgc/m2/s', 'kgcm-2s-1', 'kgcm^-2s^-1']:
unit_desc = 'kgC/m2/s'
elif units_squeezed in [ 'molec/cm2/s', 'moleccm-2s-1', 'moleccm^-2s^-1']:
unit_desc = 'molec/cm2/s'
else:
unit_desc = units_squeezed
return unit_desc
def convert_kg_to_target_units(data_kg, target_units, kg_to_kgC):
'''
Converts a data array from kg to one of several types of target units.
Args:
data_kg : numpy ndarray
Input data array, in units of kg.
target_units : str
String containing the name of the units to which the "data_kg"
argument will be converted. Examples: 'Tg', 'Tg C', 'Mg',
'Mg C', 'kg, 'kg C', etc.
kg_to_kg_C : float
Conversion factor from kg to kg carbon.
Returns:
data : numpy ndarray
Ouptut data array, converted to the units specified
by the 'target_units' argument.
Remarks:
At present, only those unit conversions corresponding to the
GEOS-Chem benchmarks have been implemented.
This is an internal routine, which is meant to be called
directly from convert_units.
'''
# Convert to target unit
if target_units == 'Tg':
data = data_kg * 1e-9
elif target_units == 'Tg C':
data = data_kg * kg_to_kgC * 1.0e-9
elif target_units == 'Gg':
data = data_kg * 1e-6
elif target_units == 'Gg C':
data = data_kg * kg_to_kgC * 1.0e-6
elif target_units == 'Mg':
data = data_kg * 1e-3
elif target_units == 'Mg C':
data = data_kg * kg_to_kgC * 1.0e-3
elif target_units == 'kg':
data = data_kg
elif target_units == 'kg C':
data = data_kg * kg_to_kgC
elif target_units == 'g':
data = data_kg * 1e3
elif target_units == 'g C':
data = data_kg * kg_to_kgC * 1.0e3
else:
msg = 'Target units {} are not yet supported!'.format(target_units)
raise ValueError(msg)
# Return converted data
return data
def convert_units(dr, species_name, species_properties, target_units,
interval=None, area_m2=None, delta_p=None, box_height=None):
'''
Converts data stored in an xarray DataArray object from its native
units to a target unit.
Args:
-----
dr : xarray DataArray
Data to be converted from native units to target units.
species_name : str
Name of the species corresponding to the data stored in "dr".
species_properties : dict
Dictionary containing species properties (e.g. molecular
weights and other metadata) for the given species.
target_units : str
Units to which the data will be converted.
Keyword Args (optional):
------------------------
interval : float
The length of the averaging period in seconds.
area_m2 : xarray DataArray
Surface area in square meters
delta_p : xarray DataArray
Delta-pressure between top and bottom edges of grid box (dry air)
in hPa
box_height : xarray DataArray
Grid box height in meters
Returns:
--------
dr_new : xarray DataArray
Data converted to target units.
Remarks:
--------
At present, only certain types of unit conversions have been
implemented (corresponding to the most commonly used unit
conversions for model benchmark output).
Example:
--------
>>> import gcpy
>>> import xarray as xr
>>> import json
>>> ds = xr.open_dataset("myfile.nc")
>>> dr = ds["CO"]
>>> properties = json.load(open("species_database.json"))
>>> dr = ds["CO"]
>>> dr_new = convert_units(dr, "CO", properties.get("CO"),
"Tg", interval=86400.0, area_m2=ds["AREA"])
'''
# Get species molecular weight information
mw_g = species_properties.get('MW_g')
emitted_mw_g = species_properties.get('EmMW_g')
moles_C_per_mole_species = species_properties.get('MolecRatio')
# ==============================
# Compute conversion factors
# ==============================
# Physical constants
Avo = 6.022140857e+23 # molec/mol
mw_air = 28.97 # g/mole dry air
g0 = 9.80665 # m/s2
# Get a consistent value for the units string
# (ignoring minor differences in formatting)
units = adjust_units(dr.units)
# Error checks
if units == 'molmol-1dry' and area_m2 is None:
raise ValueError('Conversion from {} to {} for {} requires area_m2 as input'.format(units,target_units,species_name))
if units == 'molmol-1dry' and delta_p is None:
raise ValueError('Conversion from {} to {} for {} requires delta_p as input'.format(units,target_units,species_name))
if 'g' in target_units and mw_g is None:
raise ValueError('Conversion from {} to {} for {} requires MW_g definition in species_database.json'.format(units,target_units,species_name))
# Conversion factor for kg species to kg C
kg_to_kgC = (emitted_mw_g * moles_C_per_mole_species) / mw_g
# Mass of dry air in kg (required when converting from v/v)
if units == 'molmol-1dry':
air_mass = delta_p * 100.0 / g0 * area_m2
# Conversion factor for v/v to kg
# v/v * kg dry air / g/mol dry air * g/mol species = kg species
if units == 'molmol-1dry' and 'g' in target_units:
vv_to_kg = air_mass / mw_air * mw_g
# Conversion factor for v/v to molec/cm3
# v/v * kg dry air * mol/g dry air * molec/mol dry air /
# (area_m2 * box_height ) * 1m3/10^6cm3 = molec/cm3
if units == 'molmol-1dry' and 'molec' in target_units:
vv_to_MND = air_mass / mw_air * Avo / ( area_m2 * box_height) / 1e6
# ==============================
# Compute target units
# ==============================
if units == 'kg/m2/s':
data_kg = dr.values * area_m2.values * interval
data = convert_kg_to_target_units(data_kg, target_units, kg_to_kgC)
elif units == 'kgC/m2/s':
data_kg = dr.values * area_m2.values * interval / kg_to_kgC
data = convert_kg_to_target_units(data_kg, target_units, kg_to_kgC)
elif units == 'kg':
data_kg = dr.values
data = convert_kg_to_target_units(data_kg, target_units, kg_to_kgC)
elif units == 'kgC':
data_kg = dr.values / kg_to_kgC
data = convert_kg_to_target_units(data_kg, target_units, kg_to_kgC)
# elif units == 'molec/cm2/s':
# # Implement later
# elif units == 'atomsC/cm2/s':
# implement later
elif units == 'molmol-1dry':
if 'g' in target_units:
data_kg = dr.values * vv_to_kg
data = convert_kg_to_target_units(data_kg, target_units, kg_to_kgC)
elif 'molec' in target_units:
data = dr.values * vv_to_MND
else:
raise ValueError('Units ({}) in variable {} are not supported'.format(units, species_name))
# ==============================
# Return result
# ==============================
# Create a new DataArray. This will be exactly the same as the old
# DataArray, except that the data will have been converted to the
# target_units, and the units string will have been adjusted accordingly.
dr_new = xr.DataArray(data, name=dr.name, coords=dr.coords,
dims=dr.dims, attrs=dr.attrs)
dr_new.attrs['units'] = target_units
# Return to calling routine
return dr_new
``` |
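Two spot checks on `convert_kg_to_target_units`; the 0.5 kg-to-kgC factor is arbitrary, chosen only to make the arithmetic visible:
```python
import numpy as np

data_kg = np.array([1.0e9, 2.0e9])
print(convert_kg_to_target_units(data_kg, 'Tg', 1.0))    # [1. 2.]  (1e9 kg = 1 Tg)
print(convert_kg_to_target_units(data_kg, 'Mg C', 0.5))  # [ 500000. 1000000.]  (kg * 0.5 * 1e-3)
```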
{
"source": "jmodelcxc/eng_archive",
"score": 2
} |
#### File: Ska/engarchive/converters.py
```python
from __future__ import print_function, division, absolute_import
import logging
import numpy
import sys
from collections import OrderedDict
from six.moves import zip
import numpy as np
import Ska.Numpy
from Chandra.Time import DateTime
import Ska.tdb
from . import units
MODULE = sys.modules[__name__]
logger = logging.getLogger('engarchive')
class NoValidDataError(Exception):
pass
class DataShapeError(Exception):
pass
def quality_index(dat, colname):
"""Return the index for `colname` in `dat`"""
colname = colname.split(':')[0]
return list(dat.dtype.names).index(colname)
def numpy_converter(dat):
return Ska.Numpy.structured_array(dat, colnames=dat.dtype.names)
def convert(dat, content):
# Zero-length file results in `dat is None`
if dat is None:
raise NoValidDataError
try:
converter = getattr(MODULE, content.lower())
except AttributeError:
converter = numpy_converter
return converter(dat)
def generic_converter(prefix=None, add_quality=False, aliases=None):
"""Convert an input FITS recarray assuming that it has a TIME column.
If ``add_prefix`` is set then add ``content_`` as a prefix
to the data column names. If ``add_quality`` is set then add a QUALITY
column with all values False.
"""
def _convert(dat):
colnames = dat.dtype.names
colnames_out = [x.upper() for x in colnames]
if aliases:
colnames_out = [aliases.get(x, x).upper() for x in colnames_out]
if prefix:
# Note to self: never change an enclosed reference, i.e. don't do
# prefix = prefix.upper() + '_'
# You will lose an hour again figuring this out if so.
PREFIX = prefix.upper() + '_'
colnames_out = [(x if x in ('TIME', 'QUALITY') else PREFIX + x)
for x in colnames_out]
arrays = [dat.field(x) for x in colnames]
if add_quality:
descrs = [(x,) + y[1:] for x, y in zip(colnames_out, dat.dtype.descr)]
quals = numpy.zeros((len(dat), len(colnames) + 1), dtype=numpy.bool)
descrs += [('QUALITY', numpy.bool, (len(colnames) + 1,))]
arrays += [quals]
else:
descrs = [(name, array.dtype.str, array.shape[1:])
for name, array in zip(colnames_out, arrays)]
return numpy.rec.fromarrays(arrays, dtype=descrs)
return _convert
def get_bit_array(dat, in_name, out_name, bit_index):
bit_indexes = [int(bi) for bi in bit_index.split(',')]
bit_index = max(bit_indexes)
if dat[in_name].shape[1] < bit_index:
raise DataShapeError('column {} has shape {} but need at least {}'
.format(in_name, dat[in_name].shape[1], bit_index + 1))
if len(bit_indexes) > 1:
mult = 1
out_array = np.zeros(len(dat), dtype=np.uint32) # no more than 32 bit indexes
for bit_index in reversed(bit_indexes):
# Note: require casting mult and 0 to uint32 because recent versions of numpy
# disallow in-place adding of int64 to uint32.
out_array += np.where(dat[in_name][:, bit_index], np.uint32(mult), np.uint32(0))
mult *= 2
else:
try:
tscs = Ska.tdb.msids[out_name].Tsc
scs = {tsc['LOW_RAW_COUNT']: tsc['STATE_CODE'] for tsc in tscs}
except (KeyError, AttributeError):
scs = ['OFF', 'ON ']
# CXC telemetry stores state code vals with trailing spaces so all match
# in length. Annoying, but reproduce this here for consistency so
# fetch Msid.raw_vals does the right thing.
max_len = max(len(sc) for sc in scs.values())
fmtstr = '{:' + str(max_len) + 's}'
scs = [fmtstr.format(val) for key, val in scs.items()]
out_array = np.where(dat[in_name][:, bit_index], scs[1], scs[0])
return out_array
def generic_converter2(msid_cxc_map, default_dtypes=None):
"""Convert an input FITS recarray assuming that it has a TIME column. Use the
``msid_cxc_map`` to define the list of output eng archive MSIDs (keys) and the
corresponding colnames in the CXC archive FITS file (values).
The CXC values can contain an optional bit specifier in the form <colname>:<N>
where N is the bit selector referenced from 0 as the leftmost bit.
:param msid_cxc_map: dict of out_name => in_name mapping
"""
def _convert(dat):
# Make quality bool array with entries for TIME, QUALITY, then all other cols
out_names = ['TIME', 'QUALITY'] + list(msid_cxc_map.keys())
out_quality = np.zeros(shape=(len(dat), len(out_names)), dtype=np.bool)
out_arrays = {'TIME': dat['TIME'],
'QUALITY': out_quality}
for out_name, in_name in msid_cxc_map.items():
if ':' in in_name:
in_name, bit_index = in_name.split(':')
out_array = get_bit_array(dat, in_name, out_name, bit_index)
quality = dat['QUALITY'][:, quality_index(dat, in_name)]
else:
if in_name in dat.dtype.names:
out_array = dat[in_name]
quality = dat['QUALITY'][:, quality_index(dat, in_name)]
else:
# Handle column that is intermittently available in `dat` by using the
# supplied default dtype. Quality is True (missing) everywhere.
out_array = np.zeros(shape=len(dat), dtype=default_dtypes[out_name])
quality = True
assert out_array.ndim == 1
out_arrays[out_name] = out_array
out_quality[:, out_names.index(out_name)] = quality
out = Ska.Numpy.structured_array(out_arrays, out_names)
return out
return _convert
orbitephem0 = generic_converter('orbitephem0', add_quality=True)
lunarephem0 = generic_converter('lunarephem0', add_quality=True)
solarephem0 = generic_converter('solarephem0', add_quality=True)
orbitephem1 = generic_converter('orbitephem1', add_quality=True)
lunarephem1 = generic_converter('lunarephem1', add_quality=True)
solarephem1 = generic_converter('solarephem1', add_quality=True)
angleephem = generic_converter(add_quality=True)
def parse_alias_str(alias_str, invert=False):
aliases = OrderedDict()
for line in alias_str.strip().splitlines():
cxcmsid, msid = line.split()[:2]
if invert:
aliases[msid] = cxcmsid
else:
aliases[cxcmsid] = msid
return aliases
ALIASES = {'simdiag': """
RAMEXEC 3SDSWELF SEA CSC Executing from RAM
DSTACKPTR 3SDPSTKP SEA Data Stack Ptr
TSCEDGE 3SDTSEDG TSC Tab Edge Detection Flags
FAEDGE 3SDFAEDG FA Tab Edge Detection Flags
MJFTIME 3SDMAJFP Major Frame Period Time Measured by SEA
MRMDEST 3SDRMOVD Most Recent Motor Move Destination
TSCTABADC 3SDTSTSV TSC Tab Position Sensor A/D converter
FATABADC 3SDFATSV FA Tab Position Sensor A/D Converter
AGRNDADC 3SDAGV Analog Ground A/D Converter Reading
P15VADC 3SDP15V +15V Power Supply A/D Converter Reading
P5VADC 3SDP5V +5V Power Supply A/D Converter Reading
N15VADC 3SDM15V -15V Power Supply A/D Converter Reading
FLEXATEMPADC 3SDFLXAT Flexture A Thermistor A/D Converter
FLEXBTEMPADC 3SDFLXBT Flexture B Thermistor A/D Converter
FLEXCTEMPADC 3SDFLXCT Flexture C Thermistor A/D Converter
TSCMTRTEMPADC 3SDTSMT TSC Motor Thermistor A/D Converter
FAMTRTEMPADC 3SDFAMT FA Motor Thermistor A/D Converter
PSUTEMPADC 3SDPST SEA Power Supply Thermistor A/D Converter
BOXTEMPADC 3SDBOXT SEA Box Thermistor A/D Converter
RAMFAILADDR 3SDRMFAD RAM Most Recent detected Fail Address
TSCTABWID 3SDTSTBW TSC Most Recent detected Tab Width
FATABWID 3SDFATBW FA Most Recent detected Tab Width
SYNCLOSS 3SDSYRS Process Reset Due Synchronization Loss
WARMRESET 3SDWMRS Processor Warm Reset
TSCHISTO 3SDTSP TSC Most Recent PWM Histogram
FAHISTO 3SDFAP FA Most Recent PWM Histogram
INVCMDCODE 3SDINCOD SEA Invalid CommandCode
""",
'sim_mrg': """
TLMUPDATE 3SEATMUP "Telemetry Update Flag"
SEAIDENT 3SEAID "SEA Identification Flag"
SEARESET 3SEARSET "SEA Reset Flag"
PROMFAIL 3SEAROMF "SEA PROM Checksum Flag"
INVCMDGROUP 3SEAINCM "SEA Invalid Command Group Flag"
TSCMOVING 3TSCMOVE "TSC In Motion Flag"
FAMOVING 3FAMOVE "FA In Motion Flag"
FAPOS 3FAPOS "FA Position"
TSCPOS 3TSCPOS "TSC Position"
PWMLEVEL 3MRMMXMV "Max Power Motor Volt recent move"
LDRTMECH 3LDRTMEK "Last Detected Reference Mechanism Tab"
LDRTNUM 3LDRTNO "Last Detected Reference Tab Number"
LDRTRELPOS 3LDRTPOS "Last Detected Reference Relative Position"
FLEXATEMP 3FAFLAAT "Flexture A Temperature"
FLEXBTEMP 3FAFLBAT "Flexture B Temperature"
FLEXCTEMP 3FAFLCAT "Flexture C Temperature"
TSCMTRTEMP 3TRMTRAT "TSC Motor Temperature"
FAMTRTEMP 3FAMTRAT "FA Motor Temperature"
PSUTEMP 3FAPSAT "SEA Power Supply Temperature"
BOXTEMP 3FASEAAT "SEA Box Temperature"
STALLCNT 3SMOTSTL "SEA Motor Stall Counter"
TAB2AUTOPOS 3STAB2EN "SEA Tab 2 Auto Position Update Status"
MTRDRVRLY 3SMOTPEN "SEA Motor Driver Power Relay status"
MTRSELRLY 3SMOTSEL "SEA Motor Selection Relay Status"
HTRPWRRLY 3SHTREN "SEA Heater Power Relay Status"
RAMFAIL 3SEARAMF "SEA RAM Failure Detected Flag"
MTROVRCCNT 3SMOTOC "Motor Drive Overcurrent Counter"
PENDCMDCNT 3SPENDC "SEA Pending Command Count"
FLEXATSET 3SFLXAST "Flexture A Temperature Setpoint"
FLEXBTSET 3SFLXBST "Flexture B Temperature Setpoint"
FLEXCTSET 3SFLXCST "Flexture C Temperature Setpoint"
""",
'hrc0ss': """
TLEVART 2TLEV1RT
VLEVART 2VLEV1RT
SHEVART 2SHEV1RT
TLEVART 2TLEV2RT
VLEVART 2VLEV2RT
SHEVART 2SHEV2RT
""",
'hrc0hk': """
SCIDPREN:0,1,2,3,8,9,10 HRC_SS_HK_BAD
P24CAST:7 224PCAST
P15CAST:7 215PCAST
N15CAST:7 215NCAST
SPTPAST 2SPTPAST
SPBPAST 2SPBPAST
IMTPAST 2IMTPAST
IMBPAST 2IMBPAST
MTRSELCT:3 2NYMTAST
MTRSELCT:4 2PYMTAST
MTRSELCT:5 2CLMTAST
MTRSELCT:6 2DRMTAST
MTRSELCT:7 2ALMTAST
MTRSTATR:0 2MSMDARS
MTRSTATR:1 2MDIRAST
MTRSTATR:2 2MSNBAMD
MTRSTATR:3 2MSNAAMD
MTRSTATR:4 2MSLBAMD
MTRSTATR:5 2MSLAAMD
MTRSTATR:6 2MSPRAMD
MTRSTATR:7 2MSDRAMD
MTRCMNDR:0 2MCMDARS
MTRCMNDR:2 2MCNBAMD
MTRCMNDR:3 2MCNAAMD
MTRCMNDR:4 2MCLBAMD
MTRCMNDR:5 2MCLAAMD
MTRCMNDR:6 2MCPRAMD
MTRCMNDR:7 2MDRVAST
SCTHAST 2SCTHAST
MTRITMP:1 2SMOIAST
MTRITMP:2 2SMOTAST
MTRITMP:5 2DROTAST
MTRITMP:6 2DROIAST
MLSWENBL:3 2SFLGAST
MLSWENBL:4 2OSLSAST
MLSWENBL:5 2OPLSAST
MLSWENBL:6 2CSLSAST
MLSWENBL:7 2CPLSAST
MLSWSTAT:2 2OSLSADT
MLSWSTAT:3 2OSLSAAC
MLSWSTAT:4 2OPLSAAC
MLSWSTAT:5 2CSLSADT
MLSWSTAT:6 2CSLSAAC
MLSWSTAT:7 2CPLSAAC
FCPUAST 2FCPUAST
FCPVAST 2FCPVAST
CBHUAST 2CBHUAST
CBLUAST 2CBLUAST
CBHVAST 2CBHVAST
CBLVAST 2CBLVAST
WDTHAST 2WDTHAST
SCIDPREN:4 2CLMDAST
SCIDPREN:5 2FIFOAVR
SCIDPREN:6 2OBNLASL
SCIDPREN:7 2SPMDASL
SCIDPREN:11 2EBLKAVR
SCIDPREN:12 2CBLKAVR
SCIDPREN:13 2ULDIAVR
SCIDPREN:14 2WDTHAVR
SCIDPREN:15 2SHLDAVR
HVPSSTAT:0 2SPONST
HVPSSTAT:1 2SPCLST
HVPSSTAT:2 2S1ONST
HVPSSTAT:3 2IMONST
HVPSSTAT:4 2IMCLST
HVPSSTAT:5 2S2ONST
S1HVST 2S1HVST
S2HVST 2S2HVST
C05PALV 2C05PALV
C15PALV 2C15PALV
C15NALV 2C15NALV
C24PALV 2C24PALV
IMHVLV 2IMHVLV
IMHBLV 2IMHBLV
SPHVLV 2SPHVLV
SPHBLV 2SPHBLV
S1HVLV 2S1HVLV
S2HVLV 2S2HVLV
PRBSCR 2PRBSCR
PRBSVL 2PRBSVL
ULDIALV 2ULDIALV
LLDIALV 2LLDIALV
FEPRATM 2FEPRATM
CALPALV 2CALPALV
GRDVALV 2GRDVALV
RSRFALV 2RSRFALV
SPINATM 2SPINATM
IMINATM 2IMINATM
LVPLATM 2LVPLATM
SPHVATM 2SPHVATM
IMHVATM 2IMHVATM
SMTRATM 2SMTRATM
FE00ATM 2FE00ATM
CE00ATM 2CE00ATM
CE01ATM 2CE01ATM
""",
}
CXC_TO_MSID = {key: parse_alias_str(val) for key, val in ALIASES.items()}
MSID_TO_CXC = {key: parse_alias_str(val, invert=True) for key, val in ALIASES.items()}
def sim_mrg(dat):
"""
Custom converter for SIM_MRG.
There is a bug in CXCDS L0 SIM decom wherein the 3LDRTMEK MSID is
incorrectly assigned (TSC and FA are reversed). The calibration
of 3LDRTPOS from steps to mm is then also wrong because it uses
the FA conversion instead of TSC.
This function fixes 3LDRTMEK, then backs out the (incorrect) 3LDRTPOS
steps to mm conversion and re-does it correctly using the TSC conversion.
Note that 3LDRTMEK is (by virtue of the way mission operations run)
always "TSC".
"""
# Start with the generic converter
out = generic_converter(aliases=CXC_TO_MSID['sim_mrg'])(dat)
    # Now do the fixes.  FOT mech has stated that 3LDRTMEK is always 'TSC'
    # in practice, so entries incorrectly decommed as 'FA ' are reset here.
bad = out['3LDRTMEK'] == b'FA '
if np.count_nonzero(bad):
out['3LDRTMEK'][bad] = b'TSC'
pos_tsc_steps = units.converters['mm', 'FASTEP'](out['3LDRTPOS'][bad])
out['3LDRTPOS'][bad] = units.converters['TSCSTEP', 'mm'](pos_tsc_steps)
return out
simdiag = generic_converter(aliases=CXC_TO_MSID['simdiag'])
hrc0ss = generic_converter2(MSID_TO_CXC['hrc0ss'])
def hrc0hk(dat):
# Read the data and allow for missing columns in input L0 HK file.
default_dtypes = {'2CE00ATM': 'f4',
'2CE01ATM': 'f4'}
out = generic_converter2(MSID_TO_CXC['hrc0hk'], default_dtypes)(dat)
# Set all HRC HK data columns to bad quality where HRC_SS_HK_BAD is not zero
# First three columns are TIME, QUALITY, and HRC_SS_HK_BAD -- do not filter these.
bad = out['HRC_SS_HK_BAD'] > 0
if np.any(bad):
out['QUALITY'][bad, 3:] = True
logger.info('Setting {} readouts of all HRC HK telem to bad quality (bad SCIDPREN)'
.format(np.count_nonzero(bad)))
# Detect the secondary-science byte-shift anomaly by finding out-of-range 2SMTRATM values.
# For those bad frames:
# - Set bit 10 (from LSB) of HRC_SS_HK_BAD
# - Set all analog MSIDs (2C05PALV and later in the list) to bad quality
bad = (out['2SMTRATM'] < -20) | (out['2SMTRATM'] > 50)
if np.any(bad):
out['HRC_SS_HK_BAD'][bad] |= 2 ** 10 # 1024
analogs_index0 = list(out.dtype.names).index('2C05PALV')
out['QUALITY'][bad, analogs_index0:] = True
logger.info('Setting {} readouts of analog HRC HK telem to bad quality (bad 2SMTRATM)'
.format(np.count_nonzero(bad)))
return out
def obc4eng(dat):
"""
At 2014:342:XX:XX:XX, patch PR-361 was applied which transitioned 41 OBA thermistors to
read out in wide-mode. After this time the data in the listed OOBTHRxx MSIDs became
invalid while the OOBTHRxx_WIDE MSIDs became valid. This converter simply copies the
*_WIDE values to the original MSIDs after the time of patch activation. The *_WIDE
MSIDs are not available in the eng archive (by the _WIDE names).
"""
# MSIDs OOBTHR<msid_num> that went to _WIDE after the patch, which was done in parts A
# and B.
msid_nums = {'a': '08 09 10 11 12 13 14 15 17 18 19 20 21 22 23 24 25 26 27 28 29'.split(),
'b': '30 31 33 34 35 36 37 38 39 40 41 44 45 46 49 50 51 52 53 54'.split(),
'c': '02 03 04 05 06 07'.split()
}
# Convert using the baseline converter
out = numpy_converter(dat)
# The patch times below correspond to roughly the middle of the major frame where
# patches A and B were applied, respectively.
patch_times = {'a': DateTime('2014:342:16:29:30').secs,
'b': DateTime('2014:342:16:32:45').secs,
'c': DateTime('2017:312:16:11:16').secs}
for patch in ('a', 'b', 'c'):
# Set a mask defining times after the activation of wide-range telemetry in PR-361
mask = out['TIME'] > patch_times[patch]
if np.any(mask):
for msid_num in msid_nums[patch]:
msid = 'OOBTHR' + msid_num
msid_wide = msid + '_WIDE'
print('Fixing MSID {}'.format(msid))
out[msid][mask] = out[msid_wide][mask]
q_index = quality_index(out, msid)
q_index_wide = quality_index(out, msid_wide)
out['QUALITY'][mask, q_index] = out['QUALITY'][mask, q_index_wide]
return out
def tel2eng(dat):
"""
At 2014:342:XX:XX:XX, patch PR-361 was applied which transitioned 41 OBA thermistors to
read out in wide-mode. As 4OAVOBAT is an average of all these MSIDs and calculated on board,
only the wide version of this MSID is valid after this patch is applied.
This converter simply copies the 4OAVOBAT_WIDE values after the time of patch activation to
4OAVOBAT. 4OAVOBAT_WIDE is not available in the eng archive (by the _WIDE name).
"""
# Convert using the baseline converter
out = numpy_converter(dat)
# 4OAVOBAT is modified by both patches since it is an average of MSIDs in both parts of the
# patch. Use the second time value as this is when the process is complete. See obc4eng() for
# both times and further details.
patch_time = DateTime('2014:342:16:32:45').secs
mask = out['TIME'] > patch_time
if np.any(mask):
print('Fixing MSID 4OAVOBAT')
out['4OAVOBAT'][mask] = out['4OAVOBAT_WIDE'][mask]
q_index = quality_index(out, '4OAVOBAT')
q_index_wide = quality_index(out, '4OAVOBAT_WIDE')
out['QUALITY'][mask, q_index] = out['QUALITY'][mask, q_index_wide]
return out
def acisdeahk(dat):
"""Take the archive ACIS-0 DEA HKP data and convert to a format that is
consistent with normal eng0 files. ACIS-0 housekeeping has data stored
in query-records, one row per DEA statistic query. Gather all the
time-synced queries corresponding to columns for the acis0hkp table
and put into a single row. Write out to temp files and modify self->{arch_files}.
"""
logger.info('Converting acisdeahk data to eng0 format')
cols = _get_deahk_cols()
col_query_ids = tuple(x['query_id'] for x in cols)
col_names = tuple(x['name'].upper() for x in cols)
# Filter only entries with ccd_id >= 10 which indicates data from the I/F control
dat = pyfits_to_recarray(dat)
rows = dat[dat['CCD_ID'] >= 10]
if len(rows) == 0:
raise NoValidDataError()
# Go through input data one row (query) at a time and assemble contemporaneous
# queries into a single row with a column for each query value.
# Collect each assembled row into %data_out for writing to a FITS bin table
block_idxs = 1 + numpy.flatnonzero(numpy.abs(rows['TIME'][1:] - rows['TIME'][:-1]) > 0.001)
block_idxs = numpy.hstack([[0], block_idxs, [len(rows)]])
query_val_tus = rows['QUERY_VAL_TU']
query_vals = rows['QUERY_VAL']
query_ids = rows['QUERY_ID']
times = rows['TIME']
outs = []
for i0, i1 in zip(block_idxs[:-1], block_idxs[1:]):
# Map query_id to an index into rows for each row in the block
id_idxs = dict((query_ids[i], i) for i in range(i0, i1))
# Make tuples of the values and qual flags corresponding to each DEAHK_COLUMN
bads = tuple(query_id not in id_idxs for query_id in col_query_ids)
vals = tuple((0.0 if bad else query_vals[id_idxs[query_id]])
for bad, query_id in zip(bads, col_query_ids))
val_tus = tuple((0 if bad else query_val_tus[id_idxs[query_id]])
for bad, query_id in zip(bads, col_query_ids))
# Now have another pass at finding bad values. Take these out now so the
# 5min and daily stats are not frequently corrupted.
bads = tuple(True if (val_tu == 65535 or numpy.isnan(val)) else bad
for bad, val, val_tu in zip(bads, vals, val_tus))
quality = (False, False) + bads
outs.append((times[i0], quality) + vals)
dtype = [('TIME', numpy.float64),
             ('QUALITY', numpy.bool_, (len(col_names) + 2,))]
dtype += [(col_name, numpy.float32) for col_name in col_names]
return numpy.rec.fromrecords(outs, dtype=dtype)
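# Illustrative note on the block grouping above (toy numbers, not real telemetry):
# rows whose TIME values agree to within 1 msec are treated as one contemporaneous
# query block.  For example:
#   >>> t = numpy.array([10.0, 10.0, 10.0, 42.5, 42.5])
#   >>> 1 + numpy.flatnonzero(numpy.abs(t[1:] - t[:-1]) > 0.001)
#   array([3])
# so block_idxs becomes [0, 3, 5] and rows 0:3 and 3:5 form the two output records.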
def _get_deahk_cols():
out = [
{
"query_id": 1,
"name": "tmp_bep_pcb",
"unit": "K",
"descr": "DPA Thermistor 1 - BEP PC Board"
},
{
"query_id": 2,
"name": "tmp_bep_osc",
"unit": "K",
"descr": "DPA Thermistor 2 - BEP Oscillator"
},
{
"query_id": 3,
"name": "tmp_fep0_mong",
"unit": "K",
"descr": "DPA Thermistor 3 - FEP 0 Mongoose"
},
{
"query_id": 4,
"name": "tmp_fep0_pcb",
"unit": "K",
"descr": "DPA Thermistor 4 - FEP 0 PC Board"
},
{
"query_id": 5,
"name": "tmp_fep0_actel",
"unit": "K",
"descr": "DPA Thermistor 5 - FEP 0 ACTEL"
},
{
"query_id": 6,
"name": "tmp_fep0_ram",
"unit": "K",
"descr": "DPA Thermistor 6 - FEP 0 RAM"
},
{
"query_id": 7,
"name": "tmp_fep0_fb",
"unit": "K",
"descr": "DPA Thermistor 7 - FEP 0 Frame Buf"
},
{
"query_id": 8,
"name": "tmp_fep1_mong",
"unit": "K",
"descr": "DPA Thermistor 8 - FEP 1 Mongoose"
},
{
"query_id": 9,
"name": "tmp_fep1_pcb",
"unit": "K",
"descr": "DPA Thermistor 9 - FEP 1 PC Board"
},
{
"query_id": 10,
"name": "tmp_fep1_actel",
"unit": "K",
"descr": "DPA Thermistor 10 - FEP 1 ACTEL"
},
{
"query_id": 11,
"name": "tmp_fep1_ram",
"unit": "K",
"descr": "DPA Thermistor 11 - FEP 1 RAM"
},
{
"query_id": 12,
"name": "tmp_fep1_fb",
"unit": "K",
"descr": "DPA Thermistor 12 - FEP 1 Frame Buf"
},
{
"query_id": 15,
"name": "fptemp_12",
"unit": "K",
"descr": "Focal Plane Temp. Board 12"
},
{
"query_id": 16,
"name": "fptemp_11",
"unit": "K",
"descr": "Focal Plane Temp. Board 11"
},
{
"query_id": 17,
"name": "dpagndref1",
"unit": "V",
"descr": "DPA Ground Reference 1"
},
{
"query_id": 18,
"name": "dpa5vhka",
"unit": "V",
"descr": "DPA 5V Housekeeping A"
},
{
"query_id": 19,
"name": "dpagndref2",
"unit": "V",
"descr": "DPA Ground Reference 2"
},
{
"query_id": 20,
"name": "dpa5vhkb",
"unit": "V",
"descr": "DPA 5V Housekeeping B"
},
{
"query_id": 25,
"name": "dea28volta",
"unit": "V",
"descr": "Primary Raw DEA 28V DC"
},
{
"query_id": 26,
"name": "dea24volta",
"unit": "V",
"descr": "Primary Raw DEA 24V DC"
},
{
"query_id": 27,
"name": "deam15volta",
"unit": "V",
"descr": "Primary Raw DEA -15.5V"
},
{
"query_id": 28,
"name": "deap15volta",
"unit": "V",
"descr": "Primary Raw DEA +15.5V"
},
{
"query_id": 29,
"name": "deam6volta",
"unit": "V",
"descr": "Primary Raw DEA -6V DC"
},
{
"query_id": 30,
"name": "deap6volta",
"unit": "V",
"descr": "Primary Raw DEA +6V DC"
},
{
"query_id": 31,
"name": "rad_pcb_a",
"descr": "Relative Dose Rad. Monitor Side A"
},
{
"query_id": 32,
"name": "gnd_1",
"unit": "V",
"descr": "Interface Ground Reference"
},
{
"query_id": 33,
"name": "dea28voltb",
"unit": "V",
"descr": "Backup Raw DEA 28V DC"
},
{
"query_id": 34,
"name": "dea24voltb",
"unit": "V",
"descr": "Backup DEA 24V DC"
},
{
"query_id": 35,
"name": "deam15voltb",
"unit": "V",
"descr": "Backup DEA -15.5V DC"
},
{
"query_id": 36,
"name": "deap15voltb",
"unit": "V",
"descr": "Backup DEA +15.5V DC"
},
{
"query_id": 37,
"name": "deam6voltb",
"unit": "V",
"descr": "Backup DEA -6V DC"
},
{
"query_id": 38,
"name": "deap6voltb",
"unit": "V",
"descr": "Backup DEA +6V DC"
},
{
"query_id": 39,
"name": "rad_pcb_b",
"descr": "Relative Dose Rad. Monitor Side B"
},
{
"query_id": 40,
"name": "gnd_2",
"unit": "V",
"descr": "Ground"
}
]
return out
def pyfits_to_recarray(dat):
dtypes = []
colnames = dat.dtype.names
for colname in colnames:
col = dat.field(colname)
if col.dtype.isnative:
dtype = (colname, col.dtype)
else:
dtype = (colname, col.dtype.type)
if len(col.shape) > 1:
dtype = dtype + tuple(col.shape[1:])
dtypes.append(dtype)
# Now define a new recarray and copy the original data
# Note: could use numpy.empty to generate a structured array.
out = numpy.recarray(len(dat), dtype=dtypes)
for colname in colnames:
out[colname][:] = dat.field(colname)
return out
```
#### File: engarchive/derived/eps.py
```python
from . import base
class DerivedParameterEps(base.DerivedParameter):
content_root = 'eps'
# --------------------------------------------
class DP_BATT1_TAVE(DerivedParameterEps):
"""Battery 1 Average Temperature. Derived from average of all three battery temperature sensors.
Telemetry 16x / MF
"""
rootparams = ['TB1T1', 'TB1T2', 'TB1T3']
time_step = 2.05
def calc(self, data):
BATT1_TAVE = (data['TB1T1'].vals + data['TB1T2'].vals +
data['TB1T3'].vals) / 3
return BATT1_TAVE
# --------------------------------------------
class DP_BATT2_TAVE(DerivedParameterEps):
"""Battery 2 Average Temperature. Derived from average of all three battery temperature sensors.
Telemetry 16x / MF
"""
rootparams = ['TB2T1', 'TB2T2', 'TB2T3']
time_step = 2.05
def calc(self, data):
BATT2_TAVE = (data['TB2T1'].vals + data['TB2T2'].vals +
data['TB2T3'].vals) / 3
return BATT2_TAVE
# --------------------------------------------
class DP_BATT3_TAVE(DerivedParameterEps):
"""Battery 3 Average Temperature. Derived from average of all three battery temperature sensors.
Telemetry 16x / MF
"""
rootparams = ['TB3T1', 'TB3T2', 'TB3T3']
time_step = 2.05
def calc(self, data):
BATT3_TAVE = (data['TB3T1'].vals + data['TB3T2'].vals +
data['TB3T3'].vals) / 3
return BATT3_TAVE
# --------------------------------------------
class DP_EPOWER1(DerivedParameterEps):
"""Bus Power = ELBI_LOW * ELBV
Telemetry 8x / MF
"""
rootparams = ['ELBI_LOW', 'ELBV']
time_step = 4.1
def calc(self, data):
EPOWER1 = (data['ELBI_LOW'].vals * data['ELBV'].vals)
return EPOWER1
# --------------------------------------------
class DP_MYSAPOW(DerivedParameterEps):
"""-Y Solar Array Power = ESAMYI * ELBV
Telemetry 8x / MF
"""
rootparams = ['ESAMYI', 'ELBV']
time_step = 4.1
def calc(self, data):
MYSAPOW = (data['ESAMYI'].vals * data['ELBV'].vals)
return MYSAPOW
# --------------------------------------------
class DP_PYSAPOW(DerivedParameterEps):
"""+Y Solar Array Power = ESAPYI * ELBV
Telemetry 8x / MF
"""
rootparams = ['ESAPYI', 'ELBV']
time_step = 4.1
def calc(self, data):
PYSAPOW = (data['ESAPYI'].vals * data['ELBV'].vals)
return PYSAPOW
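# --------------------------------------------
# Illustrative sketch only (not a real mission parameter): a new EPS derived
# parameter follows the same pattern as the classes above -- list the source
# MSIDs in ``rootparams``, set ``time_step``, and combine the fetched ``.vals``
# arrays in ``calc``.
#   class DP_EXAMPLE_POWER(DerivedParameterEps):
#       """Hypothetical example: bus power = ELBI_LOW * ELBV (same as DP_EPOWER1)."""
#       rootparams = ['ELBI_LOW', 'ELBV']
#       time_step = 4.1
#       def calc(self, data):
#           return data['ELBI_LOW'].vals * data['ELBV'].vals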
```
#### File: Ska/engarchive/fix_bad_values.py
```python
import argparse
import itertools
import contextlib
import numpy as np
import tables
import scipy.stats.mstats
import pyyaks.context
import pyyaks.logger
from Ska.engarchive import fetch
import Ska.engarchive.file_defs as file_defs
from Chandra.Time import DateTime
ft = fetch.ft
opt = None
msid_files = None
logger = None
@contextlib.contextmanager
def _set_msid_files_basedir(datestart):
"""
If datestart is before 2000:001:00:00:00 then use the 1999 archive files.
"""
try:
cache_basedir = msid_files.basedir
if datestart < fetch.DATE2000_LO:
# Note: don't use os.path.join because ENG_ARCHIVE and basedir must
# use linux '/' convention but this might be running on Windows.
msid_files.basedir = msid_files.basedir + '/1999'
yield
finally:
msid_files.basedir = cache_basedir
def get_opt():
parser = argparse.ArgumentParser(description='Fix bad values in eng archive')
parser.add_argument('--msid',
type=str,
help='MSID name')
parser.add_argument('--start',
help='Start time of bad values')
parser.add_argument('--stop',
help='Stop time of bad values')
parser.add_argument('--value',
help='Update with <value> instead of setting as bad')
parser.add_argument("--run",
action="store_true",
help="Actually modify files (dry run is the default)")
parser.add_argument("--data-root",
default=".",
help="Engineering archive root directory for MSID and arch files")
args = parser.parse_args()
return args
def calc_stats_vals(msid, rows, indexes, interval):
quantiles = (1, 5, 16, 50, 84, 95, 99)
cols_stats = ('index', 'n', 'val')
n_out = len(rows) - 1
msid_dtype = msid.vals.dtype
msid_is_numeric = not msid_dtype.name.startswith('string')
# Predeclare numpy arrays of correct type and sufficient size for accumulating results.
out = dict(index=np.ndarray((n_out,), dtype=np.int32),
n=np.ndarray((n_out,), dtype=np.int32),
val=np.ndarray((n_out,), dtype=msid_dtype),
)
if msid_is_numeric:
cols_stats += ('min', 'max', 'mean')
out.update(dict(min=np.ndarray((n_out,), dtype=msid_dtype),
max=np.ndarray((n_out,), dtype=msid_dtype),
mean=np.ndarray((n_out,), dtype=np.float32),))
if interval == 'daily':
cols_stats += ('std',) + tuple('p%02d' % x for x in quantiles)
out['std'] = np.ndarray((n_out,), dtype=msid_dtype)
out.update(('p%02d' % x, np.ndarray((n_out,), dtype=msid_dtype)) for x in quantiles)
i = 0
for row0, row1, index in itertools.izip(rows[:-1], rows[1:], indexes[:-1]):
vals = msid.vals[row0:row1]
times = msid.times[row0:row1]
n_vals = len(vals)
if n_vals > 0:
out['index'][i] = index
out['n'][i] = n_vals
out['val'][i] = vals[n_vals // 2]
if msid_is_numeric:
if n_vals <= 2:
dts = np.ones(n_vals, dtype=np.float64)
else:
dts = np.empty(n_vals, dtype=np.float64)
dts[0] = times[1] - times[0]
dts[-1] = times[-1] - times[-2]
dts[1:-1] = ((times[1:-1] - times[:-2])
+ (times[2:] - times[1:-1])) / 2.0
negs = dts < 0.0
if np.any(negs):
times_dts = [(DateTime(t).date, dt)
for t, dt in zip(times[negs], dts[negs])]
logger.warning('WARNING - negative dts in {} at {}'
.format(msid.MSID, times_dts))
# Clip to range 0.001 to 300.0. The low bound is just there
# for data with identical time stamps. This shouldn't happen
# but in practice might. The 300.0 represents 5 minutes and
# is the largest normal time interval. Data near large gaps
# will get a weight of 5 mins.
dts.clip(0.001, 300.0, out=dts)
sum_dts = np.sum(dts)
out['min'][i] = np.min(vals)
out['max'][i] = np.max(vals)
out['mean'][i] = np.sum(dts * vals) / sum_dts
if interval == 'daily':
# biased weighted estimator of variance (N should be big enough)
# http://en.wikipedia.org/wiki/Mean_square_weighted_deviation
sigma_sq = np.sum(dts * (vals - out['mean'][i]) ** 2) / sum_dts
out['std'][i] = np.sqrt(sigma_sq)
quant_vals = scipy.stats.mstats.mquantiles(vals, np.array(quantiles) / 100.0)
for quant_val, quantile in zip(quant_vals, quantiles):
out['p%02d' % quantile][i] = quant_val
i += 1
return np.rec.fromarrays([out[x][:i] for x in cols_stats], names=cols_stats)
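# Illustrative arithmetic for the time-weighting above (toy values, not archive
# data): for times [0, 10, 40] and vals [1.0, 2.0, 4.0] the weights dts come out
# as [10, 20, 30] (end samples use their one-sided gap, interior samples the mean
# of the two neighbouring gaps), so the weighted mean is
#   (10*1.0 + 20*2.0 + 30*4.0) / (10 + 20 + 30) = 170 / 60 ~= 2.83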
def fix_stats_h5(msid, tstart, tstop, interval):
dt = {'5min': 328,
'daily': 86400}[interval]
ft['msid'] = msid
ft['interval'] = interval
datestart = DateTime(tstart).date
with _set_msid_files_basedir(datestart):
stats_file = msid_files['stats'].abs
logger.info('Updating stats file {}'.format(stats_file))
index0 = int(tstart // dt)
index1 = int(tstop // dt) + 1
indexes = np.arange(index0, index1 + 1, dtype=np.int32)
times = indexes * dt
logger.info('Indexes = {}:{}'.format(index0, index1))
logger.info('Fetching {} data between {} to {}'.format(msid, DateTime(times[0] - 500).date,
DateTime(times[-1] + 500).date))
dat = fetch.Msid(msid, times[0] - 500, times[-1] + 500)
# Check within each stat interval?
if len(dat.times) == 0:
logger.info('Skipping: No values within interval {} to {}'
.format(DateTime(times[0] - 500).date,
DateTime(times[-1] + 500).date))
return
rows = np.searchsorted(dat.times, times)
vals_stats = calc_stats_vals(dat, rows, indexes, interval)
try:
h5 = tables.openFile(stats_file, 'a')
table = h5.root.data
row0, row1 = np.searchsorted(table.col('index'), [index0, index1])
for row_idx, vals_stat in itertools.izip(range(row0, row1), vals_stats):
if row1 - row0 < 50 or row_idx == row0 or row_idx == row1 - 1:
logger.info('Row index = {}'.format(row_idx))
logger.info(' ORIGINAL: %s', table[row_idx])
logger.info(' UPDATED : %s', vals_stat)
if opt.run:
table[row_idx] = tuple(vals_stat)
finally:
h5.close()
logger.info('')
def fix_msid_h5(msid, tstart, tstop):
"""
Fix the msid full-resolution HDF5 data file
"""
logger.info('Fixing MSID {} h5 file'.format(msid))
row_slice = fetch.get_interval(ft['content'].val, tstart, tstop)
# Load the time values and find indexes corresponding to start / stop times
ft['msid'] = 'TIME'
filename = msid_files['data'].abs
logger.info('Reading TIME file {}'.format(filename))
h5 = tables.openFile(filename)
times = h5.root.data[row_slice]
h5.close()
# Index values that need to be fixed are those within the specified time range, offset by
# the beginning index of the row slice.
fix_idxs = np.flatnonzero((tstart <= times) & (times <= tstop)) + row_slice.start
# Open the msid HDF5 data file and set the corresponding quality flags to True (=> bad)
ft['msid'] = msid
filename = msid_files['msid'].abs
logger.info('Reading msid file {}'.format(filename))
h5 = tables.openFile(filename, 'a')
try:
if opt.value is not None:
# Set data to <value> over the specified time range
i0, i1 = fix_idxs[0], fix_idxs[-1] + 1
logger.info('Changing {}.data[{}:{}] to {}'
.format(msid, i0, i1, opt.value))
if opt.run:
h5.root.data[i0:i1] = opt.value
else:
for idx in fix_idxs:
quality = h5.root.quality[idx]
if quality:
logger.info('Skipping idx={} because quality is already True'.format(idx))
continue
if len(fix_idxs) < 100 or idx == fix_idxs[0] or idx == fix_idxs[-1]:
logger.info('{}.data[{}] = {}'.format(msid, idx, h5.root.data[idx]))
logger.info('Changing {}.quality[{}] from {} to True'
.format(msid, idx, quality))
if opt.run:
h5.root.quality[idx] = True
finally:
h5.close()
logger.info('')
def main():
global opt
global msid_files
global logger
opt = get_opt()
# Set up infrastructure to directly access HDF5 files
msid_files = pyyaks.context.ContextDict('msid_files',
basedir=(opt.data_root or file_defs.msid_root))
msid_files.update(file_defs.msid_files)
# Set up fetch so it reads from opt.data_root
fetch.msid_files.basedir = opt.data_root
# Set up logging
loglevel = pyyaks.logger.INFO
logger = pyyaks.logger.get_logger(name='fix_bad_values', level=loglevel,
format="%(message)s")
logger.info('** If something gets corrupted then there is the NetApp snapshot for recovery **')
logger.info('')
if not opt.run:
logger.info('** DRY RUN **')
logger.info('')
msid = opt.msid.upper()
ft['content'] = fetch.content[msid]
# Get the relevant row slice covering the requested time span for this content type
tstart = DateTime(opt.start).secs - 0.001
stop = DateTime(opt.stop or opt.start)
tstop = stop.secs + 0.001
# First fix the HDF5 file with full resolution MSID data
# Need to potentially set basedir for 1999 data.
with _set_msid_files_basedir(DateTime(tstart).date):
fix_msid_h5(msid, tstart, tstop)
# Now fix stats files
fix_stats_h5(msid, tstart, tstop, '5min')
fix_stats_h5(msid, tstart, tstop, 'daily')
if __name__ == '__main__':
main()
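# Illustrative invocation (MSID, dates and path are placeholders, not a real event):
#   python fix_bad_values.py --msid=TEPHIN --start=2012:150:01:00:00 \
#       --stop=2012:150:02:00:00 --data-root=/path/to/eng_archive --run
# Without --run the script only reports what it would change (dry run by default);
# adding --value=<val> replaces the data instead of flagging it as bad quality.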
```
#### File: Ska/engarchive/lazy.py
```python
class LazyDict(dict):
def __init__(self, load_func, *args, **kwargs):
self._load_func = load_func
self._args = args
self._kwargs = kwargs
self._loaded = False
super().__init__()
def load(self):
if not self._loaded:
self.update(self._load_func(*self._args, **self._kwargs))
self._loaded = True
# Clear these out so pickling always works (pickling a func can fail)
self._load_func = None
self._args = None
self._kwargs = None
def __getitem__(self, item):
try:
return super().__getitem__(item)
except KeyError:
self.load()
return super().__getitem__(item)
def __contains__(self, item):
self.load()
return super().__contains__(item)
def keys(self):
self.load()
return super().keys()
def values(self):
self.load()
return super().values()
def items(self):
self.load()
return super().items()
def __len__(self):
self.load()
return super().__len__()
def get(self, key, default=None):
self.load()
return super().get(key, default)
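# Illustrative usage (the lambda below is a toy stand-in for an expensive loader):
#   >>> d = LazyDict(lambda: {'a': 1, 'b': 2})
#   >>> d['a']   # first access triggers the load, then it behaves like a dict
#   1
#   >>> len(d)
#   2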
```
#### File: engarchive/tests/test_intervals.py
```python
import numpy as np
import pytest
from Chandra.Time import DateTime
from .. import fetch, utils
try:
import kadi.events
HAS_EVENTS = True
except ImportError:
HAS_EVENTS = False
# Use dwells for some interval filter tests
#
# In [2]: print events.dwells.filter('2012:001', '2012:002')
# <Dwell: start=2012:001:16:07:27.515 dur=1834>
# <Dwell: start=2012:001:17:27:28.615 dur=3182>
# <Dwell: start=2012:001:18:41:12.515 dur=29295>
# <Dwell: start=2012:002:02:58:11.817 dur=387>
# <Dwell: start=2012:002:03:23:45.217 dur=3835>
# <Dwell: start=2012:002:04:59:00.617 dur=18143>
# [('2012:001:12:00:00.000', '2012:001:15:50:03.040'),
# ('2012:001:16:07:27.515', '2012:001:16:38:01.240'),
# ('2012:001:17:27:28.615', '2012:001:18:20:30.215'),
# ('2012:001:18:41:12.515', '2012:002:02:49:27.017'),
# ('2012:002:02:58:11.817', '2012:002:03:04:39.267'),
# ('2012:002:03:23:45.217', '2012:002:04:27:39.742'),
# ('2012:002:04:59:00.617', '2012:002:10:01:23.118'),
# ('2012:002:10:38:21.218', '2012:002:12:00:00.000')]
@pytest.mark.skipif("not HAS_EVENTS")
def test_fetch_MSID_intervals():
"""
Show that fetching an MSID with start=<some intervals> is exactly the same as
fetching over the time range and selecting <some intervals>.
"""
# Interval with a bad quality point around 2012:175:02:10:021.981
start, stop = '2012:175:02:00:00', '2012:175:03:00:00'
for filter_bad in (True, False):
for stat in (None, '5min'):
dat = fetch.MSID('tephin', start, stop, filter_bad=filter_bad, stat=stat)
dat.select_intervals(kadi.events.dwells)
dat2 = fetch.MSID('tephin', kadi.events.dwells.intervals(start, stop),
filter_bad=filter_bad, stat=stat)
assert np.all(dat.bads == dat2.bads)
assert dat.colnames == dat2.colnames
for attr in dat.colnames:
assert np.all(getattr(dat, attr) == getattr(dat2, attr))
@pytest.mark.skipif("not HAS_EVENTS")
def test_fetch_MSIDset_intervals():
"""
Show that fetching an MSIDset with start=<some intervals> is exactly the same as
fetching over the time range and selecting <some intervals>.
"""
# Interval with a bad quality point around 2012:175:02:10:021.981
start, stop = '2012:175:02:00:00', '2012:175:03:00:00'
msids = ['tephin', 'aopcadmd']
for filter_bad in (True, False):
for stat in (None, '5min'):
dat = fetch.MSIDset(msids, start, stop, filter_bad=filter_bad, stat=stat)
for msid in msids:
dat[msid].select_intervals(kadi.events.dwells)
dat2 = fetch.MSIDset(msids, kadi.events.dwells.intervals(start, stop),
filter_bad=filter_bad, stat=stat)
for msid in msids:
dm = dat[msid]
dm2 = dat2[msid]
assert np.all(dm.bads == dm2.bads)
assert dm.colnames == dm2.colnames
for attr in dm.colnames:
assert np.all(getattr(dm, attr) == getattr(dm2, attr))
@pytest.mark.skipif("not HAS_EVENTS")
def test_select_remove_interval():
"""
Test basic select and remove intervals functionality. Do this with two
inputs: (1) a QueryEvent object, (2) a table with 'datestart' and 'datestop' cols.
The latter is obtained from kadi.events.dwells.intervals, but this is the same format
as the output from logical_intervals().
"""
start, stop = '2012:002:02:00:00', '2012:002:04:00:00'
dat = fetch.MSID('tephin', start, stop)
intervals = kadi.events.dwells.intervals(start, stop)
for filt in (kadi.events.dwells, intervals):
dat_r = dat.remove_intervals(filt, copy=True)
dat_s = dat.select_intervals(filt, copy=True)
assert len(dat) == len(dat_r) + len(dat_s)
assert len(dat) == 219
assert len(dat_r) == 51
assert len(dat_s) == 168
dates_r = DateTime(dat_r.times).date
assert dates_r[0] == '2012:002:02:49:39.317' # First after '2012:002:02:49:27.017'
assert dates_r[15] == '2012:002:02:57:51.317' # Gap '2012:002:02:58:11.817'
assert dates_r[16] == '2012:002:03:04:57.717' # to '2012:002:03:04:39.267'
assert dates_r[50] == '2012:002:03:23:32.917' # last before '2012:002:03:23:45.217'
assert set(dat_r.times).isdisjoint(dat_s.times)
@pytest.mark.skipif("not HAS_EVENTS")
def test_remove_subclassed_eventquery_interval():
"""
Test remove intervals functionality with an EventQuery subclass
(LttBadsEventQuery).
"""
start, stop = '2010:002:02:00:00', '2013:002:04:00:00'
dat = fetch.MSID('tephin', start, stop, stat='daily')
assert len(dat) == 1096
dat.remove_intervals(kadi.events.ltt_bads)
assert len(dat) == 1026
@pytest.mark.skipif("not HAS_EVENTS")
def test_remove_intervals_stat():
start, stop = '2012:002:12:00:00', '2012:003:12:00:00'
for stat in (None, '5min'):
intervals = kadi.events.dwells.intervals(start, stop)
for filt in (kadi.events.dwells, intervals):
dat = fetch.MSID('tephin', start, stop)
dat.remove_intervals(filt)
attrs = [attr for attr in ('vals', 'mins', 'maxes', 'means',
'p01s', 'p05s', 'p16s', 'p50s',
'p84s', 'p95s', 'p99s', 'midvals')
if hasattr(dat, attr)]
for attr in attrs:
assert len(dat) == len(getattr(dat, attr))
@pytest.mark.skipif("not HAS_EVENTS")
def test_select_remove_all_interval():
"""
Select or remove all data points via an event that entirely spans the MSID data.
"""
dat = fetch.Msid('tephin', '2012:001:20:00:00', '2012:001:21:00:00')
dat_r = dat.remove_intervals(kadi.events.dwells, copy=True)
dat_s = dat.select_intervals(kadi.events.dwells, copy=True)
assert len(dat) == 110
assert len(dat_r) == 0
assert len(dat_s) == 110
def test_msid_logical_intervals():
"""
Test MSID.logical_intervals()
"""
dat = fetch.Msid('aopcadmd', '2013:001:00:00:00', '2013:001:02:00:00')
# default complete_intervals=True
intervals = dat.logical_intervals('==', 'NPNT')
assert len(intervals) == 1
assert np.all(intervals['datestart'] == ['2013:001:01:03:37.032'])
assert np.all(intervals['datestop'] == ['2013:001:01:26:13.107'])
# Now with incomplete intervals on each end
intervals = dat.logical_intervals('==', 'NPNT', complete_intervals=False)
assert len(intervals) == 3
assert np.all(intervals['datestart'] == ['2012:366:23:59:59.932',
'2013:001:01:03:37.032',
'2013:001:01:59:06.233'])
assert np.all(intervals['datestop'] == ['2013:001:00:56:07.057',
'2013:001:01:26:13.107',
'2013:001:01:59:59.533'])
def test_util_logical_intervals():
"""
Test utils.logical_intervals()
"""
dat = fetch.Msidset(['3tscmove', 'aorwbias', 'coradmen'],
'2012:190:12:00:00', '2012:205:12:00:00')
dat.interpolate(32.8) # Sample MSIDs onto 32.8 second intervals (like 3TSCMOVE)
scs107 = ((dat['3tscmove'].vals == 'T')
& (dat['aorwbias'].vals == 'DISA')
& (dat['coradmen'].vals == 'DISA'))
scs107s = utils.logical_intervals(dat.times, scs107)
scs107s['duration'].format = '{:.1f}'
assert (scs107s['datestart', 'datestop', 'duration'].pformat() ==
[' datestart datestop duration',
'--------------------- --------------------- --------',
'2012:194:20:00:48.052 2012:194:20:04:37.652 229.6',
'2012:196:21:07:52.852 2012:196:21:11:42.452 229.6',
'2012:201:11:46:03.252 2012:201:11:49:52.852 229.6'])
def test_util_logical_intervals_gap():
"""
Test the max_gap functionality
"""
times = np.array([1, 2, 3, 200, 201, 202])
bools = np.ones(len(times), dtype=bool)
out = utils.logical_intervals(times, bools, complete_intervals=False, max_gap=10)
assert np.allclose(out['tstart'], [0.5, 197.5])
assert np.allclose(out['tstop'], [5.5, 202.5])
out = utils.logical_intervals(times, bools, complete_intervals=False)
assert np.allclose(out['tstart'], [0.5])
assert np.allclose(out['tstop'], [202.5])
def test_msid_state_intervals():
"""
Test MSID.state_intervals() - basic aliveness and regression test
"""
expected = [' datestart datestop val ',
'--------------------- --------------------- ----',
'2012:366:23:59:59.932 2013:001:00:56:07.057 NPNT',
'2013:001:00:56:07.057 2013:001:01:03:37.032 NMAN',
'2013:001:01:03:37.032 2013:001:01:26:13.107 NPNT',
'2013:001:01:26:13.107 2013:001:01:59:06.233 NMAN',
'2013:001:01:59:06.233 2013:001:01:59:59.533 NPNT']
dat = fetch.Msid('aopcadmd', '2013:001:00:00:00', '2013:001:02:00:00')
intervals = dat.state_intervals()['datestart', 'datestop', 'val']
assert intervals.pformat() == expected
intervals = utils.state_intervals(dat.times, dat.vals)['datestart', 'datestop', 'val']
assert intervals.pformat() == expected
```
#### File: engarchive/tests/test_sync.py
```python
import sys
import os
import pickle
import shutil
from pathlib import Path
import pytest
import numpy as np
import Ska.DBI
import tables
from Chandra.Time import DateTime
from .. import fetch
from .. import update_client_archive, update_server_sync
from ..utils import STATS_DT, set_fetch_basedir
pytestmark = pytest.mark.skipif(sys.maxsize <= 2 ** 32, reason="tests for 64-bit only")
# Covers safe mode and IRU swap activities around 2018:283. This is a time
# with rarely-seen telemetry.
START, STOP = '2018:281', '2018:293'
# Content types and associated MSIDs that will be tested
CONTENTS = {'acis4eng': ['1WRAT'], # [float]
'dp_pcad32': ['DP_SYS_MOM_TOT'], # Derived parameter [float]
'orbitephem0': ['ORBITEPHEM0_X'], # Heavily overlapped [float]
'cpe1eng': ['6GYRCT1', '6RATE1'], # Safe mode, [int, float]
'pcad13eng': ['ASPAGYC2A'], # PCAD subformat and rarely sampled [int]
'sim_mrg': ['3TSCMOVE', '3TSCPOS'], # [str, float]
'simcoor': ['SIM_Z_MOVED'], # [bool]
}
LOG_LEVEL = 50 # quiet
def make_linked_local_archive(outdir, content, msids):
"""
Create a hard-link version of archive containing only
necessary files. In this way the sync repo creation is done
for only required MSIDs.
:param content: content type
:param msids: list of MSIDs in that content type
:param outdir: temporary output directory
:return: None
"""
basedir_in = Path(fetch.msid_files.basedir) / 'data'
# Was: Path(os.environ['SKA']) / 'data' / 'eng_archive' / 'data' but that
# doesn't respect possible ENG_ARCHIVE override
basedir_out = Path(outdir) / 'data'
if basedir_out.exists():
shutil.rmtree(basedir_out)
(basedir_out / content).mkdir(parents=True)
(basedir_out / content / '5min').mkdir()
(basedir_out / content / 'daily').mkdir()
for file in 'archfiles.db3', 'colnames.pickle', 'TIME.h5':
shutil.copy(basedir_in / content / file,
basedir_out / content / file)
for msid in msids:
file = f'{msid}.h5'
try:
os.link(basedir_in / content / file,
basedir_out / content / file)
except OSError:
os.symlink(basedir_in / content / file,
basedir_out / content / file)
try:
os.link(basedir_in / content / '5min' / file,
basedir_out / content / '5min' / file)
except OSError:
os.symlink(basedir_in / content / '5min' / file,
basedir_out / content / '5min' / file)
try:
os.link(basedir_in / content / 'daily' / file,
basedir_out / content / 'daily' / file)
except OSError:
os.symlink(basedir_in / content / 'daily' / file,
basedir_out / content / 'daily' / file)
def make_sync_repo(outdir, content):
"""Create a new sync repository with data root ``outdir`` (which is
assumed to be clean).
This also assumes the correct fetch.msid_files.basedir is set to point at
a copy of the cheta archfile that contains requisite data.
"""
date_start = (DateTime(START) - 8).date
date_stop = (DateTime(STOP) + 2).date
print(f'Updating server sync for {content}')
args = [f'--sync-root={outdir}',
f'--date-start={date_start}',
f'--date-stop={date_stop}',
f'--log-level={LOG_LEVEL}',
f'--content={content}']
update_server_sync.main(args)
def make_stub_archfiles(date, basedir_ref, basedir_stub):
archfiles_def = (Path(fetch.__file__).parent / 'archfiles_def.sql').read_text()
with set_fetch_basedir(basedir_ref):
filename = fetch.msid_files['archfiles'].abs
with Ska.DBI.DBI(dbi='sqlite', server=filename) as db:
filetime = DateTime(date).secs
# Last archfile that starts before date.
last_row = db.fetchone(f'select * from archfiles '
f'where filetime < {filetime} '
f'order by filetime desc'
)
with set_fetch_basedir(basedir_stub):
filename = fetch.msid_files['archfiles'].abs
if os.path.exists(filename):
os.unlink(filename)
with Ska.DBI.DBI(dbi='sqlite', server=filename, autocommit=False) as db:
db.execute(archfiles_def)
db.insert(last_row, 'archfiles')
db.commit()
return last_row['rowstart'], last_row['rowstop']
def make_stub_stats_col(msid, stat, row1, basedir_ref, basedir_stub, date_stop):
# Max allowed tstop.
tstop = DateTime(date_stop).secs
with set_fetch_basedir(basedir_ref):
fetch.ft['msid'] = 'TIME'
file_time = fetch.msid_files['msid'].abs
fetch.ft['msid'] = msid
fetch.ft['interval'] = stat
file_stats_ref = fetch.msid_files['stats'].abs
if not Path(file_stats_ref).exists():
return
with set_fetch_basedir(basedir_stub):
file_stats_stub = fetch.msid_files['stats'].abs
with tables.open_file(file_time, 'r') as h5:
# Pad out tstop by DT in order to be sure that all records before a long
# gap get found. This comes into play for pcad13eng which is in PCAD subformat only.
# In addition, do not select data beyond tstop (date_stop), which is the
# stub file end time. This is mostly for ephemeris data, where one archfile covers
# many weeks (6?) of data.
tstop = min(h5.root.data[row1 - 1] + STATS_DT[stat], tstop)
# Need at least 10 days of real values in stub file to start sync
tstart = tstop - 10 * 86400
with tables.open_file(file_stats_ref, 'r') as h5:
tbl = h5.root.data
times = (tbl.col('index') + 0.5) * STATS_DT[stat]
stat_row0, stat_row1 = np.searchsorted(times, [tstart, tstop])
# Back up a bit to ensure getting something since an MSID that is not
# typically sampled (because of subformat for instance) may show up
# in full data with quality=True everywhere
# and thus have no stats samples.
stat_row0 -= 5
tbl_rows = np.zeros(stat_row1, dtype=tbl.dtype)
tbl_rows[stat_row0:stat_row1] = tbl[stat_row0:stat_row1]
# returns np.ndarray (structured array)
Path(file_stats_stub).parent.mkdir(exist_ok=True, parents=True)
filters = tables.Filters(complevel=5, complib='zlib')
with tables.open_file(file_stats_stub, mode='a', filters=filters) as stats:
stats.create_table(stats.root, 'data', tbl_rows,
f'{stat} sampling', expectedrows=1e5)
stats.root.data.flush()
def make_stub_h5_col(msid, row0, row1, basedir_ref, basedir_stub):
fetch.ft['msid'] = msid
with set_fetch_basedir(basedir_ref):
file_ref = fetch.msid_files['data'].abs
if not Path(file_ref).exists():
return
with tables.open_file(file_ref, 'r') as h5:
data_stub = h5.root.data[row0:row1]
qual_stub = h5.root.quality[row0:row1]
n_rows = len(h5.root.data)
data_fill = np.zeros(row0, dtype=data_stub.dtype)
qual_fill = np.ones(row0, dtype=qual_stub.dtype) # True => bad
with set_fetch_basedir(basedir_stub):
file_stub = fetch.msid_files['data'].abs
if os.path.exists(file_stub):
os.unlink(file_stub)
filters = tables.Filters(complevel=5, complib='zlib')
with tables.open_file(file_stub, mode='w', filters=filters) as h5:
h5shape = (0,) + data_stub.shape[1:]
h5type = tables.Atom.from_dtype(data_stub.dtype)
h5.create_earray(h5.root, 'data', h5type, h5shape, title=msid,
expectedrows=n_rows)
h5.create_earray(h5.root, 'quality', tables.BoolAtom(), (0,), title='Quality',
expectedrows=n_rows)
with tables.open_file(file_stub, mode='a') as h5:
h5.root.data.append(data_fill)
h5.root.data.append(data_stub)
h5.root.quality.append(qual_fill)
h5.root.quality.append(qual_stub)
def make_stub_colnames(basedir_ref, basedir_stub):
"""
Copy colnames.pickle to the stub dir. Also get the list of MSIDs that are
actually in the reference archive.
"""
with set_fetch_basedir(basedir_ref):
file_ref = fetch.msid_files['colnames'].abs
with set_fetch_basedir(basedir_stub):
file_stub = fetch.msid_files['colnames'].abs
shutil.copy(file_ref, file_stub)
with open(file_stub, 'rb') as fh:
msids = pickle.load(fh)
return msids
def make_stub_content(content=None, date=None,
basedir_ref=None, basedir_stub=None,
msids=None, msids_5min=None, msids_daily=None):
# If no content then require msids has been passed
if content is None:
content = fetch.content[msids[0].upper()]
for msid in msids:
assert fetch.content[msid.upper()] == content
print(f'Making stub archive for {content}')
fetch.ft['content'] = content
with set_fetch_basedir(basedir_stub):
dirname = Path(fetch.msid_files['contentdir'].abs)
if dirname.exists():
shutil.rmtree(dirname)
dirname.mkdir(parents=True)
row0, row1 = make_stub_archfiles(date, basedir_ref, basedir_stub)
msids_ref = make_stub_colnames(basedir_ref, basedir_stub)
if msids is None:
msids = msids_ref
msids = msids.copy()
if 'TIME' not in msids:
msids.append('TIME')
if msids_5min is None:
msids_5min = msids
if msids_daily is None:
msids_daily = msids
for msid in msids:
make_stub_h5_col(msid, row0, row1, basedir_ref, basedir_stub)
for msid in msids_5min:
make_stub_stats_col(msid, '5min', row1, basedir_ref, basedir_stub, date)
for msid in msids_daily:
make_stub_stats_col(msid, 'daily', row1, basedir_ref, basedir_stub, date)
def check_content(outdir, content, msids=None):
outdir = Path(outdir)
if outdir.exists():
shutil.rmtree(outdir)
print()
print(f'Test dir: {outdir}')
if msids is None:
msids = CONTENTS[content]
basedir_ref = outdir / 'orig'
basedir_test = outdir / 'test'
basedir_ref.mkdir(parents=True)
basedir_test.mkdir(parents=True)
# Make a local hard-link copy of select parts (content and msids) of the
# "official" cheta archive data (nominally $SKA/data/engarchive) in basedir_ref.
# This hard-link repo servers as the source for making the sync repo so this
# is faster/lighter.
make_linked_local_archive(basedir_ref, content, msids)
# Make the sync repo, using basedir_ref as input data and outputting the
# sync/ dir to basedir_test.
with set_fetch_basedir(basedir_ref):
make_sync_repo(basedir_test, content)
# Make stubs of archive content, meaning filled with mostly zeros until about
# before before test start date, then some real data to get the sync'ing going.
make_stub_content(content,
date=DateTime(START) - 2,
basedir_stub=basedir_test,
basedir_ref=basedir_ref,
msids=msids)
date_stop = (DateTime(STOP) + 2).date
print(f'Updating client archive {content}')
with set_fetch_basedir(basedir_test):
update_client_archive.main([f'--content={content}',
f'--log-level={LOG_LEVEL}',
f'--date-stop={date_stop}',
f'--data-root={basedir_test}',
f'--sync-root={basedir_test}'])
print(f'Checking {content} {msids}')
for stat in None, '5min', 'daily':
for msid in msids:
fetch.times_cache['key'] = None
with set_fetch_basedir(basedir_test):
dat_stub = fetch.Msid(msid, START, STOP, stat=stat)
fetch.times_cache['key'] = None
with set_fetch_basedir(basedir_ref):
dat_orig = fetch.Msid(msid, START, STOP, stat=stat)
for attr in dat_orig.colnames:
assert np.all(getattr(dat_stub, attr) == getattr(dat_orig, attr))
@pytest.mark.parametrize('content', list(CONTENTS))
def test_sync(tmpdir, content):
check_content(tmpdir, content)
# Clean up if test successful (otherwise check_content raises)
if Path(tmpdir).exists():
shutil.rmtree(tmpdir)
``` |
{
"source": "jmoehler/CityDistance",
"score": 4
} |
#### File: jmoehler/CityDistance/distanceCalc.py
```python
from math import cos, acos, pi, sqrt, sin
class City:
def __init__(self, name, lat, lon, temp):
self.name = name
self.lat = lat
self.lon = lon
self.temp = temp
def describe(self):
print("Die Koordinaten von %s sind %f Lat und %f Lon. Die Temperatur beträgt %.1f Grad Celcius." %(self.name, self.lat, self.lon, self.temp))
def info(self):
print("Die Koordinaten von %s sind %.1f Lat und %.1f Lon." %(self.name, self.lat, self.lon))
def __str__(self):
return "Die Koordinaten von %s sind %.1f Lat und %.1f Lon." %(self.name, self.lat, self.lon)
def __repr__(self):
return self.__str__()
def diffGamma(alpha, beta):
    # difference between angle alpha and angle beta
dGamma = alpha - beta
dGammaRad = pi / 180 * dGamma
    # Earth radius in km
r = 6378
    # convert the angular difference to a chord length in km
return r*sqrt(2*(1-cos(dGammaRad)))
def distanceBase(dilat, dilon):
    # compute the total length
return sqrt(dilon**2 + dilat**2)
def distance(city1, city2):
dilat = diffGamma(city1.lat, city2.lat)
dilon = diffGamma(city1.lon, city2.lon)
return distanceBase(dilat, dilon)
def tempDiff(city1, city2):
return abs(city1.temp - city2.temp)
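# Illustrative usage (coordinates and temperatures are rough values, for
# demonstration only):
#   berlin = City("Berlin", 52.5, 13.4, 18.0)
#   munich = City("Munich", 48.1, 11.6, 21.0)
#   distance(berlin, munich)   # chord-length estimate in km (roughly 530 here)
#   tempDiff(berlin, munich)   # -> 3.0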
```
#### File: jmoehler/CityDistance/distanceDict.py
```python
from distance import distanceCalc
from City import City
distDict = {}
def calcOnce(alleCities):
leng = len(alleCities)
for i in range(0,leng):
for j in range(0, leng):
c1 = alleCities[i]
c2 = alleCities[j]
dist = distanceCalc(c1, c2)
nameCon = c1.name + c2.name
distDict[nameCon] = dist
def distance(nameA, nameB):
nameCon = nameA.name + nameB.name
distDicta = distDict[nameCon]
return distDicta
``` |
{
"source": "jmohr/conrad",
"score": 3
} |
#### File: conrad/adapter/base.py
```python
import logging
import pyodbc
from abc import ABCMeta, abstractmethod, abstractproperty
from conrad.utils import plural
logger = logging.getLogger(__name__)
class Base(object):
"""
This is a base DB adapter, for a database which implements the
Python DBAPI2.0 spec. Much more testing needs to be done with this,
as it currently has only been tested with the ODBC subclass. In theory,
though, you should be able to create your own adapter by subclassing
this, and defining the connect() method for whatever database you
are trying to connect to. Just have it set self.cursor and
self.connection, and you should be good to go. You can override
any of the other methods if your database is non-standard or
if the module doesn't fully implement DBAPI2.0.
"""
__metaclass__ = ABCMeta
def __init__(self, *args, **kwargs):
logger.debug('Initializing Base DB adapter')
if not (args or kwargs):
logger.debug('No args or kwargs defined')
else:
logger.debug('Calling connect with args: {} and kwargs: {}'.format(
args, kwargs))
self.connect(*args, **kwargs)
@abstractmethod
def connect(self, *args, **kwargs):
return
@abstractmethod
def find(self, resource, conditions={}):
return
@abstractmethod
def create(self, resource, attributes={}):
return
@abstractmethod
def update(self, resource, attributes={}, conditions={}):
return
@abstractmethod
def delete(self, resource, conditions={}):
return
@classmethod
def result_dict(cls, results):
return dict(results)
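# Illustrative sketch (not shipped with conrad): a concrete adapter subclasses Base,
# sets self.connection and self.cursor in connect(), and overrides the four abstract
# CRUD methods.  sqlite3 is used here only as a familiar stdlib DBAPI example.
#   class SQLiteAdapter(Base):
#       def connect(self, path):
#           import sqlite3
#           self.connection = sqlite3.connect(path)
#           self.cursor = self.connection.cursor()
#       # ... plus find/create/update/delete implementations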
```
#### File: units/adapter/test_base_adapter.py
```python
from conrad.adapter import Base
class TestBase(object):
def test_methods(self):
for method in ['connect', 'find', 'update', 'create',
'delete', 'result_dict']:
assert hasattr(Base, method), 'Base is missing method {}'.format(
method)
def test_result_dict(self):
test_tuple = (('one', 111), ('two', 222), ('three', 333))
d = Base.result_dict(test_tuple)
for k, v in test_tuple:
assert d.has_key(k), 'result is missing key "{}"'.format(k)
assert d[k] == v, 'result[{}] is not {}'.format(k, v)
```
#### File: test/units/test_condition.py
```python
from conrad import query
class TestCondition(object):
def test_attrs(self):
for module_attr in ['gt', 'lt', 'gte', 'lte']:
errmsg = 'condition does not have attr {}'.format(module_attr)
assert hasattr(query, module_attr), errmsg
assert hasattr(query.Condition, 'statement')
class TestGreaterThan(object):
def test_class(self):
print query.lt
print query.Condition
assert issubclass(query.lt, query.Condition)
def test_sqlgen(self):
gt = query.gt(5)
assert gt.statement == '{name} > {placeholder}'
assert 5 == gt.variable
class TestLessThan(object):
def test_sqlgen(self):
lt = query.lt(5)
assert lt.statement == '{name} < {placeholder}'
assert 5 == lt.variable
class TestGreaterThanOrEqualTo(object):
def test_sqlgen(self):
gte = query.gte(5)
assert gte.statement == '{name} >= {placeholder}'
assert 5 == gte.variable
class TestLessThanOrEqualTo(object):
def test_sqlgen(self):
lte = query.lte(5)
assert lte.statement == '{name} <= {placeholder}'
assert 5 == lte.variable
class TestConditionFilterIntegration(object):
def test_filtering(self):
q = query.Select('test_table').filter(name=query.lte(25))
assert q.variables == [25], 'q.variables is {}, should be [25]'.format(q.variables)
assert 'name <= ?' in q.statement
def test_filter_chaining(self):
q = query.Select('test_table').filter(name=query.gt('foobar')).filter(age=query.lte(33))
assert 33 in q.variables, q.variables
assert 'foobar' in q.variables
assert 'name > ?' in q.statement
assert 'age <= ?' in q.statement
``` |
{
"source": "jmoiron/daneel",
"score": 3
} |
#### File: daneel/plugins/weblib.py
```python
import re
import requests
import urlparse
import traceback
import json
from lxml.cssselect import CSSSelector as cs
from lxml.html import document_fromstring
from daneel import utils
ua = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.43 Safari/537.31"
def text(selector, html):
res = cs(selector)(html)
if not res:
return ""
if res and len(res) == 1:
return res[0].text_content().strip()
res = map(lambda x: x.text_content().strip(), res)
return "".join(res)
def first(selector, html):
res = cs(selector)(html)
if not res or not len(res):
return None
return res[0]
def get_summary(url):
"""Get a summary for a url."""
if "yelp.com" in url:
return "Yelp is not supported because they IP blocked the server daneel runs on."
page = requests.get(url, headers={'User-Agent': ua})
try:
content = page.text
except:
content = page.content
content = utils.utf8_damnit(content)
return summarize(content, url)
def summarize(content, url=""):
"""Return a summary for an html document. If a URL is passed, it may be
treated specially to give better results, eg. twitter will return the tweet."""
html = document_fromstring(content)
if url:
parsed = urlparse.urlparse(url)
if parsed.netloc.endswith("twitter.com") and "status" in url:
tweet = text(".permalink-tweet .tweet-text", html)
try:
username = cs(".permalink-tweet")(html)[0].attrib["data-screen-name"]
return "@%s: %s" % (username, tweet)
except:
return tweet
# try to return opengraph description or title first, then just the <title>
ogdesc = first("meta[property=\"og:description\"]", html)
if ogdesc:
return utils.maxlen(ogdesc.attrib["content"])
ogtitle = first("meta[property=\"og:title\"]", html)
if ogtitle:
return utils.maxlen(ogtitle.attrib["content"])
return text("title", html)
def shorten_url(url):
gurl = "https://www.googleapis.com/urlshortener/v1/url"
data = json.dumps({"longUrl": url})
resp = requests.post(gurl, data, headers={"Content-Type": "application/json"})
if resp.json and "id" in resp.json:
return resp.json["id"]
return None
url_re = re.compile(r"(https?://[^ ]+)")
def find_urls(message):
"""Finds urls in a message. Returns a list of URLs, or an empty list
if none are found. Only looks for http & https schemes."""
urls = url_re.findall(message)
return [url.rstrip(',') for url in urls]
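# Illustrative examples (directly verifiable against the regex above):
#   >>> find_urls("see http://example.com/a, and https://example.org")
#   ['http://example.com/a', 'https://example.org']
#   >>> find_urls("no links here")
#   []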
``` |
{
"source": "jmojoo/MLMO",
"score": 3
} |
#### File: MLMO/utils/image_dataset.py
```python
import os
import cv2
try:
from PIL import Image
available = True
except ImportError as e:
available = False
_import_error = e
import six
import numpy as np
from chainercv import transforms
import chainer
from chainer.dataset import dataset_mixin
def _read_image_as_array(path, dtype):
image = cv2.imread(path)
return image
def _postprocess_image(image):
if image.ndim == 2:
# image is greyscale
image = image[..., None]
return image
class Transform(object):
def __init__(self, args, augment=False, ensemble=False):
self.extractor = args.extractor
self.augment = augment
self.dataset = args.dataset
self.ensemble = ensemble
if 'alexnet' in self.extractor:
self.size = (227, 227)
elif 'darknet-19' in self.extractor:
self.size = (320, 320)
elif 'vgg16' in self.extractor:
self.size = (224, 224)
def __call__(self, in_data):
img = in_data
img = img.astype(np.float32)
# if img.max() > 1:
# img /= 255.0
if img.shape[0] in (1, 3):
img = np.transpose(img, (1, 2, 0))
if img.shape[2] == 1:
img = np.broadcast_to(img, (img.shape[0], img.shape[1], 3))
if 'alexnet' in self.extractor or 'vgg16' in self.extractor:
img = cv2.resize(img, (256, 256))
if 'alexnet' in self.extractor:
if img.max() <= 1:
img *= 255.0
# img = img[:, :, ::-1] # RGB -> BGR # unnecessary if loaded using opencv
mean_bgr = np.array([104, 117, 123], dtype=np.float32)
img -= mean_bgr
elif 'darknet-19' in self.extractor:
if img.max() > 1:
img /= 255.0
elif 'vgg16' in self.extractor:
if img.max() <= 1:
img *= 255.0
mean_bgr = np.array([103.939, 116.779, 123.68], dtype=np.float32)
img -= mean_bgr
if img.shape[2] == 3:
img = np.transpose(img, (2, 0, 1))
augment = chainer.global_config.train and self.augment
if augment:
if np.random.randint(2):
img = transforms.random_flip(
img, x_random=True, y_random=False)
if self.ensemble:
img = transforms.ten_crop(img, self.size)
else:
img = transforms.resize(img, self.size)
#
return img
class ImageDataset(dataset_mixin.DatasetMixin):
"""Dataset of images built from a list of paths to image files.
This dataset reads an external image file on every call of the
:meth:`__getitem__` operator. The paths to the image to retrieve is given
as either a list of strings or a text file that contains paths in distinct
lines.
Each image is automatically converted to arrays of shape
``channels, height, width``, where ``channels`` represents the number of
channels in each pixel (e.g., 1 for grey-scale images, and 3 for RGB-color
images).
.. note::
**This dataset requires the Pillow package being installed.** In order
to use this dataset, install Pillow (e.g. by using the command ``pip
install Pillow``). Be careful to prepare appropriate libraries for image
formats you want to use (e.g. libpng for PNG images, and libjpeg for JPG
images).
.. warning::
**You are responsible for preprocessing the images before feeding them
to a model.** For example, if your dataset contains both RGB and
grayscale images, make sure that you convert them to the same format.
Otherwise you will get errors because the input dimensions are different
for RGB and grayscale images.
Args:
paths (str or list of strs): If it is a string, it is a path to a text
file that contains paths to images in distinct lines. If it is a
list of paths, the ``i``-th element represents the path to the
``i``-th image. In both cases, each path is a relative one from the
root path given by another argument.
root (str): Root directory to retrieve images from.
dtype: Data type of resulting image arrays. ``chainer.config.dtype`` is
used by default (see :ref:`configuration`).
"""
def __init__(self, paths, root='.', dtype=None):
_check_pillow_availability()
if isinstance(paths, six.string_types):
with open(paths) as paths_file:
paths = [path.strip() for path in paths_file]
self._paths = paths
self._root = root
self._dtype = chainer.get_dtype(dtype)
def __len__(self):
return len(self._paths)
def get_path(self, i):
return self._paths[i]
def get_example(self, i):
path = os.path.join(self._root, self._paths[i])
image = _read_image_as_array(path, self._dtype)
return _postprocess_image(image)
def _check_pillow_availability():
if not available:
raise ImportError('PIL cannot be loaded. Install Pillow!\n'
'The actual import error is as follows:\n' +
str(_import_error))
``` |
{
"source": "jmoldon/hcg_hi_pipeline",
"score": 2
} |
#### File: jmoldon/hcg_hi_pipeline/clean_image.py
```python
import imp, numpy, glob, shutil, sys, os
imp.load_source('common_functions','common_functions.py')
import common_functions as cf
def noise_est(config,logger):
"""
    Makes an estimate of the theoretically expected noise level for each science target.
Input:
config = The parameters read from the configuration file. (Ordered dictionary)
Output:
    noise = Estimate of the theoretical noise in Jy/beam. (List of Floats)
"""
logger.info('Starting making noise estimation.')
targets = config['calibration']['target_names'][:]
calib = config['calibration']
if calib['mosaic']:
targets = list(set(calib['target_names']))
cln_param = config['clean']
src_dir = config['global']['src_dir']+'/'
noise = []
for target in targets:
msmd.open(src_dir+target+'.split.contsub')
N = msmd.nantennas()
t_int = msmd.effexposuretime()['value']
t_unit = msmd.effexposuretime()['unit']
if t_unit != 's' and 'sec' not in t_unit:
logger.warning('Integration time units are not in seconds. Estimated noise may be incorrect.')
ch_wid = numpy.mean(msmd.chanwidths(0))
#Note: The above line may cause issues if different spectral windows
        #have very different frequency resolutions
corr_eff = cln_param['corr_eff']
SEFD = cln_param['sefd']
N_pol = 2.
f_smo = 1.
if config_raw.has_option('importdata','hanning'):
if config['importdata']['hanning']:
if not config['importdata']['mstransform']:
f_smo = 8./3.
else:
if not config_raw.has_option('importdata','chanavg'):
f_smo = 8./3.
else:
Nchan = float(config['importdata']['chanavg'])
if Nchan > 1.:
f_smo = Nchan/((Nchan-2.) + 2.*(9./16.) + 2.*(1./16.))
else:
f_smo = 8./3.
noise.append(SEFD/(corr_eff*numpy.sqrt(f_smo*N_pol*N*(N-1.)*t_int*ch_wid)))
logger.info('Effective integration time for {0}: {1} {2}'.format(target,int(t_int),msmd.effexposuretime()['unit']))
logger.info('Expected rms noise for {0}: {1} Jy/beam'.format(target,SEFD/(corr_eff*numpy.sqrt(f_smo*N_pol*N*(N-1.)*t_int*ch_wid))))
msmd.close()
logger.info('Completed making noise estimation.')
return noise
def image(config,config_raw,config_file,logger):
"""
Generates a clean (continuum subtracted) image of each science target.
Checks that the CLEANing scales and line emission channels are set (may prompt user if in interactive mode).
    Makes various checks on the ratio of pixel size to beam size, the scales, and the maximum baseline (may prompt the user if in interactive mode).
Exports the final images as fits cubes (after regridding to J2000 if necessary).
Input:
config = The parameters read from the configuration file. (Ordered dictionary)
config_raw = The instance of the parser.
config_file = Path to configuration file. (String)
"""
noises = noise_est(config,logger)
cln_param = config['clean']
if config_raw.has_option('clean','noise'):
noises = cln_param['noise'][:]
logger.info('Noise level(s) set manually as {0} Jy.'.format(noises))
calib = config['calibration']
contsub = config['continuum_subtraction']
rest_freq = config['global']['rest_freq']
targets = calib['target_names'][:]
fields = calib['targets'][:]
for i in range(len(targets)):
target = targets[i]
if 'spw' in target:
inx = target.index('.spw')
target_name = target[:inx]
if target_name in calib['target_names'][i-1]:
fields.insert(i,fields[i-1])
if calib['mosaic']:
targets = list(set(targets))
src_dir = config['global']['src_dir']+'/'
img_dir = config['global']['img_dir']+'/'
cf.makedir('./'+img_dir,logger)
logger.info('Starting generation of clean image(s).')
    reset_cln = False
if len(cln_param['line_ch']) == 0 or len(cln_param['line_ch']) != len(targets):
if not interactive:
logger.critical('The number of line channel ranges provided does not match the number of targets.')
            logger.info('Line channels: {}'.format(cln_param['line_ch']))
logger.info('Targets: {}'.format(targets))
sys.exit(-1)
reset_cln = True
if len(cln_param['line_ch']) < len(targets):
logger.warning('There are more target fields than channel ranges. Appending blank ranges.')
while len(cln_param['line_ch']) < len(targets):
cln_param['line_ch'].append('')
elif len(cln_param['line_ch']) > len(targets):
logger.warning('There are more channel ranges than target fields.')
logger.info('Current channel ranges: {}'.format(cln_param['line_ch']))
logger.warning('The channel range list will now be truncated to match the number of targets.')
cln_param['line_ch'] = cln_param['line_ch'][:len(targets)]
elif interactive:
print('Current image channels set as:')
for i in range(len(cln_param['line_ch'])):
print('{0}: {1}'.format(targets[i],cln_param['line_ch'][i]))
        resp = str(raw_input('Do you want to revise the channels that will be imaged (y/n): '))
if resp.lower() in ['yes','ye','y']:
reset_cln = True
if reset_cln and interactive:
print('For each target enter the channels you want to image in the following format:\nspwID:min_ch~max_ch')
for i in range(len(targets)):
print('Note: The continuum channels for this target were set to: {}'.format(contsub['linefree_ch'][i]))
cln_param['line_ch'][i] = cf.uinput('Channels to image for {}: '.format(targets[i]), cln_param['line_ch'][i])
logger.info('Setting image channels for {0} as: {1}.'.format(targets[i], cln_param['line_ch'][i]))
logger.info('Updating config file to set channels to be imaged.')
config_raw.set('clean','line_ch',cln_param['line_ch'])
configfile = open(config_file,'w')
config_raw.write(configfile)
configfile.close()
logger.info('Line emission channels set as: {}.'.format(cln_param['line_ch']))
logger.info('For the targets: {}.'.format(targets))
if numpy.any(cln_param['multiscale']):
algorithm = 'multiscale'
logger.info('Setting CLEAN algorithm to MS-CLEAN.')
if not numpy.all(cln_param['multiscale']):
logger.info('However, some targets will still use Hogbom CLEAN.')
reset_cln = False
if cln_param['beam_scales'] == []:
reset_cln = True
logger.warning('MS-CLEAN scales not set.')
elif 0 not in cln_param['beam_scales']:
logger.warning('MS-CLEAN scales do not include point sources. This is highly recommended.')
if interactive:
                resp = str(raw_input('Do you want to revise MS-CLEAN scales (y/n): '))
if resp.lower() in ['yes','ye','y']:
reset_cln = True
else:
logger.info('Adding point source to MS-CLEAN scales.')
cln_param['beam_scales'].append(0)
reset_cln = True
if reset_cln:
if interactive:
print('Current scales set to: {} beam diameters.'.format(cln_param['beam_scales']))
cln_param['beam_scales'] = cf.uinput('Enter new scales: ', cln_param['beam_scales'])
logger.info('Setting MS-CLEAN scales as {} beams.'.format(cln_param['beam_scales']))
logger.info('Updating config file to set MS-CLEAN scales.')
config_raw.set('clean','scales',cln_param['beam_scales'])
configfile = open(config_file,'w')
config_raw.write(configfile)
configfile.close()
reset_cln = False
scales = cln_param['beam_scales']
else:
algorithm = 'hogbom'
logger.info('Setting CLEAN algorithm to Hogbom.')
scales = None
for i in range(len(targets)):
target = targets[i]
field = fields[i]
if numpy.all(cln_param['multiscale']):
ms_clean = True
algorithm = 'multiscale'
elif type(cln_param['multiscale']) != type(True):
ms_clean = cln_param['multiscale'][i]
if ms_clean:
algorithm = 'multiscale'
else:
algorithm = 'hogbom'
else:
ms_clean = False
algorithm = 'hogbom'
logger.info('Starting {} image.'.format(target))
reset_cln = False
ia.open(img_dir+target+'.dirty.image')
rest_beam = ia.restoringbeam()
ia.close()
if rest_beam['minor']['unit'] not in cln_param['pix_size'][i]:
            logger.error('The pixel size and beam size have different units.')
if ms_clean:
logger.error('MS-CLEAN scales will likely be incorrect.')
logger.info('Pixel size: {}'.format(cln_param['pix_size'][i]))
logger.info('Beam size units: {}'.format(rest_beam['minor']['unit']))
pix_size = cln_param['pix_size'][i]
pix_size = float(pix_size[:pix_size.find(rest_beam['minor']['unit'])])
if pix_size > 0.2*rest_beam['minor']['value']:
logger.warning('There are fewer than 5 pixels across the beam minor axis. Consider decreasing the pixel size.')
if interactive:
print('Beam dimensions:')
print('Major: {0:.2f} {1}'.format(rest_beam['major']['value'],rest_beam['major']['unit']))
print('Minor: {0:.2f} {1}'.format(rest_beam['minor']['value'],rest_beam['minor']['unit']))
print('Pixel size: {}'.format(cln_param['pix_size']))
                resp = str(raw_input('Do you want to revise the pixel size (y/n): '))
if resp.lower() in ['yes','ye','y']:
reset_cln = True
if reset_cln and interactive:
print('Enter the desired pixel size:')
cln_param['pix_size'][i] = cf.uinput('Pixel size for {}: '.format(target), cln_param['pix_size'][i])
logger.info('Setting pixel size for {0} as: {1}.'.format(target, cln_param['pix_size'][i]))
resp = str(raw_input('Would you also like to revise the image size: '))
if resp.lower() in ['yes','ye','y']:
cln_param['im_size'][i] = cf.uinput('Image size for {}: '.format(target), cln_param['im_size'][i])
logger.info('Setting image size for {0} as: {1}.'.format(target, cln_param['im_size'][i]))
logger.info('Updating config file to set pixel (image) size.')
config_raw.set('clean','pix_size',cln_param['pix_size'])
config_raw.set('clean','im_size',cln_param['im_size'])
configfile = open(config_file,'w')
config_raw.write(configfile)
configfile.close()
reset_cln = False
        if cln_param['automask_sl'] == '':
            cln_param['automask_sl'] = 2.0
            logger.warning('Automasking sidelobe threshold not set. Using default value: {}'.format(cln_param['automask_sl']))
        if cln_param['automask_ns'] == '':
            cln_param['automask_ns'] = 4.25
            logger.warning('Automasking noise threshold not set. Using default value: {}'.format(cln_param['automask_ns']))
        if cln_param['automask_lns'] == '':
            cln_param['automask_lns'] = 1.5
            logger.warning('Automasking low noise threshold not set. Using default value: {}'.format(cln_param['automask_lns']))
        if cln_param['automask_mbf'] == '':
            cln_param['automask_mbf'] = 0.3
            logger.warning('Automasking minimum beam fraction not set. Using default value: {}'.format(cln_param['automask_mbf']))
        if cln_param['automask_neg'] == '':
            cln_param['automask_neg'] = 15.0
            logger.warning('Automasking negative threshold not set. Using default value: {}'.format(cln_param['automask_neg']))
logger.info('Automasking parameters set as:')
logger.info('sidelobethreshold = {}'.format(cln_param['automask_sl']))
logger.info('noisethreshold = {}'.format(cln_param['automask_ns']))
logger.info('lownoisethreshold = {}'.format(cln_param['automask_lns']))
logger.info('minbeamfraction = {}'.format(cln_param['automask_mbf']))
logger.info('negativethreshold = {}'.format(cln_param['automask_neg']))
if ms_clean:
pix_size = cln_param['pix_size'][i]
pix_size = float(pix_size[:pix_size.find(rest_beam['minor']['unit'])])
pix_per_beam = rest_beam['major']['value']/pix_size
scales = cln_param['beam_scales']
scales = list(numpy.array(numpy.array(scales)*pix_per_beam,dtype='int'))
B_min = au.getBaselineLengths('{0}{1}.split.contsub'.format(src_dir,target), sort=True)[0][1]
msmd.open('{0}{1}.split.contsub'.format(src_dir,target))
spws = msmd.spwsforfield(field)
f_min = None
for spw in spws:
if f_min == None or f_min > min(msmd.chanfreqs(spw=spw,unit='Hz')):
f_min = min(msmd.chanfreqs(spw=spw,unit='Hz'))
msmd.close()
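            # Largest recoverable angular scale: ~ lambda / (1.2 * B_min), with
            # lambda = c / f_min, converted from radians to arcsec (180*3600/pi).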
max_scale = 180.*3600.*299792458./(1.2*numpy.pi*f_min*B_min)
logger.info('The maximum recoverable scale for {0} is {1} arcsec.'.format(target,int(max_scale)))
if 'arcsec' not in cln_param['pix_size'][i]:
logger.warning('Pixel size not in arcsec. Maximum scale not checked.')
else:
pix_size = cln_param['pix_size'][i]
pix_size = float(pix_size[:pix_size.find('arcsec')])
if max(scales)*pix_size > max_scale:
logger.warning('Some MS-CLEAN scale(s) is (are) larger than largest recoverable angular scales.')
logger.info('Removing offending scales.')
scales = list(set(numpy.where(numpy.array(scales)*pix_size <= max_scale,scales,0)))
logger.info('CLEANing with scales of {} pixels.'.format(scales))
logger.info('CLEANing {0} to a threshold of {1} Jy.'.format(target,noises[i]*cln_param['thresh']))
if cln_param['automask']:
mask = 'auto-multithresh'
else:
mask = 'pb'
gridder = 'wproject'
if calib['mosaic']:
for target_name in targets:
inx = [j for j in range(len(calib['target_names'])) if target_name in calib['target_names'][j]]
fields = numpy.array(calib['targets'],dtype='str')[inx]
field = ','.join(fields)
gridder = 'mosaic'
command = "tclean(vis='{0}{1}'+'.split.contsub', field='{2}', spw='{3}', imagename='{4}{1}', cell='{5}', imsize=[{6},{6}], specmode='cube', outframe='bary', veltype='radio', restfreq='{7}', gridder='{8}', wprojplanes=-1, pblimit=0.1, normtype='flatnoise', deconvolver='{9}', scales={10}, restoringbeam='common', pbcor=True, weighting='briggs', robust={11}, niter=100000, gain=0.1, threshold='{12}Jy', usemask='{13}', phasecenter='{14}', sidelobethreshold={15}, noisethreshold={16}, lownoisethreshold={17}, minbeamfrac={18}, negativethreshold={19}, cyclefactor=2.0,interactive=False)".format(src_dir,target,field,cln_param['line_ch'][i],img_dir,cln_param['pix_size'][i],cln_param['im_size'][i],rest_freq,gridder,algorithm,scales,cln_param['robust'],noises[i]*cln_param['thresh'],mask,cln_param['phasecenter'],cln_param['automask_sl'],cln_param['automask_ns'],cln_param['automask_lns'],cln_param['automask_mbf'],cln_param['automask_neg'])
logger.info('Executing command: '+command)
exec(command)
cf.check_casalog(config,config_raw,logger,casalog)
logger.info('CLEANing finished. Image cube saved as {}.'.format(target+'.image'))
ia.open(img_dir+target+'.dirty.image')
coords = ia.coordsys()
coord_chn = False
if 'J2000' not in coords.referencecode()[0]:
coord_chn = True
logger.info('Coordinate system not J2000. Image will be regridded.')
command = "imregrid(imagename='{0}{1}'+'.image', template='J2000', output='{0}{1}'+'.image.J2000', asvelocity=True, interpolation='linear', decimate=10, overwrite=True)".format(img_dir,target)
logger.info('Executing command: '+command)
exec(command)
cf.check_casalog(config,config_raw,logger,casalog)
logger.info('{} regridded in J2000 coordinates.'.format(target+'.image.J2000'))
command = "imregrid(imagename='{0}{1}'+'.image.pbcor', template='J2000', output='{0}{1}'+'.image.pbcor.J2000', asvelocity=True, interpolation='linear', decimate=10, overwrite=True)".format(img_dir,target)
logger.info('Executing command: '+command)
exec(command)
cf.check_casalog(config,config_raw,logger,casalog)
logger.info('{} regridded in J2000 coordinates.'.format(target+'.image.pbcor.J2000'))
coords.done()
ia.close()
fitsname = target+'_HI.fits'
logger.info('Saving image cube as {}'.format(fitsname))
if coord_chn:
imagename = target+'.image.J2000'
else:
imagename = target+'.image'
command = "exportfits(imagename='{0}{1}', fitsimage='{0}{2}', velocity=True,optical=False,overwrite=True,dropstokes=True,stokeslast=True,history=True,dropdeg=True)".format(img_dir,imagename,fitsname)
logger.info('Executing command: '+command)
exec(command)
cf.check_casalog(config,config_raw,logger,casalog)
fitsname = target+'_HI.pbcor.fits'
logger.info('Saving primary beam corrected image cube as {}'.format(fitsname))
if coord_chn:
imagename = target+'.image.pbcor.J2000'
else:
imagename = target+'.image.pbcor'
command = "exportfits(imagename='{0}{1}', fitsimage='{0}{2}', velocity=True,optical=False,overwrite=True,dropstokes=True,stokeslast=True,history=True,dropdeg=True)".format(img_dir,imagename,fitsname)
logger.info('Executing command: '+command)
exec(command)
cf.check_casalog(config,config_raw,logger,casalog)
coord_chn = False
logger.info('Completed generation of clean image(s).')
# Read configuration file with parameters
config_file = sys.argv[-1]
config,config_raw = cf.read_config(config_file)
interactive = config['global']['interactive']
# Set up your logger
logger = cf.get_logger(LOG_FILE_INFO = '{}.log'.format(config['global']['project_name']),
LOG_FILE_ERROR = '{}_errors.log'.format(config['global']['project_name'])) # Set up your logger
# Define MS file name
msfile = '{0}.ms'.format(config['global']['project_name'])
#Remove previous image files
targets = config['calibration']['target_names']
img_path = config['global']['img_dir']+'/'
cf.check_casaversion(logger)
logger.info('Deleting any existing clean image(s).')
for target in targets:
del_list = [img_path+target+'.mask',img_path+target+'.model',img_path+target+'.pb',img_path+target+'.psf',img_path+target+'.residual',img_path+target+'.sumwt',img_path+target+'.weight']
del_list.extend(glob.glob(img_path+'{}.image*'.format(target)))
if len(del_list) > 0:
for file_path in del_list:
try:
shutil.rmtree(file_path)
except OSError:
pass
del_list = [img_path+target+'_HI.fits',img_path+target+'_HI.pbcor.fits']
for file_path in del_list:
try:
os.remove(file_path)
except OSError:
pass
#Make clean image
image(config,config_raw,config_file,logger)
#Review and backup parameters file
cf.diff_pipeline_params(config_file,logger)
cf.backup_pipeline_params(config_file,logger)
```
#### File: jmoldon/hcg_hi_pipeline/moment_zero.py
```python
import imp, numpy, glob, shutil, sys, os
imp.load_source('common_functions','common_functions.py')
import common_functions as cf
def noise_est(config,logger):
"""
    Makes an estimate of the theoretically expected noise level for each science target.
Input:
config = The parameters read from the configuration file. (Ordered dictionary)
Output:
    noise = Estimate of the theoretical noise in Jy/beam. (List of Floats)
"""
logger.info('Starting making noise estimation.')
targets = config['calibration']['target_names'][:]
calib = config['calibration']
if calib['mosaic']:
targets = list(set(calib['target_names']))
cln_param = config['clean']
src_dir = config['global']['src_dir']+'/'
noise = []
for target in targets:
msmd.open(src_dir+target+'.split.contsub')
N = msmd.nantennas()
t_int = msmd.effexposuretime()['value']
t_unit = msmd.effexposuretime()['unit']
if t_unit != 's' and 'sec' not in t_unit:
logger.warning('Integration time units are not in seconds. Estimated noise may be incorrect.')
ch_wid = numpy.mean(msmd.chanwidths(0))
#Note: The above line may cause issues if different spectral windows
        #have very different frequency resolutions
corr_eff = cln_param['corr_eff']
SEFD = cln_param['sefd']
N_pol = 2.
f_smo = 1.
if config_raw.has_option('importdata','hanning'):
if config['importdata']['hanning']:
if not config['importdata']['mstransform']:
f_smo = 8./3.
else:
if not config_raw.has_option('importdata','chanavg'):
f_smo = 8./3.
else:
Nchan = float(config['importdata']['chanavg'])
if Nchan > 1.:
f_smo = Nchan/((Nchan-2.) + 2.*(9./16.) + 2.*(1./16.))
else:
f_smo = 8./3.
noise.append(SEFD/(corr_eff*numpy.sqrt(f_smo*N_pol*N*(N-1.)*t_int*ch_wid)))
logger.info('Effective integration time for {0}: {1} {2}'.format(target,int(t_int),msmd.effexposuretime()['unit']))
        logger.info('Expected rms noise for {0}: {1} Jy/beam'.format(target,SEFD/(corr_eff*numpy.sqrt(f_smo*N_pol*N*(N-1.)*t_int*ch_wid))))
msmd.close()
logger.info('Completed making noise estimation.')
return noise
def moment0(config,config_raw,config_file,logger):
"""
Generates a moment zero map of each science target.
Input:
config = The parameters read from the configuration file. (Ordered dictionary)
config_raw = The instance of the parser.
config_file = Path to configuration file. (String)
"""
noises = noise_est(config,logger)
cln_param = config['clean']
calib = config['calibration']
if config_raw.has_option('clean','noise'):
noises = cln_param['noise'][:]
logger.info('Noise level(s) set manually as {0} Jy.'.format(noises))
moment = config['moment']
thresh = moment['mom_thresh']
chans = moment['mom_chans']
targets = config['calibration']['target_names']
if calib['mosaic']:
targets = list(set(targets))
img_dir = config['global']['img_dir']+'/'
mom_dir = config['global']['mom_dir']+'/'
cf.makedir('./'+mom_dir,logger)
change_made = False
if len(chans) == 0 or len(chans) != len(targets):
if len(chans) < len(targets):
logger.warning('There are more target fields than channel ranges for moments.')
while len(chans) < len(targets):
chans.append('')
elif len(chans) > len(targets):
logger.warning('There are more moment channel ranges than target fields.')
logger.info('Current channel ranges: {}'.format(chans))
logger.warning('The channel range list will now be truncated to match the number of targets.')
chans = chans[:len(targets)]
change_made = True
if interactive:
print('Current moment channel ranges set as:')
print(chans)
print('For the targets:')
print(targets)
resp = ''
while (resp.lower() not in ['yes','ye','y']) and (resp.lower() not in ['no','n']) :
resp = str(raw_input('Do you want to revise these channel ranges (y/n): '))
if resp.lower() in ['yes','ye','y']:
change_made = True
print('Please specify the channel ranges in the format: chan1~chan2.')
for i in range(len(chans)):
chans[i] = cf.uinput('Enter channel range for target {}: '.format(targets[i]), chans[i])
else:
pass
if change_made:
logger.info('Updating config file to set new moment channel ranges.')
config_raw.set('moment','mom_chans',chans)
configfile = open(config_file,'w')
config_raw.write(configfile)
configfile.close()
logger.info('Starting generation of moment map(s).')
J2000 = False
img_list = glob.glob(img_dir+'*.image.J2000')
if len(img_list) > 0:
J2000 = True
for i in range(len(targets)):
if J2000:
imagename = targets[i]+'.image.J2000'
else:
imagename = targets[i]+'.image'
command = "immoments(imagename='{0}{1}',includepix=[{2},{3}],chans='{4}',outfile='{5}{6}.mom0')".format(img_dir,imagename,thresh*noises[i],thresh*1E6*noises[i],chans[i],mom_dir,targets[i])
logger.info('Executing command: '+command)
exec(command)
cf.check_casalog(config,config_raw,logger,casalog)
command = "exportfits(imagename='{0}{1}.mom0', fitsimage='{0}{1}.mom0.fits',overwrite=True,dropstokes=True,stokeslast=True,history=True,dropdeg=True)".format(mom_dir,targets[i])
logger.info('Executing command: '+command)
exec(command)
cf.check_casalog(config,config_raw,logger,casalog)
logger.info('Completed generation of moment map(s).')
# Read configuration file with parameters
config_file = sys.argv[-1]
config,config_raw = cf.read_config(config_file)
interactive = config['global']['interactive']
# Set up your logger
logger = cf.get_logger(LOG_FILE_INFO = '{}.log'.format(config['global']['project_name']),
LOG_FILE_ERROR = '{}_errors.log'.format(config['global']['project_name'])) # Set up your logger
# Define MS file name
msfile = '{0}.ms'.format(config['global']['project_name'])
#Remove previous moment files
cf.check_casaversion(logger)
targets = config['calibration']['target_names']
mom_path = config['global']['mom_dir']+'/'
logger.info('Deleting any existing moment(s).')
for target in targets:
del_list = glob.glob(mom_path+'*.mom0')
if len(del_list) > 0:
for file_path in del_list:
try:
shutil.rmtree(file_path)
except OSError:
pass
del_list = glob.glob(mom_path+'*.mom0.fits')
for file_path in del_list:
try:
os.remove(file_path)
except OSError:
pass
#Make moment maps
moment0(config,config_raw,config_file,logger)
``` |
{
"source": "jmoldon/multilambda_catalogs",
"score": 3
} |
#### File: jmoldon/multilambda_catalogs/multilambda_catalogs.py
```python
import os
import argparse
from astropy.coordinates import SkyCoord
from astroquery.skyview import SkyView
from astropy import units as u
import requests
import astropy
def get_url(coords, surveys, radius=None, width=None, height=None):
print('Reading urls')
urls = []
valid_surveys = []
missing_surveys = []
for survey in surveys:
url = SkyView.get_image_list(position=coords,
survey=survey,
radius=radius,
width=width,
height=height)
if url != []:
if requests.get(url[0]).status_code != 404:
urls.append(url)
valid_surveys.append(survey)
print('OK: ', survey)
else:
missing_surveys.append(survey)
print('Failed: ', survey)
    for url_, s in zip(urls, valid_surveys):  # url_ avoids shadowing astropy.units (imported as u)
        print(url_, s)
return urls, valid_surveys, missing_surveys
def filter_available(urls):
print('Checking file availability')
status = [requests.get(url).status_code for url in urls]
available = [s!=404 for s in status]
return available
def download_image(coords, surveys, name='field', radius=None, width=None, height=None):
print('Getting fitsfiles')
fitsfiles = SkyView.get_images(position=coords,
survey=surveys,
radius=radius,
width=width,
height=height)
os.system('mkdir catalogs')
for fitsfile, survey in zip(fitsfiles, surveys):
print('Writing to file: {0} ...'.format(survey), end='')
filename = f'./catalogs/{name}_{survey}.fits'.replace(' ', '_')
try:
fitsfile[0].writeto(filename, overwrite=True)
print('...Done')
except astropy.io.fits.verify.VerifyError:
print('...Failed')
def generate_png(name, casa):
casa_command = f'{casa} --nologger --nologfile --nogui -c fits2png.py ./catalogs/{name}'
print('Trying to execute casa command:')
print(casa_command)
try:
os.system(casa_command)
except:
print('Could not execute CASA script. Try specifying casa executable with -casa /path/to/bin/casa')
def get_args():
'''This function parses and returns arguments passed in'''
# Assign description to the help doc
description = 'Select dataset'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-n', '--name', dest='name', help='Name prefix for output images', default='field')
parser.add_argument('-ra', dest='ra', help='R.A. coordinates in format 00:00:00.0')
parser.add_argument('-de', dest='de', help='Dec. coordinates in format +00:00:00.0')
    parser.add_argument('-r', '--radius', dest='radius', help='Search radius in arcmin', default=35.)
parser.add_argument('--png', dest='png', action='store_true')
parser.add_argument('--no-png', dest='png', action='store_false')
parser.set_defaults(png=True)
parser.add_argument('-c', '--casa', dest='casa', help='casa version to use', default='casa')
#default_surveys = ["NVSS","VLA FIRST (1.4 GHz)", "WENSS", "TGSS ADR1", "VLSSr", "SDSSr", "DSS", "2MASS-H"]
default_surveys = ["NVSS","VLA FIRST (1.4 GHz)", "WENSS", "TGSS ADR1", "VLSSr", "SDSSr", "DSS"]
parser.add_argument('-s', '--surveys', dest='surveys',
type=str, nargs='+',
                        help='Whitespace-separated list of surveys',
default=default_surveys)
args = parser.parse_args()
return args
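# Example invocation (hypothetical field name and coordinates):
#   python multilambda_catalogs.py -n myfield -ra 10:00:00.0 -de +02:00:00.0 -r 30 --no-png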
if __name__ == '__main__':
args = get_args()
name = args.name
coords = SkyCoord(args.ra, args.de, unit=(u.hourangle, u.deg), frame='icrs')
radius = float(args.radius)*u.arcmin
surveys = args.surveys
print(surveys)
urls, valid_surveys, missing_surveys = get_url(coords=coords, surveys=surveys, radius=radius)
download_image(coords=coords, surveys=valid_surveys, radius=radius, name=name)
casa = args.casa
    if args.png:
generate_png(name, casa)
# https://astroquery.readthedocs.io/en/latest/skyview/skyview.html
# https://docs.astropy.org/en/stable/coordinates/
# https://astroquery.readthedocs.io/en/latest/skyview/skyview.html
# https://github.com/aframosp/Review-Photcode/blob/96f2ddcafa73ce9148b11d4cab8be22a7a47c4c7/SkyView_Galaxies.ipynb
# https://github.com/andreww5au/Prosp/blob/849cdbdf0cebfbd76c5b9484cdb76e12a0a7556d/skyviewint.py
``` |
{
"source": "jmoldon/test_iaa_skyview",
"score": 3
} |
#### File: jmoldon/test_iaa_skyview/iaa_skyview_func.py
```python
import warnings
warnings.filterwarnings('ignore')
import sys
import os
from astroquery.skyview import SkyView
from astropy.coordinates import SkyCoord
import astropy.units as u
import aplpy
import matplotlib.pyplot as plt
from astropy.io import fits
import astropy
def coords_from_name(field_name):
"""Get ra, dec coordinates from a field name using astropy
Args:
field_name (str): Field name, e.g. 'M101'
    Returns:
        SkyCoord: coordinates of the source
    Example:
        >>> coords = coords_from_name('M101')
        >>> coords.ra.deg, coords.dec.deg
        (210.80242917, 54.34875)
"""
coords = SkyCoord.from_name(field_name)
return coords
def call_skyview_simple(survey, source_name, fov=1):
"""Call Skyview to download data from a survey based on input parameters
Args:
survey (str): name of survey, from https://skyview.gsfc.nasa.gov/current/cgi/survey.pl
        source_name (str): name of astronomical source
        fov (float): FOV in degrees
    Examples:
        >>> call_skyview_simple('DSS', 'M31', 2.)
        >>> call_skyview_simple('NVSS', 'NGC6670', 0.5)
"""
coords = coords_from_name(source_name)
outname = f'{source_name}_{survey}_{fov}d.fits'
images = SkyView.get_images(coords, survey,
coordinates='J2000',
projection='Car', pixels=500,
height=fov*u.deg, width=fov*u.deg)
fitsname = f'images/{source_name}_{survey}_{fov}d.fits'
try:
images[0][0].writeto(fitsname, overwrite=True)
except astropy.io.fits.verify.VerifyError:
print('Data not available')
pass
return fitsname
def plot_fits(fits_name, plot_title=None, cmap_name='viridis', colorbar=True, contour=True):
"""Make a PNG plot out of a FITS file
Args:
fits_name (str): path of fits file
plot_title (str): plot title, default is name of the fits file
cmap_name (str): name of colormap, default is viridis
colorbar (bool): include colorbar, default is True
contour (bool): include contour, default is True
"""
f = aplpy.FITSFigure(fits_name, figsize=(10, 8))
if plot_title == None:
plot_title = fits_name.replace('.fits', '')
plt.title(plot_title)
f.show_colorscale(cmap=cmap_name, stretch='linear')
f.ticks.set_color('k')
if colorbar:
f.add_colorbar()
if 'BMAJ' in fits.open(fits_name)[0].header:
f.add_beam()
print(f'Adding beam for {fits_name}')
if contour:
f.show_contour()
output_name = fits_name.replace('.fits', '.png')
plt.savefig(output_name, dpi=200, bbox_inches='tight')
def main_func(source_name, survey, fov):
if not os.path.isdir('images'):
os.mkdir('images')
fitsname = call_skyview_simple(survey, source_name, fov)
plot_fits(fitsname, plot_title=None, cmap_name='viridis', colorbar=True)
if __name__ == '__main__':
#warnings.filterwarnings('ignore', category=AstropyDeprecationWarning, append=True)
#warnings.simplefilter('ignore', AstropyDeprecationWarning)
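    # Example: python iaa_skyview_func.py M101 DSS 1.0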
main_func(sys.argv[1], sys.argv[2], float(sys.argv[3]))
``` |
{
"source": "jmoles/trail-viewer",
"score": 3
} |
#### File: GATools/test/test_DBUtils.py
```python
import os
import unittest
from DBUtils import DBUtils
class TestDatabaseUtils(unittest.TestCase):
def setUp(self):
self.pgdb = DBUtils(password=os.environ['PSYCOPG2_DB_PASS'])
def testNetworkList(self):
net_s, net_i, net_l = self.pgdb.fetchNetworksList()
# Verify that the list of networks are equal.
self.assertEqual(
len(net_i), len(net_l),
"Length of network list doesn't match number of integers.")
# Verify that the net_s is a single string
self.assertEqual(
type(net_s), str,
"Network string is not a single string.")
net_s_2, net_i_2 = self.pgdb.fetchNetworkCmdPrettyPrint()
# Verify that the values returned by these two functions
# are identical.
self.assertEqual(
net_s, net_s_2,
"Network strings for command prompt don't match!")
self.assertEqual(
net_i, net_i_2,
"List of valid network integers don't match.")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jmolinaso/bitcli",
"score": 2
} |
#### File: bitcli/tests/test_main.py
```python
from bitcli.main import BitCLITest
def test_bitcli(tmp):
with BitCLITest() as app:
res = app.run()
print(res)
raise Exception
def test_command1(tmp):
argv = ['command1']
with BitCLITest(argv=argv) as app:
app.run()
``` |
{
"source": "jmolinski/advent-of-code-2017",
"score": 3
} |
#### File: advent-of-code-2017/day-14/main.py
```python
from functools import reduce
import operator as op
from itertools import count
def reverse_sublist(lst, pos, sublen):
tail, head = max(pos + sublen - 256, 0), min(256, pos + sublen)
new = list(reversed(lst[pos:head] + lst[:tail]))
lst[:tail], lst[pos:head] = new[head - pos:], new[:head - pos]
def knot_hash(lengths, rounds=1):
lst, position = list(range(256)), 0
for skip_size, length in enumerate(lengths * rounds):
reverse_sublist(lst, position, length)
position = (position + length + skip_size) % 256
return lst
def make_dense_hash(lst):
hashed = [reduce(op.xor, lst[i: i + 16]) for i in range(0, 256, 16)]
return ''.join(f'{c:02x}' for c in hashed)
def hash_binary(hash):
return ''.join(f'{int(c, 16):04b}' for c in hash)
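# Part 2: count connected regions of '1' bits with a recursive 4-neighbour flood fill.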
def count_regions(memory):
regions = 0
for i, row in memory.items():
while '1' in row.values():
hide_group(i, [k for k in row.keys() if row[k] == '1'][0], memory)
regions += 1
return regions
def hide_group(x, y, memory):
memory[x][y] = '0'
for x_v, y_v in ((1, 0), (0, 1), (-1, 0), (0, -1)):
if memory.get(x + x_v, {}).get(y + y_v, '0') == '1':
hide_group(x + x_v, y + y_v, memory)
key_string = open('data.txt', 'r').read()
mem_rows = [[ord(c) for c in l] + [17, 31, 73, 47, 23]
for l in [f'{key_string}-{i}' for i in range(128)]]
mem_rows = [list(hash_binary(make_dense_hash(knot_hash(r, rounds=64))))
for r in mem_rows]
enumerated_mem_rows = dict(enumerate(dict(enumerate(r)) for r in mem_rows))
answer_part_1 = sum(r.count('1') for r in mem_rows)
answer_part_2 = count_regions(enumerated_mem_rows)
print(answer_part_1, answer_part_2)
```
#### File: advent-of-code-2017/day-21/main.py
```python
from itertools import chain, product
from functools import reduce
puzzle_data = open('data.txt', 'r').read()
initial = ('.#.', '..#', '###',)
raw_patterns = [
tuple([r for r in rows])
for rows in
[[x.strip() for x in p.split('=>')]
for p in puzzle_data.split('\n')]
]
def rotate_90(matrix):
return tuple(zip(*matrix[::-1]))
def flip(matrix):
return tuple(tuple(r[::-1]) for r in matrix)
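# Enumerate the symmetries of a pattern (compositions of flips and 90-degree
# rotations), keyed as 'row/row/row' strings so any orientation matches a rule.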
def pattern_combinations(pattern):
pattern = tuple([tuple(p) for p in pattern.split('/')])
modifiers_combinations = chain.from_iterable(
product([flip, rotate_90], repeat=i) for i in range(4)
)
comb = [reduce(lambda p, m: m(p), modifiers, pattern)
for modifiers in modifiers_combinations]
return ['/'.join(''.join(r) for r in m) for m in comb]
def enrich_patterns(original):
    enriched = dict()
    for input_pattern, output_pattern in original:
        enriched.update(
            dict.fromkeys(
                pattern_combinations(input_pattern), output_pattern)
        )
    return enriched
def split_into_chunks(l, chunk_size):
return [l[i:i + chunk_size]
for i in range(0, len(l), chunk_size)]
def split_grid(grid):
side_size = len(grid)
split_size = 2 if side_size % 2 == 0 else 3
chunks = split_into_chunks(grid, split_size)
chunks = [tuple('/'.join([r for r in rows])
for rows in zip(*[split_into_chunks(x, split_size) for x in grid_part]))
for grid_part in chunks]
return chunks
def merge_grid(subgrids):
grid = [tuple(zip(*[x.split('/') for x in row]))
for row in subgrids]
grid = [tuple(''.join(subrow) for subrow in row) for row in grid]
return tuple(chain.from_iterable(grid))
def run_simulation(grid, mapping, rounds=1):
for _ in range(rounds):
grid = merge_grid(
tuple((mapping[subgrid] for subgrid in subgrids_row))
for subgrids_row in split_grid(grid)
)
return grid
patterns = enrich_patterns(raw_patterns)
p1_grid = run_simulation(initial, patterns, rounds=5)
p2_grid = run_simulation(initial, patterns, rounds=18)
answer_part_1 = ''.join(p1_grid).count('#')
answer_part_2 = ''.join(p2_grid).count('#')
print(answer_part_1, answer_part_2)
``` |
{
"source": "jmolinski/advent-of-code-2018",
"score": 3
} |
#### File: advent-of-code-2018/day-16/main.py
```python
from collections import defaultdict
import re
def exec_op(fn):
def op(reg, a, b, c):
nreg = [x for x in reg] # because lists are mutable
nreg[c] = fn(reg, a, b)
return nreg
return op
OPERATIONS = {
"addr": exec_op(lambda reg, x, y: reg[x] + reg[y]),
"addi": exec_op(lambda reg, x, y: reg[x] + y),
"mulr": exec_op(lambda reg, x, y: reg[x] * reg[y]),
"muli": exec_op(lambda reg, x, y: reg[x] * y),
"banr": exec_op(lambda reg, x, y: reg[x] & reg[y]),
"bani": exec_op(lambda reg, x, y: reg[x] & y),
"borr": exec_op(lambda reg, x, y: reg[x] | reg[y]),
"bori": exec_op(lambda reg, x, y: reg[x] | y),
"setr": exec_op(lambda reg, x, y: reg[x]),
"seti": exec_op(lambda reg, x, y: x),
"gtir": exec_op(lambda reg, x, y: int(x > reg[y])),
"gtri": exec_op(lambda reg, x, y: int(reg[x] > y)),
"gtrr": exec_op(lambda reg, x, y: int(reg[x] > reg[y])),
"eqir": exec_op(lambda reg, x, y: int(x == reg[y])),
"eqri": exec_op(lambda reg, x, y: int(reg[x] == y)),
"eqrr": exec_op(lambda reg, x, y: int(reg[x] == reg[y])),
}
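# Part 1 counts samples consistent with 3 or more operations; the opcode-number to
# operation mapping is then resolved by elimination (repeatedly fix any opcode with
# a single remaining candidate and remove it from the rest).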
def part1():
with open("data_registers.txt") as f:
data = list([x.strip() for x in f.readlines()])
over_2_possible = 0
all_poss = defaultdict(set)
for i in range(len(data) // 4 + 1):
l1, l2, l3, *_ = data[i * 4 :]
reg = list(map(int, re.findall(r"\d", l1)))
result = list(map(int, re.findall(r"\d", l3)))
op, a, b, c = map(int, l2.split())
poss = [k for k, fn in OPERATIONS.items() if result == fn(reg, a, b, c)]
over_2_possible += int(len(poss) > 2)
for p in poss:
all_poss[p].add(op)
op_map = {}
while all_poss:
k, v = [(k, v) for k, v in all_poss.items() if len(v) == 1][0]
op_map[list(v)[0]] = k
del all_poss[k]
all_poss = {k: (rest - set(v)) for k, rest in all_poss.items()}
return over_2_possible, op_map
def part2(op_map):
with open("data_program.txt") as f:
reg = [0, 0, 0, 0]
for l2 in f.readlines():
op, a, b, c = map(int, l2.split())
opnam = op_map[op]
reg = OPERATIONS[opnam](reg, a, b, c)
return reg[0]
over_2, op_mapping = part1()
reg_0 = part2(op_mapping)
print("1)", over_2, "\n2)", reg_0)
```
#### File: advent-of-code-2018/day-18/main.py
```python
from collections import defaultdict
from copy import deepcopy
with open("data.txt") as f:
data = defaultdict(lambda: defaultdict(lambda: None))
for x, r in enumerate(f.readlines()):
r = r.strip()
for y, c in enumerate(r):
data[x][y] = c
size = len(data)
def adjacent(x, y, data):
adj = []
for dx in (-1, 0, 1):
for dy in (-1, 0, 1):
if dx == 0 and dy == 0:
continue
adj.append(data[x + dx][y + dy])
return [z for z in adj if z]
def hash_board(d):
return hash("".join(["".join(r.values()) for r in d.values()]))
def run_sim(minutes, find_cycle=False):
board = deepcopy(data)
m = defaultdict(list)
for minute in range(minutes):
if find_cycle:
m[hash_board(board)].append(minute)
new_board = deepcopy(board)
for x in range(size):
for y in range(size):
                adj = adjacent(x, y, board)
field = board[x][y]
if field == "." and adj.count("|") >= 3:
new_board[x][y] = "|"
elif field == "|" and adj.count("#") >= 3:
new_board[x][y] = "#"
elif field == "#" and ("#" not in adj or "|" not in adj):
new_board[x][y] = "."
board = new_board
if find_cycle:
if any(len(v) > 1 for v in m.values()):
return [v for v in m.values() if len(v) > 1][0]
wood = sum(a == "|" for r in board.values() for a in r.values())
lumb = sum(a == "#" for r in board.values() for a in r.values())
return wood * lumb
def part2():
cycle_data = run_sim(minutes=1000, find_cycle=True)
cycle_start = min(cycle_data)
cycle_len = max(cycle_data) - min(cycle_data)
minute = 1000 * 1000 * 1000
minute = cycle_start + ((minute - cycle_start) % cycle_len)
return run_sim(minutes=minute)
print("1)", run_sim(minutes=10), "\n2)", part2())
```
#### File: advent-of-code-2018/day-19/main.py
```python
from collections import defaultdict
import re
def read_data():
with open("data.txt") as f:
ip = int(f.readline().split()[1])
data = []
for l in f.readlines():
op, a, b, c = l.strip().split()
data.append([op, int(a), int(b), int(c)])
return ip, data
def exec_op(fn):
def op(reg, a, b, c):
nreg = [x for x in reg] # because lists are mutable
nreg[c] = fn(reg, a, b)
return nreg
return op
OPERATIONS = {
"addr": exec_op(lambda reg, x, y: reg[x] + reg[y]),
"addi": exec_op(lambda reg, x, y: reg[x] + y),
"mulr": exec_op(lambda reg, x, y: reg[x] * reg[y]),
"muli": exec_op(lambda reg, x, y: reg[x] * y),
"banr": exec_op(lambda reg, x, y: reg[x] & reg[y]),
"bani": exec_op(lambda reg, x, y: reg[x] & y),
"borr": exec_op(lambda reg, x, y: reg[x] | reg[y]),
"bori": exec_op(lambda reg, x, y: reg[x] | y),
"setr": exec_op(lambda reg, x, y: reg[x]),
"seti": exec_op(lambda reg, x, y: x),
"gtir": exec_op(lambda reg, x, y: int(x > reg[y])),
"gtri": exec_op(lambda reg, x, y: int(reg[x] > y)),
"gtrr": exec_op(lambda reg, x, y: int(reg[x] > reg[y])),
"eqir": exec_op(lambda reg, x, y: int(x == reg[y])),
"eqri": exec_op(lambda reg, x, y: int(reg[x] == y)),
"eqrr": exec_op(lambda reg, x, y: int(reg[x] == reg[y])),
}
def run_simulation(reg):
ip_reg, data = read_data()
ip = 0
while ip < len(data):
op, a, b, c = data[ip]
reg[ip_reg] = ip
reg = OPERATIONS[op](reg, a, b, c)
ip = reg[ip_reg]
ip += 1
return reg[0]
part1 = run_simulation(reg=[0, 0, 0, 0, 0, 0])
# part2 = run_simulation(reg=[1, 0, 0, 0, 0, 0])
print(
"1)", part1, "\n2)", "proper simulation would take forever, check part2_solution.py"
)
```
#### File: advent-of-code-2018/day-22/main.py
```python
from collections import defaultdict
with open("data.txt") as f:
DEPTH = int(f.readline().split()[1])
TARGET = list(map(int, f.readline().strip().split()[1].split(",")))
# (0, 0) (1, 0) (2, 0) (3, 0)
# (0, 1) (1, 1) (2, 1) (3, 1)
# (0, 2) (1, 2) (2, 2) (3, 2)
N, S, W, E = (0, -1), (0, 1), (-1, 0), (1, 0)
def find_node(node_grid, x, y, dir=(0, 0)):
dx, dy = dir
nx, ny = x + dx, y + dy
return node_grid.get(nx, dict()).get(ny, None)
class Node:
def __init__(self, x, y, c="@"):
self.x = x
self.y = y
self.c = c
self.geo_ind_cache = -1
def link_nei(self, node_grid):
fn = lambda dir: find_node(node_grid, self.x, self.y, dir)
self.n = fn(N)
self.s = fn(S)
self.w = fn(W)
self.e = fn(E)
self.adj = [x for x in [self.n, self.s, self.w, self.e] if x]
def __repr__(self):
return f"N({self.x}, {self.y}, {self.c})"
def geo_index(self):
if self.geo_ind_cache != -1:
return self.geo_ind_cache
geo_ind = -1
if self.x == self.y == 0:
geo_ind = 0
elif self.x == TARGET[0] and self.y == TARGET[1]:
geo_ind = 0
elif self.y == 0:
geo_ind = self.x * 16807
elif self.x == 0:
geo_ind = self.y * 48271
else:
geo_ind = self.w.erosion_level() * self.n.erosion_level()
self.geo_ind_cache = geo_ind
return geo_ind
def erosion_level(self):
return (self.geo_index() + DEPTH) % 20183
def determine_type(self):
t = self.erosion_level() % 3
if t == 0:
self.c = "."
if t == 1:
self.c = "="
if t == 2:
self.c = "|"
self.acc_tools = {".": "ct", "=": "cn", "|": "tn"}[self.c]
def make_grid():
nodes = []
nodes_grid = defaultdict(dict)
for y in range(TARGET[1] + 50):
for x in range(TARGET[0] * 6):
n = Node(x, y)
nodes_grid[x][y] = n
nodes.append(n)
for n in nodes:
n.link_nei(nodes_grid)
for n in nodes:
n.determine_type()
return nodes_grid
def calc_risk(nodes_grid):
risk = 0
risk_map = {".": 0, "=": 1, "|": 2}
for x in range(TARGET[0] + 1):
for y in range(TARGET[1] + 1):
risk += risk_map.get(nodes_grid[x][y].c, 0)
return risk
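# Part 2: iterative search over (x, y, tool) states; moving to an adjacent region
# costs 1 minute, switching tools costs 7 (a switch plus a move is encoded as +8).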
def quickest_path_time(nodes_grid):
past_workers = []
workers = [(0, 0, "t", 0)] # x y tool time
paths = dict()
found_target = False
time_to_target = -1
while workers:
new_workers = []
for x, y, tool, time in workers:
if not (paths.get((x, y, tool), 10e9) > time):
past_workers.append((x, y, tool, time))
continue
paths[(x, y, tool)] = time
if found_target and time > time_to_target:
past_workers.append((x, y, tool, time))
continue
if x == TARGET[0] and y == TARGET[1]:
past_workers.append((x, y, tool, time))
if found_target:
time_to_target = min(time_to_target, time)
else:
found_target = True
time_to_target = time
continue
# spawn new workers
this_node = nodes_grid[x][y]
for n in this_node.adj:
if tool in n.acc_tools:
new_workers.append((n.x, n.y, tool, time + 1))
else:
for acc_tool in n.acc_tools:
new_workers.append((n.x, n.y, acc_tool, time + 8))
workers = new_workers
times = [t for x, y, _, t in past_workers if x == TARGET[0] and y == TARGET[1]]
return times[-2]
def run_simulation():
nodes_grid = make_grid()
nodes_grid[TARGET[0]][TARGET[1]].acc_tools = "t"
return calc_risk(nodes_grid), quickest_path_time(nodes_grid)
part1, part2 = run_simulation()
print("1)", part1, "\n2)", part2)
```
#### File: advent-of-code-2018/day-9/main.py
```python
from collections import defaultdict as ddict
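# Marbles form a circular doubly-linked list; every 23rd marble is kept by the
# current player together with the marble 7 positions counter-clockwise.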
class Marble:
def __init__(self, v, prev, next):
self.v = v
self.prev = prev
self.next = next
def add_next(self, v):
m = Marble(v, self, self.next)
self.next.prev = m
self.next = m
return m
def remove(self):
self.prev.next = self.next
self.next.prev = self.prev
return self
def calc_max_score(players, last):
pp = ddict(int)
m0, m1 = Marble(0, None, None), Marble(1, None, None)
m0.next, m0.prev = m1, m1
m1.next, m1.prev = m0, m0
curr = m1
for i in range(2, last + 1):
pid = i % players
if i % 23 == 0:
pp[pid] += i
rem = curr.prev.prev.prev.prev.prev.prev.prev.remove()
pp[pid] += rem.v
curr = rem.next
else:
curr = curr.next.add_next(i)
return max(pp.items(), key=lambda x: x[1])[1]
players, last = 423, 71944
print("1)", calc_max_score(players, last))
print("2)", calc_max_score(players, last * 100))
``` |
{
"source": "jmolinski/advent-of-code-2019",
"score": 3
} |
#### File: advent-of-code-2019/day-11/solution.py
```python
from __future__ import annotations
from typing import Any, Callable, List, NoReturn, Optional, Tuple
class CPU:
def __init__(self, vm: VM) -> None:
self.vm = vm
self.mem = vm.mem
self.operations = {
1: (self.op_1, 4),
2: (self.op_2, 4),
3: (self.op_3, 2),
4: (self.op_4, 2),
5: (self.op_5, 2),
6: (self.op_6, 3),
7: (self.op_7, 4),
8: (self.op_8, 4),
99: (self.op_99, 0),
9: (self.op_9, 2),
}
self.relative_base = 0
def run(self, op: int) -> Callable[[int, List[int]], int]:
def wrapper(pos: int, modes: List[int]) -> int:
fn, length = self.operations[op]
ret = fn(pos, modes)
return pos + length if ret is None else ret
return wrapper
def mode_aware_get(self, mode: int, pos: int) -> int:
if mode == 0:
return self.mem[self.mem[pos]]
elif mode == 1:
return self.mem[pos]
else: # mode 2
return self.mem[self.relative_base + self.mem[pos]]
def mode_aware_set(self, mode: int, pos: int, val: int) -> None:
if mode == 0:
self.mem[self.mem[pos]] = val
elif mode == 1:
raise ValueError
else: # mode 2
self.mem[self.relative_base + self.mem[pos]] = val
def op_1(self, pos: int, modes: List[int]) -> None:
a = self.mode_aware_get(modes[0], pos + 1)
b = self.mode_aware_get(modes[1], pos + 2)
self.mode_aware_set(modes[2], pos + 3, a + b)
def op_2(self, pos: int, modes: List[int]) -> None:
a = self.mode_aware_get(modes[0], pos + 1)
b = self.mode_aware_get(modes[1], pos + 2)
self.mode_aware_set(modes[2], pos + 3, a * b)
def op_3(self, pos: int, modes: List[int]) -> Optional[int]:
if not self.vm.inputs:
if self.vm.halt_on_empty_input:
self.vm.halt = True
return pos
self.mode_aware_set(modes[0], pos + 1, self.vm.inputs.pop(0))
return None
def op_4(self, pos: int, modes: List[int]) -> None:
a = self.mode_aware_get(modes[0], pos + 1)
self.vm.outputs.append(a)
def op_5(self, pos: int, modes: List[int]) -> int:
a = self.mode_aware_get(modes[0], pos + 1)
b = self.mode_aware_get(modes[1], pos + 2)
return b if a != 0 else pos + 3
def op_6(self, pos: int, modes: List[int]) -> int:
a = self.mode_aware_get(modes[0], pos + 1)
b = self.mode_aware_get(modes[1], pos + 2)
return b if a == 0 else pos + 3
def op_7(self, pos: int, modes: List[int]) -> None:
a = self.mode_aware_get(modes[0], pos + 1)
b = self.mode_aware_get(modes[1], pos + 2)
self.mode_aware_set(modes[2], pos + 3, 1 if a < b else 0)
def op_8(self, pos: int, modes: List[int]) -> None:
a = self.mode_aware_get(modes[0], pos + 1)
b = self.mode_aware_get(modes[1], pos + 2)
self.mode_aware_set(modes[2], pos + 3, 1 if a == b else 0)
def op_9(self, pos: int, modes: List[int]) -> None:
a = self.mode_aware_get(modes[0], pos + 1)
self.relative_base += a
def op_99(self, pos: int, modes: List[int]) -> NoReturn:
raise StopIteration
class VM:
def __init__(self, mem: List[int], *, halt_on_empty_input=False) -> None:
self.mem = mem[:]
self.mem = self.mem + [0 for _ in range(20000)]
self.inputs: List[int] = []
self.halt_on_empty_input = halt_on_empty_input
self.halt = False
self.outputs: List[int] = []
self.p = 0
self.is_finished = False
self.cpu = CPU(self)
def exec(
self, inputs: List[int], *, init_p: int = 0
) -> Tuple[List[int], List[int]]:
self.inputs += inputs
self.p = init_p
try:
while True:
self.p = self.run_op(self.p)
if self.halt:
return self.mem, self.outputs
except StopIteration:
self.is_finished = True
return self.mem, self.outputs
def resume(self, inputs: List[int]) -> Tuple[List[int], List[int]]:
self.halt = False
return self.exec(inputs, init_p=self.p)
def run_op(self, p: int) -> int:
opcode = "0000" + str(self.mem[p])
op = int(opcode[-2:])
modes = [int(opcode[-3]), int(opcode[-4]), int(opcode[-5])]
return self.cpu.run(op)(p, modes)
def clear_output(self) -> None:
self.outputs = []
BLACK, WHITE = 0, 1
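# The Intcode program is fed the colour of the current panel and replies with
# (colour to paint, turn direction), where 0 means turn left and 1 means turn right.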
def run_robot(inp: List[int], starting_color: int) -> dict:
# Board indexing:
# (1, 0) (1, 1)
# (0, 0) (0, 1)
up, down, left, right = (1, 0), (-1, 0), (0, -1), (0, 1)
right_turn = {up: right, right: down, down: left, left: up}
left_turn = {up: left, left: down, down: right, right: up}
position = (0, 0)
move_vector = up
tile_colors = {position: starting_color}
vm = VM(inp, halt_on_empty_input=True)
vm.exec(inputs=[])
while not vm.is_finished:
vm.clear_output()
_, [color_to_paint, turn_direction] = vm.resume(
inputs=[tile_colors.get(position, BLACK)]
)
tile_colors[position] = color_to_paint
move_vector = (left_turn if turn_direction == 0 else right_turn)[move_vector]
position = (position[0] + move_vector[0], position[1] + move_vector[1])
return tile_colors
def part1(inp: List[int]) -> int:
return len(run_robot(inp, BLACK))
def part2(inp: List[int]) -> None:
tiles = run_robot(inp, WHITE)
min_x, max_x = min(x for (y, x) in tiles), max(x for (y, x) in tiles)
min_y, max_y = min(y for (y, x) in tiles), max(y for (y, x) in tiles)
for y in range(max_y, min_y - 1, -1):
for x in range(min_x, max_x + 1):
color = " "
if tiles.get((y, x)) == WHITE:
color = "[X]"
print(color, end="")
print()
def main() -> None:
with open("input.txt") as f:
inp = list(map(int, f.read().strip().split(",")))
print("Part 1:", part1(inp))
print("Part 2:")
part2(inp)
main()
```
#### File: advent-of-code-2019/day-12/solution.py
```python
import itertools
import math
from typing import Dict, Iterable, List, Set, Tuple, cast
def vec_sum(a: Iterable[int], b: Iterable[int]) -> Tuple[int, int, int]:
return cast(Tuple[int, int, int], tuple([x + y for (x, y) in zip(a, b)]))
def lcm(a: int, b: int) -> int:
return abs(a * b) // math.gcd(a, b)
def mkchange(a: int, b: int) -> int:
if a > b:
return -1
if a == b:
return 0
return 1
def umoon(
moon: Tuple[int, int, int],
moon_vel: Tuple[int, int, int],
moons: Set[Tuple[int, int, int]],
) -> Tuple[int, int, int]:
x, y, z = moon
xc, yc, zc = moon_vel
for m in moons:
dx, dy, dz = m
xc, yc, zc = xc + mkchange(x, dx), yc + mkchange(y, dy), zc + mkchange(z, dz)
return (xc, yc, zc)
def update_velocities(
v_m: Dict[Tuple[int, int, int], Tuple[int, int, int]]
) -> Dict[Tuple[int, int, int], Tuple[int, int, int]]:
ks = set(v_m.keys())
mp = {m: ks - {m} for m in ks}
return {m: umoon(m, v_m[m], mp[m]) for m in mp}
def energy(m: Tuple[int, int, int], v: Tuple[int, int, int]) -> int:
(x, y, z), (a, b, c) = m, v
pot = abs(x) + abs(y) + abs(z)
kin = abs(a) + abs(b) + abs(c)
return pot * kin
def part1(inp: List[Tuple[int, int, int]]) -> int:
v_m = {m: (0, 0, 0) for m in inp}
for step in range(1000):
v_m = update_velocities(v_m)
v_m = {vec_sum(m, v): v for (m, v) in v_m.items()} # apply velocity
return sum(energy(m, k) for m, k in v_m.items())
def coordinate_repeats_after(inp: Iterable[int]) -> int:
v_m = [(m, 0) for m in inp]
first = tuple(v_m)
for step in itertools.count(1):
v_m = [
(m, v + sum(mkchange(m, dx) for dx in moons))
for ((m, v), moons) in [(m, [x[0] for x in v_m if x != m]) for m in v_m]
]
v_m = [(m + v, v) for (m, v) in v_m] # apply velocity
if first == tuple(v_m) and step > 1:
return step
return -1
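# The x, y and z axes evolve independently, so the period of the full system is
# the LCM of the per-axis cycle lengths.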
def part2(inp: List[Tuple[int, int, int]]) -> int:
coords = list(itertools.chain.from_iterable(inp))
x, y, z = coords[::3], coords[1::3], coords[2::3]
return lcm(
coordinate_repeats_after(x),
lcm(coordinate_repeats_after(y), coordinate_repeats_after(z)),
)
def main() -> None:
inp = [(16, -11, 2), (0, -4, 7), (6, 4, -10), (-3, -2, -4)]
print("Part 1:", part1(inp))
print("Part 2:", part2(inp))
main()
```
#### File: advent-of-code-2019/day-14/solution.py
```python
from __future__ import annotations
from typing import Callable
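# Work backwards from the requested FUEL: a negative balance means a chemical still
# has to be produced; ORE is tallied separately as the total cost. Part 2 then
# binary-searches for the amount of FUEL producible from one trillion ORE.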
def calculate_cost(
recipes: dict[str, tuple[int, dict[str, int]]], wanted_fuel: int
) -> int:
ore = 0
have = {name: 0 for name in recipes}
have["FUEL"] = -wanted_fuel
while any(v < 0 for v in have.values()):
for name in have:
while have[name] < 0:
output, ingredients = recipes[name]
times = (-have[name]) // output
if times * output < (-have[name]):
times += 1
have[name] += output * times
for iname, iamount in ingredients.items():
if iname == "ORE":
ore += iamount * times
else:
have[iname] -= iamount * times
return ore
def input_pair(s: str) -> tuple[str, int]:
num, name = s.strip().split()
return name.strip(), int(num)
def binsearch(
value: int, fun: Callable[[int], int], low: int = 0, high: int = 100_000_000
) -> int:
midpoint = (low + high) // 2
v_midpoint = fun(midpoint)
if v_midpoint < value <= fun(midpoint + 1):
return midpoint
if v_midpoint > value:
return binsearch(value, fun, low, midpoint)
return binsearch(value, fun, midpoint, high)
def main() -> None:
with open("input.txt") as f:
inp = [[a.strip() for a in l.split("=>")] for l in f.read().strip().split("\n")]
recipes = {}
for inputs, output in inp:
name, amount = input_pair(output)
recipes[name] = (amount, dict(input_pair(i) for i in inputs.split(",")))
print("Part 1:", calculate_cost(recipes, wanted_fuel=1))
print(
"Part 2:",
binsearch(1_000_000_000_000, lambda a: calculate_cost(recipes, wanted_fuel=a)),
)
main()
```
#### File: advent-of-code-2019/day-1/solution.py
```python
from typing import List
def required_fuel(mass: int) -> int:
return max((mass // 3) - 2, 0)
def part1(inp: List[int]) -> int:
return sum(required_fuel(module) for module in inp)
def part2(inp: List[int]) -> int:
def fuel_for_module(m):
last_added = fuel = required_fuel(m)
while (to_add := required_fuel(last_added)) > 0:
fuel += to_add
last_added = to_add
return fuel
return sum(fuel_for_module(m) for m in inp)
def main() -> None:
with open("input.txt") as f:
inp = list(map(int, f.read().split()))
print("Part 1:", part1(inp))
print("Part 2:", part2(inp))
main()
```
#### File: advent-of-code-2019/day-2/solution.py
```python
from typing import List, NoReturn
def op_1(l: List[int], pos: int) -> int:
a = l[l[pos + 1]]
b = l[l[pos + 2]]
l[l[pos + 3]] = a + b
return 4
def op_2(l: List[int], pos: int) -> int:
a = l[l[pos + 1]]
b = l[l[pos + 2]]
l[l[pos + 3]] = a * b
return 4
def op_99(l: List[int], pos: int) -> NoReturn:
raise StopIteration
class VM:
operations = {
1: op_1,
2: op_2,
99: op_99,
}
def __init__(self, l: List[int]) -> None:
self.l = l[:]
def exec(self, noun: int, verb: int) -> List[int]:
self.l[1] = noun
self.l[2] = verb
p = 0
try:
while True:
p = self.run_op(p)
except StopIteration:
return self.l
def run_op(self, p: int) -> int:
return p + self.operations[self.l[p]](self.l, p)
def part1(inp: List[int]) -> int:
return VM(inp).exec(12, 2)[0]
def part2(inp: List[int]) -> int:
for noun in range(100):
for verb in range(100):
if VM(inp).exec(noun, verb)[0] == 19690720:
return 100 * noun + verb
raise ValueError
def main() -> None:
with open("input.txt") as f:
inp = list(map(int, f.read().strip().split(",")))
print("Part 1:", part1(inp))
print("Part 2:", part2(inp))
main()
```
#### File: advent-of-code-2019/day-3/solution.py
```python
from typing import List, Tuple, Set, Dict
def add_vec(v1: Tuple[int, int], v2: Tuple[int, int]) -> Tuple[int, int]:
return v1[0] + v2[0], v1[1] + v2[1]
def map_moves(steps: List[Tuple[str, int]]) -> Dict[Tuple[int, int], int]:
movement_vec = {
"U": (1, 0),
"D": (-1, 0),
"R": (0, 1),
"L": (0, -1),
}
pos, step = (0, 0), 1
visited: Dict[Tuple[int, int], int] = {}
for direction, length in steps:
for i in range(1, length + 1):
pos = add_vec(pos, movement_vec[direction])
visited[pos] = visited.get(pos, step)
step += 1
return visited
def part1(inp: List[List[Tuple[str, int]]]) -> int:
cable1, cable2, *a = inp
path1, path2 = map_moves(cable1), map_moves(cable2)
intersections = set(path1.keys()) & set(path2.keys())
distances = [abs(a) + abs(b) for (a, b) in intersections]
return min(distances)
def part2(inp: List[List[Tuple[str, int]]]) -> int:
cable1, cable2, *a = inp
path1, path2 = map_moves(cable1), map_moves(cable2)
combined_steps = {
length + path2[position]
for position, length in path1.items()
if position in path2
}
return min(combined_steps)
def main() -> None:
with open("input.txt") as f:
inp = [
list(map(lambda o: (o[0], int(o[1:])), x.strip().split(",")))
for x in f.readlines()
]
print("Part 1:", part1(inp))
print("Part 2:", part2(inp))
main()
``` |
{
"source": "jmolinski/djangorestframework-stubs",
"score": 2
} |
#### File: djangorestframework-stubs/scripts/ignored.py
```python
MOCK_OBJECTS = ['MockRequest', 'MockView', 'MockTimezone', 'MockLazyStr', 'MockQueryset']
IGNORED_ERRORS = [
*MOCK_OBJECTS,
'Need type annotation for',
'URLPattern', # moved in django 2.0+
'URLResolver', # moved in django 2.0+
'Invalid signature "def (self: Any) -> Any"',
'already defined on line',
'already defined (possibly by an import)',
# 'variable has type Module',
# 'Invalid base class',
# 'Invalid type "self"',
# re.compile(r'Item "None" of "Optional\[[a-zA-Z0-9]+\]" has no attribute'),
# 'Optional[List[_Record]]',
# '"Callable[..., Any]" has no attribute "initkwargs"',
# 'Cannot assign to a type',
# 'Cannot assign to a method',
# '"Type[NonTimeThrottle]" has no attribute "called"',
# 'BaseTokenAuthTests',
# re.compile(r'Dict entry [0-9] has incompatible type "[a-zA-Z]+": "None"; expected "object": "bool"'),
# 'Incompatible types in assignment (expression has type "None", variable has type "List[Any]")',
# 'Value of type "Optional[str]" is not indexable',
# 'Argument 1 to "QueryDict" has incompatible type "Dict[<nothing>, <nothing>]";'
# 'Argument "queryset" to "BaseUniqueForValidator" has incompatible type "object"; expected "QuerySet[Any]"',
# 'has incompatible type "Dict[<nothing>, <nothing>]"; expected "Request"',
# 'Argument 1 to "render" has incompatible type "Dict[<nothing>, <nothing>]"; expected "HttpRequest"',
'Cannot infer type of lambda',
'"type" has no attribute',
]
``` |
{
"source": "jmolinski/synacor-challenge",
"score": 2
} |
#### File: synacor-challenge/python/vm_structs.py
```python
from collections import namedtuple
Token = namedtuple('Token', 'raw, value, stbyte, ndbyte')
OP_ARITY = {
0: 0, 1: 2, 2: 1, 3: 1, 4: 3, 5: 3,
6: 1, 7: 2, 8: 2, 9: 3, 10: 3, 11: 3,
12: 3, 13: 3, 14: 2, 15: 2, 16: 2,
17: 1, 18: 0, 19: 1, 20: 1, 21: 0
}
class TokenTree:
@classmethod
def from_bytes(clx, b):
return TokenTree(clx.tokenize(b))
@staticmethod
def parse_token(b):
value = int.from_bytes(b, byteorder='little')
        stbyte, ndbyte = value & 0xFF, value >> 8  # low byte, high byte of the 16-bit word
return Token(b, value, stbyte, ndbyte)
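    # Example: the byte pair b"\x34\x12" is the little-endian word 0x1234, so
    # value == 0x1234, stbyte (low byte) == 0x34 and ndbyte (high byte) == 0x12.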
@classmethod
def tokenize(clx, rawcode, token_len=2):
return [clx.parse_token(rawcode[i:i + token_len])
for i in range(0, len(rawcode), token_len)]
def __init__(self, tokens, start=0):
self.tokens = tokens
self.position = 0
def get_op_with_args(self):
op = self.take(1)[0]
args = self.take(OP_ARITY[op.value])
return op, args
def take(self, n=1):
r = self.tokens[self.position:self.position + n]
self.position += n
return r
def check(self, n=1):
return self.tokens[self.position:self.position + n]
def has_next(self):
return self.position != len(self.tokens)
def nth(self, n):
return self.tokens[n]
def jump_to(self, p):
self.position = p
def update_token(self, pos, val):
        stbyte, ndbyte = val & 0xFF, val >> 8  # low byte, high byte of the 16-bit word
self.tokens[pos] = Token(b'', val, stbyte, ndbyte)
class Memory:
def __init__(self):
self.stack = []
self.registers = {}
``` |
{
"source": "jmolinski/traktpy",
"score": 2
} |
#### File: tests/inferfaces_tests/test_comments.py
```python
import pytest
from tests.test_data.comments import (
ATTACHED_SHOW,
COMMENT,
LIKED_USER,
TRENDING_COMMENTS,
)
from tests.test_data.movies import MOVIE1
from tests.test_data.shows import SHOW
from tests.utils import get_last_req, mk_mock_client
from trakt.core.exceptions import ArgumentError
from trakt.core.json_parser import parse_tree
from trakt.core.paths.response_structs import Comment, Sharing
PAG_H = {"X-Pagination-Page-Count": 1}
@pytest.fixture
def comments_client():
return mk_mock_client(
{
r".*comments/.*/item.*": [ATTACHED_SHOW, 200],
r".*comments/.*/likes.*": [[LIKED_USER], 200, PAG_H],
r".*comments/trending.*": [TRENDING_COMMENTS, 200, PAG_H],
r".*comments/recent.*": [TRENDING_COMMENTS, 200, PAG_H],
r".*comments/updates.*": [TRENDING_COMMENTS, 200, PAG_H],
}
)
def test_post_comment():
client = mk_mock_client({".*comments.*": [COMMENT, 201]})
text = "a b c d e f" # at least 5 words
sharing = Sharing(twitter=True)
comment = client.comments.post_comment(
item=123, comment=text, spoiler=True, sharing=sharing
)
assert comment.id == COMMENT["id"]
def test_get_comment():
client = mk_mock_client({".*comments.*": [COMMENT, 200]})
comment = parse_tree(COMMENT, Comment)
comment = client.comments.get_comment(id=comment)
assert comment.user.name == COMMENT["user"]["name"]
def test_update_comment():
client = mk_mock_client({".*comments.*": [COMMENT, 200]})
with pytest.raises(ArgumentError):
client.comments.update_comment(id=123, comment="a b")
comment = client.comments.update_comment(id=123, comment="a b c d e f")
assert comment.replies == COMMENT["replies"]
def test_delete_comment():
client = mk_mock_client({".*comments.*": [{}, 204]})
client.comments.delete_comment(id=123)
assert get_last_req(client.http)["method"] == "DELETE"
def test_get_replies():
client = mk_mock_client({".*comments.*": [[COMMENT], 200, PAG_H]})
comments = list(client.comments.get_replies(id=123))
assert comments[0].id == COMMENT["id"]
def test_post_reply():
client = mk_mock_client({".*comments.*": [COMMENT, 201]})
reply = client.comments.post_reply(id=123, comment="a b c d e f")
assert reply.id == COMMENT["id"]
def test_get_item(comments_client):
item = comments_client.comments.get_item(id=123)
assert item.type == "show"
assert item.show.ids.trakt == SHOW["ids"]["trakt"]
def test_get_users(comments_client):
likes = list(comments_client.comments.get_likes(id=123))
assert likes[0].user.username == LIKED_USER["user"]["username"]
def test_like_comment():
client = mk_mock_client({".*comments.*": [{}, 200]})
client.comments.like_comment(id=50)
assert get_last_req(client.http)["method"] == "POST"
def test_remove_like():
client = mk_mock_client({".*comments.*": [{}, 204]})
client.comments.remove_like(id=50)
assert get_last_req(client.http)["method"] == "DELETE"
def test_trending(comments_client):
comments = list(comments_client.comments.get_trending(include_replies=True))
assert comments[0].type == "movie"
def test_recently_created(comments_client):
comments = list(comments_client.comments.get_recently_created(sort="newest"))
assert comments[0].movie.title == MOVIE1["title"]
def test_recently_updated(comments_client):
comments = list(comments_client.comments.get_recently_updated(type="movies"))
assert comments[0].comment.id == TRENDING_COMMENTS[0]["comment"]["id"]
```
#### File: tests/inferfaces_tests/test_movies.py
```python
import pytest
from tests.test_data.comments import COMMENTS
from tests.test_data.lists import LIST
from tests.test_data.movies import (
ALIASES,
ANTICIPATED_MOVIES,
BOX_OFFICE,
EXTENDED_MOVIE,
MOVIE_STATS,
MOVIES,
PLAYED_MOVIES,
RATINGS,
RELATED_MOVIES,
RELEASES,
TRANSLATIONS,
TRENDING_MOVIES,
UPDATED_MOVIES,
)
from tests.test_data.people import MOVIE_ALL_PEOPLE
from tests.test_data.user import USER
from tests.utils import mk_mock_client
from trakt.core.exceptions import ArgumentError
@pytest.fixture
def movies_client():
PAG_H = {"X-Pagination-Page-Count": 1}
return mk_mock_client(
{
r".*movies/trending.*": [TRENDING_MOVIES, 200, PAG_H],
r".*movies/popular.*": [MOVIES, 200, PAG_H],
r".*movies/played.*": [PLAYED_MOVIES, 200, PAG_H],
r".*movies/watched.*": [PLAYED_MOVIES, 200, PAG_H],
r".*movies/collected.*": [PLAYED_MOVIES, 200, PAG_H],
r".*movies/anticipated.*": [ANTICIPATED_MOVIES, 200, PAG_H],
r".*movies/boxoffice.*": [BOX_OFFICE, 200],
r".*movies/updates.*": [UPDATED_MOVIES, 200, PAG_H],
r".*movies/.*/aliases.*": [ALIASES, 200],
r".*movies/.*/releases.*": [RELEASES, 200],
r".*movies/.*/translations.*": [TRANSLATIONS, 200],
r".*movies/.*/comments.*": [COMMENTS, 200, PAG_H],
r".*movies/.*/lists.*": [[LIST], 200, PAG_H],
r".*movies/.*/people.*": [MOVIE_ALL_PEOPLE, 200, PAG_H],
r".*movies/.*/ratings.*": [RATINGS, 200],
r".*movies/.*/related.*": [RELATED_MOVIES, 200, PAG_H],
r".*movies/.*/stats.*": [MOVIE_STATS, 200],
r".*movies/.*/watching.*": [[USER], 200],
}
)
def test_trending(movies_client):
movies = list(movies_client.movies.get_trending())
assert len(movies) == 2
assert movies[0].watchers == TRENDING_MOVIES[0]["watchers"]
def test_popular(movies_client):
movies = list(movies_client.movies.get_popular())
assert len(movies) == 2
assert movies[0].title == MOVIES[0]["title"]
def test_played(movies_client):
movies = list(movies_client.movies.get_most_played(countries="us", period="weekly"))
assert len(movies) == 2
assert movies[0].watcher_count == PLAYED_MOVIES[0]["watcher_count"]
with pytest.raises(ArgumentError):
movies_client.movies.get_most_played(countries="xyz")
with pytest.raises(ArgumentError):
movies_client.movies.get_most_played(period="xyz")
def test_watched(movies_client):
movies = list(movies_client.movies.get_most_watched(genres="de"))
assert movies[0].play_count == PLAYED_MOVIES[0]["play_count"]
def test_collected(movies_client):
movies = list(movies_client.movies.get_most_collected(period="all"))
assert movies[0].movie.title == PLAYED_MOVIES[0]["movie"]["title"]
def test_anticipated(movies_client):
movies = list(movies_client.movies.get_most_anticipated())
assert movies[0].list_count == ANTICIPATED_MOVIES[0]["list_count"]
def test_box_office(movies_client):
movies = list(movies_client.movies.get_box_office())
assert movies[0].revenue == BOX_OFFICE[0]["revenue"]
def test_updated(movies_client):
movies = list(movies_client.movies.get_recently_updated(start_date="2012-05-05"))
assert movies[0].movie.title == UPDATED_MOVIES[0]["movie"]["title"]
with pytest.raises(ArgumentError):
movies_client.movies.get_recently_updated(start_date="2012-14-5")
def test_summary():
client = mk_mock_client({".*movies.*": [EXTENDED_MOVIE, 200]})
movie = client.movies.get_summary(movie=123, extended=True)
assert movie.homepage == EXTENDED_MOVIE["homepage"]
def test_aliases(movies_client):
aliases = movies_client.movies.get_aliases(movie=123)
assert aliases[0].title == ALIASES[0]["title"]
def test_releases(movies_client):
releases = movies_client.movies.get_releases(movie=123, country="us")
assert releases[0].release_type == RELEASES[0]["release_type"]
def test_translations(movies_client):
translations = movies_client.movies.get_translations(movie=123, language="de")
assert translations[0].title == TRANSLATIONS[0]["title"]
def test_comments(movies_client):
with pytest.raises(ArgumentError):
movies_client.movies.get_comments(movie=123, sort="xtz")
comments = list(movies_client.movies.get_comments(movie=123))
assert comments[0].comment == COMMENTS[0]["comment"]
def test_lists(movies_client):
lists = list(movies_client.movies.get_lists(movie=123))
assert lists[0].comment_count == LIST["comment_count"]
def test_get_people(movies_client):
people = movies_client.movies.get_people(movie=123)
assert people.cast[0].character == MOVIE_ALL_PEOPLE["cast"][0]["character"]
def test_ratings(movies_client):
ratings = movies_client.movies.get_ratings(movie=123)
assert ratings.rating == RATINGS["rating"]
def test_related(movies_client):
related = list(movies_client.movies.get_related(movie=123))
assert related[0].title == RELATED_MOVIES[0]["title"]
def test_stats(movies_client):
stats = movies_client.movies.get_stats(movie=123)
assert stats.watchers == MOVIE_STATS["watchers"]
def test_watching(movies_client):
watching = list(movies_client.movies.get_users_watching(movie=123))
assert watching[0].name == USER["name"]
```
#### File: tests/inferfaces_tests/test_people.py
```python
import pytest
from tests.test_data.lists import LIST
from tests.test_data.people import MOVIE_CREDITS, PERSON, SHOW_CREDITS
from tests.utils import mk_mock_client
from trakt.core.exceptions import ArgumentError
from trakt.core.json_parser import parse_tree
from trakt.core.models import Person
def test_get_person():
client = mk_mock_client({r".*people.*": [PERSON, 200]})
person = parse_tree(PERSON, Person)
with pytest.raises(ArgumentError):
client.people.get_person(person=0.5)
assert client.people.get_person(person=person.ids.trakt).name == PERSON["name"]
assert client.people.get_person(person=person).name == PERSON["name"]
def test_get_movie_credits():
client = mk_mock_client({r".*people.*": [MOVIE_CREDITS, 200]})
credits = client.people.get_movie_credits(person=123)
assert credits.cast[0].character == MOVIE_CREDITS["cast"][0]["character"]
def test_get_show_credits():
client = mk_mock_client({r".*people.*": [SHOW_CREDITS, 200]})
credits = client.people.get_show_credits(person=123)
expected = SHOW_CREDITS["crew"]["production"][0]["job"]
assert credits.crew.production[0].job == expected
def test_get_lists():
client = mk_mock_client({r".*people.*": [[LIST], 200]})
lists = list(client.people.get_lists(person=123))
assert len(lists) == 1
assert lists[0].name == LIST["name"]
```
#### File: tests/inferfaces_tests/test_scrobble.py
```python
import pytest
from tests.test_data.scrobble import EPISODE, MOVIE1, RESP_EPISODE, RESP_MOVIE, SHOW
from tests.utils import mk_mock_client
from trakt.core.exceptions import ArgumentError
from trakt.core.json_parser import parse_tree
from trakt.core.models import Episode, Movie, Show
def test_start_scrobble_movie():
client = mk_mock_client({".*scrobble.*": [RESP_MOVIE, 201]})
movie = parse_tree(MOVIE1, Movie)
episode = parse_tree(EPISODE, Episode)
with pytest.raises(ArgumentError):
client.scrobble.start_scrobble(progress=5)
with pytest.raises(ArgumentError):
client.scrobble.start_scrobble(progress=5, episode=episode, movie=movie)
resp = client.scrobble.start_scrobble(movie=movie, progress=5)
assert resp.movie.title == MOVIE1["title"]
def test_start_scrobble_episode():
client = mk_mock_client({".*scrobble.*": [RESP_EPISODE, 201]})
episode = parse_tree(EPISODE, Episode)
show = parse_tree(SHOW, Show)
resp = client.scrobble.start_scrobble(episode=episode, show=show, progress=5)
assert resp.show.title == SHOW["title"]
def test_pause_scrobble_movie():
client = mk_mock_client({".*scrobble.*": [RESP_MOVIE, 201]})
movie = parse_tree(MOVIE1, Movie)
episode = parse_tree(EPISODE, Episode)
with pytest.raises(ArgumentError):
client.scrobble.pause_scrobble(progress=5)
with pytest.raises(ArgumentError):
client.scrobble.pause_scrobble(progress=5, episode=episode, movie=movie)
resp = client.scrobble.pause_scrobble(movie=movie, progress=5)
assert resp.movie.title == MOVIE1["title"]
def test_pause_scrobble_episode():
client = mk_mock_client({".*scrobble.*": [RESP_EPISODE, 201]})
resp = client.scrobble.pause_scrobble(episode=123, show=123, progress=5)
assert resp.show.title == SHOW["title"]
def test_stop_scrobble_movie():
client = mk_mock_client({".*scrobble.*": [RESP_MOVIE, 201]})
with pytest.raises(ArgumentError):
client.scrobble.stop_scrobble(progress=5)
with pytest.raises(ArgumentError):
client.scrobble.stop_scrobble(progress=5, episode=123, movie=123)
resp = client.scrobble.stop_scrobble(movie=123, progress=5)
assert resp.movie.title == MOVIE1["title"]
def test_stop_scrobble_episode():
client = mk_mock_client({".*scrobble.*": [RESP_EPISODE, 201]})
resp = client.scrobble.stop_scrobble(episode=123, show=123, progress=5)
assert resp.show.title == SHOW["title"]
```
#### File: core/components/cache.py
```python
from __future__ import annotations
from datetime import datetime, timedelta
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict
if TYPE_CHECKING: # pragma: no cover
from trakt.api import TraktApi
class CacheLevel(Enum):
NO = "no"
BASIC = "basic"
FULL = "full"
class CacheManager:
client: TraktApi
_cache: Dict[FrozenRequest, datetime]
CACHE_LEVELS = (CacheLevel.NO, CacheLevel.BASIC, CacheLevel.FULL)
def __init__(self, client: TraktApi) -> None:
self.client = client
self._cache = {}
def accepted_level(self, level: CacheLevel) -> bool:
max_allowed = CacheLevel(self.client.config["cache"]["cache_level"])
if level == CacheLevel.NO:
return False
elif level == CacheLevel.BASIC:
return max_allowed in {CacheLevel.BASIC, CacheLevel.FULL}
else: # "full"
return max_allowed == CacheLevel.FULL
def get(self, wanted: FrozenRequest) -> FrozenRequest:
if not self.has(wanted):
raise LookupError("Request not in cache")
return [r for r in self._cache.keys() if r == wanted][0]
def set(self, req: FrozenRequest) -> None:
cache_timeout = self.client.config["cache"]["timeout"]
valid_till = datetime.now() + timedelta(seconds=cache_timeout)
self._cache[req] = valid_till
def has(self, req: FrozenRequest) -> bool:
if req not in self._cache:
return False
valid_till = self._cache[req]
if datetime.now() > valid_till:
del self._cache[req]
return False
return True
class FrozenRequest:
def __init__(
self,
path: str,
query_args: Dict[str, str],
headers: Dict[str, str],
response: Any = None,
) -> None:
self.path = path
self.query_args = query_args
self.headers = headers
self.response = response
@property
def _unique_id(self) -> str:
qargs_repr = repr(sorted(self.query_args.items()))
headers_repr = repr(sorted(self.headers.items()))
return self.path + qargs_repr + headers_repr
def __hash__(self):
return hash(self._unique_id)
def __eq__(self, other: Any) -> bool:
if isinstance(other, FrozenRequest):
return self._unique_id == other._unique_id
return False # pragma: no cover
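# Illustrative sketch (not part of the library): two FrozenRequest objects with the same
# path, query args and headers hash and compare equal regardless of the attached response,
# which is what lets CacheManager look a cached request up again.
def _frozen_request_sketch() -> None:
    a = FrozenRequest("movies/trending", {"page": "1"}, {"trakt-api-version": "2"})
    b = FrozenRequest("movies/trending", {"page": "1"}, {"trakt-api-version": "2"}, response=["cached"])
    assert a == b and hash(a) == hash(b)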
```
#### File: paths/endpoint_mappings/comments.py
```python
from __future__ import annotations
from dataclasses import asdict
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from trakt.core.models import Episode, Movie, Season, Show
from trakt.core.paths.path import Path
from trakt.core.paths.response_structs import (
Comment,
CommentAndItem,
CommentItemOnly,
CommentLiker,
CommentResponse,
Sharing,
)
from trakt.core.paths.suite_interface import SuiteInterface
from trakt.core.paths.validators import (
AuthRequiredValidator,
PerArgValidator,
Validator,
)
if TYPE_CHECKING: # pragma: no cover
from trakt.core.executors import PaginationIterator
COMMENT_TEXT_VALIDATOR = PerArgValidator(
"comment", lambda c: isinstance(c, str) and len(c.split(" ")) > 4
)
COMMENT_ID_VALIDATOR = PerArgValidator("id", lambda c: isinstance(c, int))
COMMENT_TYPES = ["all", "reviews", "shouts"]
MEDIA_TYPES = ["all", "movies", "shows", "seasons", "episodes", "lists"]
TRENDING_RECENT_UPDATED_VALIDATORS: List[Validator] = [
PerArgValidator("comment_type", lambda c: c in COMMENT_TYPES),
PerArgValidator("type", lambda c: c in MEDIA_TYPES),
PerArgValidator("include_replies", lambda i: isinstance(i, bool)),
]
class CommentsI(SuiteInterface):
name = "comments"
paths = {
"post_comment": Path(
"comments",
CommentResponse,
methods=["POST"],
validators=[AuthRequiredValidator(), COMMENT_TEXT_VALIDATOR],
),
"get_comment": Path("comments/!id", Comment, validators=[COMMENT_ID_VALIDATOR]),
"update_comment": Path(
"comments/!id",
Comment,
methods="PUT",
validators=[COMMENT_ID_VALIDATOR, COMMENT_TEXT_VALIDATOR],
),
"delete_comment": Path(
"comments/!id", {}, methods="DELETE", validators=[COMMENT_ID_VALIDATOR]
),
"get_replies": Path(
"comments/!id/replies",
[Comment],
validators=[COMMENT_ID_VALIDATOR],
pagination=True,
),
"post_reply": Path(
"comments/!id/replies",
Comment,
validators=[
AuthRequiredValidator(),
COMMENT_ID_VALIDATOR,
COMMENT_TEXT_VALIDATOR,
],
),
"get_item": Path(
"comments/!id/item",
CommentItemOnly,
validators=[COMMENT_ID_VALIDATOR],
extended=["full"],
),
"get_likes": Path(
"comments/!id/likes",
[CommentLiker],
validators=[COMMENT_ID_VALIDATOR],
pagination=True,
),
"like_comment": Path(
"comments/!id/like",
{},
methods=["POST"],
validators=[AuthRequiredValidator(), COMMENT_ID_VALIDATOR],
),
"remove_like": Path(
"comments/!id/like",
{},
methods=["DELETE"],
validators=[AuthRequiredValidator(), COMMENT_ID_VALIDATOR],
),
"get_trending": Path(
"comments/trending/?comment_type/?type",
[CommentAndItem],
validators=TRENDING_RECENT_UPDATED_VALIDATORS,
pagination=True,
extended=["full"],
),
"get_recently_created": Path(
"comments/recent/?comment_type/?type",
[CommentAndItem],
validators=TRENDING_RECENT_UPDATED_VALIDATORS,
pagination=True,
extended=["full"],
),
"get_recently_updated": Path(
"comments/updates/?comment_type/?type",
[CommentAndItem],
validators=TRENDING_RECENT_UPDATED_VALIDATORS,
pagination=True,
extended=["full"],
),
}
def post_comment(
self,
*,
item: Union[str, int, Movie, Season, Show, Episode],
comment: str,
spoiler: bool = False,
sharing: Optional[Union[Sharing, Dict[str, bool]]] = None,
**kwargs
) -> CommentResponse:
body: Dict[str, Union[str, int, Dict[str, bool]]] = {
"item_id": self._generic_get_id(item),
"comment": comment,
"spoiler": spoiler,
}
if sharing:
if isinstance(sharing, Sharing):
sharing = asdict(sharing)
body["sharing"] = sharing
return self.run("post_comment", **kwargs, body=body, comment=comment)
def get_comment(self, *, id: Union[Comment, str, int], **kwargs) -> Comment:
id = int(self._generic_get_id(id))
return self.run("get_comment", **kwargs, id=id)
def update_comment(
self,
*,
id: Union[Comment, str, int],
comment: str,
spoiler: bool = False,
**kwargs
) -> Comment:
body = {"id": self._generic_get_id(id), "comment": comment, "spoiler": spoiler}
return self.run("update_comment", **kwargs, body=body, id=id, comment=comment)
def delete_comment(self, *, id: Union[Comment, str, int], **kwargs) -> None:
id = int(self._generic_get_id(id))
self.run("delete_comment", **kwargs, id=id)
def get_replies(
self, *, id: Union[Comment, str, int], **kwargs
) -> PaginationIterator[Comment]:
id = int(self._generic_get_id(id))
return self.run("get_replies", **kwargs, id=id)
def post_reply(
self,
*,
id: Union[Comment, str, int],
comment: str,
spoiler: bool = False,
**kwargs
) -> PaginationIterator[Comment]:
id = int(self._generic_get_id(id))
body = {"comment": comment, "spoiler": spoiler}
return self.run("post_reply", **kwargs, id=id, body=body, comment=comment)
def get_item(self, *, id: Union[Comment, str, int], **kwargs) -> CommentItemOnly:
id = int(self._generic_get_id(id))
return self.run("get_item", **kwargs, id=id)
def get_likes(
self, *, id: Union[Comment, str, int], **kwargs
) -> List[CommentLiker]:
id = int(self._generic_get_id(id))
return self.run("get_likes", **kwargs, id=id)
def like_comment(self, *, id: Union[Comment, str, int], **kwargs) -> None:
id = int(self._generic_get_id(id))
self.run("like_comment", **kwargs, id=id)
def remove_like(self, *, id: Union[Comment, str, int], **kwargs) -> None:
id = int(self._generic_get_id(id))
self.run("remove_like", **kwargs, id=id)
def get_trending(
self,
*,
comment_type: str = "all",
type: str = "all",
include_replies: bool = False,
**kwargs
) -> List[CommentAndItem]:
return self.run(
"get_trending",
**kwargs,
comment_type=comment_type,
type=type,
include_replies=include_replies
)
def get_recently_created(
self,
*,
comment_type: str = "all",
type: str = "all",
include_replies: bool = False,
**kwargs
) -> List[CommentAndItem]:
return self.run(
"get_recently_created",
**kwargs,
comment_type=comment_type,
type=type,
include_replies=include_replies
)
def get_recently_updated(
self,
*,
comment_type: str = "all",
type: str = "all",
include_replies: bool = False,
**kwargs
) -> List[CommentAndItem]:
return self.run(
"get_recently_updated",
**kwargs,
comment_type=comment_type,
type=type,
include_replies=include_replies
)
```
#### File: paths/endpoint_mappings/scrobble.py
```python
from typing import Any, Dict, Optional, Union
from trakt.core.exceptions import ArgumentError
from trakt.core.models import Episode, Movie
from trakt.core.paths.path import Path
from trakt.core.paths.response_structs import EpisodeScrobble, MovieScrobble, Show
from trakt.core.paths.suite_interface import SuiteInterface
from trakt.core.paths.validators import AuthRequiredValidator, PerArgValidator
PROGRESS_VALIDATOR = PerArgValidator(
"progress", lambda p: isinstance(p, (int, float)) and 100 >= p >= 0
)
class ScrobbleI(SuiteInterface):
name = "scrobble"
base_paths = {
"start_scrobble_movie": ["start", MovieScrobble],
"start_scrobble_episode": ["start", EpisodeScrobble],
"pause_scrobble_movie": ["pause", MovieScrobble],
"pause_scrobble_episode": ["pause", EpisodeScrobble],
"stop_scrobble_movie": ["stop", MovieScrobble],
"stop_scrobble_episode": ["stop", EpisodeScrobble],
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for k, r in self.base_paths.items():
self.paths[k] = self._make_path(*r)
def _make_path(self, resource_path: str, return_type: Any) -> Path:
return Path(
self.name + "/" + resource_path,
return_type,
validators=[AuthRequiredValidator(), PROGRESS_VALIDATOR],
)
def start_scrobble(
self,
*,
movie: Optional[Union[Movie, Dict[str, Any]]] = None,
episode: Optional[Union[Episode, Dict[str, Any]]] = None,
show: Optional[Union[Show, Dict[str, Any]]] = None,
progress: float,
**kwargs: Any,
) -> Union[MovieScrobble, EpisodeScrobble]:
if movie and episode:
raise ArgumentError("you must provide exactly one of: [episode, movie]")
if movie:
return self.start_scrobble_movie(movie=movie, progress=progress, **kwargs)
elif episode:
return self.start_scrobble_episode(
episode=episode, show=show, progress=progress, **kwargs
)
else:
raise ArgumentError("you must provide exactly one of: [episode, movie]")
def start_scrobble_movie(
self, *, movie: Union[Movie, Dict[str, Any]], progress: float, **kwargs
) -> MovieScrobble:
data = self._prepare_movie_data(movie=movie, progress=progress)
return self.run("start_scrobble_movie", **kwargs, body=data, progress=progress)
def start_scrobble_episode(
self, *, episode: Union[Episode, Dict[str, Any]], progress: float, **kwargs
) -> EpisodeScrobble:
data = self._prepare_episode_data(episode, progress, show=kwargs.get("show"))
return self.run(
"start_scrobble_episode", **kwargs, body=data, progress=progress
)
def pause_scrobble(
self,
*,
movie: Optional[Union[Movie, Dict[str, Any]]] = None,
episode: Optional[Union[Episode, Dict[str, Any]]] = None,
show: Optional[Union[Show, Dict[str, Any]]] = None,
**kwargs: Any,
) -> Union[MovieScrobble, EpisodeScrobble]:
if movie and episode:
raise ArgumentError("you must provide exactly one of: [episode, movie]")
if movie:
return self.pause_scrobble_movie(movie=movie, **kwargs)
elif episode:
return self.pause_scrobble_episode(episode=episode, show=show, **kwargs)
else:
raise ArgumentError("you must provide exactly one of: [episode, movie]")
def pause_scrobble_movie(
self, *, movie: Union[Movie, Dict[str, Any]], progress: float, **kwargs
) -> MovieScrobble:
data = self._prepare_movie_data(movie=movie, progress=progress)
return self.run("pause_scrobble_movie", **kwargs, body=data, progress=progress)
def pause_scrobble_episode(
self, *, episode: Union[Episode, Dict[str, Any]], progress: float, **kwargs
) -> EpisodeScrobble:
data = self._prepare_episode_data(episode, progress, show=kwargs.get("show"))
return self.run(
"pause_scrobble_episode", **kwargs, body=data, progress=progress
)
def stop_scrobble(
self,
*,
movie: Optional[Union[Movie, Dict[str, Any]]] = None,
episode: Optional[Union[Episode, Dict[str, Any]]] = None,
show: Optional[Union[Show, Dict[str, Any]]] = None,
**kwargs: Any,
) -> Union[MovieScrobble, EpisodeScrobble]:
if movie and episode:
raise ArgumentError("you can either provide episode or movie, not both")
if movie:
return self.stop_scrobble_movie(movie=movie, **kwargs)
elif episode:
return self.stop_scrobble_episode(episode=episode, show=show, **kwargs)
else:
raise ArgumentError("missing both episode and movie arguments")
def stop_scrobble_movie(
self, *, movie: Union[Movie, Dict[str, Any]], progress: float, **kwargs
) -> MovieScrobble:
data = self._prepare_movie_data(movie=movie, progress=progress)
return self.run("stop_scrobble_movie", **kwargs, body=data, progress=progress)
def stop_scrobble_episode(
self, *, episode: Union[Episode, Dict[str, Any]], progress: float, **kwargs
) -> EpisodeScrobble:
data = self._prepare_episode_data(episode, progress, show=kwargs.get("show"))
return self.run("stop_scrobble_episode", **kwargs, body=data, progress=progress)
def _prepare_episode_data(
self,
episode: Union[Episode, Dict[str, Any]],
progress: float,
show: Optional[Union[Show, int, str]] = None,
) -> Dict[str, Any]:
data: Dict[str, Any] = {"progress": progress}
if isinstance(episode, Episode):
episode = {"ids": {"trakt": self._generic_get_id(episode)}}
data["episode"] = episode
if show:
if isinstance(show, Show):
data["show"] = {"ids": {"trakt": self._generic_get_id(show)}}
else:
data["show"] = show
return data
def _prepare_movie_data(
self, progress: float, movie: Union[Movie, Dict[str, Any]]
) -> Dict[str, Any]:
data: Dict[str, Any] = {"progress": progress}
if isinstance(movie, Movie):
data["movie"] = {"ids": {"trakt": self._generic_get_id(movie)}}
else:
data["movie"] = movie
return data
``` |
{
"source": "jmolinski/youtube-rss",
"score": 2
} |
#### File: platforma/platforma/send_draft.py
```python
import io
import subprocess
import xmlrpc
import xmlrpc.client
from datetime import datetime
from typing import Optional
from django.conf import settings
from django.utils.text import slugify
import requests
from PIL import Image
from platforma.platforma import models
from platforma.platforma.services import get_file_data
def send_drafts():
if not settings.DRAFTS_ENABLED:
return
    # sort so that the oldest episodes go first
eps = models.Episode.objects.filter(draft_posted=False)
eps = [e for e in eps if e.is_visible()]
eps_dates = [(e, get_file_data(e.get_filename())["upload_date"]) for e in eps]
eps = [e[0] for e in sorted(eps_dates, key=lambda x: x[1])]
for ep in eps:
if ep.is_visible():
send_draft(ep)
def image_to_byte_array(image, format="jpeg"):
byte_stream = io.BytesIO()
image.save(byte_stream, format=format)
return byte_stream.getvalue()
def get_cropped_image_bytes(url):
r = requests.get(url, stream=True)
if not r.status_code == 200:
return
try:
image = Image.open(io.BytesIO(r.content))
w, h = image.size
w_ratio = w / 846
resized = image.resize((int(w // w_ratio), int(h // w_ratio)))
        cropped = resized.crop((0, 0, 846, 256))  # may crash
return image_to_byte_array(cropped)
except Exception as e:
print("Failed to crop thumbnail")
        print("Error details:", str(e), repr(e))
return
def upload_thumbnail(thumbnail_url, episode, client):
if not thumbnail_url:
print("No thumbnail data for episode", episode.youtube_id)
return
thumbnail_url = thumbnail_url.replace("hqdefault.jpg", "maxresdefault.jpg")
cropped_img = get_cropped_image_bytes(thumbnail_url)
if cropped_img:
try:
ret = client.wp.uploadFile(
0,
settings.NR_USERNAME,
settings.NR_PASSWD,
{
"name": episode.youtube_id + "_img.jpg",
"type": "image/jpeg",
"bits": xmlrpc.client.Binary(cropped_img),
},
)
print("Uploaded thumbnail for", episode.youtube_id)
return ret
except:
print("Failed to upload thumbnail for", episode.youtube_id)
def upload_mp3_file_to_remote_server(episode, ep_data):
date = f"{ep_data['day']:02d}_{ep_data['month']:02d}_{ep_data['year']:04}"
slugified_title = slugify(ep_data["title"])
remote_name = f"{slugified_title}-{date}-nr-yt-{episode.youtube_id}.mp3"
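    # e.g. (hypothetical values) title "Nocne Rozmowy", upload date 05.01.2020 and
    # id "abc123" give "nocne-rozmowy-05_01_2020-nr-yt-abc123.mp3"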
episode.remote_filename = remote_name
episode.save()
try:
ret = subprocess.call(
[
"scp",
"-o",
"StrictHostKeyChecking=no",
f"/app/shared/media/{episode.get_filename()}",
f"<EMAIL>:~/audycje/automat/{remote_name}",
],
shell=False,
)
if ret != 0:
raise ValueError(
f"Error: scp returncode={ret} for episode {episode.get_filename()}"
)
return ret
except:
print("Error uploading file", episode.get_filename(), "to remote server")
raise
def prepare_wordpress_post_content(episode, ep_data, img_data):
date = f"{ep_data['day']:02d}.{ep_data['month']:02d}.{ep_data['year']:04}"
original_title = ep_data["title"]
current_time = datetime.today().strftime("%Y-%m-%d-%H:%M:%S")
original_description = ep_data["description"].split("Donejty na")[
0
] # cut the footer about tipanddonation
padded_description = (
original_description.strip() if original_description.strip() else ""
)
if not padded_description:
padded_description = "<!-- brak opisu na youtube (tylko stopka o donejtach) -->"
description = (
"<!--Tekst w takich smiesznych nawiasach NIE JEST WIDOCZNY dla sluchaczy-->\n\n"
"<!--Oryginalny opis audycji na YT-->\n"
f"{padded_description}\n\n"
"<!--Data publikacji na YT-->\n"
f"Data publikacji na YT: {date}\n"
"<!--Link do audycji na YT-->\n"
f'YouTube: <a href="https://www.youtube.com/watch?v={episode.youtube_id}">klik</a>\n'
"\n\n"
f"<!--Tagi z youtube: {str(ep_data.get('tags', []) or 'BRAK TAGOW')}-->\n"
"<!--Przed publikacja audycji zaznacz kategorie (np. Luzne Gatki) "
"i sprawdz, czy automatycznie wygenerowana miniaturka jest akceptowalna-->\n\n"
"<!--Prosze NIE usuwac identyfikatora, jest uzywany przez bota-->\n"
f"<!--ID###{episode.youtube_id} nr-yt==v0.0.3 {current_time}###-->\n"
)
content = {
"post_type": "post",
"post_title": original_title,
# "post_excerpt": "test_excerpt",
"post_content": description,
"post_format": "standard",
"post_status": "publish",
"comment_status": "open",
"custom_fields": [
{
"key": "enclosure",
"value": f"https://archiwum.nocneradio.pl/automat/{episode.remote_filename}\n\naudio/mpeg",
},
],
}
if img_data and "attachment_id" in img_data:
content["post_thumbnail"] = int(img_data["attachment_id"])
return content
def send_new_post_to_wordpress(episode, client, ep_data, img_data):
try:
content = prepare_wordpress_post_content(episode, ep_data, img_data)
except Exception as e:
print("Problem preparing post data for draft", episode.youtube_id)
        print("Error details:", str(e), repr(e))
return
try:
client.wp.newPost(0, settings.NR_USERNAME, settings.NR_PASSWD, content)
except Exception as e:
print("Problem submitting draft post for", episode.youtube_id)
        print("Error details:", str(e), repr(e))
else:
print("Submitted draft for", episode.youtube_id)
episode.draft_posted = True
episode.save()
def send_draft(episode: models.Episode):
print("Submitting draft", episode.youtube_id)
if episode.hidden or episode.draft_posted:
        print("Aborting sending draft for", episode.youtube_id)
        return
episode.hidden = True
episode.save()
client = xmlrpc.client.ServerProxy("http://nocneradio.pl/xmlrpc.php")
ep_data = get_file_data(episode.get_filename())
try:
img_data: Optional[dict] = upload_thumbnail(
ep_data["thumbnail"], episode, client
)
upload_mp3_file_to_remote_server(episode, ep_data)
send_new_post_to_wordpress(episode, client, ep_data, img_data)
except Exception as e:
print("Problem submitting draft post for", episode.youtube_id)
        print("Error details:", str(e), repr(e))
finally:
episode.hidden = False
episode.save()
```
#### File: platforma/platforma/services.py
```python
import json
import os
from django.conf import settings
from platforma.platforma import models
from django.shortcuts import get_object_or_404
def audio_format_to_mime(fmt):
if fmt == "mp3":
return "audio/mpeg"
return "audio/" + fmt
def get_saved_only_ids():
for root, dirs, files in os.walk("/app/shared/media/"):
for file in files:
if file.count(".") == 1 and "download" not in file:
yield file.split(".")[0]
def get_file_data(filename):
video_id, ext = filename.split(".")
path_vid, path_info = (
os.path.join("/app/shared/media", filename),
os.path.join("/app/shared/media", video_id + ".info.json"),
)
with open(path_info) as info_f:
parsed_info = json.loads(info_f.read())
x = parsed_info["upload_date"]
yr, mnth, day = map(int, [x[:4], x[4:6], x[6:]])
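    # upload_date is a "YYYYMMDD" string, e.g. "20200105" -> year 2020, month 1, day 5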
raw_thumbnail = parsed_info.get("thumbnails", [{"url": "https://example.com"}])[0][
"url"
]
thumbnail = (
raw_thumbnail
if ".jpg" not in raw_thumbnail
else (raw_thumbnail.split(".jpg")[0] + ".jpg")
)
thumbnail = thumbnail.replace("hqdefault.jpg", "maxresdefault.jpg")
return {
"media_url": settings.NR_FEED_DOMAIN + "feeds/media/" + filename,
"size": os.path.getsize(path_vid),
"id": video_id,
"title": parsed_info["title"],
"upload_date": parsed_info["upload_date"],
"url": parsed_info["webpage_url"],
"filesize_raw": parsed_info["filesize"],
"filltitle": parsed_info["fulltitle"],
"channel_id": parsed_info["channel_id"],
"description": parsed_info["description"],
"thumbnail": thumbnail,
"extension": ext,
"duration": parsed_info["duration"],
"sortby": (parsed_info["upload_date"], parsed_info["duration"]),
"year": yr,
"month": mnth,
"day": day,
"tags": parsed_info.get("tags", []),
}
def remove_file_and_mark_as_to_download(youtube_id: str) -> bool:
episode = get_object_or_404(models.Episode, youtube_id=youtube_id)
if not episode or not episode.file_downloaded:
return False
remove_episode_files(episode)
episode.file_downloaded = False
episode.draft_posted = False
episode.save()
return True
def remove_episode_files(episode: models.Episode) -> None:
vid_path = os.path.join("/app/shared/media/", episode.youtube_id + ".mp3")
metadata_path = os.path.join(
"/app/shared/media/", episode.youtube_id + ".info.json"
)
print("Removing video:", vid_path, metadata_path)
os.remove(vid_path)
os.remove(metadata_path)
``` |
{
"source": "jmollard/typhon",
"score": 2
} |
#### File: arts/workspace/workspace.py
```python
import ctypes as c
import numpy as np
import ast
from ast import iter_child_nodes, parse, NodeVisitor, Call, Attribute, Name, \
Expression, Expr, FunctionDef, Starred, Module, expr
from inspect import getsource, getclosurevars
from contextlib import contextmanager
from copy import copy
from functools import wraps
import os
from typhon.arts.workspace.api import arts_api, VariableValueStruct, \
data_path_push, data_path_pop, \
include_path_push, include_path_pop
from typhon.arts.workspace.methods import WorkspaceMethod, workspace_methods
from typhon.arts.workspace.variables import WorkspaceVariable, group_names, group_ids, \
workspace_variables
from typhon.arts.workspace.agendas import Agenda
from typhon.arts.workspace import variables as V
from typhon.arts.workspace.output import CoutCapture
from typhon.arts.workspace.utility import unindent
imports = dict()
################################################################################
# ARTS Agenda Macro
################################################################################
class Include:
"""Simple helper class to handle INCLUDE statements in agenda definitions.
Attributes:
agenda: The included controlfile or agenda as
typhon.arts.workspace.agenda.Agenda object.
"""
def __init__(self, agenda):
""" Create include from argument.
Args:
agenda (str, Agenda): Argument to the INCLUDE statement. This can
either be a string or an Agenda object.
"""
if type(agenda) == str:
if not agenda in imports:
self.agenda = Agenda.parse(agenda)
imports[agenda] = self.agenda
else:
self.agenda = imports[agenda]
elif type(agenda) == Agenda:
self.agenda = agenda
else:
raise Exception("agenda argument must be either a controlfile"
" name or a typhon.arts.workspace.agenda.Agenda object.")
def arts_agenda(func):
"""
Parse python method as ARTS agenda
This decorator can be used to define ARTS agendas using python function syntax.
    The function should have one argument which is assumed to be a Workspace instance.
    All expressions inside the function must be calls to ARTS WSMs. The result is an
    Agenda object that can be copied into a named ARTS agenda.
Example:
>>> @arts_agenda
>>> def inversion_iterate_agenda(ws):
>>> ws.x2artsStandard()
>>> ws.atmfields_checkedCalc()
>>> ws.atmgeom_checkedCalc()
>>> ws.yCalc()
>>> ws.VectorAddVector(ws.yf, ws.y, ws.y_baseline)
>>> ws.jacobianAdjustAfterIteration()
>>>
>>> ws.Copy(ws.inversion_iterate_agenda, inversion_iterate_agenda)
"""
source = getsource(func)
source = unindent(source)
ast = parse(source)
func_ast = ast.body[0]
if not type(func_ast) == FunctionDef:
        raise Exception("ARTS agenda definition can only decorate function definitions.")
args = func_ast.args.args
try:
arg_name = func_ast.args.args[0].arg
except:
raise Exception("Agenda definition needs workspace arguments.")
ws = Workspace()
context = copy(func.__globals__)
context.update({arg_name : ws})
# Add resolved non-local variables from closure.
nls, _, _, _ = getclosurevars(func)
context.update(nls)
#
# Helper functions
#
callback_body = []
def callback_make_fun(body):
"""
Helper function that creates a wrapper function around
python code to be executed withing an ARTS agenda.
"""
m = Module(body)
def callback(ptr):
try:
context[arg_name].ptr = ptr
eval(compile(m , "<unknown>", 'exec'), context)
except Exception as e:
                print("Exception in Python callback:\n", e)
context[arg_name].ptr = None
callback_body = []
return callback
def eval_argument(expr):
"""
Evaluate argument of workspace method call.
"""
if not hasattr(expr, "lineno"):
setattr(expr, "lineno", 0)
return eval(compile(Expression(expr), "<unknown>", 'eval'), context)
# Create agenda
a_ptr = arts_api.create_agenda(func.__name__.encode())
agenda = Agenda(a_ptr)
    illegal_statement_exception = Exception(
        "Agenda definitions may only contain calls to WSMs of the "
        "workspace argument " + arg_name + " or INCLUDE statements.")
#
# Here the body of the function definition is traversed. Cases
    # that are treated specially are INCLUDE statements and calls
# of workspace methods. Remaining statements are accumulated
# in callback_body and then added to the agenda as a single callback.
#
for e in func_ast.body:
if not isinstance(e, Expr):
callback_body += [e]
continue
else:
call = e.value
if not isinstance(call, Call):
callback_body += [e]
continue
# Include statement
if type(call.func) == Name:
if not call.func.id == "INCLUDE":
callback_body += [e]
else:
args = []
for a in call.args:
args.append(eval_argument(a))
include = Include(*args)
if len(callback_body) > 0:
agenda.add_callback(callback_make_fun(callback_body))
callback_body = []
arts_api.agenda_append(agenda.ptr, include.agenda.ptr)
else:
att = call.func.value
if not att.id == arg_name:
callback_body += [e]
continue
# Extract method name.
name = call.func.attr
# m is not a workspace method
if not name in workspace_methods:
callback_body += [e]
continue
# m is a workspace method.
m = workspace_methods[name]
args = [ws, m]
for a in call.args:
# Handle starred expression
if type(a) == Starred:
bs = eval_argument(a.value)
for b in bs:
args.append(b)
continue
args.append(eval_argument(a))
# Extract keyword arguments
kwargs = dict()
for k in call.keywords:
kwargs[k.arg] = eval(
compile(Expression(k.value), "<unknown>", 'eval'),
context)
# Add function to agenda
if len(callback_body) > 0:
agenda.add_callback(callback_make_fun(callback_body))
callback_body = []
agenda.add_method(*args, **kwargs)
# Check if there's callback code left to add to the agenda.
if len(callback_body) > 0:
agenda.add_callback(callback_make_fun(callback_body))
callback_body = []
return agenda
################################################################################
# Workspace Method Wrapper Class
################################################################################
class WSMCall:
"""
Wrapper class for workspace methods. This is necessary to be able to print
the method doc as __repr__, which doesn't work for python function objects.
Attributes:
ws: The workspace object to which the method belongs.
m: The WorkspaceMethod object
"""
def __init__(self, ws, m):
self.ws = ws
self.m = m
self.__doc__ = m.__doc__
def __call__(self, *args, **kwargs):
self.m.call(self.ws, *args, **kwargs)
def __repr__(self):
return repr(self.m)
################################################################################
# The Workspace Class
################################################################################
class Workspace:
"""
The Workspace class represents an ongoing ARTS simulation. Each Workspace object
holds its own ARTS workspace and can be used to execute ARTS workspace methods or
access workspace variables.
All workspace methods taken from workspace_methods in the methods module are added
    as attributes on creation and are thus available as class methods.
Attributes:
ptr(ctypes.c_void_p): object pointing to the ArtsWorkspace instance of the
ARTS C API
_vars(dict): Dictionary holding local variables that have been created
            interactively using one of the Create ARTS WSMs.
"""
def __init__(self, verbosity=1, agenda_verbosity=0):
"""
The init function just creates an instance of the ArtsWorkspace class of the
C API and sets the ptr attributed to the returned handle.
It also adds all workspace methods as attributes to the object.
Parameters:
verbosity (int): Verbosity level (0-3), 1 by default
agenda_verbosity (int): Verbosity level for agendas (0-3),
0 by default
"""
self.__dict__["_vars"] = dict()
self.ptr = arts_api.create_workspace(verbosity, agenda_verbosity)
self.workspace_size = arts_api.get_number_of_variables()
for name in workspace_methods:
m = workspace_methods[name]
setattr(self, m.name, WSMCall(self, m))
self.__verbosity_init__()
def __del__(self):
"""
Cleans up the C API.
"""
if not self.ptr is None:
if not arts_api is None:
arts_api.destroy_workspace(self.ptr)
def __getstate__(self):
return None
def __setstate__(self):
pass
def __verbosity_init__(self):
"""
Executes verbosityInit WSM directly through the ARTS api to suppress
output.
"""
wsm = workspace_methods["verbosityInit"]
(m_id, args_out, args_in, ts) = wsm._parse_output_input_lists(self, [], {})
arg_out_ptr = c.cast((c.c_long * len(args_out))(*args_out),
c.POINTER(c.c_long))
arg_in_ptr = c.cast((c.c_long * len(args_in))(*args_in),
c.POINTER(c.c_long))
with CoutCapture(self, silent = True):
e_ptr = arts_api.execute_workspace_method(self.ptr, m_id, len(args_out),
arg_out_ptr, len(args_in), arg_in_ptr)
for t in ts[::-1]:
t.erase()
def create_variable(self, group, name):
"""
Create a workspace variable.
Args:
group: The group name of the variable to create.
name: The name of the variable to create. If None, the
ARTS API will assign a unique name.
"""
if not name is None:
name = name.encode()
group_id = group_ids[group]
ws_id = arts_api.add_variable(self.ptr, group_id, name)
v = arts_api.get_variable(ws_id)
wsv = WorkspaceVariable(ws_id,
v.name.decode(),
group_names[group_id],
"User defined variable.",
self)
self._vars[wsv.name] = wsv
return wsv
def add_variable(self, var):
"""
This will try to copy a given python variable to the ARTS workspace and
return a WorkspaceVariable object representing this newly created
variable.
        Types natively supported by the C API are int, str, [str], [int], and
        numpy.ndarrays. These will be copied directly into the newly created WSV.
        In addition to that, all typhon ARTS types that can be stored to XML can
        be set to a WSV, but in this case the communication will happen through
        the file system (cf. WorkspaceVariable.from_typhon).
The user should not have to call this method explicitly, but instead it
is used by the WorkspaceMethod call function to transfer python
variable arguments to the ARTS workspace.
Args:
var: Python variable of type int, str, [str], [int] or np.ndarray
which should be copied to the workspace.
"""
if type(var) == WorkspaceVariable:
return var
# Create WSV in ARTS Workspace
group = group_names[WorkspaceVariable.get_group_id(var)]
wsv = self.create_variable(group, None)
# Set WSV value using the ARTS C API
s = VariableValueStruct(var)
if s.ptr:
e = arts_api.set_variable_value(self.ptr, wsv.ws_id, wsv.group_id, s)
if e:
arts_api.erase_variable(self.ptr, wsv.ws_id, wsv.group_id)
raise Exception("Setting of workspace variable through C API "
" failed with the " + "following error:\n"
+ e.decode("utf8"))
# If the type is not supported by the C API try to write the type to XML
# and read into ARTS workspace.
else:
try:
wsv.from_typhon(var)
except:
                raise Exception("Could not add variable since "
+ str(type(var)) + " is neither supported by "
+ "the C API nor typhon XML IO.")
self._vars[wsv.name] = wsv
return wsv
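    # Usage sketch (illustration only, requires a working ARTS installation):
    #
    #   ws  = Workspace(verbosity=0)
    #   wsv = ws.add_variable(np.array([1.0, 2.0, 3.0]))  # copy a python value into a new WSV
    #   ws.f_grid = np.linspace(1e9, 1e12, 100)           # or assign directly to a named WSV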
def __dir__(self):
return {**self._vars, **workspace_variables, **self.__dict__}
def __getattr__(self, name):
""" Lookup the given variable in the local variables and the ARTS workspace.
Args:
name(str): Name of the attribute (variable)
Raises:
ValueError: If the variable is not found.
"""
group_id = None
if name in self._vars:
var = self._vars[name]
var.update()
return var
else:
i = arts_api.lookup_workspace_variable(name.encode())
if i < 0:
raise AttributeError("No workspace variable " + str(name) + " found.")
vs = arts_api.get_variable(i)
group_id = vs.group
description = vs.description.decode("utf8")
# Get its symbolic representation
wsv = WorkspaceVariable(i, name, group_names[group_id], description, self)
return wsv
def __setattr__(self, name, value):
""" Set workspace variable.
This will lookup the workspace variable name and try to set it to value.
Args:
name(str): Name of the attribute (variable)
value(obj): The value to set the workspace variable to.
Raises:
            ValueError: If the variable is not found or if value cannot be uniquely converted to
a value of a workspace variable.
"""
try:
v = self.__getattr__(name)
except:
self.__dict__[name] = value
return None
# Handle empty list or None values.
if value is None or (isinstance(value, list) and not value):
arts_api.set_variable_value(self.ptr, v.ws_id, v.group_id,
VariableValueStruct.empty())
return None
if type(value) == Agenda:
arts_api.set_variable_value(self.ptr, v.ws_id, v.group_id,
VariableValueStruct(value))
return None
t = self.add_variable(value)
if not t.group_id == v.group_id:
raise Exception("Incompatible groups: Workspace variable " + name +
" of group " + group_names[v.group_id] + " and value " + str(value)
+ " of group " + group_names[t.group_id] + ".")
self.Copy(v, t)
# Remove t only if it wasn't an existing WSV already before.
if not type(value) == WorkspaceVariable:
t.erase()
def execute_agenda(self, agenda):
""" Execute agenda on workspace.
Args:
agenda (typhon.arts.workspace.agenda.Agenda): Agenda object to execute.
Raises:
ValueError: If argument is not of type typhon.arts.workspace.agenda.Agenda
"""
value_error = ValueError("Argument must be of type agenda.")
if not type(agenda) is Agenda:
raise value_error
include_path_push(os.getcwd())
data_path_push(os.getcwd())
agenda.execute(self)
include_path_pop()
data_path_pop()
def execute_controlfile(self, name):
""" Execute controlfile or agenda on workspace.
This method looks recursively for a controlfile with the given name in the current
directory and the arts include path. If such a file has been found it will be parsed
and executed on the workspace.
Args:
name(str): Name of the controlfile
Raises:
Exception: If parsing of the controlfile fails.
Returns:
The controlfile as parsed Agenda object.
"""
if not name in imports:
agenda = Agenda.parse(name)
imports[name] = agenda
else:
agenda = imports[name]
self.execute_agenda(agenda)
return agenda
```
#### File: typhon/oem/error.py
```python
from typhon.oem import common
__all__ = [
'smoothing_error',
'retrieval_noise',
]
def smoothing_error(x, x_a, A):
"""Return the smoothing error through the averaging kernel.
Parameters:
        x (ndarray): Atmospheric profile.
x_a (ndarray): A priori profile.
A (ndarray): Averaging kernel matrix.
Returns:
ndarray: Smoothing error due to correlation between layers.
"""
return A @ (x - x_a)
def retrieval_noise(K, S_a, S_y, e_y):
"""Return the retrieval noise.
Parameters:
K (np.array): Simulated Jacobians.
S_a (np.array): A priori error covariance matrix.
S_y (np.array): Measurement covariance matrix.
e_y (ndarray): Total measurement error.
Returns:
ndarray: Retrieval noise.
"""
return common.retrieval_gain_matrix(K, S_a, S_y) @ e_y
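# Illustrative sketch (not part of the module): made-up shapes with 3 state elements and
# 2 measurement channels, only meant to show how the two error terms fit together.
def _error_budget_sketch():
    import numpy as np
    x, x_a = np.array([1.0, 2.0, 3.0]), np.array([1.1, 1.9, 3.2])
    A = 0.8 * np.eye(3)
    s_err = smoothing_error(x, x_a, A)          # shape (3,)
    K = np.array([[1.0, 0.5, 0.0], [0.0, 0.5, 1.0]])
    S_a, S_y = np.eye(3), 0.1 * np.eye(2)
    e_y = np.array([0.01, -0.02])
    noise = retrieval_noise(K, S_a, S_y, e_y)   # shape (3,)
    return s_err, noise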
```
#### File: typhon/typhon/trees.py
```python
from collections.abc import Iterable
import pandas as pd
import numba
import numpy as np
from sklearn.neighbors import BallTree, KDTree
__all__ = [
"IntervalTree",
"RangeTree",
]
class IntervalTreeNode:
"""Helper class for IntervalTree.
"""
def __init__(self, center_point, center, left, right):
self.center_point = center_point
self.center = np.asarray(center)
self.left = left
self.right = right
class IntervalTree:
"""Tree to implement fast 1-dimensional interval searches.
Based on the description in Wikipedia
(https://en.wikipedia.org/wiki/Interval_tree#Centered_interval_tree)
and the GitHub repository by tylerkahn
(https://github.com/tylerkahn/intervaltree-python).
Examples:
Check 1000 intervals on 1000 other intervals:
.. code-block:: python
import numpy as np
from typhon.trees import IntervalTree
intervals = np.asarray([np.arange(1000)-0.5, np.arange(1000)+0.5]).T
tree = IntervalTree(intervals)
query_intervals = [[i-1, i+1] for i in range(1000)]
results = tree.query(query_intervals)
"""
def __init__(self, intervals):
"""Creates an IntervalTree object.
Args:
intervals: A list or numpy.array containing the intervals (list of
two numbers).
"""
if not isinstance(intervals, np.ndarray):
intervals = np.asarray(intervals)
# Check the intervals whether they are valid:
self.left = np.min(intervals)
self.right = np.max(intervals)
# We want to return the indices of the intervals instead of their
        # actual bounds. But the original indices will be lost due to resorting.
# Hence, we add the original indices to the intervals themselves.
indices = np.arange(intervals.shape[0]).reshape(intervals.shape[0], 1)
indexed_intervals = np.hstack([intervals, indices])
self.root = self._build_tree(np.sort(indexed_intervals, axis=0))
def __contains__(self, item):
if isinstance(item, (tuple, list)):
return bool(self._query(item, self.root, check_extreme=True))
else:
return bool(self._query_point(item, self.root, check_extreme=True))
# @numba.jit
def _build_tree(self, intervals):
if not intervals.any():
return None
center_point = self._get_center(intervals)
# Sort the intervals into bins
center = intervals[(intervals[:, 0] <= center_point)
& (intervals[:, 1] >= center_point)]
left = intervals[intervals[:, 1] < center_point]
right = intervals[intervals[:, 0] > center_point]
return IntervalTreeNode(
center_point, center,
self._build_tree(left), self._build_tree(right)
)
@staticmethod
def _get_center(intervals):
return intervals[int(intervals.shape[0]/2), 0]
@staticmethod
def interval_overlaps(interval1, interval2):
"""Checks whether two intervals overlap each other.
Args:
interval1: A tuple of two numbers: the lower and higher bound of
the first interval.
interval2: A tuple of two numbers: the lower and higher bound of
the second interval.
Returns:
True if the intervals overlap.
"""
return interval1[0] <= interval2[1] and interval1[1] >= interval2[0]
@staticmethod
    def interval_contains(interval, point):
        """Checks whether a point lies in an interval.
Args:
interval: A tuple of two numbers: the lower and higher bound of the
first interval.
point: The point (just a number)
Returns:
True if point lies in the interval.
"""
return interval[0] <= point <= interval[1]
# @numba.jit
def query(self, intervals):
"""Find all overlaps between this tree and a list of intervals.
Args:
intervals: A list of intervals. Each interval is a tuple/list of
two elements: its lower and higher boundary.
Returns:
List of lists which contain the overlapping intervals of this tree
for each element in `intervals`.
"""
return [self._query(interval, self.root, check_extreme=True)
for interval in intervals]
# @numba.jit
def _query(self, query_interval, node, check_extreme=False):
# Check this special case: the bounds of the query interval lie outside
# of the bounds of this tree:
if (check_extreme
and IntervalTree.interval_contains(query_interval, self.left)
and IntervalTree.interval_contains(query_interval, self.right)):
return [] # TODO: Return all intervals
# Let's start with the centered intervals
intervals = [int(interval[2]) for interval in node.center
if IntervalTree.interval_overlaps(interval, query_interval)]
if query_interval[0] <= node.center_point and node.left is not None:
intervals.extend(self._query(query_interval, node.left))
if query_interval[1] >= node.center_point and node.right is not None:
intervals.extend(self._query(query_interval, node.right))
return intervals
# @numba.jit
def query_points(self, points):
"""Find all intervals of this tree which contain one of those points.
Args:
points: A list of points.
Returns:
List of lists which contain the enclosing intervals of this tree
for each element in `points`.
"""
return [self._query_point(point, self.root, check_extreme=True)
for point in points]
# @numba.jit
def _query_point(self, point, node, check_extreme=False):
# Check this special case: the query point lies outside of the bounds
# of this tree:
if check_extreme \
and not IntervalTree.interval_contains(
(self.left, self.right), point):
return []
# Let's start with the centered intervals
intervals = [int(interval[2])
for interval in node.center
if IntervalTree.interval_contains(interval, point)]
if point < node.center_point and node.left is not None:
            intervals.extend(self._query_point(point, node.left))
if point > node.center_point and node.right is not None:
            intervals.extend(self._query_point(point, node.right))
return intervals
class RangeTree:
def __init__(self, points, shuffle=True, tree_class=None):
# KD- or ball trees have a very poor building performance for sorted
# data (such as from SEVIRI) as discussed in this issue:
# https://github.com/scikit-learn/scikit-learn/issues/7687
# Hence, we shuffle the data points before inserting them.
if shuffle:
self.shuffler = np.arange(points.shape[0])
np.random.shuffle(self.shuffler)
points = points[self.shuffler]
else:
# The user does not want to shuffle
self.shuffler = None
if tree_class is None or tree_class == "Ball":
tree_class = BallTree
elif tree_class == "KD":
tree_class = KDTree
tree_points = np.column_stack([points, np.zeros_like(points)])
self.tree = tree_class(tree_points)
def query_radius(self, points, r):
query_points = np.column_stack([points, np.zeros_like(points)])
jagged_pairs = self.tree.query_radius(query_points, r)
# Build the list of pairs:
pairs = np.array([
[build_point, query_point]
for query_point, build_points in enumerate(jagged_pairs)
for build_point in build_points
]).T
if self.shuffler is None:
return pairs
else:
# We shuffled the build points in the beginning, so the current
# indices in the second row (the collocation indices from the build
# points) are not correct
pairs[0, :] = self.shuffler[pairs[0, :]]
return pairs
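# A minimal usage sketch (hypothetical data; assumes BallTree/KDTree are the
# sklearn.neighbors classes imported earlier in this module):
#
#   build_points = np.random.uniform(0, 100, size=1000)
#   query_points = np.random.uniform(0, 100, size=10)
#   tree = RangeTree(build_points, shuffle=True, tree_class="Ball")
#   pairs = tree.query_radius(query_points, r=0.5)
#   # pairs[0, :] are build-point indices, pairs[1, :] the matching query indices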
``` |
{
"source": "jmoller93/1cpn-model",
"score": 2
} |
#### File: test/integ_tests/1cpn_inte_test.py
```python
import numpy as np
import os, glob, sys
import subprocess
import math
import copy
def ener_conserve():
if not os.path.isfile("energy.dat"):
print("No energy file cannot test system")
return False
d=np.loadtxt("energy.dat")
d_ener = d[:,2]
    etot0 = d[0,2]
    # Use the magnitude of the initial total energy as the tolerance scale so a
    # negative etot0 does not give a negative (always-failing) tolerance.
    tol = 1E-1*np.abs(etot0)
    for i in range(len(d_ener)):
        etot = d_ener[i]
        if (np.abs(etot - etot0) > tol):
            print("TEST FAILED! etot difference at step %d is %f!" % (d[i,0], np.abs(etot-etot0)))
return False
return True
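# Assumed energy.dat layout, inferred from the indices used above: whitespace-
# separated columns with the timestep in column 0 and the total energy in
# column 2 (other columns are not used by this test), e.g. hypothetical rows:
#   0     300.0  -1234.5
#   1000  301.2  -1234.4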
def mom_conserve():
if not os.path.isfile("traj-angmom.dump"):
print("No momentum file found. Cannot test system")
return True
f = open("traj-angmom.dump",'r')
lines = f.readlines()
momentum = 0.0
momentum_0 = 0.0
flag = 0
for line in lines:
q = line.split()
if len(q) == 12:
#Calculate system and individual angular momentum
#L = R x P + sum(I_i * w_i)
mass = float(q[8])
if flag == 0:
momentum_0 = momentum_0 + mass*(float(q[9]) + float(q[10]) + float(q[11]))
else:
momentum = momentum + mass*(float(q[9]) + float(q[10]) + float(q[11]))
# Check after each frame for the system
elif 'TIMESTEP' in line:
if abs(momentum-momentum_0) > 1E-3:
print("Warning system is not conserving momentum (%f)\n" % abs(momentum-momentum_0))
return False
#Reinitialize everything
momentum = 0.0
if flag == 0:
flag = 1
return True
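# Assumed traj-angmom.dump atom-line layout, inferred from the indices used in
# mom_conserve() and amom_conserve() (12 whitespace-separated fields):
#   id type x y z Lx Ly Lz mass vx vy vz
# i.e. q[2:5] = position, q[5:8] = angular momentum, q[8] = mass,
# q[9:12] = velocity.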
def amom_conserve():
if not os.path.isfile("traj-angmom.dump"):
print("No momentum file found. Cannot test system")
return True
f = open("traj-angmom.dump",'r')
lines = f.readlines()
ang_mom = 0.0
mtot = 0
natoms = int(lines[3])
com = np.zeros(3)
v_com = np.zeros(3)
v = []
r = []
flag = 0
for line in lines:
q = line.split()
if len(q) == 12:
#Calculate system and individual angular momentum
#L = R x P + sum(I_i * w_i)
ang_mom = ang_mom + float(q[5]) + float(q[6]) + float(q[7])
mass = float(q[8])
v.append(np.array([float(q[9]),float(q[10]),float(q[11])]))
r.append(np.array([float(q[2]),float(q[3]),float(q[4])]))
com = com + r[-1]*mass
v_com = v_com + v[-1]*mass
mtot = mtot + mass
# Check after each frame for the system
elif 'TIMESTEP' in line and mtot != 0:
com = com*1.0/mtot
v_com = v_com*1.0/mtot
#Calculate initial angular momentum
if flag == 0:
ang_mom_0 = ang_mom
for i in range(0,natoms):
ang_mom_0 = ang_mom_0 + np.sum(mass*np.cross(r[i]-com,v[i]-v_com))
ang_mom = ang_mom_0
flag = 1
else:
for i in range(0,natoms):
ang_mom = ang_mom + np.sum(mass*np.cross(r[i]-com,v[i]-v_com))
if abs(ang_mom-ang_mom_0) > 1E-3:
print("Warning system is not conserving angular momentum (%f)\n" % abs(ang_mom-ang_mom_0))
return False
#Reinitialize everything
mtot = 0.0
com = 0.0*com
v_com = 0.0*v_com
ang_mom = 0.0
v = []
r = []
return True
def onecpn_test(indir,lammpsfile):
if not os.path.isfile("%s" % (lammpsfile)):
print("Executable file (%s) does not exist" % lammpsfile)
sys.exit(1)
#Run lammps
stdout = open('stdout.txt','wb')
stderr = open('stderr.txt','wb')
subprocess.call(["lmp_mpi", "-in", lammpsfile],stdout=stdout,stderr=stderr)
ener_test = ener_conserve()
mom_test = mom_conserve()
amom_test = amom_conserve()
if ener_test and mom_test and amom_test:
print("%s conserves necessary observable quantities. Continuing..." % indir)
else:
if not ener_test:
print("%s does not conserve energy" % indir)
sys.exit(2)
if not mom_test:
print("Warning: %s does not conserve momentum" % indir)
sys.exit(3)
if not amom_test:
print("Warning: %s does not conserve angular momentum" % indir)
#sys.exit(4)
print("Running 1CPN integration tests")
#Currently only providing this option for the mpi version of lammps
#Will probably throw an option to change later
lammps_exec = "lmp_serial"
#Run all tests in the directory
owd = os.getcwd()
for (dirpath,dirnames,filenames) in os.walk("."):
for file in filenames:
if 'in.zewdie' in file or 'in.wlc' in file or 'in.1cpn' in file:
os.chdir(dirpath)
test = onecpn_test(dirpath,file)
os.chdir(owd)
print("Tests have now finished")
```
#### File: utils/blender/mesh_zewdie.py
```python
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from stl import mesh
'''This is a helper script to generate a zewdie isosurface given a set of parameters.
the isosurface is written to zewdie.stl and is read in by import_dump.py when
vizualizing 1CPN in blender'''
import pdb
# define zewdie parameters
# these are parameters from first fit to 3spn data
ps0 = 45.3796835435872
ps000 = 0.905835010046335
pscc2 = -0.458169670802355
ps220 = 1.74891489040291
ps222 = -1.33065657833228
ps224 = 1.03998788841889
pe0 = 43.2151374522855
pe000 = 0.128604190793286
pecc2 = -0.431844707607592
pe220 = 0.915662953592864
pe222 = 1.72160108874421
pe224 = 4.74995077023779
# these are parameters from Kepper2008, Stehr2008
#ps0 = 1.0; ps000 = 1.6957; pscc2 = -0.7641; ps220 = -0.1480; ps222 = -0.2582; ps224 = 0.5112;
#pe0 = 1.0; pe000 = 1.9152; pecc2 = 2.7322; pe220 = 1.2633; pe222 = 2.3440; pe224 = 1.0101;
#Zewdie
#ps0 = 1.0; ps000 = 2.34 ; pscc2 = -1.52 ; ps220 = -0.64 ; ps222 = -0.69 ; ps224 = 1.97 ;
#pe0 = 1.0; pe000 = 2.24 ; pecc2 = 3.58 ; pe220 = 3.16 ; pe222 = 4.29 ; pe224 = 1.30 ;
def zewdie(rn,f0,f1,f2):
# Business time
S000 = 1.;
S202 = (3*f1*f1 - 1)/(2.*np.sqrt(5));
S022 = (3*f2*f2 - 1)/(2.*np.sqrt(5));
S220 = (3*f0*f0 - 1)/(2.*np.sqrt(5));
S222 = (2 - 3*f1*f1 - 3*f2*f2 - 3*f0*f0 + 9*f0*f1*f2)/np.sqrt(70);
S224 = (1 + 2*f0*f0 - 5*f1*f1 - 5*f2*f2 - 20*f0*f1*f2 + 35*f1*f1*f2*f2)/(4.*np.sqrt(70));
sig = ps0*(S000*ps000 + S220*ps220 + S222*ps222 + S224*ps224 + (S022 + S202)*pscc2);
eps = pe0*(S000*pe000 + S220*pe220 + S222*pe222 + S224*pe224 + (S022 + S202)*pecc2);
U = 4*eps*(pow(ps0,12)/pow(ps0 + rn - sig,12) - pow(ps0,6)/pow(ps0 + rn - sig,6));
return U
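# Example call with hypothetical values: the pair energy at separation rn = 50
# (same units as ps0), with both particle axes aligned with each other and with
# the separation vector (f0 = f1 = f2 = 1):
#   zewdie(50.0, 1.0, 1.0, 1.0)
# f0, f1 and f2 are cosines of the relative orientation angles, as used in
# find_zero() below.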
def find_zero(phi1,phi2,theta):
'''Return r that zeros the function at given phi'''
# parameters
r=ps0*5.0;
dr = 1;
factor = 0.1
TOLERENCE = 1e-6
#find zero
f1 = np.cos(phi1)
f2 = np.cos(phi2)
f0 = np.cos(theta)
eprev = -1;
while True:
e = zewdie(r,1.0,f1,f2)
if (e > 0):
if (e < TOLERENCE):
break
else:
r = r+dr # go back a step
dr = dr*factor # shrink step size
r = r-dr
return r
def main():
nphi = 40
ntheta = 20
surf_xy = np.zeros((nphi,2))
n= (nphi-2)*(ntheta) + 2
#n= ntheta*nphi
surf_xyz = np.zeros((n,3))
phis = np.linspace(0,np.pi,nphi)
#thetas = np.linspace(0,2*np.pi,ntheta,endpoint=False)
thetas = np.linspace(0,2*np.pi,ntheta) # this removed striping
count = 0;
for i in range(nphi):
phi = phis[i]
r = find_zero(phi,phi,0)
r *= 0.5
surf_xy[i,0] = r*np.cos(phi)
surf_xy[i,1] = r*np.sin(phi)
for j in range(ntheta):
theta = thetas[j]
surf_xyz[count,0] = r*np.cos(phi)
surf_xyz[count,1] = r*np.sin(phi)*np.cos(theta)
surf_xyz[count,2] = r*np.sin(phi)*np.sin(theta)
count += 1
if i == 0 or i == nphi-1:
break
## now get facets
facets = []
for i in range(nphi-1):
for j in range(ntheta):
if i == 0: #triangles at pole
if j != ntheta -1:
a = 0
b = a+j+1
c = b+1
#else:
# a = 0
# b = a+j+1
# c = a+0+1
facets.append([a,b,c])
elif i==nphi-2: # triangles at pole
if j != ntheta -1:
a = (i-1)*ntheta + j + 1 + 1
b = a - 1
c = n-1
#else:
# a = (i-1)*ntheta + j + 1
# b = (i-1)*ntheta + 0 + 1
# c = n-1
facets.append([a,b,c])
elif j == ntheta - 1: # wrap in shape
b = (i-1)*ntheta + j + 1
a = (i-1)*ntheta + 0 + 1
c = i*ntheta + j + 1
d = i*ntheta + 0 + 1
#facets.append([a,b,d]) #draw 2 triangles
#facets.append([b,c,d])
else:
a = (i-1)*ntheta + j + 1
b = a + 1
c = i*ntheta + j + 1 + 1
d = c - 1
facets.append([a,b,c]) #draw 2 triangles
facets.append([a,c,d])
#np.savetxt('surf_xy.dat',surf_xy)
#np.savetxt('surf_xyz.dat',surf_xyz)
# write stl mesh
data = np.zeros(len(facets),dtype=mesh.Mesh.dtype)
for i in range(len(facets)):
a = surf_xyz[facets[i][0]]
b = surf_xyz[facets[i][1]]
c = surf_xyz[facets[i][2]]
tmp = np.append(np.append(a,b),c)
tmp = np.reshape(tmp,(3,3))
data['vectors'][i] = tmp
mymesh = mesh.Mesh(data)
mymesh.save('zewdie.stl')
#plot
#x,y,z = surf_xyz[:,0], surf_xyz[:,1], surf_xyz[:,2]
#fig = plt.figure()
#ax = fig.add_subplot(111,projection='3d')
#ax.scatter(x,y,z)
#ax.set_xlabel('x')
#ax.set_ylabel('y')
#plt.show()
if __name__ == "__main__":
main()
``` |
{
"source": "jmolloy1/instawizardry",
"score": 3
} |
#### File: instawizardry/where the magic happens/AddRow.py
```python
import sys
import csv
with open('Posts.csv','r') as postFile:
posts = csv.writer(postFile)
def AddRow(row):
    # Append to Posts.csv so existing rows are preserved.
    with open('Posts.csv','a') as postFile:
        posts = csv.writer(postFile)
        print(row)
        posts.writerow([row])
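# A minimal usage sketch (hypothetical value): each call appends one
# single-column row to Posts.csv.
#   AddRow("my first post")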
``` |
{
"source": "jmolloy/pedigree",
"score": 2
} |
#### File: scripts/installer/Installer.py
```python
import os
import hashlib
import shutil
import curses
import curses.wrapper
from progressBar import progressBar
# Something went wrong during installation
class InstallerException(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
class Installer:
def __init__(self, stdscr, package, filesdir = "./files", installdir = "./install"):
self.stdscr = stdscr
self.filesdir = filesdir
self.installdir = installdir
self.packageName = package
def setupCurses(self):
self.titlewin = self.stdscr.subwin(1, 80, 0, 0)
self.mainwin = self.stdscr.subwin(23, 80, 1, 0)
self.progwin = self.stdscr.subwin(10, 60, 6, 10)
self.statwin = self.stdscr.subwin(1, 80, 24, 0)
self.progBar = progressBar(0, 100, 56)
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_CYAN)
curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_WHITE)
curses.init_pair(4, curses.COLOR_RED, curses.COLOR_WHITE)
self.titlewin.bkgd(' ', curses.color_pair(1))
self.statwin.bkgd(' ', curses.color_pair(1))
self.mainwin.bkgd(' ', curses.color_pair(2))
self.titlewin.addstr(0, 0, "Installing " + self.packageName)
self.statwin.addstr(0, 0, "Copying files...")
self.resetProgWin()
self.stdscr.refresh()
def resetProgWin(self):
self.progwin.clear()
self.progwin.bkgd(' ', curses.color_pair(1))
self.progwin.box()
self.stdscr.move(24, 79)
def statusUpdate(self, msg):
self.statwin.clear()
self.statwin.addstr(0, 1, msg)
self.statwin.refresh()
def drawAlert(self, msg, title, colour_pair):
# split the message into more manageable chunks
msgLines = msg.rstrip().split("\n")
height = len(msgLines) + 4
errwin = self.mainwin.subwin(height, 50, (24 / 2) - (height / 2), 15)
errwin.overlay(self.progwin)
errwin.clear()
errwin.bkgd(' ', curses.color_pair(1))
errwin.box()
errwin.addstr(0, 2, " " + title + " ", curses.color_pair(colour_pair))
self.statusUpdate("Press ENTER to acknowledge")
y = 2
for i in msgLines:
if(len(i) > 50):
firstPart = i[0:46]
secondPart = i[46:]
errwin.addstr(y, 2, firstPart)
y += 1
errwin.addstr(y, 2, secondPart)
else:
errwin.addstr(y, 2, i)
y += 1
errwin.refresh()
# Wait for ENTER
while 1:
c = self.stdscr.getch()
if(c == 13 or c == 10):
break
self.mainwin.clear()
self.mainwin.refresh()
self.resetProgWin()
def drawError(self, msg, title = "Error"):
self.drawAlert(msg, title, 4)
def drawWarning(self, msg, title = "Warning"):
self.drawAlert(msg, title, 3)
def drawProgress(self, action, fileName, progress):
self.progwin.addstr(1, 2, action + ", please wait...")
self.progwin.addstr(3, 2, fileName)
self.progBar.updateAmount(progress)
self.progwin.addstr(5, 2, str(self.progBar))
self.progwin.refresh()
self.resetProgWin()
def InstallerPage(self, msg):
introwin = self.mainwin.subwin(20, 70, 3, 5)
introwin.clear()
introwin.box()
introwin.bkgd(' ', curses.color_pair(1))
msgLines = msg.split("\n")
msgNum = len(msgLines)
y = (20 / 2) - (msgNum / 2)
for i in msgLines:
introwin.addstr(y, (70 / 2) - (len(i) / 2), i)
y += 1
introwin.refresh()
self.waitForKeyPress()
self.mainwin.clear()
self.mainwin.refresh()
def introPage(self):
msg = "Welcome to the " + self.packageName + " installation!"
msg += "\n\n\n"
msg += "The next steps will guide you through the installation of " + self.packageName + "."
msg += "\n\n"
msg += "Press ENTER to continue."
self.InstallerPage(msg)
def done(self):
msg = self.packageName + " is now installed!"
msg += "\n\n\n"
msg += "Remove the CD from the disk drive and press any key to reboot."
self.InstallerPage(msg)
def selectDest(self):
pass
def installFiles(self):
# Open the file listing. This file contains information about each file that
# we are to install.
try:
fileList = open(self.filesdir + "/filelist.txt")
except:
# Pass it up to the caller
self.drawError("Couldn't open file list for reading.")
raise
self.statusUpdate("Please wait...")
# Start copying files
fileLines = fileList.read().rstrip().split("\n")
nFiles = len(fileLines)
currFileNum = 0
myProgress = 0
for line in fileLines:
# Remove trailing whitespace and split on spaces
# File format:
# <source path> <dest path> <md5> <compulsory>
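            # Hypothetical example entry (illustration only, not a real file):
            #   /boot/kernel /boot/kernel d41d8cd98f00b204e9800998ecf8427e yes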
line = line.rstrip()
set = line.split(" ")
if(len(set) != 4):
self.drawError("Bad set in file list:\n" + line + "\nThis set only has " + str(len(set)) + " entries")
continue
# Create directory structure if required
dirSplit = set[1].split("/")
dirSplit = dirSplit[1:-1]
if(len(dirSplit) > 0):
currPath = "/"
for dir in dirSplit:
os.mkdir(self.installdir + currPath)
# Update the progress
currFileNum += 1
myProgress = (currFileNum / float(nFiles)) * 100.0
self.drawProgress("Copying files", self.installdir + set[1], myProgress)
# Some files are meant to be empty, but present
if(len(set[0]) == 0):
f = open(self.installdir + set[1], "w")
f.close()
continue
# Copy the file
shutil.copy(self.filesdir + set[0], self.installdir + set[1])
# MD5 the newly copied file
newFile = open(self.installdir + set[1])
hex = hashlib.md5(newFile.read()).hexdigest()
newFile.close()
# Ensure the MD5 matches
if(hex != set[2]):
if(set[3] == "yes"):
self.drawError("Compulsory file failed verification:\n" + self.installdir + set[1])
raise
else:
self.drawWarning("File " + str(currFileNum) + " failed verification, continuing anyway:\n" + self.installdir + set[1])
fileList.close()
self.statusUpdate("Copy complete.")
def postInstall(self):
self.statusUpdate("Please wait...")
# Files copied, run post-install scripts now
try:
postInstallFile = open(self.filesdir + "/postinstall.txt")
contents = postInstallFile.read()
contents.rstrip()
if(len(contents)):
num = 0
for line in contents.split("\n"):
num += 1
self.drawProgress("Running script", line, (num / float(len(contents))) * 100.0)
try:
p = os.popen(line)
print p.read()
p.close()
except:
self.drawWarning("Post-install script '" + str(line) + "' failed, continuing")
else:
raise
postInstallFile.close()
except:
self.statusUpdate("Post-install scripts complete.")
def waitForKeyPress(self):
self.stdscr.getch()
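# A minimal driver sketch (hypothetical package name and directories; assumes
# the installer is launched through curses.wrapper, consistent with the curses
# imports above):
#
#   def run(stdscr):
#       inst = Installer(stdscr, "Pedigree", filesdir="./files", installdir="./install")
#       inst.setupCurses()
#       inst.introPage()
#       inst.installFiles()
#       inst.postInstall()
#       inst.done()
#
#   curses.wrapper(run)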
```
#### File: pedigree/scripts/manage_users.py
```python
import os, sys, sqlite3, getpass
scriptdir = os.path.dirname(os.path.realpath(__file__))
imagedir = os.path.realpath(scriptdir + "/../images/local")
usersdir = os.path.join(imagedir, "users")
configdb = os.path.realpath(scriptdir + "/../build/config.db") # TODO: build dir is not always 'build'
# Try to connect to the database
try:
conn = sqlite3.connect(configdb)
except:
print "Configuration database is not available. Please run 'scons build/config.db'."
exit(2)
# Check for an existing users directory
if not os.path.exists(usersdir):
os.mkdir(usersdir)
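# Assumed 'users' table schema, inferred from the queries below (presumably
# created when build/config.db is generated):
#   users(uid, username, fullname, groupname, homedir, shell, password)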
def ignore():
pass
def getnextuid():
q = conn.execute("select uid from users order by uid desc limit 1")
u = q.fetchone()
return u[0] + 1
def safe_input():
try:
line = raw_input("> ")
except KeyboardInterrupt:
print # a newline
conn.close()
exit(1)
return line
def main_menu(funcmap):
print "Select an option below:"
print "1. Add a new user"
print "2. Change the password of an existing user"
print "3. Change attributes of an existing user"
print "4. Delete user"
print "5. Exit"
option = 0
while True:
line = safe_input()
try:
option = int(line)
except ValueError:
print "Please enter a number."
continue
if not option in funcmap.keys():
print "Please enter a valid option."
continue
break
funcmap[option]()
userattrs = [["fullname", "New User", False], ["groupname", "users", False], ["homedir", "/users/user", False], ["shell", "/applications/bash", False], ["password", "", True]]
def adduser():
print "Please type the username for the new user."
username = safe_input()
# Make sure the user does not exist.
q = conn.execute("select uid from users where username=?", [username])
user = q.fetchone()
if not user is None:
print "User '%s' already exists." % (username,)
return
uid = getnextuid()
newattrs = [uid, username]
# Get all attributes
for attr in userattrs:
if attr[2]:
while True:
# Secure.
a = getpass.getpass("%s: " % (attr[0],))
b = getpass.getpass("Confirm %s: " % (attr[0],))
if not a == b:
print "Passwords do not match."
else:
newattrs += [a]
break
else:
data = raw_input("%s [default=%s]: " % (attr[0], attr[1]))
if data == "":
data = attr[1]
newattrs += [data]
# Insert.
conn.execute("insert into users (uid, username, fullname, groupname, homedir, shell, password) values (?, ?, ?, ?, ?, ?, ?)", newattrs)
conn.commit()
# Home directory.
homedir = newattrs[4][1:]
os.mkdir(os.path.join(imagedir, homedir))
print "Created user '%s'" % (username,)
def validuser(username):
# Check for a valid user in the database.
q = conn.execute("select uid, password from users where username=?", [username])
user = q.fetchone()
if user is None:
print "The user '%s' does not exist." % (username,)
return False
return True
def changepassword():
print "Please type the username of the user for which you want to change password."
username = safe_input()
# Check for a valid user in the database.
    if not validuser(username):
        return
    # validuser() only reports whether the user exists, so fetch the row here.
    q = conn.execute("select uid, password from users where username=?", [username])
    user = q.fetchone()
    # Confirm the password
    print "Changing password for '%s'..." % (username,)
    curr = getpass.getpass("Current password: ")
    if curr == user[1]:
        new = getpass.getpass("New password: ")
        # Commit to the DB
        conn.execute("update users set password=? where uid=?", [new, user[0]])
        conn.commit()
        print "Changed password for user '%s'" % (username,)
    else:
        print "Incorrect password."
def changeattr():
print "Please type the username of the user for which you want to change attributes."
username = safe_input()
# Check for a valid user in the database.
if not validuser(username):
return
newattrs = []
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
old_factory = conn.row_factory
conn.row_factory = dict_factory
q = conn.execute("select uid, fullname, groupname, homedir, shell from users where username=?", [username])
user = q.fetchone()
# Get all attributes
n = 0
for attr in userattrs:
if not attr[2]:
data = raw_input("%s [current=%s]: " % (attr[0], user[attr[0]]))
if data == "":
data = user[attr[0]]
newattrs += [data]
n += 1
newattrs += [user["uid"]]
# Update.
conn.execute("update users set fullname=?, groupname=?, homedir=?, shell=? where uid=?", newattrs)
conn.commit()
# Create home directory in case it changed.
homedir = newattrs[2]
if True or (not homedir == user["homedir"]):
oldhome = os.path.join(imagedir, user["homedir"][1:])
newhome = os.path.join(imagedir, homedir[1:])
if not os.path.exists(oldhome):
os.mkdir(newhome)
else:
os.rename(oldhome, newhome)
conn.row_factory = old_factory
def deleteuser():
print "Please type the username for the user to delete."
username = safe_input()
# Check for a valid user in the database.
    if not validuser(username):
        return
    # validuser() only reports whether the user exists, so fetch the uid here.
    q = conn.execute("select uid from users where username=?", [username])
    user = q.fetchone()
    # Delete the user.
    conn.execute("delete from users where uid=?", [user[0]])
conn.commit()
print "Deleted user '%s'" % (username,)
options = {1 : adduser, 2 : changepassword, 3 : changeattr, 4 : deleteuser, 5 : ignore}
main_menu(options)
conn.close()
``` |