metadata | text
---|---
{
"source": "jlboat/query_qtl_atlas",
"score": 3
} |
#### File: jlboat/query_qtl_atlas/query_qtl_atlas.py
```python
import sys
import argparse
from sqlalchemy import create_engine
def parse_arguments():
"""Parse arguments passed to script"""
parser = argparse.ArgumentParser(description="This script was " +
"designed to find any QTLs near SNPs in the Sorghum " +
"QTL Atlas (https://aussorgm.org.au/sorghum-qtl-atlas/search/)\n\n")
requiredNamed = parser.add_argument_group('required arguments')
requiredNamed.add_argument(
"--input",
type=str,
required=True,
help="The name of the input file (CSV). " +
"File structure: markerName,chromosome,position " +
"Or: markerName,chromosome,start,stop -- both without a header",
action="store")
requiredNamed.add_argument(
"--trait",
type=str,
required=True,
help="The trait table from which QTL are pulled. " +
"Trait categories include: leaf, maturity, panicle, " +
"resistance_abiotic, resistance_biotic, stem_composition, " +
"and stem_morphology",
action="store")
requiredNamed.add_argument(
"--output",
type=str,
required=True,
help="The output file to be created (CSV).",
action="store")
parser.add_argument(
"--database",
type=str,
required=False,
default="/zfs/tillers/Reference_Genomes/BTx623/v3.1.1/" +
"annotation/SorghumQtlAtlas.db",
help="The SorghumQtlAtlas database to use.",
action="store")
parser.add_argument(
"--distance",
type=float,
required=False,
default=10.0,
help="The distance in kb from a SNP to search for QTL (default: 10).",
action="store")
parser.add_argument(
"--column",
type=str,
required=False,
default="None",
help="To search for specific substrings within columns, use " +
"the following index numbers: " +
"{ 0: index, 1: QTL Id, 2: Publication, 3: Population, 4: Trait Description, 5: LG:Start-End (v3.0), 6: Genes Under QTL (v3.0), 7: Synteny, 8: Chr, 9: Start, 10: Stop}. Ex. --column 2 (this will use the Publication column to look for substrings). Multiple columns may also be designated: --column 2,8",
action="store"
)
parser.add_argument(
"--substring",
type=str,
required=False,
default="None",
help="Substring to search for within column (designated in " +
"--column). Ex. --column 4 --substring wax. Multiple substrings " +
"may be designated: --column 3,8 --substring bap,2",
action="store"
)
return parser.parse_args()
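# A minimal example invocation (hypothetical file names, default database path);
# only --input, --trait and --output are required:
#
#   python query_qtl_atlas.py \
#       --input my_snps.csv \
#       --trait leaf \
#       --output leaf_qtl_hits.csv \
#       --distance 25 \
#       --column 4 --substring wax
#
# --distance 25 widens the search window to 25 kb either side of each SNP, and
# --column 4 --substring wax keeps only rows whose Trait Description contains "wax".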
def file_to_list(filename):
with open(filename) as f:
output = f.read().splitlines()
return output
def build_filter(trait, engine, column, substring):
if "," in column:
column_list = column.split(',')
else:
column_list = [column]
if "," in substring:
substring_list = substring.split(',')
else:
substring_list = [substring]
if len(column_list) != len(substring_list):
sys.stderr.write("Different numbers in Column and Substring variables\n")
sys.exit(1)
column_dict = {}
for i in engine.execute(f"PRAGMA table_info('{trait}')").fetchall():
        # PRAGMA table_info returns rows of (cid, name, type, ...); map column index -> name
column_dict[str(i[0])] = i[1]
additional_filter = ""
for column_value, substring_value in zip(column_list, substring_list):
column_name = column_dict[column_value]
additional_filter = f"{additional_filter} \"{column_name}\" LIKE '%{substring_value}%' AND"
return additional_filter
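# Example of the generated fragment (hypothetical values, assuming the table columns
# follow the index mapping shown in the --column help): --column 2,8 --substring Smith,5
# produces
#    "Publication" LIKE '%Smith%' AND "Chr" LIKE '%5%' AND
# which is prepended to the Chr/position conditions in the queries below.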
def find_overlapping_features(input_file, output,
trait, distance, database, column, substring):
engine = create_engine("sqlite:///" + database)
additional_filter = ""
if ((column.lower() != "none") and (substring.lower() != "none")):
additional_filter = build_filter(trait, engine, column, substring)
with open(output, 'w') as f:
f.write("Marker_Name,MyChr,MyPosition,QtlID,QtlSpan," +
"Trait_Description,Publication,Population," +
"Chrom:Start-End_v3.0," +
"Genes_Under_QTL_v3.0,Synteny,QtlChr,QtlStart,QtlStop\n")
for line in input_file:
split_line = line.split(",")
chrom = int(split_line[1])
position = int(split_line[2])
try:
stop = split_line[3]
except IndexError:
stop = ""
if stop == "":
results = engine.execute(f"SELECT * FROM {trait} WHERE {additional_filter} Chr == {chrom} AND {position} BETWEEN (Start - {distance}) AND (Stop + {distance})").fetchall()
else:
results = engine.execute(f"SELECT * FROM {trait} WHERE {additional_filter} Chr == {chrom} AND (({position} BETWEEN (Start - {distance}) AND (Stop + {distance})) OR ({stop} BETWEEN (Start - {distance}) AND (Stop + {distance})) OR ({position} <= Start AND {stop} >= Stop))").fetchall()
for result in results:
writeable_results = [str(i) for i in list(result[1:])]
url_link = "( https://aussorgm.org.au/" +\
"sorghum-qtl-atlas/study-details/?study_name=" +\
"%20".join(writeable_results[1].replace(",","%2C")
.replace("&","%26")
.split()) + " )"
span = [str(int(writeable_results[9]) - int(writeable_results[8]))]
writeable_results[1] = writeable_results[1] + url_link
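                # Reorder the query columns to match the output header written above:
                # marker,chr,pos, then QTL Id, span (Stop - Start), Trait Description,
                # Publication (with study URL appended), Population, and the
                # remaining QTL fields (LG:Start-End, genes, synteny, Chr, Start, Stop).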
f.write(",".join(split_line[:3] +
[writeable_results[0]] + span
+ [writeable_results[3]] + writeable_results[1:3] + writeable_results[4:] ) + "\n")
def main(args):
"""Main exection function"""
input_file = file_to_list(args.input)
distance = int(args.distance * 1000)
trait = (args.trait).lower()
if args.column == "None" and args.substring != "None":
sys.stderr.write("Column parameter required with substring.\n")
sys.exit(1)
elif args.column != "None" and args.substring == "None":
sys.stderr.write("Substring parameter required with column.\n")
sys.exit(1)
find_overlapping_features(input_file,
args.output,
trait,
distance,
args.database,
args.column,
args.substring)
if __name__ == "__main__":
args = parse_arguments()
main(args)
``` |
{
"source": "jlboat/Tragopogon_castellanus",
"score": 3
} |
#### File: Tragopogon_castellanus/scripts/merge_counts_bayes.py
```python
import sys
import pandas as pd
if len(sys.argv) != 3:
print("python {0} counts.csv bayes.out.csv".format(sys.argv[0]))
sys.exit(1)
def merge_counts_bayes():
"""Merge both CSVs using pandas"""
counts = pd.read_csv(sys.argv[1])
bayes = pd.read_csv(sys.argv[2])
df = pd.merge(counts, bayes)
output = sys.argv[1].split(".csv")[0]
df.to_csv(output + "_counts_bayes.csv", index=False)
if __name__ == "__main__":
merge_counts_bayes()
```
#### File: Tragopogon_castellanus/scripts/putative_homeolog_loss.py
```python
from sys import argv, stderr
import argparse
from datetime import datetime
import pandas as pd
def parse_arguments():
"""Parse arguments passed to script"""
parser = argparse.ArgumentParser(description=
"This script is designed to check for putative loss between homeologs \
\nusing counts from homeolog COREs used in the PG analysis.\n\n \
Example: python {0} -c ase_bayes_Cast2_lam_croc_flag.csv -b bayes_flag_sig_Cast2.csv".format(argv[0]),
formatter_class = argparse.RawDescriptionHelpFormatter)
requiredNamed = parser.add_argument_group('required arguments')
requiredNamed.add_argument("-c", "--COUNTS", type=str, required=True,
help="Bayes flagged CSV of read counts for Line and Tester", action="store")
requiredNamed.add_argument("-b", "--BIAS", type=str, required=True,
help="Bayes flagged CSV of loci demonstrating HSE", action="store")
return parser.parse_args()
def find_putative_loss(file_name):
"""Given file name, determine loci that are putatively lost based upon lack of expression"""
df = pd.read_csv(file_name)
output_base = file_name.split(".csv")[0]
df["Line_mean"] = df[["LINE_TOTAL_1","LINE_TOTAL_2","LINE_TOTAL_3"]
].mean(axis=1)
df["Line_std"] = df[["LINE_TOTAL_1","LINE_TOTAL_2","LINE_TOTAL_3"]
].std(axis=1)
df["Tester_mean"] = df[["TESTER_TOTAL_1","TESTER_TOTAL_2","TESTER_TOTAL_3"]
].mean(axis=1)
df["Tester_std"]=df[["TESTER_TOTAL_1","TESTER_TOTAL_2","TESTER_TOTAL_3"]
].std(axis=1)
df["Line_zero"] = ((df["Tester_mean"] > 0 ) & (df["Line_mean"] == 0 ))
df["Tester_zero"] = ((df["Line_mean"] > 0 ) & (df["Tester_mean"] == 0 ))
return df
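# The Line_zero/Tester_zero flags mark putative homeolog loss: one homeolog has a
# mean count of zero across the three replicates while the other is expressed
# (mean > 0). verify_HSE() below then cross-checks these loci against the HSE flags.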
def verify_HSE(df, hse_file):
"""Verify that each locus does not overlap 0.5 based upon 95% HDI"""
flag_sig = pd.read_csv(hse_file)
sig_true = flag_sig.columns[-1]
merged_df = pd.merge(df, flag_sig)
output_df = merged_df[["commonID","Line_zero","Tester_zero","Line_mean","Tester_mean",sig_true]]
lost = output_df[(output_df[sig_true]==1) & (output_df["Line_zero"] | output_df["Tester_zero"])]
output_base = hse_file.split(".csv")[0] + "_putative_loss.csv"
lost.to_csv(output_base, index=False)
if __name__ == "__main__":
start = datetime.now()
args = parse_arguments()
stderr.write("Executed: python {0} -c {1} -b {2}\n".format(argv[0],args.COUNTS, args.BIAS))
df = find_putative_loss(args.COUNTS)
verify_HSE(df, args.BIAS)
stop = datetime.now()
stderr.write("Runtime: {0}\n".format(str(stop - start)))
``` |
{
"source": "jlbrewe/hub",
"score": 2
} |
#### File: manager/accounts/tasks.py
```python
from celery import shared_task
from accounts.models import Account
@shared_task
def set_image_from_url(account_id: int, url: str):
"""
Set the image of an account from a URL.
"""
account = Account.objects.get(id=account_id)
account.set_image_from_url(url)
@shared_task
def set_image_from_socialaccount(account_id: int, provider: str):
"""
Set the image of an account from a social account.
"""
account = Account.objects.get(id=account_id)
account.set_image_from_socialaccount(provider)
@shared_task
def set_image_from_socialaccounts(account_id: int):
"""
Set the image of an account from one of its social accounts.
"""
account = Account.objects.get(id=account_id)
account.set_image_from_socialaccounts()
```
#### File: ui/views/users.py
```python
from django.contrib.auth.decorators import login_required
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from accounts.api.views import AccountsUsersViewSet
@login_required
def update(request: HttpRequest, *args, **kwargs) -> HttpResponse:
"""Update an account."""
viewset = AccountsUsersViewSet.init("list", request, args, kwargs)
context = viewset.get_response_context()
return render(request, "accounts/users/update.html", context)
```
#### File: manager/accounts/webhooks.py
```python
from djstripe import webhooks
from djstripe.models import Customer, Subscription
@webhooks.handler("customer.updated")
def customer_updated(event, **kwargs):
"""
    Update a customer's details.
    When a customer updates billing information on Stripe, transfer it to the account fields.
"""
customer = Customer.objects.get(id=event.data["object"]["id"])
account = customer.account
account.billing_email = customer.email
account.save()
@webhooks.handler("customer.subscription.created", "customer.subscription.updated")
def subscription_updated(event, **kwargs):
"""
Update a subscription.
When the subscription is created (e.g. manually in the Stripe
Dashboard) or updated (e.g. via the Stripe Customer Portal),
change the account's tier.
"""
from accounts.models import AccountTier
subscription = Subscription.objects.get(id=event.data["object"]["id"])
if subscription.is_valid():
tier = subscription.plan.product.account_tier
else:
tier = AccountTier.free_tier()
account = subscription.customer.account
account.tier = tier
account.save()
@webhooks.handler("customer.subscription.deleted")
def subscription_deleted(event, **kwargs):
"""
Delete a subscription.
When the subscription is deleted, put the account on the free tier.
"""
from accounts.models import AccountTier
subscription = Subscription.objects.get(id=event.data["object"]["id"])
account = subscription.customer.account
account.tier = AccountTier.free_tier()
account.save()
```
#### File: manager/dois/models_tests.py
```python
from datetime import datetime
from dois.models import Doi, receive_registration_email
from jobs.models import Job
from projects.models.nodes import Node
success_email = {
"from": "CrossRef Query System <<EMAIL>>",
"subject": "CrossRef submission ID: 1430891300",
"text": """
<?xml version="1.0" encoding="UTF-8"?>
<doi_batch_diagnostic status="completed" sp="a-cs1">
<submission_id>1430924839</submission_id>
<batch_id>[email protected]</batch_id>
<record_diagnostic status="Success">
<doi>10.47704/54320</doi>
<msg>Successfully added</msg>
</record_diagnostic>
<batch_data>
<record_count>1</record_count>
<success_count>1</success_count>
<warning_count>0</warning_count>
<failure_count>0</failure_count>
</batch_data>
</doi_batch_diagnostic>
""",
}
failure_email = {
"from": "CrossRef Query System <<EMAIL>>",
"subject": "CrossRef submission ID: 1430891300",
"text": """
<?xml version="1.0" encoding="UTF-8"?>
<doi_batch_diagnostic status="completed" sp="a-cs1">
<submission_id>1430924840</submission_id>
<batch_id>[email protected]</batch_id>
<record_diagnostic status="Failure">
<doi>10.47704/54321</doi>
<msg>Error processing relations: Relation target DOI does not exist: 10.5555/54320</msg>
</record_diagnostic>
<batch_data>
<record_count>1</record_count>
<success_count>0</success_count>
<warning_count>0</warning_count>
<failure_count>1</failure_count>
</batch_data>
</doi_batch_diagnostic>
""",
}
def test_success(db):
article = Node.objects.create(json={})
doi = Doi.objects.create(node=article)
# Simulate creating a job to register the DOI
job = doi.register()
assert list(job.params.keys()) == ["node", "doi", "url", "batch"]
# Simulate callback on job completion
doi.register_callback(
Job(
result=dict(
deposited="2020-11-20T22:03:57.603438Z",
deposit_request=dict(),
deposit_response=dict(),
deposit_success=True,
)
)
)
assert isinstance(doi.deposited, datetime)
assert isinstance(doi.deposit_request, dict)
assert isinstance(doi.deposit_response, dict)
assert doi.deposit_success is True
# Simulate receiving response email
receive_registration_email(None, success_email)
doi = Doi.objects.get(id=doi.id)
assert doi.registered is not None
assert doi.registration_success
assert doi.registration_response == success_email["text"]
def test_failure(db, caplog):
article = Node.objects.create(json={})
doi = Doi.objects.create(node=article)
# Simulate deposit failure
doi.register()
doi.register_callback(Job(result=dict(deposit_success=False)))
assert doi.deposit_success is False
assert "Error depositing DOI" in caplog.text
# Simulate receiving failure response email
receive_registration_email(None, failure_email)
doi = Doi.objects.get(id=doi.id)
assert doi.registered is not None
assert not doi.registration_success
assert doi.registration_response == failure_email["text"]
assert "Error registering DOI" in caplog.text
# Simulate not matching batch id
receive_registration_email(None, {"from": failure_email["from"], "text": ""})
assert "Error registering DOI" in caplog.text
```
#### File: jobs/api/serializers.py
```python
import logging
import re
from django.conf import settings
from django.shortcuts import reverse
from rest_framework import serializers
from accounts.models import Account
from jobs.models import Job, JobMethod, Queue, Worker, WorkerHeartbeat, Zone
from manager.api.helpers import get_object_from_ident
from manager.api.validators import FromContextDefault
from projects.models.projects import Project
logger = logging.getLogger(__name__)
class JobListSerializer(serializers.ModelSerializer):
"""
A job serializer for the `list` action.
This serializer includes all model fields except potentially
large JSON fields.
Some are made read only in derived serializers
(e.g. can not be set in `create` or `update` views).
Also adds "cheap to calculate" properties derived from other
    fields, e.g. `summary_string`.
"""
status_message = serializers.CharField(read_only=True)
summary_string = serializers.CharField(read_only=True)
runtime_formatted = serializers.CharField(read_only=True)
urls = serializers.SerializerMethodField()
url = serializers.SerializerMethodField()
class Meta:
model = Job
exclude = ["params", "result", "error", "log", "secrets"]
def get_urls(self, job: Job):
"""
Get the URLs to connect to the job from outside the local network.
A map of protocols to URLs e.g.
```json
"urls": {
"http": "https://hub.stenci.la/api/projects/1/jobs/16/connect?key=<KEY>",
"ws": "wss://hub.stenci.la/api/projects/1/jobs/16/connect?key=<KEY>"
},
```
Will be `null` if the job does not have any internal URLs or has ended.
"""
if job.urls and job.is_active:
if settings.JOB_URLS_LOCAL:
return job.urls
else:
request = self.context.get("request")
urls = {}
for protocol, url in job.urls.items():
# Get the URL that gives (and records) access to the job
url = request.build_absolute_uri(
reverse(
"api-projects-jobs-connect",
kwargs=dict(project=job.project.id, job=job.id),
)
+ f"?protocol={protocol}&key={job.key}"
)
# The `build_absolute_uri` function will always return
# `http` or `https` so replace with the protocol of the URL if necessary.
                    # Note: this will result in a `wss://` URL if the request is a secure one (i.e. HTTPS).
if protocol == "ws":
url = re.sub(r"^http", "ws", url)
urls[protocol] = url
return urls
def get_url(self, job: Job):
"""
Get the Websocket URL of the job.
This field is deprecated, use `urls` instead. It is provided for
        backwards compatibility with previous API versions and may be removed
in the future.
"""
urls = self.get_urls(job)
return urls.get("ws") if urls else None
class JobRetrieveSerializer(JobListSerializer):
"""
A job serializer for the `retrieve` action.
Adds all fields as well as the `position` of the job in the
queue (which involves another database query for each job
so is probably best to avoid for jobs in a list).
"""
position = serializers.IntegerField(read_only=True)
children = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
params = serializers.SerializerMethodField()
class Meta:
model = Job
exclude = ["secrets"]
def get_params(self, job: Job):
"""
        Remove `secrets` from the job's parameters (if present).
        Secrets should not be placed in job parameters in the first place.
        This is a further precaution to avoid leakage if they are present,
        and a warning is logged if they are.
"""
if job.params and "secrets" in job.params:
logger.warning("Secrets were present in job params", extra={"job": job.id})
del job.params["secrets"]
return job.params
class JobCreateSerializer(JobRetrieveSerializer):
"""
A job serializer for the `create` action.
Makes most fields readonly (ie. can not be set),
makes some fields required.
"""
class Meta:
model = Job
exclude = ["secrets"]
read_only_fields = [
"creator",
"created",
"project",
"zone",
"queue",
"status",
"began",
"ended",
"result",
"urls",
"log",
"runtime",
"users",
"retries",
"worker",
]
project = serializers.HiddenField(
default=FromContextDefault(
lambda context: get_object_from_ident(
Project, context["view"].kwargs["project"]
)
)
)
creator = serializers.HiddenField(default=serializers.CurrentUserDefault())
method = serializers.ChoiceField(choices=JobMethod.as_choices(), required=True)
params = serializers.JSONField(required=False)
# TODO: allow for project and zone to be specified; validate each against user / account
def create(self, validated_data):
"""
Create and dispatch a job.
"""
job = super().create(validated_data)
job.dispatch()
return job
class JobUpdateSerializer(JobRetrieveSerializer):
"""
A job serializer for the `update` and `partial_update` actions.
Intended for internal services to update job status.
Makes some fields read only (should not be
changed after creation) but allows updating of the rest.
"""
class Meta:
model = Job
exclude = ["secrets"]
read_only_fields = ["creator", "created", "method", "params", "zone", "queue"]
class ZoneSerializer(serializers.ModelSerializer):
"""
A zone serializer.
Includes all model fields.
"""
class Meta:
model = Zone
fields = "__all__"
class ZoneCreateSerializer(ZoneSerializer):
"""
A zone serializer for the `create` action.
Makes `account` readonly, and based on the URL parameter
so that it is not possible to create a zone for a different account.
Also validates `name` is unique within an account.
"""
class Meta:
model = Zone
fields = "__all__"
account = serializers.HiddenField(
default=FromContextDefault(
lambda context: get_object_from_ident(
Account, context["view"].kwargs["account"]
)
)
)
def validate(self, data):
"""Validate that the zone name is unique for the account."""
if Zone.objects.filter(account=data["account"], name=data["name"]).count() != 0:
raise serializers.ValidationError(
dict(name="Zone name must be unique for account.")
)
return data
class QueueSerializer(serializers.ModelSerializer):
"""
A queue serializer.
Given this is only ever used read-only, it includes all model fields.
"""
class Meta:
model = Queue
fields = "__all__"
class WorkerSerializer(serializers.ModelSerializer):
"""
A worker serializer.
Given this is only ever used read-only, it includes all model fields.
"""
active = serializers.BooleanField(read_only=True)
class Meta:
model = Worker
fields = "__all__"
class WorkerHeartbeatSerializer(serializers.ModelSerializer):
"""
A worker heartbeat serializer.
    Given this is only used for a particular worker, it excludes
    both the heartbeat's and the worker's id fields.
"""
class Meta:
model = WorkerHeartbeat
exclude = ["id", "worker"]
```
#### File: jobs/api/views_tests.py
```python
from rest_framework import status
from manager.testing import DatabaseTestCase
class ProjectsJobsViewsTest(DatabaseTestCase):
"""Test creating and retrieving jobs for a project."""
# Type specific CRUD methods for Jobs
def create_job(self, user, project, data):
return self.create(
user, "api-projects-jobs-list", data, kwargs={"project": project.id}
)
def retrieve_job(self, user, project, job_id, job_key=None):
return self.retrieve(
user,
"api-projects-jobs-detail",
kwargs={"project": project.id, "job": job_id},
data=dict(key=job_key) if job_key else {},
)
# Testing methods
def test_access_with_and_without_keys(self):
"""
        Tests access to a job, with and without a key, for public and private projects.
"""
for project in (self.ada_public, self.ada_private):
response = self.create_job(self.ada, project, dict(method="sleep"))
assert response.status_code == status.HTTP_201_CREATED
job_id = response.data.get("id")
job_key = response.data.get("key")
# Ada can get the job details with or without key and
# even with a bad key
response = self.retrieve_job(self.ada, project, job_id)
assert response.status_code == status.HTTP_200_OK
response = self.retrieve_job(self.ada, project, job_id, job_key)
assert response.status_code == status.HTTP_200_OK
response = self.retrieve_job(self.ada, project, job_id, "foo")
assert response.status_code == status.HTTP_200_OK
# Bob and anon...
for user in (self.bob, None):
# can't get job without key
response = self.retrieve_job(user, project, job_id)
assert response.status_code == status.HTTP_404_NOT_FOUND
# can get job with key
response = self.retrieve_job(user, project, job_id, job_key)
assert response.status_code == status.HTTP_200_OK
# can't get job with bad key
response = self.retrieve_job(user, project, job_id, "foo")
assert response.status_code == status.HTTP_404_NOT_FOUND
```
#### File: manager/jobs/jobs.py
```python
import datetime
import logging
import time
from celery import Celery, signature
from celery.result import AsyncResult
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.utils import timezone
from jobs.models import Job, JobMethod, JobStatus, Queue, Worker
logger = logging.getLogger(__name__)
# Setup the Celery app
app = Celery("manager", broker=settings.BROKER_URL, backend=settings.CACHE_URL)
app.conf.update(
# By default Celery will keep on trying to connect to the broker forever
# This overrides that. Initially try again immediately, then add 0.5 seconds for each
# subsequent try (with a maximum of 3 seconds).
# See https://github.com/celery/celery/issues/4296
broker_transport_options={
"max_retries": 10,
"interval_start": 0,
"interval_step": 0.5,
"interval_max": 3,
},
# Needed to ensure STARTED state is emitted
task_track_started=True,
)
def dispatch_job(job: Job) -> Job:
"""
Send a job to a queue.
Decides which queue a job should be sent to and sends it.
The queue can depend upon both the project and the account (either the
account that the project is linked to, or the default account of the job
creator).
"""
if not JobMethod.is_member(job.method):
raise ValueError("Unknown job method '{}'".format(job.method))
if job.method in settings.JOB_METHODS_STAFF_ONLY and (
not job.creator or not job.creator.is_staff
):
raise PermissionDenied
if JobMethod.is_compound(job.method):
children = job.children.all().order_by("id")
if len(children) == 0:
# If there are no children (e.g. a pull job for a project with no sources)
# then job is immediately finished
job.runtime = 0
job.is_active = False
job.status = JobStatus.SUCCESS.value
else:
if job.method == JobMethod.parallel.value:
# Dispatch all child jobs simultaneously
for child in children:
dispatch_job(child)
else:
# Dispatch the first child; subsequent children
# will be status WAITING and will get dispatched later
# on update of the parent.
for index, child in enumerate(children):
if index == 0:
dispatch_job(child)
else:
child.is_active = True
child.status = JobStatus.WAITING.value
child.save()
job.is_active = True
job.status = JobStatus.DISPATCHED.value
else:
# Find queues that have active workers on them
        # ordered by ascending priority
queues = list(
Queue.objects.filter(
workers__in=Worker.objects.filter(
# Has not finished
finished__isnull=True,
# Has been updated in the last x minutes
updated__gte=timezone.now() - datetime.timedelta(minutes=15),
),
).order_by("priority")
)
# Fallback to the default Stencila queue
# Apart from anything else having this fallback is useful in development
        # because it means that the `overseer` service does not need to be running
        # in order to keep track of the number of workers listening on each queue
# (during development `worker`s listen to the default queue)
if len(queues) == 0:
logger.warning("No queues found with active workers")
queue, _ = Queue.get_or_create(
account_name="stencila", queue_name="default"
)
else:
if job.creator is None or job.project is None:
# Jobs created by anonymous users go on the lowest
# priority queue
priority = 1
else:
# The priority of other jobs is determined by the
# account tier of the project
priority = job.project.account.tier.id
queue = queues[min(len(queues), priority) - 1]
        # Add the job's project id, key and secrets to its kwargs.
# Doing this here ensures it is done for all jobs
# and avoids putting the secrets in the job's `params` field.
kwargs = dict(**job.params) if job.params else {}
kwargs["project"] = job.project.id if job.project else None
kwargs["key"] = job.key
kwargs["secrets"] = job.secrets
# Send the job to the queue
task = signature(
job.method, kwargs=kwargs, queue=queue.name, task_id=str(job.id), app=app,
)
task.apply_async()
job.queue = queue
job.is_active = True
job.status = JobStatus.DISPATCHED.value
job.save()
return job
def update_job(job: Job, data={}, force: bool = False) -> Job:
"""
Update a job.
This method is triggered by a PATCH request from the
    `overseer` service. It updates the status and other fields of
    the job and, if the job has a parent, updates its status too.
See https://stackoverflow.com/a/38267978 for important considerations
in using AsyncResult.
"""
# Avoid unnecessary update
if not job.is_active and not force:
return job
was_active = job.is_active
if JobMethod.is_compound(job.method):
# Update the status of compound jobs based on children
status = job.status
is_active = False
all_previous_succeeded = True
any_previous_failed = False
for child in job.get_children():
# If the child has a 'higher' status then update the
# status of the compound job
status = JobStatus.highest([status, child.status])
# If the child is still waiting then...
if child.status == JobStatus.WAITING.value:
# If all previous have succeeded, dispatch it
if all_previous_succeeded:
dispatch_job(child)
# If any previous have failed, cancel it
elif any_previous_failed:
cancel_job(child)
if child.status != JobStatus.SUCCESS.value:
all_previous_succeeded = False
if child.status == JobStatus.FAILURE.value:
any_previous_failed = True
# If the child is still active then the compound job is active
if child.is_active:
is_active = True
job.is_active = is_active
job.status = JobStatus.RUNNING.value if is_active else status
else:
status = data.get("status")
assert status
# Do not do anything if the new status is lower rank than the
        # existing status. This can happen, for example, when a job is
# terminated (the SUCCESS state is sent after TERMINATED)
if JobStatus.rank(status) < JobStatus.rank(job.status):
return job
# Update fields sent by `overseer` service, including `status`
for key, value in data.items():
setattr(job, key, value)
def async_result():
return AsyncResult(str(job.id), app=app)
# If job succeeded then get the result if we haven't already
if status == JobStatus.SUCCESS.value and job.result is None:
response = None
attempts = 0
while not response and attempts < 5:
try:
response = async_result().get(timeout=30)
except Exception:
# Catch all errors, but log them. Occasional
# errors encountered in prod include ResponseError and TimeoutError
logger.warning(
"Error getting async result",
exc_info=True,
extra=dict(id=job.id, method=job.method, attempts=attempts),
)
time.sleep(1)
attempts += 1
if response:
job.result = response.get("result")
job.log = response.get("log")
else:
logger.error(
"Unable to get async result",
extra=dict(id=job.id, method=job.method, attempts=attempts),
)
job.status = JobStatus.FAILURE.value
job.error = dict(
type="RuntimeError", message="Unable to get result of job"
)
# If job failed then get the error
# For FAILURE, `info` is the raised Exception
elif status == JobStatus.FAILURE.value:
info = async_result().info
if info:
job.error = dict(type=type(info).__name__, message=str(info))
# If the job has just ended then mark it as inactive
if JobStatus.has_ended(status):
job.is_active = False
# If the job is no longer active clear its secrets and run its callback
if was_active and not job.is_active:
job.secrets = None
job.run_callback()
# Save before updating parent (and then this again)
job.save()
# If the job has a parent then update it too
if job.parent:
update_job(job.parent)
return job
def cancel_job(job: Job) -> Job:
"""
Cancel a job.
This uses Celery's terminate options which will kill the worker child process.
This is not normally recommended but in this case is OK because there is only
one task per process.
See `worker/worker.py` for the reasoning for using `SIGUSR1`.
See https://docs.celeryproject.org/en/stable/userguide/workers.html#revoke-revoking-tasks
"""
if job.is_active:
if JobMethod.is_compound(job.method):
for child in job.children.all():
cancel_job(child)
else:
app.control.revoke(str(job.id), terminate=True, signal="SIGUSR1")
job.status = JobStatus.CANCELLED.value
job.is_active = False
job.secrets = None
job.save()
return job
```
#### File: manager/jobs/models_tests.py
```python
import pytest
from accounts.models import Account, AccountTier
from jobs.models import Queue, Zone
@pytest.mark.django_db
def test_queue_get_or_create():
AccountTier.objects.create()
acme = Account.objects.create(name="acme")
# Existing zone
north = Zone.objects.create(account=acme, name="north")
queue, created = Queue.get_or_create(queue_name="north", account_name="acme")
assert queue.zone == north
assert queue.priority == 0
assert queue.untrusted is False
assert queue.interrupt is False
# Implicitly created zone
queue, created = Queue.get_or_create(queue_name="south:2", account_name="acme")
south = Zone.objects.get(account=acme, name="south")
assert queue.zone == south
assert queue.priority == 2
assert queue.untrusted is False
assert queue.interrupt is False
# Queue that accepts untrusted jobs
queue, created = Queue.get_or_create(
queue_name="north:2:untrusted", account_name="acme"
)
assert queue.zone == north
assert queue.priority == 2
assert queue.untrusted is True
assert queue.interrupt is False
# Queue that accepts untrusted and interruptable jobs
queue, created = Queue.get_or_create(
queue_name="north:2:untrusted:interrupt", account_name="acme"
)
assert queue.zone == north
assert queue.priority == 2
assert queue.untrusted is True
assert queue.interrupt is True
```
#### File: manager/api/authentication.py
```python
import base64
import binascii
import logging
from django.conf import settings
from knox.auth import TokenAuthentication
from rest_framework.authentication import HTTP_HEADER_ENCODING, BaseAuthentication
from rest_framework.authentication import BasicAuthentication as DRFBasicAuthentication
from rest_framework.authentication import (
SessionAuthentication,
get_authorization_header,
)
from rest_framework.exceptions import AuthenticationFailed
from users.socialaccount.tokens import refresh_user_access_token
logger = logging.getLogger(__name__)
class BasicAuthentication(BaseAuthentication):
"""
HTTP Basic authentication allowing a token as username.
This class is based on `rest_framework.authentication.BasicAuthentication` but
expects the username part of the header to be the `knox` token. This allows
for easier use of `curl` and `httpie` with the API. e.g
curl https://hub.stenci.la/api/users/me -u c3ddcf8be55cd6baa66da51efb0e8cae24aeb9c7b66f4c2a291a18afe2c09d1f:
The trailing colon prevents curl from asking for a password.
Inspired by [Stripe's approach](https://stripe.com/docs/api/authentication).
Basic authentication using username/password is usually not allowed in production
since that may encourage API users to store username/password unsafely in client applications.
However, it can be turned on during development by setting `settings.API_BASIC_AUTH = True`.
"""
def authenticate(self, request):
"""
Authenticate a request.
Returns a `User` if a valid token has been supplied
using HTTP Basic authentication. Otherwise returns `None`.
"""
auth = get_authorization_header(request).split()
if not auth or auth[0].lower() != b"basic":
return None
if len(auth) == 1:
raise AuthenticationFailed(
"Invalid Basic authorization header. No credentials provided."
)
elif len(auth) > 2:
raise AuthenticationFailed(
"Invalid Basic authorization header. Credentials string should not contain spaces."
)
try:
auth_parts = (
base64.b64decode(auth[1]).decode(HTTP_HEADER_ENCODING).split(":")
)
except (TypeError, UnicodeDecodeError, binascii.Error):
raise AuthenticationFailed(
"Invalid Basic authorization header. Credentials not correctly base64 encoded."
)
username, password = (
auth_parts if len(auth_parts) >= 2 else (auth_parts[0], None)
)
if password:
if settings.API_BASIC_AUTH:
return DRFBasicAuthentication().authenticate_credentials(
username, password, request
)
else:
raise AuthenticationFailed(
"Basic authorization with a password is not allowed; use an API token instead."
)
else:
# Treat the username as a token; pass it on to `knox.TokenAuthentication`
token = username.encode("utf-8")
return TokenAuthentication().authenticate_credentials(token)
class RefreshOAuthTokenAuthentication(BaseAuthentication):
"""
Authentication that allows for adding / updating a `SocialToken` for a `SocialApp`.
Most of the time users will take the normal `allauth` based flow to authenticate
    using a third-party provider e.g. Google. In these cases, we store an
access token to use for that provider on behalf of the user (and usually a
refresh token).
However, in other cases, such as a client using the Hub's API, the user may
NOT yet have gone through this flow but we still want to be able to pass along
    an access token for a `SocialApp` (e.g. for pulling a source from that provider).
Pass the access token in the `OAuth-Token` header with the name of the provider e.g.
OAuth-Token: <PASSWORD>...
This header name was chosen as it does not clash with any of those registered
(https://www.iana.org/assignments/message-headers/message-headers.xhtml) and
the `X-` prefix for custom headers is deprecated.
This would ideally be middleware (it's not an authenticator) but DRF does
not support that and this seems to be the best alternative.
See https://github.com/encode/django-rest-framework/issues/7607
"""
def authenticate(self, request):
"""
Implement authenticator interface.
See https://www.django-rest-framework.org/api-guide/authentication/#custom-authentication
"""
# Try to authenticate with the "real" authenticators
user_auth = BasicAuthentication().authenticate(request)
if not user_auth:
user_auth = TokenAuthentication().authenticate(request)
# Not authenticated so leave
if not user_auth:
return None
# Authenticated, so look for special header...
        # This is wrapped in a try/except because we really don't want
# this side-effect to stop the authentication process
try:
user, auth = user_auth
header = request.META.get("HTTP_OAUTH_TOKEN")
if header:
parts = header.split(" ")
if len(parts) == 2:
provider, token = parts
refresh_user_access_token(user, provider, token)
except Exception:
logger.error("Error attempting to refresh OAuth token", exc_info=True)
return user_auth
class CsrfExemptSessionAuthentication(SessionAuthentication):
"""
Django Session authentication without CSRF check.
For some API views it may be necessary to disable the CSRF protection.
    This enables that; use it by adding this to the view:
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication, knox.auth.TokenAuthentication)
"""
def enforce_csrf(self, request):
"""
Do not enforce CSRF.
"""
        return  # Do not perform the CSRF check that would otherwise happen here
```
#### File: api/views/emails.py
```python
import logging
from rest_framework import generics, serializers
from rest_framework.parsers import JSONParser, MultiPartParser
from rest_framework.request import Request
from rest_framework.response import Response
from manager.signals import email_received
logger = logging.getLogger(__name__)
class EmailSerializer(serializers.Serializer):
"""
A serializer for emails.
This serializer is mainly for documentation and is based on the fields
    sent by SendGrid. Note that SendGrid sends `from` and `to` instead of
`sender` and `recipient`.
See https://sendgrid.com/docs/for-developers/parsing-email/setting-up-the-inbound-parse-webhook/#default-parameters
"""
sender = serializers.CharField(
help_text="Email sender, as taken from the message headers."
)
recipient = serializers.CharField(
help_text="Email recipient field, as taken from the message headers."
)
subject = serializers.CharField(help_text="Email subject.")
text = serializers.CharField(help_text="Email body in plaintext formatting.")
html = serializers.CharField(
help_text="HTML body of email. If not set, email did not have an HTML body."
)
spf = serializers.CharField(
help_text="The results of the Sender Policy Framework verification of the message sender and receiving IP address." # noqa
)
class EmailsView(generics.GenericAPIView):
"""
A view for receiving parsed email payloads.
"""
permission_classes: list = []
parser_classes = [MultiPartParser, JSONParser]
serializer_class = EmailSerializer
def post(self, request: Request) -> Response:
"""
Receive an email.
Returns an empty response.
"""
email = request.data
logger.info(f"Email received from {email.get('from')} to {email.get('to')}")
email_received.send_robust(sender=self.__class__, email=email)
return Response()
```
#### File: api/views/status_tests.py
```python
from unittest import mock
from django.db import OperationalError
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from manager.version import __version__
class StatusAPIViewTests(APITestCase):
"""Test that unauthenticated user can get status."""
url = reverse("api-status")
def test_ok(self):
response = self.client.get(self.url)
assert response.status_code == status.HTTP_200_OK
assert response.data["time"] is not None
assert response.data["version"] == __version__
def test_pending(self):
with mock.patch(
"manager.api.views.status.migrations_pending", new=migrations_pending_true,
):
response = self.client.get(self.url)
assert response.status_code == status.HTTP_503_SERVICE_UNAVAILABLE
def test_db_error(self):
with mock.patch(
"manager.api.views.status.migrations_pending",
new=migrations_pending_operational_error,
):
response = self.client.get(self.url)
assert response.status_code == status.HTTP_503_SERVICE_UNAVAILABLE
def test_other_error(self):
with mock.patch(
"manager.api.views.status.migrations_pending",
new=migrations_pending_other_error,
):
response = self.client.get(self.url)
assert response.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR
assert response.data["message"] == "beep boop"
def migrations_pending_true():
return True
def migrations_pending_operational_error():
raise OperationalError("could not connect to server")
def migrations_pending_other_error():
raise RuntimeError("beep boop")
```
#### File: manager/manager/settings_tests.py
```python
import re
from django.conf import settings
def test_CORS_ALLOWED_ORIGIN_REGEXES():
"""Test that regexes math orgins for Google add-ons. etc"""
for origin in (
# These are origin URLS used by our Google Docs add-on
"https://n-zoqnmwqnnhslxffeq3hh6ne46wicurqczwe4csa-0lu-script.googleusercontent.com",
"https://n-zoqnmwqnnhslxffeq3hh6ne46wicurqczwe4csa-11u-script.googleusercontent.com",
):
matched = False
for regex in settings.CORS_ALLOWED_ORIGIN_REGEXES:
if re.match(regex, origin):
matched = True
assert matched, f"Unmached CORS origin {origin}"
```
#### File: api/views/files.py
```python
from typing import Any, Dict, List, Optional, Tuple, Union
from django.db.models import QuerySet, TextField, Value
from django.db.models.functions import Concat, StrIndex, Substr
from django.http import Http404
from rest_framework import permissions, viewsets
from rest_framework.request import Request
from rest_framework.response import Response
from manager.api.helpers import HtmxDestroyMixin, HtmxListMixin, HtmxRetrieveMixin
from projects.api.serializers import FileListSerializer, FileSerializer
from projects.api.views.projects import get_project
from projects.models.files import File
from projects.models.projects import Project
from projects.models.snapshots import Snapshot
from projects.models.sources import Source
class ProjectsFilesViewSet(
HtmxListMixin, HtmxRetrieveMixin, HtmxDestroyMixin, viewsets.GenericViewSet,
):
"""A view set for project files."""
lookup_url_kwarg = "file"
object_name = "file"
queryset_name = "files"
def get_permissions(self):
"""
Get the permissions that the current action requires.
        Actions `list` and `retrieve` do not require authentication
for public projects (i.e. anon users can view sources).
"""
if self.action in ["list", "retrieve"]:
return [permissions.AllowAny()]
return [permissions.IsAuthenticated()]
def get_project(self) -> Project:
"""
Get the project for the current action and check user has roles.
Requires that user has read access to the project.
"""
if not hasattr(self, "project"):
self.project = get_project(self.kwargs, self.request.user)
return self.project
def get_prefix(self) -> str:
"""
Get the prefix for the current request.
Ensures that it has a trailing slash.
"""
prefix = self.request.GET.get("prefix", "").strip()
if prefix and not prefix.endswith("/"):
prefix += "/"
return prefix
def get_queryset(
self,
project: Optional[Project] = None,
source: Optional[Source] = None,
snapshot: Optional[Snapshot] = None,
):
"""
Get project files.
Allows for filtering:
- using a search string
        - using a path prefix (e.g. for subdirectory listing)
- using a mimetype
Allows for aggregation (the default) by directory.
"""
project = project or self.get_project()
# Avoid using select_related and prefetch_related here
# as it can slow down queries significantly
queryset = File.objects.filter(project=project).order_by("path")
source = source or self.request.GET.get("source")
if source:
queryset = queryset.filter(source=source)
snapshot = snapshot or self.request.GET.get("snapshot")
if snapshot:
queryset = queryset.filter(snapshot=snapshot)
else:
queryset = queryset.filter(snapshot__isnull=True, current=True)
prefix = self.get_prefix()
if prefix:
queryset = queryset.filter(path__startswith=prefix)
search = self.request.GET.get("search", "").strip()
if search:
queryset = queryset.filter(path__istartswith=prefix + search)
mimetype = self.request.GET.get("mimetype", "").strip()
if mimetype:
queryset = queryset.filter(mimetype__startswith=mimetype)
queryset = queryset.annotate(
name=Substr(
"path",
pos=len(prefix) + 1,
length=StrIndex(
                    # Add a trailing slash so StrIndex always finds a separator,
                    # even for files directly under the prefix
Concat(Substr("path", len(prefix) + 1), Value("/")),
Value("/"),
)
- 1,
output_field=TextField(),
)
)
expand = self.request.GET.get("expand")
if expand is not None:
return queryset
# Fetch the files, limiting to 10,000 so the following grouping
# does not take forever
# TODO: Put a message in the result if this limit is reached
files = list(queryset.all()[:10000])
groups: Dict[str, Dict[str, Any]] = {}
for file in files:
if "/" in file.path[len(prefix) + 1 :]:
name = file.name
info = groups.get(name)
if not info:
groups[name] = dict(
path=prefix + name,
name=file.name,
is_directory=True,
count=1,
source=[file.source_id],
size=file.size,
modified=file.modified,
)
else:
info["count"] += 1
info["size"] += file.size
info["source"] += [file.source_id]
info["modified"] = (
file.modified
if file.modified > info["modified"]
else info["modified"]
)
else:
file.is_directory = False
groups[file.path] = file
# Return items sorted by path again
return [value for key, value in sorted(groups.items())]
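    # Illustration of the directory grouping above (hypothetical paths): with
    # prefix "data/", a file at "data/report.md" is returned as-is, while files
    # under "data/figures/..." collapse into one entry named "figures" with
    # is_directory=True and aggregated count, size, sources and latest modified time.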
def get_object(self, project: Optional[Project] = None) -> File:
"""
Get a current file.
"""
project = project or self.get_project()
id_or_path = self.kwargs["file"]
try:
identifier = dict(id=int(id_or_path))
except ValueError:
identifier = dict(path=id_or_path)
try:
return File.get_latest(project=project, current=True, **identifier)
except IndexError:
raise Http404
def get_pipeline(
        self, file: Optional[File] = None, upstream_limit: int = 5, downstream_limit: int = 5
    ) -> Tuple[List[Union[File, Source]], List[File]]:
"""
Get a file's pipeline.
        Returns a tuple of (upstreams, downstreams) for the file.
        The file does not need to be current.
        Currently, this only considers the first upstream and downstream. It does
        not collect branching upstreams or downstreams (e.g. more than one downstream).
"""
file = file or self.get_object()
here = file
upstreams: List[Union[File, Source]] = []
while len(upstreams) < upstream_limit:
if here.source:
upstreams.append(here.source)
break
else:
ups = here.upstreams.all()
if len(ups):
here = ups[0]
upstreams.append(here)
else:
break
here = file
downstreams: List[File] = []
while len(downstreams) < downstream_limit:
downs = here.downstreams.all()
if len(downs):
here = downs[0]
downstreams.append(here)
else:
break
return upstreams, downstreams
def get_history(self, project: Optional[Project] = None) -> QuerySet:
"""
Get a file's history.
Returns a queryset of file entries that match the path within the project.
The file does not need to be current.
"""
project = project or self.get_project()
return (
File.objects.filter(project=project, path=self.kwargs["file"])
.order_by("-created")
.select_related(
"job", "job__creator", "job__creator__personal_account", "source",
)
.prefetch_related("upstreams")
)
def get_serializer_class(self):
"""
Get the serializer class for the current action.
"""
if self.action == "list":
return FileListSerializer
if self.action == "destroy":
return None
return FileSerializer
def get_response_context(self, *args, **kwargs):
"""
Add breadcrumbs to template rendering context.
"""
context = super().get_response_context(*args, **kwargs)
context["project"] = self.get_project()
source = self.request.GET.get("source")
if source:
context["source"] = Source.objects.get(id=source)
snapshot = self.request.GET.get("snapshot")
if snapshot:
context["snapshot"] = Snapshot.objects.get(id=snapshot)
prefix = self.get_prefix()
if prefix:
context["prefix"] = prefix
breadcrumbs = [("root", "")]
path = ""
for name in prefix.split("/"):
if name:
path += name + "/"
breadcrumbs.append((name, path))
context["breadcrumbs"] = breadcrumbs
return context
# Additional API actions. These do not use the `@action` decorator
# because they are declared as routes on the `ProjectsFilesRouter`
# directly
def history(self, request: Request, *args, **kwargs) -> Response:
"""
        Get a file's history.
        Returns a paginated history of the file.
"""
queryset = self.get_history()
pages = self.paginate_queryset(queryset)
serializer = self.get_serializer(pages, many=True)
return self.get_paginated_response(serializer.data)
def convert(self, request: Request, *args, **kwargs) -> Response:
"""
Convert a file to another format.
Confirms that the destination path and other options are
correct, creates a job and redirects to it.
"""
project = self.get_project()
file = self.get_object(project)
path = self.request.data.get("path")
format = self.request.data.get("format")
job = file.convert(request.user, path)
job.dispatch()
return Response(
dict(project=project, file=file, path=path, format=format, job=job)
)
```
#### File: projects/models/files.py
```python
import enum
import mimetypes
from datetime import datetime
from typing import Dict, List, NamedTuple, Optional, Tuple
import pygments
import pygments.lexer
import pygments.lexers
from django.db import models, transaction
from django.http import HttpResponse
from django.shortcuts import reverse
from django.utils import timezone
from pygments.lexers.data import JsonLexer
from pygments.lexers.markup import MarkdownLexer
from pygments.lexers.special import TextLexer
from jobs.models import Job, JobMethod
from projects.models.projects import Project
from projects.models.sources import GoogleSourceMixin, Source, SourceAddress
from users.models import User
class FileFormat(NamedTuple):
"""
Specification of a file format.
The `format_id` should be a lowercase string and
map to a "codec" in Encoda.
In the future, these specs may be generated from Encoda
codec modules which also define the equivalent of
`mimetypes` and `extensions`.
"""
format_id: str
label: str
kind: str
mimetype: str
extensions: List[str]
lexer: pygments.lexer.Lexer
icon_class: str
minor: bool = False
@property
def default_extension(self) -> str:
"""Get the default extension for the format."""
return self.extensions[0] if self.extensions else ("." + self.format_id)
@staticmethod
def default_icon_class() -> str:
"""Get the default icon class."""
return "ri-file-3-line"
@property
def is_image(self) -> bool:
"""Is the format an image format."""
return self.kind == "image" or self.mimetype.startswith("image/")
@property
def is_binary(self) -> bool:
"""Is the format be considered binary when determining the type of HTTP response."""
return self.format_id not in ("html", "json", "jsonld", "md", "rmd", "xml")
@property
def convert_to_options(self) -> List[Tuple[str, str]]:
"""
Get a list of file formats that a user can convert to from the given format.
"""
if self.kind in ("exchange", "text"):
to_kinds = ("exchange", "text")
else:
return []
return [
f.value
for f in FileFormats
if f.value.kind in to_kinds and not f.value.minor
]
@staticmethod
def default_convert_to_options() -> List["FileFormat"]:
"""Get the default options for converting the format to."""
return [f.value for f in FileFormats if not f.value.minor]
def file_format(
format_id: str,
label: Optional[str] = None,
kind: Optional[str] = None,
mimetype: Optional[str] = None,
extensions: Optional[List[str]] = None,
lexer: Optional[pygments.lexer.Lexer] = None,
icon_class: Optional[str] = None,
minor: bool = False,
):
"""
Create a `FileFormat` with fallbacks.
Necessary because you can't override `__init__` for a
named tuple.
"""
if label is None:
label = format_id.upper()
if mimetype is None:
mimetype, encoding = mimetypes.guess_type("file." + format_id)
if mimetype is None:
raise RuntimeError(
"Can not guess a MIME type for format {0}".format(format_id)
)
if extensions is None:
extensions = mimetypes.guess_all_extensions(mimetype)
if lexer is None:
try:
lexer = pygments.lexers.get_lexer_for_mimetype(mimetype)
except pygments.util.ClassNotFound:
lexer = TextLexer
if kind is None:
if mimetype.startswith("image/"):
kind = "image"
else:
kind = "text"
if icon_class is None:
if mimetype.startswith("image/"):
icon_class = "ri-image-line"
else:
icon_class = FileFormat.default_icon_class()
return FileFormat(
format_id, label, kind, mimetype, extensions, lexer, icon_class, minor
)
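# A sketch of the fallbacks (hypothetical format id): file_format("csv") would label
# it "CSV", guess the mimetype ("text/csv") and extensions via the mimetypes module,
# fall back to TextLexer if pygments has no lexer for that mimetype, and default to
# kind "text" with FileFormat.default_icon_class().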
class FileFormats(enum.Enum):
"""
List of file formats.
When adding `icon_class`es below, note that they may need to be
added to the `purgecss` whitelist in `postcss.config.js` to
    avoid them being purged (if they are not used in any of the HTML templates).
"""
docx = file_format("docx", label="Microsoft Word", icon_class="ri-file-word-line")
gdoc = file_format(
"gdoc",
label="Google Doc",
mimetype="application/vnd.google-apps.document",
icon_class="ri-google-line",
lexer=JsonLexer,
)
gsheet = file_format(
"gsheet",
label="Google Sheet",
mimetype="application/vnd.google-apps.spreadsheet",
icon_class="ri-google-line",
kind="spreadsheet",
lexer=JsonLexer,
)
gif = file_format("gif", icon_class="ri-file-gif-line")
html = file_format("html", icon_class="ri-file-code-line")
ipynb = file_format(
"ipynb", label="Jupyter Notebook", mimetype="application/x-ipynb+json"
)
jats = file_format(
"jats",
label="JATS XML",
mimetype="application/jats+xml",
extensions=[".jats.xml"],
icon_class="ri-file-code-line",
)
jpg = file_format("jpg")
json = file_format("json", kind="exchange", minor=True)
json5 = file_format(
"json5", mimetype="application/json5", kind="exchange", minor=True
)
jsonld = file_format(
"jsonld", label="JSON-LD", mimetype="application/ld+json", kind="exchange"
)
md = file_format(
"md", label="Markdown", mimetype="text/markdown", icon_class="ri-markdown-line"
)
odt = file_format("odt")
pandoc = file_format(
"pandoc",
label="Pandoc JSON",
mimetype="application/pandoc+json",
kind="exchange",
minor=True,
)
pdf = file_format("pdf", icon_class="ri-file-pdf-line")
png = file_format("png")
rmd = file_format(
"rmd", label="R Markdown", mimetype="text/r+markdown", lexer=MarkdownLexer,
)
rnb = file_format(
"rnb", mimetype="text/rstudio+html", extensions=[".nb.html"], minor=True
)
text = file_format("txt", label="Plain text")
yaml = file_format("yaml", mimetype="application/x-yaml", kind="exchange")
xlsx = file_format("xlsx", kind="spreadsheet", icon_class="ri-file-excel-line")
xml = file_format("xml", kind="exchange", minor=True)
@classmethod
def from_id(cls, format_id: str) -> "FileFormat":
"""
        Get a file format from its id.
"""
for f in cls:
if f.value.format_id == format_id:
return f.value
raise ValueError("No such member with id {}".format(format_id))
@classmethod
def from_mimetype(cls, mimetype: str) -> "FileFormat":
"""
        Get a file format from its MIME type.
"""
for f in cls:
if mimetype == f.value.mimetype:
return f.value
raise ValueError("No such member with mimetype {}".format(mimetype))
@classmethod
def from_id_or_mimetype(
cls, format_id: Optional[str] = None, mimetype: Optional[str] = None
) -> "FileFormat":
"""
        Get a file format from its id or MIME type.
"""
if format_id:
return FileFormats.from_id(format_id)
elif mimetype:
return FileFormats.from_mimetype(mimetype)
else:
raise ValueError("Must provide format id or MIME type")
@classmethod
def from_url_or_mimetype(
cls, url: Optional[str] = None, mimetype: Optional[str] = None
) -> "FileFormat":
"""
Get a file format from a URL (including file name or path) or mimetype.
"""
if mimetype:
return cls.from_mimetype(mimetype)
elif url:
mimetype, encoding = mimetypes.guess_type(url)
if not mimetype:
raise ValueError("Unable to determine MIME type for URL {}".format(url))
return cls.from_mimetype(mimetype)
else:
raise ValueError("Must provide a URL or MIME type")
class File(models.Model):
"""
A file associated with a project.
Files are created by a `job` and may be derived from a `source`,
or from another `upstream` file.
A `File` object (i.e. a row in the corresponding database table) is
never deleted. Instead, it is made `current=False`. That allows us
to easily maintain a history of each file in a project.
"""
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
related_name="files",
null=False,
blank=False,
help_text="The project that the file is associated with.",
)
path = models.TextField(
null=False,
blank=False,
db_index=True,
help_text="The path of the file within the project.",
)
job = models.ForeignKey(
Job,
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="files",
help_text="The job that created the file e.g. a source pull or file conversion.",
)
source = models.ForeignKey(
Source,
on_delete=models.CASCADE,
null=True,
blank=True,
related_name="files",
help_text="The source from which the file came (if any). "
"If the source is removed from the project, so will this file.",
)
upstreams = models.ManyToManyField(
"File",
blank=True,
related_name="downstreams",
help_text="The files that this file was derived from (if any).",
)
snapshot = models.ForeignKey(
"Snapshot",
on_delete=models.CASCADE,
null=True,
blank=True,
related_name="files",
help_text="The snapshot that the file belongs. "
"If the snapshot is deleted so will the files.",
)
current = models.BooleanField(
default=True,
help_text="Is the file currently in the project? "
"Used to retain a history for file paths within a project.",
)
created = models.DateTimeField(
auto_now_add=True, help_text="The time the file info was created."
)
updated = models.DateTimeField(
null=True,
blank=True,
help_text="The time the file info was updated. "
"This field will have the last time this row was altered (i.e. changed from current, to not).",
)
modified = models.DateTimeField(
null=True, blank=True, help_text="The file modification time."
)
size = models.PositiveIntegerField(
null=True, blank=True, help_text="The size of the file in bytes",
)
mimetype = models.CharField(
max_length=512, null=True, blank=True, help_text="The mimetype of the file.",
)
encoding = models.CharField(
max_length=512,
null=True,
blank=True,
help_text="The encoding of the file e.g. gzip",
)
fingerprint = models.CharField(
max_length=128, null=True, blank=True, help_text="The fingerprint of the file",
)
@staticmethod
@transaction.atomic
def create(
project: Project,
path: str,
info: Dict,
job: Optional[Job] = None,
source: Optional[Source] = None,
upstreams: List["File"] = [],
downstreams: List["File"] = [],
snapshot=None,
) -> "File":
"""
Create a file within a project.
Jobs return a dictionary of file information for each
file that has been updated. This creates a `File` instance based
on that information.
If a file with the same path already exists in the project, then
it is made `current=False`.
"""
if snapshot:
current = False
else:
File.objects.filter(project=project, path=path, current=True).update(
current=False, updated=timezone.now()
)
current = True
file = File.objects.create(
project=project,
path=path,
current=current,
job=job,
source=source,
snapshot=snapshot,
modified=get_modified(info),
size=info.get("size"),
mimetype=info.get("mimetype"),
encoding=info.get("encoding"),
fingerprint=info.get("fingerprint"),
)
if upstreams:
file.upstreams.set(upstreams)
if downstreams:
for other in downstreams:
other.upstreams.add(file)
return file
@staticmethod
def get_latest(**kwargs):
"""
Get the latest file matching filter criteria.
This should generally be preferred over using `File.objects.get()`.
Using `filter()` (and indexing to get the first item) is more robust than
using `get()`. For example, for a given `project` there should only be one item with
a `path` that is `current`, but this approach avoids a `MultipleObjectsReturned`
exception in cases where there is more than one.
"""
from projects.models.files import File
return File.objects.filter(**kwargs).order_by("-created")[0]
def remove(self):
"""
To keep the file history we do not actually remove the file but make it not current.
"""
self.current = False
self.updated = timezone.now()
self.save()
def get_format(self) -> Optional[FileFormat]:
"""
Get the format of this file.
"""
try:
return FileFormats.from_url_or_mimetype(self.path, self.mimetype)
except ValueError:
return None
def get_upstreams(self, current=True):
"""
Get the upstream source or files.
"""
return [self.source] if self.source else self.upstreams.filter(current=current)
def get_downstreams(self, current=True):
"""
Get the downstream files.
"""
return self.downstreams.filter(current=current)
def open_url(self) -> Optional[str]:
"""
Get a URL to open the file at the source.
Currently, simply returns the URL to "open" the `source` (if any).
In the future, each source type should provide a URL to edit a
particular file from a multi-file source (e.g. a file within a Github repo).
Does not provide the URL of the source directly because that would
require additional queries to the table for each source type (instead provides URL
to API endpoint which will redirect to the URL).
Intentionally returns `None` for files in a snapshot (they do not have a `source`).
"""
return (
reverse(
"api-projects-sources-open",
kwargs=dict(project=self.project_id, source=self.source_id),
)
if self.source_id
else None
)
def get_lexer(self):
"""
Get a Pygments lexer for the file.
Returns None if no matching lexer can be found e.g. for
binary files like Word or PDF.
"""
try:
return pygments.lexers.guess_lexer_for_filename(self.path, "")
except pygments.util.ClassNotFound:
try:
return pygments.lexers.get_lexer_for_mimetype(self.mimetype)
except pygments.util.ClassNotFound:
try:
return FileFormats.from_url_or_mimetype(
self.path, self.mimetype
).lexer()
except ValueError:
return None
def highlight_content(self) -> Optional[Tuple[str, str]]:
"""
Highlight the content of the file.
Returns `None` if the content can not be highlighted.
"""
lexer = self.get_lexer()
if lexer is None:
return None
content = self.get_content()
formatter = pygments.formatters.HtmlFormatter(
cssclass="source", style="colorful"
)
css = formatter.get_style_defs(".highlight")
html = pygments.highlight(content, lexer, formatter)
return css, html
def highlight_url(self) -> Optional[str]:
"""
Get a URL to view the syntax highlighted content of the file (if possible).
Returns `None` if the content can not be highlighted.
"""
lexer = self.get_lexer()
if lexer:
return reverse(
"ui-projects-files-highlight",
kwargs=dict(
account=self.project.account.name,
project=self.project.name,
file=self.path,
),
)
else:
return None
def download_url(self) -> str:
"""
Get a URL to download the file.
This just returns the project's `content_url`, including necessary
snapshot path and keys (for private projects).
"""
if self.snapshot:
return self.project.content_url(snapshot=self.snapshot, path=self.path)
else:
return self.project.content_url(live=True, path=self.path)
def get_response(self, limit_rate: Optional[str] = "off") -> HttpResponse:
"""
Return a HTTP response to get this file.
"""
return (
self.snapshot.file_response(self.path)
if self.snapshot
else self.project.file_response(self.path)
)
def get_content(self) -> bytes:
"""
Get the content of this file.
"""
return (
self.snapshot.file_content(self.path)
if self.snapshot
else self.project.file_content(self.path)
)
def convert(
self, user: User, output: str, options: Dict = {}, snapshot: bool = False
) -> Job:
"""
Convert a file to another format.
Creates a `convert` job which returns a list of files produced (may be
more than one, e.g. a file with a media folder). Each of the files will have this
file as an upstream dependency.
For certain target formats (e.g. `gdoc`), a source is also created (e.g. `GoogleDocsSource`)
in the job callback. The source will have this new file as a downstream dependant,
and this file will have the new file as an upstream dependency.
Do not call back if this conversion is for a snapshot (we do
not want a file entry for those at present).
"""
if self.mimetype:
options["from"] = self.mimetype
return Job.objects.create(
project=self.project,
creator=user,
description="Convert '{0}' to '{1}'".format(self.path, output),
method=JobMethod.convert.name,
params=dict(input=self.path, output=output, options=options),
secrets=GoogleSourceMixin().get_secrets(user)
if output.endswith(".gdoc")
else None,
**(Job.create_callback(self, "convert_callback") if not snapshot else {})
)
@transaction.atomic
def convert_callback(self, job: Job):
"""
Create files, and any sources, including their dependency relations after a convert job.
Add the created files to the project and make this file the upstream of each.
Create any sources and make this file a downstream.
"""
result = job.result
if not result:
return
for path, info in result.items():
if "source" in info:
assert (
"type_name" in info["source"]
), "Convert job must provide a `type_name` for a source"
source = Source.from_address(
SourceAddress(**info["source"]),
project=self.project,
creator=job.creator,
path=path,
)
relations = dict(source=source, downstreams=[self])
else:
relations = dict(upstreams=[self])
File.create(self.project, path, info, job=job, **relations)
class FileDownloads(models.Model):
"""
Download metrics by day for a file.
These metrics are principally stored to be able to restrict (i.e. prevent
or throttle) file downloads for a project once they reach a monthly limit.
For storage and compute efficiency we do not try to use them for user
facing analytics which would be inaccurate in any case due to caching headers etc.
The total number of bytes downloaded for the file in the month can be
calculated using this `count` and its `File.size`.
"""
file = models.ForeignKey(
File,
on_delete=models.CASCADE,
related_name="downloads",
help_text="The file that these download metrics relate to.",
)
month = models.CharField(
db_index=True,
max_length=7,
help_text="The calendar month, in YYYY-MM format, that these download metrics relate to.",
)
count = models.PositiveIntegerField(
help_text="The number of downloads for the file for the month."
)
class Meta:
constraints = [
models.UniqueConstraint(
fields=["file", "month"], name="%(class)s_unique_file_month"
)
]
def get_modified(info: Dict) -> Optional[datetime]:
"""
Get the modified data as a timezone aware datetime object.
"""
timestamp = info.get("modified")
return datetime.fromtimestamp(timestamp, tz=timezone.utc) if timestamp else None
```
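The format helpers above resolve a `FileFormat` by id, by MIME type, or by guessing a MIME type from a URL or file path. A minimal sketch of how they might be used, assuming this module is importable as `projects.models.files` in a configured Django environment:

```python
# A minimal sketch, assuming this module is importable as
# `projects.models.files` in a configured Django environment.
from projects.models.files import FileFormats

# Resolve by format id or by MIME type (both raise ValueError if unknown)
md = FileFormats.from_id("md")
jsonld = FileFormats.from_mimetype("application/ld+json")

# When a MIME type is given it takes precedence over the URL; without one,
# from_url_or_mimetype falls back to mimetypes.guess_type on the URL
fmt = FileFormats.from_url_or_mimetype(
    url="https://example.org/data.yaml", mimetype="application/x-yaml"
)

# Callers such as File.get_format() translate the ValueError into None
try:
    FileFormats.from_id("not-a-format")
except ValueError as exc:
    print(exc)
```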
#### File: projects/models/nodes.py
```python
import shortuuid
from django.db import models
from django.shortcuts import reverse
from meta.views import Meta
from stencila.schema.util import node_type as schema_node_type
from projects.models.projects import Project
from users.models import User, get_name
def generate_node_key():
"""
Generate a unique, and very difficult to guess, key to access the node.
"""
return shortuuid.ShortUUID().random(length=32)
# Applications known to create nodes
# For these provide further details in HTML views of nodes
APPS = {
"api": ("API", "https://hub.stenci.la/api"),
"encoda": ("Encoda", "https://github.com/stencila/encoda#readme"),
"gsuita": (
"Google Workspace",
"https://workspace.google.com/marketplace/app/stencila/110435422451",
),
}
class Node(models.Model):
"""
A Stencila Schema node.
Could be any type of node e.g. a `CodeChunk`, `CreativeWork`, `Number` within
some larger document, or a complete `Article` or `Review`.
Each node has a unique `key` generated at the time of creation. This
is the only way to retrieve a node.
Each node can be associated with a `project`. This is for authorization.
Although the `key` is a secret, project based authorization adds an additional
layer of security e.g. in case of accidental leakage of a node URL.
This field does not use cascading delete because node URLs
should be permanent. The `project` is not required. This allows
apps to create nodes in documents (e.g. GSuita) or to convert documents
(e.g. Encoda) without having to associate them with a project.
Each node is created by an `app`. This string is primarily used when generating
HTML representations of the node to provide links back to that app.
A node is usually created within a `host`. This is a URL that is primarily used
when generating HTML representations of the node to provide links back to the
document.
The `json` of the node is also immutable. It is returned to requests with
`Accept: application/json` (if authorized).
"""
class Meta:
unique_together = (
"project",
"key",
)
creator = models.ForeignKey(
User,
null=True, # Should only be null if the creator is deleted
blank=True,
on_delete=models.SET_NULL,
related_name="nodes_created",
help_text="User who created the project.",
)
created = models.DateTimeField(
auto_now_add=True, help_text="When the project was created."
)
project = models.ForeignKey(
Project,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="nodes",
help_text="The project this node is associated with.",
)
app = models.TextField(
null=True,
blank=True,
help_text="An identifier for the app that created the node.",
)
host = models.URLField(
null=True,
blank=True,
help_text="URL of the host document within which the node was created.",
)
key = models.CharField(
unique=True,
default=generate_node_key,
max_length=64,
help_text="A unique, and very difficult to guess, key to access this node if it is not public.",
)
json = models.JSONField(help_text="The JSON content of node.")
def get_absolute_url(self):
"""Get the URL of this node."""
return reverse("api-nodes-detail", kwargs={"key": self.key})
def get_app(self):
"""Get name and URL of the creator app."""
return APPS.get(self.app.lower(), (self.app, None)) if self.app else (None, None)
def get_meta(self) -> Meta:
"""Get the metadata to include in the head of the node's page."""
json = self.json
node_type = schema_node_type(json)
if node_type in ("CodeChunk", "CodeExpression"):
lang = json.get("programmingLanguage", "")
kind = "code chunk" if node_type == "CodeChunk" else "code expression"
title = f"{lang.title()} {kind}"
elif node_type in ("MathBlock", "MathFragment"):
lang = json.get("mathLanguage", "")
kind = "math block" if node_type == "MathBlock" else "math fragment"
title = f"{lang.title()} {kind}"
else:
title = node_type + " node"
description = "Created"
if self.creator:
description += f" by { get_name(self.creator) }"
app_name, app_url = self.get_app()
if app_name:
description += f" using { app_name }"
if self.created:
description += f" at { self.created } UTC"
return Meta(object_type="article", title=title, description=description,)
```
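A hedged sketch of creating a `Node` from a Django shell and inspecting its derived metadata; the app id, host, and JSON content below are illustrative only, and the exact title produced by `get_meta` depends on what `schema_node_type` reports:

```python
# A hedged sketch; assumes a configured Django environment for this project.
# The app id, host and JSON content are illustrative only.
from projects.models.nodes import Node

node = Node.objects.create(
    app="api",
    host="https://example.org/some-document",
    json={"type": "CodeChunk", "programmingLanguage": "python", "text": "1 + 1"},
)

# The generated key is the only way to retrieve the node
print(node.key)
print(node.get_absolute_url())  # resolved via the "api-nodes-detail" URL name

# get_meta() derives a title/description from the node type, creator and app
meta = node.get_meta()
print(meta.title, "-", meta.description)
```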
#### File: projects/models/reviews.py
```python
import re
from datetime import datetime
from typing import Dict, List, Optional, Tuple
import shortuuid
from django.db import models
from dois.models import Doi
from jobs.models import Job
from manager.helpers import EnumChoice
from manager.nodes import node_text_content
from projects.models.nodes import Node
from projects.models.projects import Project, ProjectAgent, ProjectRole
from projects.models.sources import Source
from users.models import User
class ReviewStatus(EnumChoice):
"""
The status of a review.
"""
PENDING = "PENDING"
REQUESTED = "REQUESTED"
CANCELLED = "CANCELLED"
ACCEPTED = "ACCEPTED"
DECLINED = "DECLINED"
COMPLETED = "COMPLETED"
EXTRACTING = "EXTRACTING"
EXTRACTED = "EXTRACTED"
FAILED = "FAILED"
REGISTERED = "REGISTERED"
@staticmethod
def as_choices() -> List[Tuple[str, str]]:
"""Return as a list of field choices."""
return [
(ReviewStatus.PENDING.name, "Pending"),
(ReviewStatus.REQUESTED.name, "Requested"),
(ReviewStatus.CANCELLED.name, "Cancelled"),
(ReviewStatus.ACCEPTED.name, "Accepted"),
(ReviewStatus.DECLINED.name, "Declined"),
(ReviewStatus.COMPLETED.name, "Completed"),
(ReviewStatus.EXTRACTING.name, "Retrieval in progress"),
(ReviewStatus.EXTRACTED.name, "Retrieved"),
(ReviewStatus.FAILED.name, "Retrieval failed"),
(ReviewStatus.REGISTERED.name, "Registered"),
]
@classmethod
def get_description(cls, status: str) -> Optional[str]:
"""Return the description of the status."""
choices = cls.as_choices()
for choice in choices:
if status == choice[0]:
return choice[1]
return None
def generate_review_key():
"""
Generate a unique, and very difficult to guess, key for a review.
"""
return shortuuid.ShortUUID().random(length=32)
class Review(models.Model):
"""
A review of a `Node` within a project.
The `subject` of the review will usually be an `Article`, or other type
of `CreativeWork` node, generated from a snapshot.
"""
project = models.ForeignKey(
Project,
null=False,
blank=False,
on_delete=models.CASCADE,
related_name="reviews",
help_text="The project that the review is for.",
)
creator = models.ForeignKey(
User,
# Usually set but allow for null if user is deleted
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="reviews_created",
help_text="The user who created the review.",
)
created = models.DateTimeField(
auto_now_add=True, help_text="The time the review was created."
)
updated = models.DateTimeField(
auto_now=True, help_text="The time the review was last updated."
)
status = models.CharField(
max_length=16,
choices=ReviewStatus.as_choices(),
default=ReviewStatus.PENDING.name,
help_text="The status of the review.",
)
reviewer = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="reviews_authored",
help_text="The user who authored the review.",
)
reviewer_email = models.EmailField(
null=True, blank=True, help_text="The email address of the reviewer.",
)
key = models.CharField(
default=generate_review_key,
max_length=64,
help_text="A unique, and very difficult to guess, key for the reviewer "
"to access the review if they are not a user.",
)
request_message = models.TextField(
null=True,
blank=True,
help_text="The message sent to the reviewer in the request to review.",
)
response_message = models.TextField(
null=True,
blank=True,
help_text="The message provided by the reviewer when accepting or declining to review.",
)
cancel_message = models.TextField(
null=True,
blank=True,
help_text="The message sent to the reviewer when the review was cancelled.",
)
source = models.ForeignKey(
Source,
# Should normally be set but allow for
# null if the source is removed from the project
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="reviews",
help_text="The source for this review.",
)
job = models.ForeignKey(
Job,
null=True,
blank=True,
on_delete=models.PROTECT,
related_name="reviews",
help_text="The job that extracted the review from the source.",
)
subject = models.ForeignKey(
Node,
null=True,
blank=True,
on_delete=models.PROTECT,
related_name="reviews",
help_text="The node, usually a `CreativeWork`, that is the subject of the review.",
)
review = models.ForeignKey(
Node,
null=True,
blank=True,
on_delete=models.PROTECT,
help_text="The node, of type `Review`, representing the actual review.",
)
# The following fields are derived from the `review.json` when a `Review` model
# is extracted from a source. They are mostly optimizations to avoid fetching the
JSON of the review every time we want to display them (e.g. in listings).
review_author_name = models.CharField(
max_length=128, null=True, blank=True, help_text="The name of the reviewer.",
)
review_date = models.DateTimeField(
null=True,
blank=True,
help_text="The date of the review e.g it's `datePublished`.",
)
review_title = models.TextField(
null=True, blank=True, help_text="The title of the review.",
)
review_description = models.TextField(
null=True, blank=True, help_text="The description of the review.",
)
review_comments = models.IntegerField(
null=True, blank=True, help_text="The number of comments that the review has."
)
# The following methods get derived properties of the review for use in templates
# or API responses
def get_status(self) -> Optional[str]:
"""
Get a human readable string describing the status of the review.
"""
return ReviewStatus.get_description(self.status)
def get_date(self) -> datetime:
"""
Get the date for the review.
Returns the declared review date or when the review was last updated.
"""
return self.review_date or self.updated or self.created
def get_reviewer_name(self) -> Optional[str]:
"""
Get the name for the reviewer.
Returns the account display name for reviewers that are users.
Returns the `review_author_name` (which may be null) otherwise.
"""
return (
self.reviewer.personal_account.display_name or self.reviewer.username
if self.reviewer
else self.review_author_name
)
def get_reviewer_image(self) -> Optional[str]:
"""
Get the image for the reviewer.
Returns the account image for reviewers that are users.
Returns null otherwise.
"""
return self.reviewer.personal_account.image.medium if self.reviewer else None
def get_doi(self) -> Optional[str]:
"""
Get the DOI of the review.
It is possible for a `Node` to have multiple DOIs assigned to it, so
we get the latest.
"""
return self.review and self.review.dois.order_by("-created").first()
def get_comments(self) -> Optional[int]:
"""
Get the number of comments that the review has.
"""
return self.review_comments
# Actions on a review
def request(self):
"""
Send the request to the reviewer.
"""
if self.reviewer_email or self.reviewer:
# TODO Send the email
# email = self.reviewer_email or get_email(self.reviewer)
# invite = Invite.objects.create(
# inviter=creator,
# email=email,
# message=self.invite_message,
# action=InviteAction.make_self.name,
# subject_type=ContentType.objects.get_for_model(Review),
# subject_id=self.id,
# arguments=dict(
# account=self.project.account.id,
# project=self.project.id,
# review=self.id,
# ),
# )
# invite.send_invitation(request)
self.status = ReviewStatus.REQUESTED.name
self.save()
def update(
self,
status: str,
response_message: Optional[str] = None,
cancel_message: Optional[str] = None,
user: Optional[User] = None,
filters: Dict = {},
):
"""
Update the status of a review.
Checks that the status update makes logical sense and
records the message and user (if any). Note that a status
update to `ACCEPTED`, `DECLINED` or `COMPLETED` can be made
by an anonymous user (a reviewer who has the review key but
is not an authenticated user).
"""
if (
status == ReviewStatus.CANCELLED.name
and self.status == ReviewStatus.REQUESTED.name
):
self.cancel_message = cancel_message or None
elif (
status == ReviewStatus.ACCEPTED.name
and self.status == ReviewStatus.REQUESTED.name
):
self.reviewer = user
self.response_message = response_message or None
# Add user as a REVIEWER to the project (if necessary)
try:
agent = ProjectAgent.objects.get(project_id=self.project, user=user)
except ProjectAgent.DoesNotExist:
ProjectAgent.objects.create(
project_id=self.project, user=user, role=ProjectRole.REVIEWER.name,
)
else:
if agent.role not in ProjectRole.and_above(ProjectRole.REVIEWER):
agent.role = ProjectRole.REVIEWER.name
agent.save()
elif (
status == ReviewStatus.DECLINED.name
and self.status == ReviewStatus.REQUESTED.name
):
self.reviewer = user
self.response_message = response_message or None
elif status == ReviewStatus.COMPLETED.name and self.status in (
ReviewStatus.PENDING.name,
ReviewStatus.ACCEPTED.name,
ReviewStatus.FAILED.name,
):
return self.extract(user, filters)
elif (
status == ReviewStatus.REGISTERED.name
and self.status == ReviewStatus.EXTRACTED.name
):
return self.register(user)
else:
raise ValueError(
f"Review can not be updated from {self.status} to {status}."
)
self.status = status
self.save()
def extract(self, user: User, filters: Dict = {}):
"""
Extract the review from its source.
Parses the filters according to the type of source.
Creates and dispatches an `extract` job on the review's source.
"""
if self.source.type_name == "Github":
match = re.match(
r"https:\/\/github.com\/(?:\w+)\/(?:\w+)\/pull\/(\d+)#pullrequestreview-(\d+)",
filters.get("filter_a", ""),
)
if match:
filters = dict(
pr_number=int(match.group(1)), review_id=int(match.group(2))
)
elif self.source.type_name.startswith("Google"):
filters = dict(name=filters.get("filter_a"))
job = self.source.extract(review=self, user=user, filters=filters)
job.dispatch()
self.job = job
self.status = ReviewStatus.EXTRACTING.name
self.save()
def extract_callback(self, job: Job):
"""
Store the extracted review.
"""
json = job.result
if not json:
self.status = ReviewStatus.FAILED.name
else:
self.review = Node.objects.create(
project=self.project, creator=job.creator, app="hub.reviews", json=json
)
authors = json.get("authors", [])
if len(authors) > 0:
self.review_author_name = authors[0].get("name")
self.review_date = (
json.get("datePublished")
or json.get("dateModified")
or json.get("dateCreated")
)
self.review_title = node_text_content(json.get("title"))
self.review_description = node_text_content(
json.get("description") or json.get("content")
)
self.review_comments = len(json.get("comments", []))
self.status = ReviewStatus.EXTRACTED.name
self.save()
def register(self, user: User):
"""
Register a DOI for this review.
"""
doi = Doi.objects.create(creator=user, node=self.review)
job = doi.register()
job.dispatch()
self.status = ReviewStatus.REGISTERED.name
self.save()
```
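The `update` method above gates each status change on the review's current status. A rough sketch of the acceptance path for a requested review, assuming a configured Django environment; the review key and user below are illustrative only:

```python
# A rough sketch of a review status transition; assumes a configured Django
# environment. The review key and user below are illustrative only.
from projects.models.reviews import Review, ReviewStatus
from users.models import User

reviewer = User.objects.first()
review = Review.objects.get(key="an-illustrative-review-key")
assert review.status == ReviewStatus.REQUESTED.name

# Accepting records the reviewer and their message, and grants them the
# REVIEWER project role (if they do not already have one at least as high)
review.update(
    status=ReviewStatus.ACCEPTED.name,
    response_message="Happy to review this.",
    user=reviewer,
)

# Transitions that make no logical sense raise ValueError, e.g. cancelling
# a review that has already been accepted
try:
    review.update(status=ReviewStatus.CANCELLED.name)
except ValueError as exc:
    print(exc)
```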
#### File: projects/models/snapshots.py
```python
import os
from typing import Optional
import shortuuid
from django.db import models
from django.http import HttpRequest
from django.utils import timezone
from jobs.models import Job, JobMethod
from manager.storage import StorageUsageMixin, snapshots_storage
from projects.models.files import File, get_modified
from projects.models.projects import Project
from users.models import User
def generate_snapshot_id():
"""
Generate a unique snapshot id.
This is a separate function to avoid new AlterField migrations
being created, as happens when `default=shortuuid.uuid`.
"""
return shortuuid.uuid()
class Snapshot(StorageUsageMixin, models.Model):
"""
A project snapshot.
The `path` field is stored on the model to improve durability (if
the convention for creating paths changes, the existing paths will not change).
The `zip_name` field provides a way of providing a more useful filename
when downloading the archive (it is populated with the project name and snapshot number).
"""
id = models.CharField(
primary_key=True,
max_length=32,
editable=False,
default=generate_snapshot_id,
help_text="The unique id of the snapshot.",
)
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
related_name="snapshots",
null=False,
blank=False,
help_text="The project that the snapshot is for.",
)
number = models.IntegerField(
db_index=True, help_text="The number of the snapshot within the project.",
)
creator = models.ForeignKey(
User,
null=True,
on_delete=models.SET_NULL,
related_name="snapshots_created",
help_text="The user who created the snapshot.",
)
created = models.DateTimeField(
auto_now_add=True, help_text="The time the snapshot was created."
)
path = models.CharField(
max_length=1024,
null=True,
help_text="The path of the snapshot's directory within the snapshot storage volume.",
)
zip_name = models.CharField(
max_length=1024,
null=True,
help_text="The name of snapshot's Zip file (within the snapshot directory).",
)
container_image = models.TextField(
null=True,
blank=True,
help_text="The container image to use as the execution environment for this snapshot.",
)
job = models.ForeignKey(
Job,
on_delete=models.SET_NULL,
related_name="snapshot_created",
null=True,
blank=True,
help_text="The job that created the snapshot",
)
class Meta:
constraints = [
models.UniqueConstraint(
fields=["project", "number"], name="%(class)s_unique_project_number"
)
]
STORAGE = snapshots_storage()
def __str__(self):
"""
Get a string representation to use in select options etc.
"""
return "Snapshot #{0}".format(self.number)
def save(self, *args, **kwargs):
"""
Override to ensure certain fields are populated.
Ensures that:
- `number` is not null and monotonically increases
- `path` and `zip_name` are set
"""
if self.number is None:
result = Snapshot.objects.filter(project=self.project).aggregate(
models.Max("number")
)
self.number = (result["number__max"] or 0) + 1
if not self.path:
self.path = os.path.join(str(self.project.id), str(self.id))
if not self.zip_name:
self.zip_name = "{project}-v{number}.zip".format(
project=self.project.name, number=self.number
)
return super().save(*args, **kwargs)
@staticmethod
def create(project: Project, user: User) -> Job:
"""
Snapshot the project.
"""
snapshot = Snapshot.objects.create(project=project, creator=user)
subjobs = []
# Clean the project's working directory
subjobs.append(project.cleanup(user))
# Pull the project's sources
subjobs.append(project.pull(user))
# "Reflow" the project (regenerate derived files)
reflow = project.reflow(user)
if reflow:
subjobs.append(reflow)
# Pin the container image
subjobs.append(
project.pin(user, **Job.create_callback(snapshot, "pin_callback"))
)
# Create an index.html if a "main" file is defined
main = project.get_main()
if main:
options = {}
theme = project.get_theme()
if theme:
options["theme"] = theme
subjobs.append(main.convert(user, "index.html", options=options))
# This is currently required to populate field `zip_name` below
snapshot.save()
# Archive the working directory to the snapshot directory
subjobs.append(
project.archive(
user,
snapshot=snapshot.id,
path=f"{project.id}/{snapshot.id}/{snapshot.zip_name}",
**Job.create_callback(snapshot, "archive_callback"),
)
)
job = Job.objects.create(
method=JobMethod.series.name,
description="Snapshot project '{0}'".format(project.name),
project=project,
creator=user,
)
job.children.set(subjobs)
job.dispatch()
snapshot.job = job
snapshot.save()
return snapshot
def pin_callback(self, job: Job):
"""
Update the container image for this snapshot.
Called when the `pin` sub-job is complete.
"""
self.container_image = job.result
self.save()
def archive_callback(self, job: Job):
"""
Update the files associated with this snapshot.
Called when the `archive` sub-job is complete.
"""
result = job.result
if not result:
return
# Do a batch insert of files. This is much faster when there are a lot of files
# than inserting each file individually.
File.objects.bulk_create(
[
File(
project=self.project,
path=path,
current=False,
job=job,
snapshot=self,
updated=timezone.now(),
modified=get_modified(info),
size=info.get("size"),
mimetype=info.get("mimetype"),
encoding=info.get("encoding"),
fingerprint=info.get("fingerprint"),
)
for path, info in result.items()
]
)
def session(self, request: HttpRequest) -> Job:
"""
Create a session job for the snapshot.
"""
project = self.project
job = Job.objects.create(
project=project,
creator=request.user if request.user.is_authenticated else None,
snapshot=self,
method=JobMethod.session.name,
params=dict(
snapshot=self.id,
snapshot_url=self.file_url(self.zip_name),
container_image=self.container_image,
mem_request=project.session_memory,
mem_limit=project.session_memory,
timeout=project.session_timeout,
timelimit=project.session_timelimit,
),
description="Session for snapshot #{0}".format(self.number),
)
job.add_user(request)
return job
@property
def is_active(self) -> bool:
"""
Is the snapshot currently active?
"""
return self.job and self.job.is_active
@property
def has_index(self) -> bool:
"""
Determine if the snapshot has an index.html file, or not.
"""
try:
self.files.get(path="index.html")
return True
except File.DoesNotExist:
return False
def content_url(self, path: Optional[str] = None) -> str:
"""
Get the URL that this snapshot content is served from.
"""
return self.project.content_url(snapshot=self, path=path)
def file_location(self, file: str) -> str:
"""
Get the location of one of the snapshot's files relative to the root of the storage volume.
"""
return os.path.join(self.path, file)
```
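`Snapshot.save` derives the `number`, `path` and `zip_name` fields when they are not already set. A minimal sketch of those conventions, assuming a configured Django environment and an existing project (the project name is illustrative):

```python
# A minimal sketch of the snapshot numbering and path conventions; assumes a
# configured Django environment. The project name is illustrative only.
from projects.models.projects import Project
from projects.models.snapshots import Snapshot

project = Project.objects.get(name="example-project")

snapshot = Snapshot(project=project)
snapshot.save()

print(snapshot.number)    # one more than the project's highest snapshot number
print(snapshot.path)      # "<project id>/<snapshot id>"
print(snapshot.zip_name)  # "<project name>-v<number>.zip"

# file_location() gives a path relative to the snapshots storage volume
print(snapshot.file_location(snapshot.zip_name))
```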
#### File: projects/models/sources_tests.py
```python
import pytest
from django.core.exceptions import ValidationError
from django.db.models import Q
from accounts.models import Account, AccountTier
from manager.testing import DatabaseTestCase
from projects.models.projects import Project
from projects.models.sources import (
ElifeSource,
GithubSource,
GoogleDocsSource,
GoogleDriveSource,
GoogleSheetsSource,
PlosSource,
Source,
SourceAddress,
UploadSource,
UrlSource,
)
def test_source_address():
with pytest.raises(KeyError, match='Unknown source type "foo"'):
SourceAddress("foo")
def test_coerce_address():
# A specific address (ie. starting with type://)
sa = Source.coerce_address("github://org/repo/a/file.md")
assert sa.type == GithubSource
assert sa["repo"] == "org/repo"
assert sa["subpath"] == "a/file.md"
sa = Source.coerce_address("gsheet://1BW6MubIyDirCGW9Wq-tSqCma8pioxBI6VpeLyXn5mZA")
assert sa.type == GoogleSheetsSource
assert sa["doc_id"] == "1BW6MubIyDirCGW9Wq-tSqCma8pioxBI6VpeLyXn5mZA"
# A HTTP URL that is matched by a specific source
sa = Source.coerce_address("https://github.com/org/repo")
assert sa.type == GithubSource
assert sa["repo"] == "org/repo"
sa = Source.coerce_address("https://github.com/stencila/hub/blob/master/README.md")
assert sa.type == GithubSource
assert sa["repo"] == "stencila/hub"
assert sa["subpath"] == "README.md"
# A generic URL that is caught as a URL source
sa = Source.coerce_address("https://example.org/file.R")
assert sa.type == UrlSource
# An address that is not matched by any source type
with pytest.raises(ValueError, match='Unable to parse source address "foo"'):
sa = Source.coerce_address("foo")
def test_default_make_address():
assert ElifeSource(article=43143).make_address() == "elife://43143"
def test_default_parse_address():
sa = Source.parse_address("foo")
assert sa is None
with pytest.raises(ValidationError):
Source.parse_address("foo", strict=True)
def test_query_from_address():
q = Source.query_from_address(SourceAddress("Github", repo="org/repo"))
assert isinstance(q, Q)
assert q.children == [("githubsource__repo", "org/repo")]
q = Source.query_from_address(
SourceAddress("Github", repo="org/repo", subpath="a/sub/folder")
)
assert q.children == [
("githubsource__repo", "org/repo"),
("githubsource__subpath", "a/sub/folder"),
]
q = Source.query_from_address(
SourceAddress("Github", repo="org/repo"), prefix="sources"
)
assert q.children == [("sources__githubsource__repo", "org/repo")]
q = Source.query_from_address(
SourceAddress("Github", repo="org/repo", subpath="a/sub/folder"),
prefix="sources",
)
assert q.children == [
("sources__githubsource__repo", "org/repo"),
("sources__githubsource__subpath", "a/sub/folder"),
]
q1 = Source.query_from_address("upload://foo.txt")
q2 = Source.query_from_address(SourceAddress("Upload", name="foo.txt"))
assert q1.children == [("uploadsource__name", "foo.txt")]
assert q2 == q1
@pytest.mark.django_db
def test_from_address():
s = Source.from_address(
SourceAddress("Github", repo="org/repo", subpath="a/folder")
)
assert isinstance(s, GithubSource)
assert s.repo == "org/repo"
assert s.subpath == "a/folder"
s = Source.from_address("github://org/repo")
assert isinstance(s, GithubSource)
assert s.repo == "org/repo"
assert s.subpath is None
def test_to_address():
s = GithubSource(repo="org/repo", subpath="a/folder")
a = s.to_address()
assert a.type_name == "Github"
assert list(a.keys()) == ["repo", "subpath"]
assert a.repo == "org/repo"
assert a.subpath == "a/folder"
def test_elife_parse_address():
sa = ElifeSource.parse_address("elife://52258")
assert sa.type == ElifeSource
assert sa["article"] == 52258
sa = ElifeSource.parse_address("https://elifesciences.org/articles/52258")
assert sa.type == ElifeSource
assert sa["article"] == 52258
sa = ElifeSource.parse_address("foo")
assert sa is None
with pytest.raises(ValidationError):
ElifeSource.parse_address("foo", strict=True)
def test_githubsource_make_address():
assert GithubSource(repo="user/repo").make_address() == "github://user/repo"
assert (
GithubSource(repo="user/repo", subpath="a/file.txt").make_address()
== "github://user/repo/a/file.txt"
)
def test_githubsource_url():
assert GithubSource(repo="user/repo").get_url() == "https://github.com/user/repo"
assert (
GithubSource(repo="user/repo", subpath="a/file.txt").get_url()
== "https://github.com/user/repo/blob/master/a/file.txt/"
)
def test_githubsource_parse_address():
for url in [
"github://django/django",
"http://github.com/django/django",
"https://github.com/django/django/",
]:
sa = GithubSource.parse_address(url)
assert sa.type == GithubSource
assert sa["repo"] == "django/django"
assert sa["subpath"] is None
for url in [
"github://django/django/django/db/models",
"http://github.com/django/django/tree/master/django/db/models",
"https://github.com/django/django/tree/master/django/db/models",
]:
sa = GithubSource.parse_address(url)
assert sa.type == GithubSource
assert sa["repo"] == "django/django"
assert sa["subpath"] == "django/db/models"
for url in [
"github://django/django/django/db/models/query_utils.py",
"https://github.com/django/django/blob/master/django/db/models/query_utils.py",
]:
sa = GithubSource.parse_address(url)
assert sa.type == GithubSource
assert sa["repo"] == "django/django"
assert sa["subpath"] == "django/db/models/query_utils.py"
def test_googledocssource_make_address():
assert GoogleDocsSource(doc_id="an-id").make_address() == "gdoc://an-id"
def test_googledocssource_to_address():
a = GoogleDocsSource(doc_id="an-id").to_address()
assert a.type_name == "GoogleDocs"
assert list(a.keys()) == ["doc_id"]
assert a.doc_id == "an-id"
def test_googledocssource_url():
assert (
GoogleDocsSource(doc_id="an-id").get_url()
== "https://docs.google.com/document/d/an-id/edit"
)
def test_googledocssource_parse_address():
for url in [
"gdoc://<KEY>",
"docs.google.com/document/d/<KEY>",
"https://docs.google.com/document/d/<KEY>ZA/",
"https://docs.google.com/document/d/1BW6MubIyDirCGW9Wq-tSqCma8pioxBI6VpeLyXn5mZA/edit",
"https://docs.google.com/document/u/1/d/1BW6MubIyDirCGW9Wq-tSqCma8pioxBI6VpeLyXn5mZA/edit",
]:
sa = GoogleDocsSource.parse_address(url)
assert sa.type == GoogleDocsSource
assert sa.doc_id == "<KEY>"
# Use of naked ids
assert (
GoogleDocsSource.parse_address("1BW6MubIyDirCGW9Wq-tSqCma8pioxBI6VpeLyXn5mZA")
is None
)
assert (
GoogleDocsSource.parse_address(
"<KEY>", naked=True
).doc_id
== "<KEY>"
)
# Use of strict
assert GoogleDocsSource.parse_address("foo") is None
with pytest.raises(ValidationError, match="Invalid Google Doc identifier"):
GoogleDocsSource.parse_address("foo", strict=True)
def test_googlesheetssource_make_address():
assert GoogleSheetsSource(doc_id="an-id").make_address() == "gsheet://an-id"
def test_googlesheetssource_to_address():
a = GoogleSheetsSource(doc_id="an-id").to_address()
assert a.type_name == "GoogleSheets"
assert list(a.keys()) == ["doc_id"]
assert a.doc_id == "an-id"
def test_googlesheetssource_url():
assert (
GoogleSheetsSource(doc_id="an-id").get_url()
== "https://docs.google.com/spreadsheets/d/an-id/edit"
)
def test_googlesheetssource_parse_address():
for url in [
"gsheet://<KEY>",
"docs.google.com/spreadsheets/d/<KEY>",
"https://docs.google.com/spreadsheets/d/<KEY>/",
"https://docs.google.com/spreadsheets/d/1BW6MubIyDirCGW9Wq-tSqCma8pioxBI6VpeLyXn5mZA/edit",
"https://docs.google.com/spreadsheets/u/0/d/1BW6MubIyDirCGW9Wq-tSqCma8pioxBI6VpeLyXn5mZA/edit",
]:
sa = GoogleSheetsSource.parse_address(url)
assert sa.type == GoogleSheetsSource
assert sa.doc_id == "<KEY>"
def test_googledrivesource_make_address():
assert (
GoogleDriveSource(kind="file", google_id="an-id").make_address()
== "gdrive://file/an-id"
)
assert (
GoogleDriveSource(kind="folder", google_id="an-id").make_address()
== "gdrive://folder/an-id"
)
def test_googledrivesource_url():
assert (
GoogleDriveSource(kind="file", google_id="an-id").get_url()
== "https://drive.google.com/file/d/an-id"
)
assert (
GoogleDriveSource(kind="folder", google_id="an-id").get_url()
== "https://drive.google.com/drive/folders/an-id"
)
def test_googledrivesource_parse_address():
for url in [
"gdrive://file/1AkmcbU9uuEL9YBFsOXEP-LzORXsCGOIl",
"drive.google.com/file/d/1AkmcbU9uuEL9YBFsOXEP-LzORXsCGOIl",
"https://drive.google.com/file/d/1AkmcbU9uuEL9YBFsOXEP-LzORXsCGOIl",
"https://drive.google.com/file/d/1AkmcbU9uuEL9YBFsOXEP-LzORXsCGOIl/view?usp=sharing",
]:
sa = GoogleDriveSource.parse_address(url)
assert sa.type == GoogleDriveSource
assert sa.kind == "file"
assert sa.google_id == "1AkmcbU9uuEL9YBFsOXEP-LzORXsCGOIl"
for url in [
"gdrive://folder/1OcB7VTWb3lc0u8FJX2LXc5GraKpn-r_m",
"https://drive.google.com/drive/folders/1OcB7VTWb3lc0u8FJX2LXc5GraKpn-r_m",
"https://drive.google.com/drive/u/1/folders/1OcB7VTWb3lc0u8FJX2LXc5GraKpn-r_m",
]:
sa = GoogleDriveSource.parse_address(url)
assert sa.type == GoogleDriveSource
assert sa.kind == "folder"
assert sa.google_id == "1OcB7VTWb3lc0u8FJX2LXc5GraKpn-r_m"
# Use of strict
assert GoogleDriveSource.parse_address("foo") is None
with pytest.raises(ValidationError, match="Invalid Google Drive address"):
GoogleDriveSource.parse_address("foo", strict=True)
def test_plos_parse_address():
sa = PlosSource.parse_address("plos://10.1371/journal.pcbi.1007406")
assert sa.type == PlosSource
assert sa["article"] == "10.1371/journal.pcbi.1007406"
sa = PlosSource.parse_address("doi://10.1371/journal.pcbi.1007406")
assert sa.type == PlosSource
assert sa["article"] == "10.1371/journal.pcbi.1007406"
sa = PlosSource.parse_address(
"https://journals.plos.org/plosbiology/article?id=10.1371/journal.pbio.3000872"
)
assert sa.type == PlosSource
assert sa["article"] == "10.1371/journal.pbio.3000872"
sa = PlosSource.parse_address("foo")
assert sa is None
with pytest.raises(ValidationError):
PlosSource.parse_address("foo", strict=True)
@pytest.mark.django_db
def test_upload_source_pull():
tier = AccountTier.objects.create()
account = Account.objects.create(name="account-name", tier=tier)
project = Project.objects.create(account=account, name="project-name")
source = UploadSource.objects.create(project=project, file="some-file.txt")
job = source.pull()
params = job.params
assert params["source"]["type"] == "Upload"
assert params["source"]["path"].endswith("some-file.txt")
def test_urlsource_make_address():
assert UrlSource(url="http://example.com").make_address() == "http://example.com"
def test_urlsource_url():
assert UrlSource(url="http://example.com").get_url() == "http://example.com"
def test_urlsource_parse_address():
assert UrlSource.parse_address("https://example.org") == SourceAddress(
"Url", url="https://example.org"
)
assert UrlSource.parse_address("http://example.org/a-file.md") == SourceAddress(
"Url", url="http://example.org/a-file.md"
)
assert UrlSource.parse_address("foo") is None
assert UrlSource.parse_address("http://not-a-hostname") is None
with pytest.raises(ValidationError, match="Invalid URL source"):
UrlSource.parse_address("foo", strict=True)
class SourcesTests(DatabaseTestCase):
def test_delete_project_with_source(self):
"""
A regression test for https://github.com/stencila/hub/issues/754
"""
account = Account.objects.create(name="test-account")
project = Project.objects.create(account=account, name="test-project")
ElifeSource.objects.create(project=project, article=5000, path="article.xml")
project.delete()
```
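The tests above exercise the address parsing and coercion machinery. A small additional sketch (not one of the project's own tests) of how `Source.coerce_address` dispatches across the source types, assuming a configured Django environment:

```python
# A small sketch (not one of the project's own tests); assumes a configured
# Django environment so the source models can be imported.
from projects.models.sources import ElifeSource, GithubSource, Source

# A specific "type://" address
sa = Source.coerce_address("elife://52258")
assert sa.type == ElifeSource

# A plain HTTPS URL is matched against each source type in turn
sa = Source.coerce_address("https://github.com/stencila/hub")
assert sa.type == GithubSource
assert sa["repo"] == "stencila/hub"

# Anything that no source type can parse raises ValueError
try:
    Source.coerce_address("definitely not an address")
except ValueError as exc:
    print(exc)
```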
#### File: ui/views/jobs.py
```python
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from jobs.api.views import ProjectsJobsViewSet
from jobs.models import JobMethod, JobStatus
@login_required
def list(request: HttpRequest, *args, **kwargs) -> HttpResponse:
"""
Get a list of project jobs.
"""
viewset = ProjectsJobsViewSet.init("list", request, args, kwargs)
project = viewset.get_project()
jobs = viewset.get_queryset(project)
paginator = Paginator(jobs, 25)
page_number = request.GET.get("page")
page_jobs = paginator.get_page(page_number)
return render(
request,
"projects/jobs/list.html",
dict(
project=project,
paginator=paginator,
jobs=page_jobs,
status=request.GET.get("status"),
method=request.GET.get("method"),
creator=request.GET.get("creator"),
status_options=[(value, value) for value in JobStatus.categories().keys()],
method_options=[
(label.title(), value.lower())
for (label, value) in JobMethod.as_choices()
],
creator_options=[("Me", "me"), ("Others", "other"), ("Anonymous", "anon")],
meta=project.get_meta(),
),
)
@login_required
def retrieve(request: HttpRequest, *args, **kwargs) -> HttpResponse:
"""
Retrieve a job.
"""
viewset = ProjectsJobsViewSet.init("retrieve", request, args, kwargs)
project = viewset.get_project()
job = viewset.get_object(project)
return render(
request,
"projects/jobs/retrieve.html",
dict(project=project, job=job, meta=project.get_meta()),
)
```
#### File: manager/scripts/get_api_schema.py
```python
from django.test import RequestFactory
from manager.api.views.docs import schema_view
def run(to):
response = schema_view(RequestFactory().get("/api/schema"))
response.render()
with open(to, "wb") as file:
file.write(response.content)
```
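The script writes the rendered API schema to a file, and its `run(to)` signature follows the convention used by django-extensions' `runscript` command, so it could presumably be invoked that way. A hedged sketch of calling it directly from a Django shell instead (the output path is illustrative):

```python
# A hedged sketch; assumes a configured Django environment (e.g. an
# interactive `manage.py shell`). The output path is illustrative only.
from manager.scripts.get_api_schema import run

run("openapi.json")  # renders the schema view and writes it to openapi.json
```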
#### File: api/views/features_tests.py
```python
from manager.testing import DatabaseTestCase
from users.models import Flag
class FeaturesTestCase(DatabaseTestCase):
"""Tests getting and setting of feature flags by users."""
@classmethod
def setUpClass(cls):
"""Add some features to be able to get and set"""
super().setUpClass()
Flag.objects.create(
name="feature_a", label="Feature A", default="on", settable=True
)
Flag.objects.create(
name="feature_b", label="Feature B", default="off", settable=True
)
def test_ok(self):
features = self.retrieve(self.ada, "api-features").data
assert features == {"feature_a": "on", "feature_b": "off"}
features = self.update(
self.ada, "api-features", {"feature_a": "off", "feature_b": "on"}
).data
assert features == {"feature_a": "off", "feature_b": "on"}
features = self.update(
self.ada, "api-features", {"feature_a": True, "feature_b": False}
).data
assert features == {"feature_a": "on", "feature_b": "off"}
features = self.update(
self.ada, "api-features", {"feature_a": "false", "feature_b": "true"}
).data
assert features == {"feature_a": "off", "feature_b": "on"}
def test_errors(self):
response = self.update(self.ada, "api-features", {"foo": "bar"})
assert response.status_code == 400
assert response.data["errors"][0]["message"] == 'Invalid feature name "foo"'
response = self.update(self.ada, "api-features", {"feature_a": "nope"})
assert response.status_code == 400
assert response.data["errors"][0]["message"] == 'Invalid feature state "nope"'
```
#### File: manager/users/models.py
```python
from typing import Dict, Optional
import django.contrib.auth.models
import shortuuid
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import connection, models
from django.db.models import Count, F, Max, Q
from django.db.models.expressions import RawSQL
from django.http import HttpRequest
from django.shortcuts import reverse
from django.utils import timezone
from invitations.adapters import get_invitations_adapter
from invitations.models import Invitation
from rest_framework.exceptions import ValidationError
from waffle.models import AbstractUserFlag
# Needed to ensure signals are loaded
import users.signals # noqa
from manager.helpers import EnumChoice
User: django.contrib.auth.models.User = get_user_model()
def get_email(user: User) -> Optional[str]:
"""
Get the best email address for a user.
The "best" email is the verified primary email,
falling back to verified if none marked as primary,
falling back to the first if none is verified,
falling back to `user.email`, falling back to
their public email.
"""
best = None
emails = user.emailaddress_set.all()
for email in emails:
if (email.primary and email.verified) or (not best and email.verified):
best = email.email
if not best and len(emails) > 0:
best = emails[0].email
if not best:
best = user.email
if not best and user.personal_account:
best = user.personal_account.email
# Avoid returning an empty string, return None instead
return best or None
def get_name(user: User) -> Optional[str]:
"""
Get the best name to display for a user.
The "best" name is their account's display name,
falling back to first_name + last_name,
falling back to username.
"""
if user.personal_account and user.personal_account.display_name:
return user.personal_account.display_name
if user.first_name or user.last_name:
return f"{user.first_name} {user.last_name}".strip()
return user.username
def get_attributes(user: User) -> Dict:
"""
Get a dictionary of user attributes.
Used for updating external services with current
values of user attributes e.g number of projects etc.
Flattens various other summary dictionaries e.g `get_projects_summary`
into a single dictionary.
"""
return {
**dict(
(f"feature_{name}", value)
for name, value in get_feature_flags(user).items()
),
**dict(
(f"orgs_{name}", value) for name, value in get_orgs_summary(user).items()
),
**dict(
(f"projects_{name}", value)
for name, value in get_projects_summary(user).items()
),
}
def get_orgs(user: User):
"""
Get all organizational accounts that a user is a member of.
"""
from accounts.models import Account
return Account.objects.filter(user__isnull=True, users__user=user).annotate(
role=F("users__role")
)
def get_orgs_summary(user: User) -> Dict:
"""
Get a summary of organizational accounts the user is a member of.
"""
from accounts.models import AccountRole
zero_by_role = dict([(role.name.lower(), 0) for role in AccountRole])
orgs = get_orgs(user)
orgs_summary = orgs.values("role").annotate(count=Count("id"), tier=Max("tier"))
orgs_by_role = dict([(row["role"].lower(), row["count"]) for row in orgs_summary])
return {
"max_tier": max(row["tier"] for row in orgs_summary) if orgs_summary else None,
"total": sum(orgs_by_role.values()),
**zero_by_role,
**orgs_by_role,
}
def get_projects(user: User, include_public=True):
"""
Get a queryset of projects for the user.
For authenticated users, each project is annotated with the
role of the user for the project.
"""
from projects.models.projects import Project
if user.is_authenticated:
# Annotate the queryset with the role of the user
# Role is the "greater" of the project role and the
# account role (for the account that owns the project).
# Authenticated users can see public projects and those in
# which they have a role
return Project.objects.annotate(
role=RawSQL(
"""
SELECT
CASE account_role.role
WHEN 'OWNER' THEN 'OWNER'
WHEN 'MANAGER' THEN
CASE project_role.role
WHEN 'OWNER' THEN 'OWNER'
ELSE 'MANAGER' END
ELSE project_role.role END AS "role"
FROM projects_project AS project
LEFT JOIN
(SELECT project_id, "role" FROM projects_projectagent WHERE user_id = %s) AS project_role
ON project.id = project_role.project_id
LEFT JOIN
(SELECT account_id, "role" FROM accounts_accountuser WHERE user_id = %s) AS account_role
ON project.account_id = account_role.account_id
WHERE project.id = projects_project.id""",
[user.id, user.id],
)
).filter((Q(public=True) if include_public else Q()) | Q(role__isnull=False))
else:
# Unauthenticated users can only see public projects
return Project.objects.filter(public=True).extra(select={"role": "NULL"})
def get_projects_summary(user: User) -> Dict:
"""
Get a summary of project memberships for a user.
"""
from projects.models.projects import ProjectRole
zero_by_role = dict([(role.name.lower(), 0) for role in ProjectRole])
projects = get_projects(user, include_public=False)
projects_by_role = dict(
[
(row["role"].lower(), row["count"])
for row in projects.values("role").annotate(count=Count("id"))
]
)
return {
"total": sum(projects_by_role.values()),
**zero_by_role,
**projects_by_role,
}
def get_feature_flags(user: User) -> Dict[str, str]:
"""
Get the feature flag settings for a user.
"""
with connection.cursor() as cursor:
cursor.execute(
"""
SELECT "name", "default", "user_id"
FROM users_flag
LEFT JOIN (
SELECT *
FROM users_flag_users
WHERE user_id = %s
) AS subquery ON users_flag.id = subquery.flag_id
WHERE users_flag.settable
""",
[user.id],
)
rows = cursor.fetchall()
features = {}
for row in rows:
name, default, has_flag = row
if has_flag:
features[name] = "off" if default == "on" else "on"
else:
features[name] = default
return features
def generate_anonuser_id():
"""
Generate a unique id for an anonymous user.
"""
return shortuuid.ShortUUID().random(length=32)
class AnonUser(models.Model):
"""
A model to store anonymous users when necessary.
Used to associate unauthenticated users with objects,
for example, so that the same session job can be provided
to them on multiple page refreshes.
"""
id = models.CharField(
primary_key=True,
max_length=64,
default=generate_anonuser_id,
help_text="The unique id of the anonymous user.",
)
created = models.DateTimeField(
auto_now_add=True, help_text="The time the anon user was created."
)
@staticmethod
def get_id(request: HttpRequest) -> Optional[str]:
"""
Get the id of the anonymous user, if any.
"""
if request.user.is_anonymous:
return request.session.get("user", {}).get("id")
return None
@staticmethod
def get_or_create(request: HttpRequest) -> "AnonUser":
"""
Create an instance in the database.
Only use this when necessary, e.g. when you need
to associate an anonymous user with another object.
"""
id = AnonUser.get_id(request)
if id:
anon_user, created = AnonUser.objects.get_or_create(id=id)
return anon_user
else:
anon_user = AnonUser.objects.create()
request.session["user"] = {"anon": True, "id": anon_user.id}
return anon_user
class Flag(AbstractUserFlag):
"""
Custom feature flag model.
Adds fields to allow users to turn features on/off themselves.
In the future, fields may be
added to allow flags to be set based on the account (in addition to, or instead
of, only the user).
See https://waffle.readthedocs.io/en/stable/types/flag.html#custom-flag-models
"""
label = models.CharField(
max_length=128,
null=True,
blank=True,
help_text="A label for the feature to display to users.",
)
default = models.CharField(
max_length=3,
choices=[("on", "On"), ("off", "Off")],
default="on",
help_text='If the default is "on" then when the flag is active, '
'the feature should be considered "off" and vice versa.',
)
settable = models.BooleanField(
default=False, help_text="User can turn this flag on/off for themselves."
)
def is_active_for_user(self, user) -> bool:
"""
Is the feature "on" for a user.
Changes the underlying behaviour of Waffle flags based on
the `default` field for the flag.
"""
is_active = super().is_active_for_user(user)
return is_active if self.default == "off" else not is_active
def generate_invite_key():
"""
Generate a unique invite key.
This is a separate function to avoid new AlterField migrations
being created, as happens when `default=shortuuid.uuid`.
"""
return shortuuid.ShortUUID().random(length=32)
class InviteAction(EnumChoice):
"""
Actions to take when a user has accepted an invite.
"""
join_account = "join_account"
join_team = "join_team"
join_project = "join_project"
take_tour = "take_tour"
@staticmethod
def as_choices():
"""Return as a list of field choices."""
return [
(InviteAction.join_account.name, "Join account"),
(InviteAction.join_team.name, "Join team"),
(InviteAction.join_project.name, "Join project"),
(InviteAction.take_tour.name, "Take tour"),
]
class Invite(models.Model):
"""
An extension of the default invitation model.
Allows for different types of invitations, with actions
after success.
Re-implements the interface of `invitations.Invitation`
instead of extending it so that some fields can be redefined,
e.g. a shorter, case-sensitive `key`, and to avoid the unique constraint
on `email` (because of actions, a single email address could
be invited more than once).
The methods for each action should use API view sets
with synthetic requests having the `inviter` as the
request user. This reduces code and provides consistency
in permissions checking, thereby reducing errors.
Adds a `subject_object` `GenericForeignKey` to allow
querying from other models.
"""
key = models.CharField(
max_length=64,
unique=True,
default=generate_invite_key,
help_text="The key for the invite.",
)
inviter = models.ForeignKey(
User,
null=True,
blank=True,
on_delete=models.CASCADE,
related_name="invites",
help_text="The user who created the invite.",
)
email = models.EmailField(
max_length=2048, help_text="The email address of the person you are inviting."
)
message = models.TextField(
null=True, blank=True, help_text="An optional message to send to the invitee."
)
created = models.DateTimeField(
auto_now_add=True, help_text="When the invite was created."
)
sent = models.DateTimeField(
null=True, blank=True, help_text="When the invite was sent."
)
accepted = models.BooleanField(
default=False,
help_text="Whether the invite has been accepted. "
"Will only be true if the user has clicked on the invitation AND authenticated.",
)
completed = models.DateTimeField(
null=True, blank=True, help_text="When the invite action was completed",
)
action = models.CharField(
max_length=64,
null=True,
blank=True,
choices=InviteAction.as_choices(),
help_text="The action to perform when the invitee signs up.",
)
subject_type = models.ForeignKey(
ContentType,
null=True,
blank=True,
on_delete=models.CASCADE,
help_text="The type of the target of the action. e.g Team, Account",
)
subject_id = models.IntegerField(
null=True, blank=True, help_text="The id of the target of the action.",
)
subject_object = GenericForeignKey("subject_type", "subject_id")
arguments = models.JSONField(
null=True,
blank=True,
help_text="Any additional arguments to pass to the action.",
)
# These methods need to be implemented for the `invitations` API
key_expired = Invitation.key_expired
def send_invitation(self, request):
"""Extend method to add the invite object to the template context."""
context = dict(
inviter=self.inviter,
inviter_name=self.inviter.get_full_name() or self.inviter.username,
invite_message=self.message,
invite_url=request.build_absolute_uri(
reverse("ui-users-invites-accept", args=[self.key])
),
reason_for_sending="This email was sent by user '{0}' to invite you to "
"collaborate with them on Stencila Hub.".format(self.inviter.username),
)
get_invitations_adapter().send_mail(
"invitations/email/email_invite", self.email, context
)
self.sent = timezone.now()
self.save()
def __str__(self):
return "Invite {0} {1}".format(self.action, self.email)
# These methods implement invitation actions
def redirect_url(self) -> str:
"""
Get the URL to redirect the user to after the invite has been accepted.
"""
if self.action == "join_account":
return reverse("ui-accounts-retrieve", args=[self.arguments["account"]])
elif self.action == "join_team":
return reverse(
"ui-accounts-teams-retrieve",
args=[self.arguments["account"], self.arguments["team"]],
)
elif self.action == "join_project":
return reverse(
"ui-projects-retrieve",
args=[self.arguments["account"], self.arguments["project"]],
)
elif self.action == "take_tour":
return self.arguments["page"] + "?tour=" + self.arguments["tour"]
else:
return "/"
def create_request(self, data) -> HttpRequest:
"""
Create a synthetic request to pass to view sets.
"""
request = HttpRequest()
request.data = data
request.user = self.inviter
return request
def perform_action(self, request, user=None):
"""
Perform the action (if any) registered for this invitation.
"""
# Accept and save in case the action fails below
self.accepted = True
self.save()
if self.action:
            method = getattr(self, self.action, None)
if not method:
raise RuntimeError("No such action {0}".format(self.action))
method(user or request.user)
self.completed = timezone.now()
self.save()
def join_account(self, invitee):
"""
Add invitee to account with a particular role.
"""
from accounts.api.views import AccountsUsersViewSet
self.arguments["id"] = invitee.id
request = self.create_request(data=self.arguments)
viewset = AccountsUsersViewSet.init(
"create", request, args=[], kwargs=self.arguments
)
viewset.create(request, **self.arguments)
def join_project(self, invitee):
"""
Add invitee to project with a particular role.
If the user already has a project role, then the
invite is ignored.
"""
from projects.api.views.projects import ProjectsAgentsViewSet
self.arguments["type"] = "user"
self.arguments["agent"] = invitee.id
request = self.create_request(data=self.arguments)
viewset = ProjectsAgentsViewSet.init(
"create", request, args=[], kwargs=self.arguments
)
try:
viewset.create(request, **self.arguments)
except ValidationError as exc:
if "Already has a project role" not in str(exc):
raise exc
def take_tour(self, invitee):
"""
Nothing needs to be done here. User is redirected to tour URL.
"""
pass
```
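Below is a minimal, dependency-free sketch of the `getattr`-based action dispatch that `Invite.perform_action` uses above. The `DemoInvite` class and its action names are hypothetical stand-ins, not part of the Hub codebase.
```python
class DemoInvite:
    """Hypothetical stand-in mirroring the dispatch pattern in Invite.perform_action."""
    def __init__(self, action):
        self.action = action
    def perform_action(self, user):
        # Look up the method named by `action` and call it with the user
        method = getattr(self, self.action, None)
        if not method:
            raise RuntimeError("No such action {0}".format(self.action))
        method(user)
    def join_account(self, user):
        print("Adding {0} to the account".format(user))
    def take_tour(self, user):
        print("Redirecting {0} to the tour".format(user))
DemoInvite("join_account").perform_action("anna")  # Adding anna to the account
```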
#### File: manager/users/tasks.py
```python
import logging
import time
from typing import Union
import httpx
from celery import shared_task
from django import conf
from users.api.serializers import MeSerializer
from users.models import User, get_email
logger = logging.getLogger(__name__)
@shared_task
def update_services_all_users(services=["userflow"]):
"""
Update external services for all users.
"""
users = User.objects.select_related("personal_account").prefetch_related(
"projects", "accounts"
)
for user in users:
try:
update_services_for_user(user, services)
except Exception:
logger.warning(
"Exception while updating external services for user",
exc_info=True,
extra={"user": user},
)
@shared_task
def update_services_for_user(user: Union[User, int], services=["userflow"]):
"""
Update external services with latest values of a user's attributes.
If the user has turned off the related feature, then the service will not be updated.
"""
if isinstance(user, int):
user = (
User.objects.select_related("personal_account")
.prefetch_related("projects", "accounts")
.get(id=user)
)
data = MeSerializer(user).data
feature_flags = data["feature_flags"]
if "userflow" in services and feature_flags.get("product_tours", "on") == "on":
update_userflow(user, data)
last_userflow_request: float = 0
def update_userflow(user: User, data: dict):
"""
Update UserFlow data for a user.
See https://getuserflow.com/docs/api#create-or-update-a-user
    There are API rate limits (500 requests/min for our current account),
    so this implements a basic throttling approach: wait at least 0.5 seconds
    between requests. Although suboptimal, this is acceptable because the
    function is usually called in a background task.
"""
# UserFlow can take arbitrary user data but it is
# necessary to flatten it into a dictionary
attributes = {}
for name, value in data.items():
if isinstance(value, dict):
for subname, subvalue in value.items():
attributes[f"{name}_{subname}"] = subvalue
else:
attributes[name] = value
# UserFlow "expects" some attributes (shows them in
# their interface by default), so provide them:
# `name`, `email`
name = user.username
if name:
attributes["name"] = name
email = get_email(user)
if email:
attributes["email"] = email
# Remove attributes we don't want to send
del attributes["id"]
del attributes["public_email"]
del attributes["email_addresses"]
del attributes["linked_accounts"]
key = getattr(conf.settings, "USERFLOW_API_KEY", None)
if key:
global last_userflow_request
diff = time.time() - last_userflow_request
if diff < 0.5:
time.sleep(0.5 - diff)
last_userflow_request = time.time()
response = httpx.post(
"https://api.getuserflow.com/users",
headers={
"Authorization": f"Bearer {key}",
"UserFlow-Version": "2020-01-03",
},
json={"id": user.id, "attributes": attributes},
)
response.raise_for_status()
```
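A standalone sketch of the attribute-flattening step inside `update_userflow` above; the serializer keys shown are invented for illustration.
```python
data = {
    "username": "anna",                         # hypothetical serializer output
    "feature_flags": {"product_tours": "on"},
    "projects": {"count": 3},
}
attributes = {}
for name, value in data.items():
    if isinstance(value, dict):
        # One level of nesting becomes "parent_subkey" entries
        for subname, subvalue in value.items():
            attributes[f"{name}_{subname}"] = subvalue
    else:
        attributes[name] = value
print(attributes)
# {'username': 'anna', 'feature_flags_product_tours': 'on', 'projects_count': 3}
```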
#### File: users/ui/views.py
```python
import logging
import invitations.views
from allauth.account.views import EmailView, LoginView, LogoutView
from allauth.account.views import PasswordChangeView as BasePasswordChangeView
from allauth.account.views import SignupView as BaseSignupView
from allauth.socialaccount.views import ConnectionsView as BaseConnectionsView
from django.contrib.auth.decorators import login_required
from django.http import HttpRequest, HttpResponse
from django.shortcuts import redirect as redir
from django.shortcuts import render, reverse
from users.api.serializers import InviteSerializer
from users.api.views.invites import InvitesViewSet
from users.models import Flag, Invite
logger = logging.getLogger(__name__)
class AuthenticationMixin:
"""Mixin to provide additional template context."""
def get_context_data(self, *args, **kwargs):
"""Add extra context to template."""
data = super().get_context_data(*args, **kwargs)
data["providers"] = [
dict(name="github", title="GitHub", icon="github"),
dict(name="google", title="Google", icon="google"),
dict(name="orcid", title="ORCID"),
dict(name="twitter", title="Twitter", icon="twitter"),
]
if "auth_provider" in self.request.COOKIES:
data["auth_provider"] = self.request.COOKIES["auth_provider"]
return data
class SignupView(AuthenticationMixin, BaseSignupView):
"""Override allauth SignupView to custom URL and template name."""
template_name = "users/signup.html"
class SigninView(AuthenticationMixin, LoginView):
"""Override allauth LoginView to custom URL and template name."""
template_name = "users/signin.html"
class SignoutView(LogoutView):
"""Override of allauth LogoutView to custom URL and template name."""
template_name = "users/signout.html"
class AccountRoleMixin:
"""
Mixin that adds context variables necessary to render allauth views with the 'accounts/base.html' template.
"""
def get_context_data(self, **kwargs):
"""Add the account and role context variables."""
context = super().get_context_data(**kwargs)
context["account"] = self.request.user.personal_account
context["role"] = "OWNER"
return context
class PasswordChangeView(AccountRoleMixin, BasePasswordChangeView):
"""Override of allauth PasswordChangeView to add template context variables."""
class EmailsView(AccountRoleMixin, EmailView):
"""Override of allauth EmailView to add template context variables."""
class ConnectionsView(AccountRoleMixin, BaseConnectionsView):
"""Override of allauth ConnectionsView to add template context variables."""
@login_required
def invites_create(request: HttpRequest, *args, **kwargs) -> HttpResponse:
"""
Create a new invite.
    Will be linked to with an action and its arguments, e.g.
/me/invites/send?email=<EMAIL>&action=join_account&account=3&next=/3
"""
viewset = InvitesViewSet.init("create", request, args, kwargs)
context = viewset.get_response_context()
message = request.GET.get("message", "")
action = request.GET.get("action")
arguments = dict(
[
(key, value)
for key, value in request.GET.items()
if key not in ["email", "message", "action", "next"]
]
)
serializer = InviteSerializer(
data=dict(message=message, action=action, arguments=arguments)
)
serializer.is_valid()
next = request.GET.get("next", reverse("ui-users-invites-list"))
return render(
request,
"invitations/create.html",
dict(
**context,
account=request.user.personal_account,
role="OWNER",
serializer=serializer,
next=next
),
)
@login_required
def invites_list(request: HttpRequest, *args, **kwargs) -> HttpResponse:
"""
Get a list of invites.
    Mainly intended for users to check what invites they have sent.
"""
viewset = InvitesViewSet.init("list", request, args, kwargs)
invites = viewset.get_queryset()
return render(
request,
"invitations/list.html",
dict(account=request.user.personal_account, role="OWNER", invites=invites),
)
class AcceptInviteView(invitations.views.AcceptInvite):
"""Override to allow for invite actions."""
def get_object(self, *args, **kwargs):
"""
Override to allow case sensitive matching of keys.
"""
try:
return Invite.objects.get(key=self.kwargs["key"])
except Invite.DoesNotExist:
return None
def post(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:
"""
Override to perform invite action before or after sign up.
"""
# Do the usual processing of the invite so that
        # its `accepted` etc. fields get updated
response = super().post(request, *args, **kwargs)
invite = self.object
if not invite:
# No invite so just return response
return response
if request.user.is_authenticated:
            # Perform the action (if necessary) and redirect to its URL
if not invite.completed:
invite.perform_action(request)
return redir(invite.redirect_url())
else:
# Redirect to sign up page with invite URL
# as next
response = redir(
reverse("ui-users-signup") + "?next=" + invite.redirect_url()
)
# Set a cookie so that the action is performed
# once the user is logged in
response.set_cookie("invite", invite.key)
return response
def accept_invite_after_signup(sender, request, user, **kwargs):
"""
    After a user has signed up, check for an invite cookie and perform the action if present.
"""
key = request.COOKIES.get("invite")
if not key:
return
try:
invite = Invite.objects.get(key=key)
except Invite.DoesNotExist:
logger.warning("Could not find invite with key")
return
if not invite.completed:
invite.perform_action(request, user)
signed_up_signal = (
invitations.views.get_invitations_adapter().get_user_signed_up_signal()
)
signed_up_signal.connect(accept_invite_after_signup)
@login_required
def features(request: HttpRequest, *args, **kwargs) -> HttpResponse:
"""
Feature and privacy settings.
"""
flags = Flag.objects.filter(settable=True)
return render(
request,
"users/features.html",
dict(account=request.user.personal_account, role="OWNER", flags=flags,),
)
@login_required
def redirect(request: HttpRequest, *args, **kwargs) -> HttpResponse:
"""
Redirect from unmatched /me URLs to the user's account.
e.g. /me => /anna
    e.g. /me/settings => /anna/settings
    If the user is unauthenticated, they will be asked to sign in first.
"""
return redir("/{0}/{1}".format(request.user.personal_account, kwargs["rest"]))
```
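A quick sketch of how `invites_create` above builds the `arguments` dict from query-string parameters, dropping the reserved keys; the sample parameters are made up.
```python
# Simulated request.GET items for /me/invites/send?...&action=join_account&account=3&next=/3
params = {
    "email": "invitee@example.org",  # placeholder address
    "action": "join_account",
    "account": "3",
    "next": "/3",
}
arguments = dict(
    (key, value)
    for key, value in params.items()
    if key not in ["email", "message", "action", "next"]
)
print(arguments)  # {'account': '3'}
```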
#### File: worker/jobs/archive.py
```python
import logging
import os
import shutil
import tempfile
from typing import Optional
import httpx
from config import get_snapshot_dir
from jobs.base.job import Job
from util.files import Files, ensure_dir, list_files
logger = logging.getLogger(__name__)
class Archive(Job):
"""
A job that archives files from a project's working directory to a snapshot.
Previously, we copied all files to the snapshot dir. However, because there were
performance and reliability issues with that, we now only copy `index.html` and
related files. This is a temporary measure until we place these in a separate bucket.
"""
name = "archive"
def do( # type: ignore
self,
project: int,
snapshot: str,
path: str,
url: Optional[str],
secrets: Optional[dict],
**kwargs,
) -> Files:
assert isinstance(project, int)
assert isinstance(snapshot, str)
assert isinstance(path, str)
files = list_files()
# This section is temporary, in future index.html will be
# placed in content storage.
snapshot_dir = get_snapshot_dir(project, snapshot)
ensure_dir(snapshot_dir)
if os.path.exists("index.html"):
shutil.copy("index.html", snapshot_dir)
if os.path.exists("index.html.media"):
shutil.copytree(
"index.html.media", os.path.join(snapshot_dir, "index.html.media")
)
temp_zip_base_name = tempfile.NamedTemporaryFile().name
shutil.make_archive(temp_zip_base_name, "zip", ".")
temp_zip_file_name = temp_zip_base_name + ".zip"
if url:
response = httpx.post(
url, data=secrets, files={"file": open(temp_zip_file_name, "rb")}
)
response.raise_for_status()
else:
# In development simulate POSTing by copying to the snapshot storage
logger.warning("No URL was supplied, copying to snapshot dir")
from config import get_snapshots_root
shutil.copy(temp_zip_file_name, os.path.join(get_snapshots_root(), path))
return files
```
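A minimal sketch of the zipping step used by the `Archive` job above: `shutil.make_archive` takes a base name without an extension and returns the path of the `.zip` it creates. The temporary path is illustrative.
```python
import os
import shutil
import tempfile
# Zip the current working directory, as the job does before POSTing the archive
temp_zip_base_name = tempfile.NamedTemporaryFile(delete=False).name
archive_path = shutil.make_archive(temp_zip_base_name, "zip", ".")
assert archive_path == temp_zip_base_name + ".zip"
print(archive_path, os.path.getsize(archive_path), "bytes")
```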
#### File: jobs/base/job.py
```python
import os
import traceback
from datetime import datetime
from typing import Any, Optional
import celery
import sentry_sdk
from celery import states
from celery.exceptions import Ignore, SoftTimeLimitExceeded
from celery.utils.log import get_task_logger
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from config import get_working_dir
from util.files import ensure_dir
from util.serialize import serialize
# Log levels
# These are the same as used in https://github.com/stencila/logga
ERROR = 0
WARN = 1
INFO = 2
DEBUG = 3
# Get the Celery logger
logger = get_task_logger(__name__)
# Initialize Sentry. This reuses the environment variable already set
# for the `manager` service (which is why it is prefixed by `DJANGO_`).
DJANGO_SENTRY_DSN = os.environ.get("DJANGO_SENTRY_DSN")
if DJANGO_SENTRY_DSN:
sentry_sdk.init(
dsn=DJANGO_SENTRY_DSN, integrations=[CeleryIntegration(), RedisIntegration()]
)
else:
logger.warning(
"No DJANGO_SENTRY_DSN environment variable, errors will not be reported to Sentry."
)
class Job(celery.Task):
"""
Base class for all Jobs.
Extends Celery's Task class to handle logging and
termination of jobs.
"""
def begin(self, task_id=None):
"""
Begin the job.
Because the `run()` method is executed several times
for each task instance, this method performs any
initialization in advance.
"""
self.task_id = task_id
self.log_entries = []
def notify(self, state="RUNNING", **kwargs):
"""
Send a notification to the `overseer` service.
Used to update the status, url, log etc of a job.
"""
if self.task_id:
self.send_event(
"task-updated", task_id=self.task_id, state=state, **kwargs,
)
def flush(self):
"""
Flush the log.
In the future this may be more intelligent and buffer
sending of log events to reduce the number
of requests.
"""
if len(self.log_entries):
self.notify(log=self.log_entries)
def log(self, level: int, message: str):
"""
Create a log entry.
This function:
- Emits to the Python logger, and
- Appends an entry to the job's log and updates the
state with the log as metadata thereby making the
log and any extra details available to the `manager`.
(see the `update_job` there for how these are extracted)
"""
log_message = "Job {0}: {1}".format(self.name, message)
log_extra = {"task_id": self.task_id, "job_method": self.name}
if level == DEBUG:
logger.debug(log_message, extra=log_extra)
elif level == INFO:
logger.info(log_message, extra=log_extra)
elif level == WARN:
logger.warning(log_message, extra=log_extra)
else:
logger.error(log_message, extra=log_extra)
self.log_entries.append(
dict(time=datetime.utcnow().isoformat(), level=level, message=message)
)
self.flush()
def error(self, message: str):
"""Log an error message."""
self.log(ERROR, message)
def warn(self, message: str):
"""Log a warning message."""
self.log(WARN, message)
def info(self, message: str):
"""Log a informational message."""
self.log(INFO, message)
def debug(self, message: str):
"""Log a debug message."""
self.log(DEBUG, message)
def success(self, result: Any):
"""
Job has succeeded.
This method bundles the job result and the log together
and returns them both as the Celery task result.
"""
return dict(result=serialize(result), log=self.log_entries)
def terminated(self):
"""
Job has been terminated.
When cancelling a job the `manager` sends the `SIGUSR1`
signal which causes a `SoftTimeLimitExceeded` to be thrown
and this method to be called.
See https://github.com/celery/celery/issues/2727 for why
this is preferable to the `Terminate` signal (which can not
be caught in the same way and seems to kill the parent worker).
This method just flushes the log.
"""
self.flush()
def failure(self, exc: Exception):
"""
Job has failed due to an exception.
This method re-raises the exception so that it can be handled
by Celery (e.g. the job marked with `FAILURE`). However, before
doing so it reports the error to Sentry.
"""
sentry_sdk.capture_exception(exc)
# Stringify the original exception to avoid issues that Celery has
# with pickling some types of exceptions
raise Exception(str(exc))
def run(self, *args, task_id=None, **kwargs):
"""
Run the job.
This is an override of `Task.run` which is the method
that actually gets called by Celery each time a task
in processed. It wraps `self.do()` to handle
logging, exceptions, termination etc.
Most jobs need to operate within a project's working directory
and project `File` paths are always relative to those.
To avoid code repetition and potential errors this method requires
that a project id is supplied and changes into the working
directory of that project.
"""
project = kwargs.get("project")
if project is None:
raise ValueError("Project number must be provided as an argument!")
current_dir = os.getcwd()
working_dir = get_working_dir(project)
ensure_dir(working_dir)
self.begin(task_id)
try:
os.chdir(working_dir)
result = self.do(*args, **kwargs)
return self.success(result)
except SoftTimeLimitExceeded:
return self.terminated()
except Ignore as exc:
raise exc
except Exception as exc:
raise self.failure(exc)
finally:
os.chdir(current_dir)
def do(self, *args, **kwargs):
"""
Do the job!
Derived job classes should implement this method
"""
raise NotImplementedError("Method do() is not implemented")
```
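The sketch below distils the pattern `Job.run` implements above, with the Celery, Sentry, and config dependencies stripped out: change into the project's working directory, call `do()`, and always restore the previous directory. `DemoJob` and this `get_working_dir` are hypothetical simplifications, not the worker's real classes.
```python
import os
import tempfile
def get_working_dir(project: int) -> str:
    # Hypothetical stand-in for the worker's config.get_working_dir
    path = os.path.join(tempfile.gettempdir(), f"project-{project}")
    os.makedirs(path, exist_ok=True)
    return path
class DemoJob:
    def do(self, **kwargs):
        return sorted(os.listdir("."))
    def run(self, **kwargs):
        project = kwargs.get("project")
        if project is None:
            raise ValueError("Project number must be provided as an argument!")
        current_dir = os.getcwd()
        try:
            os.chdir(get_working_dir(project))
            return self.do(**kwargs)
        finally:
            os.chdir(current_dir)
print(DemoJob().run(project=42))  # e.g. [] for an empty working directory
```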
#### File: worker/jobs/clean.py
```python
import os
import shutil
from jobs.base.job import Job
from util.files import Files, list_files
class Clean(Job):
"""
A job that cleans a project's working directory.
    Recursively removes all files in the current directory (assumes that
    we have already changed into the project's working directory) and
returns a list of files (which should be empty).
This is equivalent to `rm -rf .` so be very careful where you run
this job.
"""
name = "clean"
def do( # type: ignore
self, **kwargs
) -> Files:
for root, dirs, files in os.walk("."):
for file in files:
try:
os.unlink(os.path.join(root, file))
except Exception as exc:
self.warn(str(exc))
for dir in dirs:
try:
shutil.rmtree(os.path.join(root, dir))
except Exception as exc:
self.warn(str(exc))
return list_files()
```
#### File: jobs/extract/gdrive.py
```python
import json
import re
from typing import Callable, Dict, List, Optional
from googleapiclient.discovery import build
from stencila.schema.types import Comment, Person, Review
from jobs.convert import Convert
from util.gapis import gdrive_service
def extract_gdrive(
source: Dict, filters: Optional[Dict] = {}, secrets: Optional[Dict] = {}, **kwargs
) -> Optional[Review]:
"""
Extract a review from the comments for a Google document.
"""
file_id = source.get("doc_id") or source.get("google_id")
assert file_id, "A Google document or file id is required"
assert secrets is not None, "Authentication tokens are required"
return create_review(filter_comments(get_comments(file_id, secrets), filters))
def get_comments(file_id: str, secrets: Dict) -> List[Dict]:
"""
Get all the comments on a document.
"""
comments = []
api = gdrive_service(secrets).comments()
request = api.list(fileId=file_id, fields="*")
while request is not None:
response = request.execute()
comments += response.get("comments", [])
request = api.list_next(request, response)
return comments
def filter_comments(comments: List[Dict], filters: Optional[Dict] = {}) -> List[Dict]:
"""
Filter a list of comments.
"""
if filters is None:
return comments
filtered = []
for comment in comments:
ok = True
for key, regex in filters.items():
if key == "name":
if not re.match(regex, comment.get("author", {}).get("displayName")):
ok = False
if ok:
filtered.append(comment)
return filtered
def create_review(comments: List[Dict]) -> Optional[Review]:
"""
    Create a `Review` from a list of comments.
"""
if len(comments) == 0:
return None
# Find the main comment in the review
main = None
rest = []
for comment in comments:
# Look for main comment
text = comment.get("content", "")
if main is None and is_main(text):
main = comment
else:
rest.append(comment)
if main is None:
# No main comment found so use the first
main = comments[0]
rest = comments[1:]
# Parse the main comment into the review
review = parse_comment(main, parse_markdown=True)
if review is None:
return None
# Convert the `Comment` into a `Review`
review_properties = review.__dict__
if "commentAspect" in review_properties:
del review_properties["commentAspect"]
# Parse the rest into its comments
review_comments = list(filter(lambda x: x is not None, map(parse_comment, rest)))
return Review(**review_properties, comments=review_comments,)
def is_main(text: str) -> bool:
"""
Is the comment the main comment.
"""
return re.match(r"^(\s*#[^#])|(---)", text) is not None
def parse_comment(data: Dict, parse_markdown=False) -> Optional[Comment]:
"""
Parse a dictionary of comment data into a `Comment` instance.
For a list of comment properties available see
https://googleapis.github.io/google-api-python-client/docs/dyn/drive_v3.comments.html#list
"""
content = data.get("content", "").strip()
if not content:
return None
if parse_markdown:
json_str = Convert().do(content, "-", {"from": "md", "to": "json"}) # type: ignore
article = json.loads(json_str)
if not article:
raise RuntimeError("Failed to convert review body from Markdown")
del article["type"]
comment = Comment(**article)
else:
comment = Comment(content=[content])
author = data.get("author")
if not comment.authors and author:
name = author.get("displayName")
email = author.get("emailAddress")
comment.authors = [Person(name=name, emails=[email] if email else None)]
dateCreated = data.get("createdTime")
if not comment.dateCreated and dateCreated:
comment.dateCreated = dateCreated
dateModified = data.get("modifiedTime")
if not comment.dateModified and dateModified:
comment.dateModified = dateModified
commentAspect = data.get("quotedFileContent", {}).get("value")
if not comment.commentAspect and commentAspect:
comment.commentAspect = commentAspect
comments = data.get("replies", [])
if len(comments) > 0:
comment.comments = list(map(parse_comment, comments))
return comment
```
#### File: jobs/extract/gdrive_test.py
```python
import os
import pytest
from stencila.schema.types import Comment, Review
from .gdrive import create_review, extract_gdrive, filter_comments, is_main
# To re-record this test, get a new Google token (e.g. via https://developers.google.com/oauthplayground),
# paste it in below, ensure that the Google account has access to the documents
# being accessed, and run
# ./venv/bin/pytest --record-mode=rewrite jobs/extract/gdrive_test.py
ACCESS_TOKEN = "<KEY>"
def test_filter_comments():
comments = [
dict(author=dict(displayName="<NAME>")),
dict(author=dict(displayName="<NAME>")),
]
n = len(comments)
assert len(filter_comments(comments, filters={})) == n
assert len(filter_comments(comments, filters={"unknown key": ""})) == n
assert len(filter_comments(comments, filters={"name": r"^J"})) == n
assert len(filter_comments(comments, filters={"name": r"^Ji"})) == 1
assert len(filter_comments(comments, filters={"name": r"^<NAME>$"})) == 1
assert len(filter_comments(comments, filters={"name": r"^$"})) == 0
def test_no_comments():
review = create_review([])
assert review is None
def test_only_empty_comments():
review = create_review([dict(), dict(content="")])
assert review is None
def test_no_comment_that_matches_main():
review = create_review([dict(content="I'm just a lonesome comment")])
assert review.content[0] == {
"type": "Paragraph",
"content": ["I'm just a lonesome comment"],
}
assert len(review.comments) == 0
review = create_review(
[dict(content="I'm the main cause I am first"), dict(content="Number two")]
)
assert review.content[0] == {
"type": "Paragraph",
"content": ["I'm the main cause I am first"],
}
assert len(review.comments) == 1
def test_missing_id():
with pytest.raises(AssertionError) as excinfo:
extract_gdrive(source={})
assert "A Google document or file id is required" in str(excinfo.value)
def test_missing_token():
with pytest.raises(AssertionError) as excinfo:
extract_gdrive(source=dict(doc_id="foo"))
assert "A Google access token is required" in str(excinfo.value)
def test_is_main():
assert is_main("# Review of ...")
assert is_main("---\ntitle: Review of ...")
assert not is_main("## Secondary heading")
assert not is_main("A thematic break ---")
@pytest.mark.vcr
def test_fixture_2():
"""
Test on a real Google Doc.
https://docs.google.com/document/d/1ngLlIJr2SJ15A7Mnb2mg7f8hWePUDzYETjMjTaPoJWM
"""
review = extract_gdrive(
source=dict(doc_id="1ngLlIJr2SJ15A7Mnb2mg7f8hWePUDzYETjMjTaPoJWM"),
secrets=dict(access_token=ACCESS_TOKEN),
)
assert isinstance(review, Review)
assert review.title == 'Review of "A Google Doc test fixture for Stencila"'
assert review.authors[0].name == "<NAME>"
assert review.dateCreated == "2020-11-25T23:22:51.811Z"
assert review.dateModified == "2020-11-26T01:47:09.180Z"
assert review.content == [
{
"type": "Paragraph",
"content": [
"Overall, this is a reasonable attempt at creating a tests fixture "
"for use in testing Stencila's integrations with Google Docs."
],
},
{"type": "Paragraph", "content": ["My main concerns are:"]},
{
"type": "List",
"items": [
{
"type": "ListItem",
"content": [
{"type": "Paragraph", "content": ["it is quite boring"]}
],
},
{
"type": "ListItem",
"content": [
{
"type": "Paragraph",
"content": [
"what if someone changes the documents, the tests will break!"
],
}
],
},
],
"order": "unordered",
},
]
assert len(review.comments) == 1
comment1 = review.comments[0]
assert comment1.content == ["This is a test second comment in my review."]
assert comment1.commentAspect == "is a test"
assert len(comment1.comments) == 1
comment1_reply1 = comment1.comments[0]
assert comment1_reply1.content == ["This is a test reply to my second comment."]
```
#### File: jobs/pull/elife.py
```python
import io
import os
import re
import shutil
from pathlib import Path
from typing import List, Optional
from lxml import etree
from util.files import Files, ensure_dir, file_info
from util.http import HttpSession
def pull_elife(source: dict, path: Optional[str] = None, **kwargs) -> Files:
"""
Pull an eLife article.
Fetches the XML of the article from https://elifesciences.org
and then walks the XML tree, downloading any graphic media from the
elife image server https://iiif.elifesciences.org/
"""
article = source.get("article")
assert article, "eLife source must have an article number"
if not path:
path = f"elife-{article}.xml"
folder, file = os.path.split(path)
ensure_dir(folder)
files = {}
session = HttpSession()
# Get the article JATS XML
response = session.fetch_url(f"https://elifesciences.org/articles/{article}.xml")
tree = etree.parse(io.BytesIO(response.content))
root = tree.getroot()
xlinkns = "http://www.w3.org/1999/xlink"
# Get the figures and rewrite hrefs
for graphic in root.iterdescendants(tag="graphic"):
href = graphic.attrib.get("{%s}href" % xlinkns)
if not href.startswith("elife"):
continue
if not href.endswith(".tif"):
href += ".tif"
url = f"https://iiif.elifesciences.org/lax:{article}%2F{href}/full/full/0/default.jpg"
image_name = href.replace(f"elife-{article}-", "")
image_name = re.sub(r"-v\d+\.tif$", ".jpg", image_name)
new_href = f"{file}.media/{image_name}"
image_path = os.path.join(folder, new_href)
os.makedirs(os.path.join(folder, f"{file}.media"), exist_ok=True)
session.pull(url, image_path)
graphic.attrib["{%s}href" % xlinkns] = new_href
graphic.attrib["mime-subtype"] = "jpeg"
files[image_path] = file_info(image_path)
if os.path.exists(path) and os.path.isdir(path):
shutil.rmtree(path)
tree.write(open(path, "wb"))
files[path] = file_info(path, mimetype="application/jats+xml")
return files
```
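A short sketch of the graphic `href` rewriting performed in `pull_elife` above; the article number and figure name are invented.
```python
import re
article = "12345"                      # hypothetical article number
file = f"elife-{article}.xml"          # the XML file name used for the .media folder
href = f"elife-{article}-fig1-v2.tif"  # hypothetical graphic href from the JATS XML
image_name = href.replace(f"elife-{article}-", "")
image_name = re.sub(r"-v\d+\.tif$", ".jpg", image_name)
new_href = f"{file}.media/{image_name}"
print(new_href)  # elife-12345.xml.media/fig1.jpg
```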
#### File: jobs/pull/github.py
```python
import os
import tempfile
from datetime import datetime
from pathlib import Path
from typing import List, Optional
from zipfile import ZipFile, ZipInfo
import httpx
from util.files import (
Files,
bytes_fingerprint,
ensure_parent,
file_fingerprint,
file_info,
file_mimetype,
)
from util.github_api import github_client
def pull_github(
source: dict, path: Optional[str] = None, secrets: dict = {}, **kwargs
) -> Files:
"""
Pull a GitHub repo/subpath.
If a user token is provided in `secrets` it will be used to authenticate
as that user.
"""
assert source.get("repo"), "GitHub source must have a repo"
subpath = source.get("subpath") or ""
if subpath.endswith("/"):
subpath = subpath[:-1]
path = path or "."
# Get the possibly token protected link for the repo archive
# See https://developer.github.com/v3/repos/contents/#download-a-repository-archive
client = github_client(secrets.get("token"))
repo_resource = client.get_repo(source["repo"])
archive_link = repo_resource.get_archive_link("zipball")
# Get the archive. To avoid it filling up memory, stream directly to file,
# Increase timeout over the default of 5s.
zip_file = tempfile.NamedTemporaryFile(suffix=".zip", delete=False)
with httpx.stream("GET", archive_link, timeout=60) as response:
for data in response.iter_bytes():
zip_file.write(data)
zip_file.close()
return pull_zip(zip_file.name, subpath=subpath, path=path)
def pull_zip(
zip_file: str, subpath: str = "", path: str = ".", strip: int = 1
) -> Files:
"""
Pull files from a Zip file.
:param zip_file: The path to the zip file.
:param subpath: The file or directory in the zip file to extract.
:param path: The destination path
:param strip: Number of leading components from filenames to ignore.
Similar to `tar`'s `--strip-components` option.
"""
files = {}
with ZipFile(zip_file, "r") as zip_archive:
for zip_info in zip_archive.infolist():
zip_path = zip_info.filename
# Skip directories
if zip_path[-1] == "/":
continue
# Remove the first element of the path (the repo name + hash)
inner_path = os.path.join(*(zip_path.split("/")[strip:]))
# Save if in the subpath
remainder_path = None
if subpath == "":
remainder_path = inner_path
elif inner_path.startswith(subpath + "/"):
chars = len(subpath) + 1
remainder_path = inner_path[chars:]
elif inner_path == subpath:
remainder_path = inner_path
if remainder_path:
dest_path = os.path.join(path, remainder_path)
# Using `extract` is much much faster than reading bytes
# and then writing them to file. Also it maintains other file info
# such as modified time in the file written to disk. This speed up
# is much more important for real world zips than any speed advantage
# due to not reading bytes twice for fingerprint generation.
zip_info.filename = dest_path
zip_archive.extract(zip_info)
files[remainder_path] = file_info(dest_path)
return files
```
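A small sketch of the path handling inside `pull_zip` above: the leading archive folder (repo name plus commit hash) is stripped and only entries under the requested subpath are kept. The zip entry names are invented.
```python
import os
strip = 1
subpath = "docs"
zip_paths = [
    "stencila-hub-0a1b2c3/README.md",
    "stencila-hub-0a1b2c3/docs/index.md",
    "stencila-hub-0a1b2c3/docs/img/logo.png",
]
for zip_path in zip_paths:
    # Zip entries always use "/" separators; POSIX-style joins assumed here
    inner_path = os.path.join(*(zip_path.split("/")[strip:]))
    if inner_path.startswith(subpath + "/"):
        remainder_path = inner_path[len(subpath) + 1:]
        print(remainder_path)
# index.md
# img/logo.png
```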
#### File: jobs/pull/http.py
```python
import mimetypes
import os
import shutil
from typing import Optional
import httpx
from util.files import Files, ensure_parent, file_ext, file_info, remove_if_dir
GIGABYTE = 1073741824.0
MAX_SIZE = 1 # Maximum file size in Gigabytes
def pull_http(
source: dict, path: Optional[str] = None, secrets: dict = {}, **kwargs
) -> Files:
"""
Pull a file from a HTTP source.
"""
url = source.get("url")
assert url, "Source must have a URL"
with httpx.stream("GET", url) as response:
if response.status_code != 200:
raise RuntimeError(f"Error when fetching {url}: {response.status_code}")
size = int(response.headers.get("Content-Length", 0)) / GIGABYTE
if size > MAX_SIZE:
RuntimeError(f"Size of file is greater than {MAX_SIZE}GB maximum: {size}GB")
if not path:
path = str(os.path.basename(url))
if not file_ext(path):
content_type = response.headers.get("Content-Type", "text/html").split(";")[
0
]
ext = mimetypes.guess_extension(content_type, strict=False)
path += ext or ".txt"
ensure_parent(path)
remove_if_dir(path)
with open(path, "wb") as file:
for data in response.iter_bytes():
file.write(data)
return {path: file_info(path)}
return {}
```
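A tiny sketch of how `pull_http` above picks a file extension when the URL lacks one, using `mimetypes.guess_extension` on the response's `Content-Type`; the content types are examples only.
```python
import mimetypes
for content_type in ["text/csv", "application/json", "made-up/unknown"]:
    ext = mimetypes.guess_extension(content_type.split(";")[0], strict=False)
    print(content_type, "->", ext or ".txt")
# text/csv -> .csv
# application/json -> .json
# made-up/unknown -> .txt
```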
#### File: jobs/push/gdoc.py
```python
import json
import os
import tempfile
from typing import List
from googleapiclient.discovery import build
from googleapiclient.http import MediaFileUpload
from oauth2client.client import GoogleCredentials
from jobs.convert import Convert
def push_gdoc(paths: List[str], project: str, source: dict):
"""
Push google doc using given user token
"""
assert "doc_id" in source, "source must have a doc_id"
assert "token" in source, "source must include a token"
assert len(paths) == 1, "paths must contain exactly one item"
docx_file = tempfile.NamedTemporaryFile(delete=False).name
json_file = os.path.join(project, paths[0])
Convert().do(json_file, docx_file, {"from": "gdoc", "to": "docx"}) # type: ignore
credentials = GoogleCredentials(
source["token"], None, None, None, None, None, "Stencila Hub Client",
)
drive_service = build("drive", "v3", credentials=credentials, cache_discovery=False)
files_resource = drive_service.files()
media = MediaFileUpload(docx_file)
files_resource.update(fileId=source["doc_id"], media_body=media).execute()
os.unlink(docx_file)
```
#### File: worker/jobs/register_test.py
```python
import os
from datetime import datetime
import pytest
from stencila.schema.json import object_encode
from stencila.schema.types import Article, CreativeWork, Person, PropertyValue, Review
from .register import Register
# These tests use a `pytest-recording` "cassette" to record responses from https://test.crossref.org/
# Those cassettes have had any sensitive data redacted from them.
# To re-record cassettes, run
# CROSSREF_DEPOSIT_CREDENTIALS=username:password ./venv/bin/pytest --record-mode=rewrite jobs/register_test.py
# Pretend these credentials have been set so that we can reuse the
# recorded cassettes
credentials = os.getenv("CROSSREF_DEPOSIT_CREDENTIALS", "username:password")
def is_isodate(value: str) -> bool:
"""Test that a string is a valid ISO date."""
return isinstance(datetime.fromisoformat(value.replace("Z", "+00:00")), datetime)
@pytest.mark.vcr
def test_register_article():
"""
Test registration of an Article.
"""
article = object_encode(
Article(
authors=[object_encode(Person(givenNames=["Joe"], familyNames=["James"]))],
title="My preprint",
)
)
job = Register(credentials=credentials)
result = job.do(
node=article,
doi="10.47704/54320",
url="https://example.org",
batch="the-unique-batch-id",
)
assert isinstance(result, dict)
assert is_isodate(result["deposited"])
assert isinstance(result["deposit_request"], dict)
assert isinstance(result["deposit_response"], dict)
assert result["deposit_success"]
@pytest.mark.vcr
def test_register_review():
"""
Test registration of a Review.
"""
review = object_encode(
Review(
authors=[object_encode(Person(givenNames=["Joe"], familyNames=["James"]))],
title="Review of my preprint",
itemReviewed=object_encode(
Article(
identifiers=[
object_encode(PropertyValue(name="doi", value="10.5555/54320"))
]
)
),
)
)
job = Register(credentials=credentials)
result = job.do(
node=review,
doi="10.47704/54321",
url="https://example.org",
batch="the-unique-batch-id",
)
assert isinstance(result, dict)
assert is_isodate(result["deposited"])
assert isinstance(result["deposit_request"], dict)
assert isinstance(result["deposit_response"], dict)
assert result["deposit_success"]
@pytest.mark.vcr
def test_register_utf8():
"""
Test registration of an article with UTF-8 chars.
"""
job = Register(credentials=credentials)
result = job.do(
node=object_encode(Article(title="A title with non-ASCII chars Й in it.")),
doi="10.47704/54321",
url="https://example.org",
batch="the-unique-batch-id",
)
assert result["deposit_success"]
@pytest.mark.vcr
def test_bad_credentials():
"""
Test incorrect credentials error.
"""
job = Register(credentials="foo:bar")
result = job.do(
node=object_encode(Article()),
doi="10.5555/54321",
url="https://example.org",
batch="the-unique-batch-id",
)
assert isinstance(result, dict)
assert is_isodate(result["deposited"])
assert isinstance(result["deposit_request"], dict)
assert isinstance(result["deposit_response"], dict)
assert not result["deposit_success"]
def test_no_credentials():
"""
Test no credentials error.
"""
if "CROSSREF_DEPOSIT_CREDENTIALS" in os.environ:
del os.environ["CROSSREF_DEPOSIT_CREDENTIALS"]
job = Register()
result = job.do(
node=object_encode(Article()),
doi="10.5555/54321",
url="https://example.<EMAIL>",
batch="the-unique-batch-id",
)
assert isinstance(result, dict)
assert len(result.keys()) == 0
```
#### File: jobs/session/subprocess_session.py
```python
import os
from config import get_node_modules_bin, get_snapshot_dir
from jobs.base.subprocess_job import SubprocessJob
from util.network import get_local_ip, get_random_port
class SubprocessSession(SubprocessJob):
"""
Runs a session in a local subprocess.
This class should only be used for trusted sessions.
"""
def do(self, *args, **kwargs):
"""
Start the session.
Override of `Job.do` which updates the job state with the
URL of the session before starting the session (which blocks
until the job is terminated).
"""
# Project to start session for
project = kwargs.get("project")
assert project is not None, "A project id is required to start a session"
# If a snapshot directory is specified then change into it
# (if not a snapshot session then we will already be in the project's working directory)
snapshot = kwargs.get("snapshot")
snapshot_url = kwargs.get("snapshot_url")
if snapshot:
assert snapshot_url, "A snapshot_url is required for snapshots"
os.chdir(get_snapshot_dir(project, snapshot))
ip = get_local_ip()
ports = {"ws": get_random_port(), "http": get_random_port()}
urls = dict(
(protocol, f"{protocol}://{ip}:{port}") for protocol, port in ports.items()
)
self.notify(state="RUNNING", urls=urls)
return super().do(
[get_node_modules_bin("executa"), "serve", "--debug"]
+ [f"--{protocol}=0.0.0.0:{port}" for protocol, port in ports.items()]
)
```
#### File: worker/jobs/sleep_test.py
```python
import pytest
from .base.job import INFO
from .sleep import Sleep
def test_sleep():
"""
A simple test of the sleep job.
Given that this job class is mainly just for integration
testing, don't try to do anything fancy here.
"""
job = Sleep()
current = {"index": 0}
def send_event(event, **kwargs):
assert kwargs.get("state") == "RUNNING"
index = current["index"]
assert kwargs["log"][index]["level"] == INFO
assert kwargs["log"][index]["message"].startswith(
"This is repetition {}".format(index + 1)
)
current["index"] += 1
job.send_event = send_event
job.begin()
job.do()
```
#### File: worker/util/files_test.py
```python
from .files import assert_within, is_within
def test_is_within():
assert is_within(".", "child")
assert is_within(".", "child/grandchild")
assert is_within(".", "child/grandchild/..")
assert is_within("child", "child/grandchild")
assert not is_within(".", "..")
assert not is_within(".", "../..")
assert not is_within(".", "child/../../..")
assert not is_within("child", "child/../../deep/../..")
def test_assert_within():
assert_within(".", "child")
```
#### File: worker/util/gapis.py
```python
import json
import os
from typing import Dict
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials
def google_credentials(secrets: Dict) -> GoogleCredentials:
"""
Create a Google credentials object to use with Google APIs.
"""
assert secrets.get(
"access_token"
), """A Google access token is required. Please connect a Google account to your Stencila
account at https://hub.stenci.la/me/social/connections/."""
return GoogleCredentials(
access_token=secrets.get("access_token"),
client_id=secrets.get("client_id"),
client_secret=secrets.get("client_secret"),
refresh_token=secrets.get("refresh_token"),
token_expiry=None,
token_uri="https://accounts.google.com/o/oauth2/token",
user_agent="Stencila Hub Client",
)
def gdocs_service(secrets: Dict):
"""
Build a Google Docs API service client.
"""
return build(
"docs", "v1", credentials=google_credentials(secrets), cache_discovery=False
)
def gdrive_service(secrets: Dict):
"""
Build a Google Drive API service client.
"""
return build(
"drive", "v3", credentials=google_credentials(secrets), cache_discovery=False
)
def gsheets_service(secrets: Dict):
"""
Build a Google Sheets API service client.
"""
return build(
"sheets", "v4", credentials=google_credentials(secrets), cache_discovery=False
)
``` |
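A hedged usage sketch for the helpers above. The `secrets` value is a placeholder; a real OAuth access token with Drive scope (and network access) would be needed for the call to succeed.
```python
from util.gapis import gdrive_service  # assumes the worker's package layout above
secrets = {"access_token": "<access token from a connected Google account>"}  # placeholder
drive = gdrive_service(secrets)
# List a few files visible to the token's account
response = drive.files().list(pageSize=5, fields="files(id, name)").execute()
for item in response.get("files", []):
    print(item["id"], item["name"])
```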
{
"source": "jlcalvano/craigslist-room-share-scrapper",
"score": 2
} |
#### File: jlcalvano/craigslist-room-share-scrapper/main.py
```python
from utils.db import insert_into, does_id_exist
from utils.email_sender import send_email
import config
import os
os.chdir(r"C:\Users\jlcal\Desktop\Projects\craigslist-airbnb-scrapper")
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from bs4 import BeautifulSoup
import airbnb
import re
DRIVER_PATH = r"driver\chromedriver.exe"
ser = Service(DRIVER_PATH)
print(os.path.dirname(os.getcwd()))
chrome_options = Options()
chrome_options.add_argument("--disable-extensions")
chrome_options.add_argument("--disable-gpu")
#chrome_options.add_argument("--no-sandbox") # linux only
chrome_options.add_argument("--headless")
chrome_options.add_argument('log-level=3')
# chrome_options.headless = True # also works
driver = webdriver.Chrome(service=ser,options=chrome_options)
driver.get('https://newjersey.craigslist.org/search/hhh?lat=40.99104221112936&lon=-74.33555603027345&excats=2-17-21-1-17-7-34-22-22-1&search_distance=4&max_price=1500&availabilityMode=0&sale_date=all+dates')
soup = BeautifulSoup(driver.page_source,'html.parser')
def clean_the_string(element):
if element:
text = element.getText().strip().title()
else:
text = ''
return text
towns = config.towns
craig_entries = []
res = soup.select_one('#search-results')
for item in res.findChildren(recursive=False):
if item.name != 'li':
break
pid = item.attrs['data-pid']
name = clean_the_string(item.find('h3'))
try:
hood = re.search(r"(?<=\().+?(?=\))",str(item.find('span',class_='result-hood'))).group()
except:
hood = ''
pass
price = clean_the_string(item.find('span',class_='result-price'))
dist = clean_the_string(item.find('span',class_='maptag'))
href = item.find('a',class_='result-image').attrs['href']
if any([x not in hood.lower() for x in towns]) or hood == '':
inDb = does_id_exist(pid)
if not inDb:
insert_into(pid)
entry = {
"title": name,
"town": hood.title(),
"link": href,
"price": price,
"distance": dist,
"isNew": not inDb
}
craig_entries.append(entry)
for item in craig_entries:
driver.get(item["link"])
soup = BeautifulSoup(driver.page_source,'html.parser')
strPostedDate = clean_the_string(soup.find('p',class_="postinginfo reveal")).replace('Posted','').strip()
if strPostedDate == 'About A Month Ago':
strPostedDate = '~ 1 Month'
item.update({
"posted": strPostedDate
})
driver.quit()
airbnd_entries = airbnb.main()
send_email(craig_entries, airbnd_entries)
print('\nComplete\n')
``` |
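A standalone sketch of the neighbourhood extraction used above: the regex captures the text between the first pair of parentheses in the `result-hood` span. The sample markup is invented.
```python
import re
# Hypothetical snippet of a Craigslist result row
span = '<span class="result-hood"> (Wayne Township)</span>'
match = re.search(r"(?<=\().+?(?=\))", span)
hood = match.group() if match else ""
print(hood)  # Wayne Township
```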
{
"source": "jlcanovas/sourcecred-tooling",
"score": 3
} |
#### File: jlcanovas/sourcecred-tooling/convert_graph_to_D3JSON.py
```python
import getopt
import json
import sys
from igraph import Graph
"""
Usage of this script
Main options:
-i - The path of the igraph-compatible graph file
-o - The path for the JSON file
"""
USAGE = 'convert_graph_to_D3JSON.py -i CRED_GRAPH_PATH -o OUTPUT_GRAPH'
def convert_to_JSON(graph):
"""Converts an igraph into a D3-compatible json file
:returns a json Object
"""
nodes = []
for node in graph.vs:
nodes.append({'id': node['name'],
'label': node['label'],
'size': node['totalCred'],
'type': node['type']
})
edges = []
for edge in graph.es:
edges.append({'source': graph.vs[edge.source]['name'],
'target': graph.vs[edge.target]['name'],
'width': edge['forwardFlow'],
'id': str(edge.source) + "+" + str(edge.target)})
json_g = {
'nodes': nodes,
'edges': edges
}
return json_g
def main(argv):
if len(argv) == 0:
sys.exit(0)
try:
opts, args = getopt.getopt(argv, "hi:o:", [])
except getopt.GetoptError:
print(USAGE)
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
print(USAGE)
sys.exit()
elif opt in ('-i'):
input_graph_path = arg
elif opt in ('-o'):
output_path = arg
g = Graph.Load(input_graph_path)
json_g = convert_to_JSON(g)
with open(output_path, 'w') as f:
json.dump(json_g, f)
if __name__ == "__main__":
main(sys.argv[1:])
```
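A usage sketch for the converter above, assuming the script is importable as a module and that the graph carries the `name`, `label`, `totalCred`, `type`, and `forwardFlow` attributes it reads; the two-node graph is made up.
```python
import json
from igraph import Graph
from convert_graph_to_D3JSON import convert_to_JSON  # assumes the script is on the import path
g = Graph(n=2, edges=[(0, 1)])
g.vs["name"] = ["user/alice", "repo/hub"]
g.vs["label"] = ["alice", "hub"]
g.vs["totalCred"] = [10.0, 5.0]
g.vs["type"] = ["USERLIKE", "REPO"]
g.es["forwardFlow"] = [0.25]
print(json.dumps(convert_to_JSON(g), indent=2))
```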
#### File: jlcanovas/sourcecred-tooling/join_csvs.py
```python
import sys
import csv
"""
Usage of this script
"""
USAGE = 'join_csvs.py FILE1.csv FILE2.csv'
def main(argv):
if len(argv) == 0:
print(USAGE)
sys.exit(0)
# Loading first CSV file
csv1_path = argv[0]
csv1_map = {}
with open(csv1_path, newline='') as csv1_file:
csv1_reader = csv.reader(csv1_file, delimiter=',')
for row in csv1_reader:
if row[2] == "USERLIKE":
csv1_map[row[3]] = row # We take only users
# Loading second CSV file
csv2_path = argv[1]
csv2_map = {}
with open(csv2_path, newline='') as csv2_file:
csv2_reader = csv.reader(csv2_file, delimiter=',')
for row in csv2_reader:
if row[2] == "USERLIKE":
csv2_map[row[3]] = row # We take only users
# Generating the CSV
print(f'id,username,cred_coder,cred_coder_perc,cred_commenter,cred_commenter_perc,cred_total')
for key in csv1_map:
csv1_row = csv1_map[key]
csv2_row = csv2_map[key]
cred_coder = csv1_row[1]
cred_commenter = csv2_row[1]
cred_total = str(float(csv1_row[1])+float(csv2_row[1]))
cred_coder_perc = str(float(cred_coder)/float(cred_total)) if float(cred_total) > 0 else 0
cred_commenter_perc = str(float(cred_commenter)/float(cred_total)) if float(cred_total) > 0 else 0
print(f'{csv1_row[0]},{key},{cred_coder},{cred_coder_perc},{cred_commenter},{cred_commenter_perc},{cred_total}')
if __name__ == "__main__":
main(sys.argv[1:])
``` |
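A small sketch of the per-user arithmetic above, using two hypothetical rows in the column layout the script expects (`id, cred, type, username`).
```python
# Hypothetical rows for the same user taken from the two input CSVs
csv1_row = ["42", "12.5", "USERLIKE", "alice"]   # cred as a coder
csv2_row = ["42", "7.5", "USERLIKE", "alice"]    # cred as a commenter
cred_coder = float(csv1_row[1])
cred_commenter = float(csv2_row[1])
cred_total = cred_coder + cred_commenter
cred_coder_perc = cred_coder / cred_total if cred_total > 0 else 0
cred_commenter_perc = cred_commenter / cred_total if cred_total > 0 else 0
print(cred_coder_perc, cred_commenter_perc, cred_total)  # 0.625 0.375 20.0
```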
{
"source": "jlcanovas/togglAnalyzer",
"score": 3
} |
#### File: togglAnalyzer/togglanalyzer/storage.py
```python
import mysql.connector
class TogglStorage(object):
def __init__(self, config):
self.config = config
def init_db(self, force=False):
"""Inits the database, creating the required tables"""
cnx = mysql.connector.connect(user=self.config.USER, password=self.config.PASSWORD, database=self.config.DATABASE, host=self.config.HOST, port=self.config.PORT, raise_on_warnings=True, buffered=True)
query = "SHOW TABLES LIKE 'entry'"
cursor = cnx.cursor()
cursor.execute(query)
result = cursor.fetchone()
if result is not None:
            print('There are tables in the database!')
        if result is not None and not force:
            print('Launch the script with the -f option')
            return
        if result is not None and force:
            print('Deleting tables')
drop_table_entry = 'DROP TABLE IF EXISTS entry;'
drop_table_project = 'DROP TABLE IF EXISTS project;'
drop_table_tag = 'DROP TABLE IF EXISTS tag;'
drop_table_entry_tag = 'DROP TABLE IF EXISTS entry_tag;'
cursor = cnx.cursor()
cursor.execute(drop_table_entry)
cursor.execute(drop_table_project)
cursor.execute(drop_table_tag)
cursor.execute(drop_table_entry_tag)
cursor.close()
        print('Creating entry table')
create_table_entry = "CREATE TABLE entry( " \
"id int(20) PRIMARY KEY, " \
"description varchar(255), " \
"start timestamp, " \
"end timestamp, " \
"user varchar(255), " \
"project int(20), " \
"INDEX useri (user), " \
"INDEX projecti (project) " \
") ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC;"
cursor = cnx.cursor()
cursor.execute(create_table_entry)
        print('Creating project table')
create_table_project = "CREATE TABLE project( " \
"id int(20) AUTO_INCREMENT PRIMARY KEY, " \
"name varchar(255), " \
"INDEX namei (name) " \
") ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC;"
cursor = cnx.cursor()
cursor.execute(create_table_project)
        print('Creating tag table')
create_table_tag = "CREATE TABLE tag( " \
"id int(20) AUTO_INCREMENT PRIMARY KEY, " \
"name varchar(255), " \
"INDEX namei (name) " \
") ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC;"
cursor = cnx.cursor()
cursor.execute(create_table_tag)
cursor.close()
        print('Creating entry_tag table')
create_table_entry_tag = "CREATE TABLE entry_tag( " \
"entry_id int(20), " \
"tag_id int(20), " \
"PRIMARY KEY tagentryi (entry_id, tag_id) " \
") ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=DYNAMIC;"
cursor = cnx.cursor()
cursor.execute(create_table_entry_tag)
cursor.close()
def add_project(self, project):
        cnx = mysql.connector.connect(user=self.config.USER, password=self.config.PASSWORD, database=self.config.DATABASE, host=self.config.HOST, port=self.config.PORT, raise_on_warnings=True, buffered=True)
cursor = cnx.cursor()
query = "INSERT IGNORE INTO project(name) VALUES (%s)"
arguments = [project]
cursor.execute(query, arguments)
cnx.commit()
cursor.close()
def add_tag(self, tag):
cnx = mysql.connector.connect(user=self.config.USER, password=self.config.PASSWORD, database=self.config.DATABASE, host=self.config.HOST, port=self.config.PORT, raise_on_warnings=True, buffered=True)
cursor = cnx.cursor()
query = "INSERT IGNORE INTO tag(name) VALUES (%s)"
arguments = [tag]
cursor.execute(query, arguments)
cnx.commit()
cursor.close()
def add_entry_tag(self, entry, tag):
tag_id = self.get_tag_id(tag)
        cnx = mysql.connector.connect(user=self.config.USER, password=self.config.PASSWORD, database=self.config.DATABASE, host=self.config.HOST, port=self.config.PORT, raise_on_warnings=True, buffered=True)
cursor = cnx.cursor()
query = "INSERT IGNORE INTO entry_tag(entry_id, tag_id) VALUES (%s, %s)"
arguments = [entry, tag_id]
cursor.execute(query, arguments)
cnx.commit()
cursor.close()
def get_tag_id(self, tag):
"""
Utility function to obtain the id of a tag
:param tag: Name of the tag
"""
        cnx = mysql.connector.connect(user=self.config.USER, password=self.config.PASSWORD, database=self.config.DATABASE, host=self.config.HOST, port=self.config.PORT, raise_on_warnings=True, buffered=True)
cursor = cnx.cursor()
query = "SELECT id FROM tag WHERE name = %s"
arguments = [tag]
cursor.execute(query, arguments)
result = cursor.fetchone()
cursor.close()
if result is None:
self.add_tag(tag)
return self.get_tag_id(tag)
repo_id = result[0]
cnx.close()
return repo_id
def get_project_id(self, project):
"""
Utility function to obtain the id of a project
:param project: Name of the project
"""
        cnx = mysql.connector.connect(user=self.config.USER, password=self.config.PASSWORD, database=self.config.DATABASE, host=self.config.HOST, port=self.config.PORT, raise_on_warnings=True, buffered=True)
cursor = cnx.cursor()
query = "SELECT id FROM project WHERE name = %s"
arguments = [project]
cursor.execute(query, arguments)
result = cursor.fetchone()
cursor.close()
if result is None:
self.add_project(project)
return self.get_project_id(project)
repo_id = result[0]
cnx.close()
return repo_id
def add_entry(self, id, description, start, end, user, project, tags):
"""Adds a new entry to the entry table"""
cnx = mysql.connector.connect(user=self.config.USER, password=self.config.PASSWORD, database=self.config.DATABASE, host=self.config.HOST, port=self.config.PORT, raise_on_warnings=True, buffered=True)
cursor = cnx.cursor()
query = "INSERT IGNORE INTO entry(id, description, start, end, user, project) VALUES (%s, %s, %s, %s, %s, %s)"
digested_start = start.replace('T', ' ')
digested_start = digested_start[:-6]
digested_end = end.replace('T', ' ')
digested_end = digested_end[:-6]
project_id = self.get_project_id(project)
arguments = [id, description, digested_start, digested_end, user, project_id]
cursor.execute(query, arguments)
cnx.commit()
cursor.close()
for tag in tags:
self.add_entry_tag(id, tag)
``` |
{
"source": "JLCaraveo/sklearn-projects-Platzi",
"score": 3
} |
#### File: sklearn-projects-Platzi/production_project/utils.py
```python
import pandas as pd
import joblib
class Utils:
def load_from_csv(self, path):
return pd.read_csv(path)
def load_from_mysql(self):
pass
def features_target(self, df, drop_columns, target):
x = df.drop(drop_columns, axis=1)
y = df[target]
return x, y
def model_export(self, clf, score):
print(score)
joblib.dump(clf, './models/best_model.pkl')
``` |
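A brief usage sketch for the `Utils` helper above with a toy DataFrame; the column names are invented and the class is assumed to be importable from `utils`.
```python
import pandas as pd
from utils import Utils  # assumes the file above is saved as utils.py
df = pd.DataFrame({
    "country": ["A", "B", "C"],
    "gdp": [1.1, 0.9, 1.4],
    "score": [7.0, 6.5, 7.2],
})
utils = Utils()
X, y = utils.features_target(df, drop_columns=["score", "country"], target=["score"])
print(X.columns.tolist())  # ['gdp']
print(y.shape)             # (3, 1)
```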
{
"source": "JLCarveth/Consize",
"score": 3
} |
#### File: JLCarveth/Consize/main.py
```python
import os,sys
import re
from nltk.tokenize import sent_tokenize
import datetime
current_version = 1.1
os.chdir("data")
# Keep all blacklisted words in a text file for easy access/updating
try:
blacklist_file = open("blacklist.txt", "r+")
with open("content.txt", "r+") as content_file:
content = content_file.read()
except IOError:
print('Error opening file.')
blacklist = []
for line in blacklist_file.readlines():
blacklist.append(line.strip('\n'))
# Split the content string into a list, then remove all blacklisted words.
def scrub_n_split(text):
'''
String -> List[#of words in str]
Converts a string to a list, making each word its own element and removing
useless characters I don't need.
'''
content_list = re.sub('[()\/!@#$%^&*|{}!><.,\n]',' ', text)
content_list = content_list.split(' ')
content_list = [x.lower() for x in content_list]
content_list = [x for x in content_list if x not in blacklist]
return content_list
def split_to_sentence(content):
'''
List of Str -> List of Str
'''
sentences = sent_tokenize(content)
return sentences
def frequency(l):
'''
List of Strings -> List[Str, int]
Takes a scrubbed list of words and counts the frequency of each word in
the list. Returns a nested list with the word and an integer representing
how frequent the word is in the text.
'''
frequency = []
for i in range(0,len(l)):
if(l[i] not in frequency):
occur = l.count(l[i])
frequency.append([occur, l[i]])
else:
pass
f_set = set(tuple(x) for x in frequency)
frequency = [list(x) for x in f_set]
frequency.sort(reverse=True)
return frequency
def rank_sentences(s, f):
'''
(List of Str, List of [int, Str]) -> List of [int, Str]
Takes as input the following:
1. s = List of sentences extracted from text
2. f = Nested list of words in the text and their occurence
Returns a nested list with sentences and their 'points' based on how many
popular key words they contain.
'''
ranked_s = []
for x in range(len(s)):
score = 0
for y in range(len(f)):
if f[y][1] in s[x]:
score += f[y][0]
ranked_s.append([score, s[x]])
return ranked_s
def sentence_trim(sr, s, constrict=70):
'''
    (List of [int, Str], List of Str, int=70) -> List of [int, Str]
    Takes as input a nested list of [int (repr. sentence points/rank),
    sentence] pairs and a List of sentences, and returns a nested list
    with sentences in their original order and superfluous sentences dropped.
    The `constrict` optional argument (default 70) is the percentage of
    lowest-ranked sentences to drop before building the final list.
'''
x = len(sr)
y = x - (x * (constrict/100))
sr_sorted = sr[:]
sr_sorted.sort(reverse=True)
for i in range(int(x-y)):
try:
sr_sorted.pop()
except IndexError:
pass
result = [x for x in sr if x in sr_sorted]
return(result)
def write_log(org_wordc, fin_wordc):
'''
(int, int) -> None
Logs actions to a text file.
'''
current_time = datetime.datetime.now()
with open('log.txt', 'a') as log_file:
log_file.write("#"*40+'\n')
log_file.write('Action: Summarize\n')
log_file.write('Time: '+str(current_time) + '\n')
log_file.write('Words in Original:'+ str(org_wordc) + '\n')
log_file.write('Words in Consized: '+ str(fin_wordc) + '\n')
def main():
words = scrub_n_split(content)
sentences = split_to_sentence(content)
word_frequency = frequency(words)
sentence_frequency = rank_sentences(sentences, word_frequency)
final = sentence_trim(sentence_frequency, sentences)
final_words = 0
for x in range(len(final)):
print(final[x][1], end=" ")
final_words += len(final[x][1].split(' '))
write_log(len(words), final_words)
blacklist_file.close()
``` |
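An equivalent, standalone sketch of what `frequency` above computes (word counts sorted most-frequent first), written with `collections.Counter` so it runs without the script's input files.
```python
from collections import Counter
words = ["data", "science", "data", "python", "data"]
freq = sorted(([count, word] for word, count in Counter(words).items()), reverse=True)
print(freq)  # [[3, 'data'], [1, 'science'], [1, 'python']]
```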
{
"source": "jlcastrogro/Clima",
"score": 3
} |
#### File: jlcastrogro/Clima/Control.py
```python
import json
import threading
import time
from Reader import Reader
from Writer import Writer
from Publisher import Publisher
from Consumer import Consumer
from queue import Queue
class Control(threading.Thread):
    def __init__(self, clima):
        super().__init__()
        self.clima = clima
self.queue = Queue(100)
self.reader = Reader(self, self.clima)
self.writer = Writer(self.clima)
self.consumer = Consumer(self, self.clima)
self.publisher = Publisher(self.clima)
def run(self):
threads = [self.reader, self.writer, self.consumer, self.publisher]
for thread in threads:
thread.start()
while True:
if self.clima.state and (not self.queue.empty()):
self.type_message(self.get_element_queue())
time.sleep(0.01)
def type_message(self, msg):
msg = json.loads(msg)
type = msg['type']
if type == "temp":
if msg['temp'] > 28:
msg = json.dumps(
{"cmd": "HIGH", "pin": self.clima.pin_actuador_1})
else:
msg = json.dumps(
{"cmd": "LOW", "pin": self.clima.pin_actuador_0})
elif type == "cmd":
cmd = msg["cmd"]
if cmd == "offreader":
self.reader.set_state(False)
elif cmd == "onreader":
self.reader.set_state(True)
elif cmd == "offwriter":
self.writer.set_state(False)
elif cmd == "onwriter":
self.writer.set_state(True)
elif cmd == "autowriter":
self.reader.set_state(True)
self.writer.set_auto(True)
elif cmd == "noautowriter":
self.reader.set_state(False)
self.writer.set_auto(False)
elif cmd == "offclima":
self.clima.set_state(False)
elif cmd == "onclima":
self.clima.set_state(True)
else:
msg = json.dumps(msg)
print("Escribiendo en la cola del writer:", msg)
if self.writer.get_state():
self.writer.put_element_queue(msg)
        if self.publisher.get_state():
            self.publisher.put_element_queue(msg)
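    # Illustrative payloads for type_message (values are examples, not taken
    # from the project's configuration):
    #   '{"type": "temp", "temp": 30}'  -> queues {"cmd": "HIGH", "pin": ...}
    #   '{"type": "cmd", "cmd": "offclima"}' -> calls self.clima.set_state(False)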
def put_element_queue(self, element):
self.queue.put(element)
def get_element_queue(self):
return self.queue.get()
def set_state(self, state):
self.state = state
def get_state(self):
return self.state
```
#### File: jlcastrogro/Clima/Publisher_Extern.py
```python
import pika
import sys
import base64
import json
import os
import utils
import queue
class Publisher_Extern(object):
def __init__(self, server, user, password, queue_rabbit):
# Variables de RabbitMQ
self.connection = None
self.channel = None
self.server = server
self.user = user
self.password = password
self.queue_rabbit = queue_rabbit
# Funciones para inicializar la conexión
self.create_connection()
self.create_channel()
self.declare_queue()
def create_connection(self):
credentials = pika.credentials.PlainCredentials(
self.user, self.password)
self.connection = pika.BlockingConnection(
pika.ConnectionParameters(host=self.server, credentials=credentials))
def create_channel(self):
self.channel = self.connection.channel()
def declare_queue(self):
self.channel.queue_declare(queue=self.queue_rabbit)
def send_message(self, msg):
msg = self.encode_message(msg)
self.channel.basic_publish(exchange='',
routing_key=self.queue_rabbit,
body=msg)
print("Mensaje enviado: ", msg)
self.close_connection()
def encode_message(self, msg):
return msg
def close_connection(self):
self.connection.close()
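# Rough usage sketch (broker host, credentials, and queue name below are
# placeholders, not values taken from this repository):
#
#   pub = Publisher_Extern("localhost", "guest", "guest", "clima")
#   pub.send_message('{"type": "temp", "temp": 22.1}')
#
# Note that send_message() closes the connection after publishing, so a fresh
# Publisher_Extern instance is needed for each message.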
```
#### File: jlcastrogro/Clima/RabbitMQ.py
```python
import pika
class RabbitMQ(object):
def __init__(self, server, user, password, queue_rabbit):
# Varibles internas
self.state = True
# Variables de RabbitMQ
self.connection = None
self.channel = None
self.server = server
self.user = user
self.password = password
self.queue_rabbit = queue_rabbit
# Funciones para inicializar la conexión
self.create_connection()
self.create_channel()
self.declare_queue()
def create_connection(self):
credentials = pika.credentials.PlainCredentials(
self.user, self.password)
self.connection = pika.BlockingConnection(
pika.ConnectionParameters(host=self.server, credentials=credentials))
def create_channel(self):
self.channel = self.connection.channel()
def declare_queue(self):
self.channel.queue_declare(queue=self.queue_rabbit)
def set_state(self, state):
self.state = state
def get_state(self):
return self.state
``` |
{
"source": "jlcatonjr/Macroeconomics-Growth-and-Monetary-Equilibrium",
"score": 3
} |
#### File: Macroeconomics-Growth-and-Monetary-Equilibrium/Chapter 10/ADAS.py
```python
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.backends.backend_pdf import PdfPages
"""
Notes for future:
You set up the basic elements of a curve, but not everything
is truly automated. Need to:
1. Automate Coordinates of S,D text
"""
# shift1 is demand shift, shift 2 is supply shift
def supplyAndDemandWithShifts(supply, demand, vertSupply=False, shift1=None, shift2=None, inc=1, name= "Loanable Funds", pp = PdfPages("Default.pdf")):
pp = pp
fig = plt.figure(dpi=128, figsize=(10,6))
frame = plt.gca()
plt.title(name, fontsize=20, ha='center')
if vertSupply:
supply = round(len(supply)/2)
print(supply)
if shift1:
if (shift1 != "Supply-Left" and shift1 != "Supply-Right") or vertSupply == False:
firstShift = selectShiftCurve(demand, supply, shift1,order=1)
else:
if shift1 == "Supply-Right":
firstShift = 7000
if shift1 == "Supply-Left":
firstShift = 3000
if shift2:
secondShift = selectShiftCurve(demand, supply,shift1, shift2,order=2)
i = 0
if shift1 and shift2:
xi,yi= findIntersection(supply, demand, inc)
plotCurves(supply, demand,vertSupply, firstShift, secondShift, inc)
placePrimaryText(vertSupply)
p0, q0 = plotVertAndHorizLines(demand,supply,inc,i, "k--", vertSupply=vertSupply)
i +=1
# Horizontal and Vertical Lines for First Shift
if shift1 == "Demand-Left" or shift1 == "Demand-Right":
p1, q1 = plotVertAndHorizLines(firstShift, supply, inc,i, "k--",vertSupply=vertSupply, shift1=shift1,xi=xi,yi=yi)
if shift1 == "Supply-Left" or shift1 == "Supply-Right":
p1, q1 = plotVertAndHorizLines(firstShift, demand, inc,i, "k--",vertSupply=vertSupply, shift1=shift1,xi=xi,yi=yi)
i += 1
if (shift2 == "Demand-Left" or shift2 == "Demand-Right"):
if (shift1 == "Demand-Left" or shift1 == "Demand-Right"):
p2, q2 = plotVertAndHorizLines(secondShift, supply, inc,i, "k--", xi, yi,vertSupply=vertSupply, shift2=shift2)
if shift1 != shift2:
                    p0.remove()
                    q0.remove()
if (shift1 == "Supply-Left" or shift1 == "Supply-Right"):
x1, y1 = findIntersection(demand, firstShift, inc)
p2, q2 = plotVertAndHorizLines(secondShift, firstShift, inc, i, "k--", x1, y1,vertSupply=vertSupply,shift2=shift2)
if (shift2 == "Demand-Left" and shift1 == "Supply-Right") or (shift2 == "Demand-Right" and shift1 == "Supply-Left") :
                    q0.remove()
if shift2 == "Supply-Left" or shift2 == "Supply-Right":
if (shift1 == "Demand-Left" or shift1 == "Demand-Right"):
p2, q2 = plotVertAndHorizLines(secondShift, firstShift, inc,i, "k--", xi, yi,vertSupply=vertSupply,shift2=shift2)
if (shift1 == "Demand-Left" and shift2 == "Supply-Right") or (shift1 == "Demand-Right" and shift2 == "Supply-Left") :
                    q0.remove()
if (shift1 == "Supply-Left" or shift1 == "Supply-Right"):
p2, q2 = plotVertAndHorizLines(secondShift, demand, inc,i, "k--", xi, yi,vertSupply=vertSupply,shift2=shift2)
if shift1 != shift2:
                    p0.remove()
                    q0.remove()
if shift1 == None and shift2 == None:
plotCurves(supply, demand, vertSupply = vertSupply)
p0, q0 = plotVertAndHorizLines(demand,supply,inc,i, "k--",vertSupply=vertSupply)
if shift1 and not shift2:
placePrimaryText(vertSupply)
p0, q0 = plotVertAndHorizLines(demand,supply,inc,i, "k--",vertSupply=vertSupply)
# Horizontal and Vertical Lines for First Shift
i +=1
if shift1 == "Demand-Left" or shift1 == "Demand-Right":
p1, q1 = plotVertAndHorizLines(firstShift, supply, inc,i, "k--",vertSupply=vertSupply, shift1=shift1)
if shift1 == "Supply-Left" or shift1 == "Supply-Right":
p1, q1 = plotVertAndHorizLines(firstShift, demand, inc,i, "k--",vertSupply=vertSupply, shift1 = shift1)
plotCurves(supply, demand, vertSupply,firstShift, None, inc)
if not shift1 and shift2:
plotCurves(supply, demand,vertSupply, None, secondShift, inc)
p0, q0 = plotVertAndHorizLines(demand,supply,inc,i, "k--",vertSupply=vertSupply)
# Horizontal and Vertical Lines for First Shift
i +=1
if shift1 == "Demand-Left" or shift1 == "Demand-Right":
p1, q1 = plotVertAndHorizLines(firstShift, supply, inc,i, "k--",vertSupply=vertSupply,xi=xi,yi=yi)
if shift1 == "Supply-Left" or shift1 == "Supply-Right":
p1, q1 = plotVertAndHorizLines(firstShift, demand, inc,i, "k--",vertSupply=vertSupply,xi=xi,yi=yi)
placePrimaryText(vertSupply)
placeShiftText(shift1, shift2,vertSupply=vertSupply)
setupAxes(frame)
plt.savefig(name.replace("\n"," "))
pp.savefig(fig)
# plt.close()
# pp.close()
def placePrimaryText(vertSupply=False):
#plt.text(x,y,text,fontsize)
p = plt.text(-600, 10000, "$\pi$", fontsize=24)
if vertSupply == False:
s = plt.text(8200, 8800,"$SRAS_0$", fontsize = 24)
else:
s = plt.text(5100, 8800, "$LRAS_0$", fontsize = 24)
d = plt.text(8200, 2000,"$AD_0$", fontsize = 24)
q = plt.text(10000, -650, "$\%\Delta y$", fontsize=24)
return p , s , d , q
def placeShiftText(shift1, shift2=None, vertSupply=False):
if shift1 == None:
if (shift2):
placeShiftText(shift2)
else:
return
if shift1 == "Demand-Left":
plt.text(5500, 1650,"$AD_1$", fontsize = 24)
if shift1 == "Demand-Right":
plt.text(8500, 3800,"$AD_1$", fontsize = 24)
if shift1 == "Supply-Left":
if vertSupply == False:
plt.text(6600, 8800,"$LRAS_1$", fontsize = 24)
else:
plt.text(3100, 8800,"$LRAS_1$", fontsize = 24)
if shift1 == "Supply-Right":
if vertSupply == False:
plt.text(8500, 7600,"$LRAS_1$", fontsize = 24)
else:
plt.text(7100, 8800,"$LRAS_1$", fontsize = 24)
# safety check . . .
if shift1 and shift2:
if shift2 == "Demand-Left":
if shift1 == "Supply-Left" or shift1 == "Supply-Right":
plt.text(6200, 1000,"$AD_1$", fontsize = 24)
if shift1 == "Demand-Left":
plt.text(4000, 1600,"$AD_2$", fontsize = 24)
if shift1 == "Demand-Right":
plt.text(8200, 2000,"$AD_{0,2}$", fontsize = 24) # same as initial
if shift2 == "Demand-Right":
if shift1 == "Supply-Left" or shift1 == "Supply-Right":
plt.text(8200, 3450,"$AD_1$", fontsize = 24)
if shift1 == "Demand-Left":
plt.text(8200, 2000,"$AD_{0,2}$", fontsize = 24) # same as initial
if shift1 == "Demand-Right":
plt.text(9000, 5750,"$AD_2$", fontsize = 24)
if shift2 == "Supply-Left":
if shift1 == "Demand-Left" or shift1 == "Demand-Right":
plt.text(6600, 8800,"$LRAS_1$", fontsize = 24)
if shift1 == "Supply-Left":
plt.text(5100, 8800,"$LRAS_2$", fontsize = 24)
if shift1 == "Supply-Right":
plt.text(7755, 8800,"$LRAS_2$", fontsize = 24) # same as initial
if shift2 == "Supply-Right":
if shift1 == "Demand-Left" or shift1 == "Demand-Right":
plt.text(8500, 7600,"$LRAS_1$", fontsize = 24)
if shift1 == "Supply-Left":
plt.text(7755, 8800,"$LRAS_{0,2}$", fontsize = 24) # same as initial
if shift1 == "Supply-Right":
plt.text(9750, 6000,"$LRAS_2$", fontsize = 24)
def plotCurves(supply, demand, vertSupply=False, shift1=None, shift2=None, inc=1):
# plt.plot((x1,x2), (y1,y2), linestyle/color, linewidth)
if vertSupply == False:
plt.plot(supply, 'C0-', linewidth=3)
else:
plt.axvline(x=supply, color = 'C0', linewidth=3)
plt.plot(demand, 'C0-', linewidth=3)
try:
if isinstance(shift1,np.ndarray):
plt.plot(shift1, 'C3-', linewidth=3)
else:
if shift1 != None:
plt.axvline(x=shift1, color = 'C3', linewidth=3)
except NameError:
print("shift1 = None")
# if not np.all([shift2, supply]) and not np.all([shift2, demand]):
try:
if isinstance(shift2,np.ndarray):
plt.plot(shift2, 'C3-', linewidth=3)
else:
if shift2 != None:
plt.axvline(x=shift2)
except NameError:
print("shift1 = None")
def plotVertAndHorizLines(curve1, curve2, inc, i, line,
xi = None, yi = None, vertSupply=False,shift1=None, shift2=None):
x2,y2 = findIntersection(curve1, curve2, inc)
# plt.plot((x2, x2), (0, y2), line, linewidth=1.5)
plt.plot((0,x2), (y2, y2), line,linewidth=1.5)
if i == 0:
p0 =plt.text(-600,y2, "$\pi_0$", fontsize=20)
q0 = plt.text(x2 - 200, -650, "$\%\Delta y_0$", fontsize=20)
return p0, q0
if i == 1:
p1 = plt.text(-600,y2, "$\pi_1$", fontsize=20)
if vertSupply:
if shift1=="Supply-Left" or shift1 == "Supply-Right":
q1 = plt.text(x2 - 200, -650, "$\%\Delta y_1$", fontsize=20)
else:
q1 = plt.text(x2 - 200, -650, "", fontsize=20)
else:
if shift1=="Supply-Left" or shift1 == "Supply-Right":
q1 = plt.text(x2 - 200 , -650, "$\%\Delta y_1$", fontsize=20)
return p1, q1
if i == 2:
if yi != y2:
p2 = plt.text(-600,y2, "$\pi_2$", fontsize=20)
else:
p2 = plt.text(-1450,y2, "$\pi_2=$", fontsize=20)
if xi != x2:
q2 = plt.text(x2 - 200, -650, "$\%\Delta y_2$", fontsize=20)
else:
q2 = plt.text(x2 + 200, -650, "$_{,2}$", fontsize=20)
return p2, q2
def setupAxes(frame):
frame.axes.get_xaxis().set_visible(False)
frame.axes.get_yaxis().set_visible(False)
plt.ylim(0, 10000)
plt.xlim(xmin = 0, xmax = 10000)
plt.xlabel("Real Income", fontsize=20)
plt.ylabel("Price Level", fontsize = 20)
plt.tick_params(axis='both', which='major', labelsize=16)
def findIntersection(curve1, curve2, inc):
try:
for x in range(len(curve1)):
dist = curve1[x] - curve2[x]
if abs(dist) < inc * 1.01:
print(curve1[x])
print(curve2[x])
print("curve1 and curve2 are " + str(dist) + " units apart at x= " + str(x))
return x, curve1[x]
except:
try:
return curve1, curve2[curve1]
except:
return curve2, curve1[curve2]
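# Worked example (added annotation): with Supply = np.arange(0, 10000) and
# Demand = np.arange(10000, 0, -1), supply[x] = x and demand[x] = 10000 - x,
# so the curves come within inc of each other at x = 5000 and
# findIntersection(Supply, Demand, 1) returns (5000, 5000).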
def selectShiftCurve(demand, supply, shift1, shift2 = None, order=1):
print(shift1)
if order == 1:
if shift1 == "Demand-Left":
return np.arange(7000,-3000, -1 * inc)
if shift1 == "Demand-Right":
return np.arange(12000,2000, -1 * inc)
if shift1 == "Supply-Left":
return np.arange(1500, 11500, 1 * inc)
if shift1 == "Supply-Right":
return np.arange(-1500,8500, 1 * inc)
if order == 2:
if shift2 == "Demand-Left" and shift1 == "Demand-Left":
return np.arange(5500,-4500, -1 * inc)
if shift2 == "Demand-Left" and shift1 == "Demand-Right":
return demand
if shift2 == "Demand-Right" and shift1 == "Demand-Right":
return np.arange(14500,4500, -1 * inc)
if shift2 == "Demand-Right" and shift1 == "Demand-Left":
return demand
if shift2 == "Supply-Left" and shift1 == "Supply-Left":
return np.arange(3000, 13000, 1 * inc)
if shift2 == "Supply-Left" and shift1 == "Supply-Right":
return supply
if shift2 == "Supply-Right" and shift1 == "Supply-Right":
return np.arange(-3000,7000, 1 * inc)
if shift2 == "Supply-Right" and shift1 == "Supply-Left":
return supply
else:
if shift2 == "Demand-Left":
return np.arange(8000,-2000, -1 * inc)
if shift2 == "Demand-Right":
return np.arange(11450,1450, -1 * inc)
if shift2 == "Supply-Left":
return np.arange(1500, 11500, 1 * inc)
if shift2 == "Supply-Right":
return np.arange(-1500,8500, 1 * inc)
inc = 1
demandInc = inc
supplyInc = inc
Supply = np.arange(0,10000, 1 * supplyInc)
Demand = np.arange(10000,0, -1 * demandInc)
vertSupply = True
#ACL = np.arange(0, 5000, .5 * supplyInc)
#priceFloor = np.arange(1, 10000)
#priceFloor[priceFloor > 0] = 8000
#name = ""
pp = PdfPages('Aggregate Supply and Demand Graphs with Inelastic Supply.pdf')
name = 'Dynamic AD-AS'
#pp = PdfPages(name + '.pdf')
Shift1 = None#"Demand-Right"
Shift2 = None#"Supply-Right"
supplyAndDemandWithShifts(Supply, Demand, vertSupply,Shift1, Shift2, inc, name, pp)
name = 'Aggregate Demand Increases'
#pp = PdfPages(name + '.pdf')
Shift1 = "Demand-Right"
Shift2 = None#"Supply-Right"
supplyAndDemandWithShifts(Supply, Demand, vertSupply,Shift1, Shift2, inc, name, pp)
name = 'Aggregate Demand Decreases'
Shift1 = "Demand-Left"
Shift2 = None#"Supply-Right"
supplyAndDemandWithShifts(Supply, Demand, vertSupply,Shift1, Shift2, inc, name, pp)
name = 'Aggregate Supply Increases'
Shift1 = "Supply-Right"
Shift2 = None#"Supply-Right"
supplyAndDemandWithShifts(Supply, Demand, vertSupply,Shift1, Shift2, inc, name, pp)
name = 'Aggregate Supply Decreases'
Shift1 = "Supply-Left"
Shift2 = None#"Supply-Right"
supplyAndDemandWithShifts(Supply, Demand, vertSupply,Shift1, Shift2, inc, name, pp)
name = 'Money Production Responds\nto Increase in Real Income'
Shift1 = "Supply-Right"
Shift2 = "Demand-Right"
supplyAndDemandWithShifts(Supply, Demand, vertSupply,Shift1, Shift2, inc, name, pp)
#
#name = 'Demand Shifts Left'
#pp = PdfPages(name + '.pdf')
#Shift1 = "Demand-Left"
#Shift2 = ""
#supplyAndDemandWithShifts(Supply, Demand, Shift1, Shift2, inc, name, pp)
##pp.close()
#
#name = 'Supply Shifts Right'
#pp = PdfPages(name + '.pdf')
#Shift1 = "Supply-Right"
#Shift2 = ""
#supplyAndDemandWithShifts(Supply, Demand, Shift1, Shift2, inc, name, pp)
##pp.close()
#
#name = 'Supply Shifts Left'
#pp = PdfPages(name + '.pdf')
#Shift1 = "Supply-Left"
#Shift2 = ""
#supplyAndDemandWithShifts(Supply, Demand, Shift1, Shift2, inc, name, pp)
##pp.close()
#
#name = 'Demand and Supply Shift Right'
#pp = PdfPages(name + '.pdf')
#Shift1 = "Demand-Right"
#Shift2 = "Supply-Right"
#supplyAndDemandWithShifts(Supply, Demand, Shift1, Shift2, inc, name, pp)
pp.close()
```
#### File: Macroeconomics-Growth-and-Monetary-Equilibrium/Chapter 10/laborMarket.py
```python
import pandas as pd
pd.core.common.is_list_like = pd.api.types.is_list_like
import pandas_datareader.data as web
import datetime
import matplotlib.pyplot as plt
import numpy as np
import os
import copy
from matplotlib.backends.backend_pdf import PdfPages
def plotDF(df, names,pp, title="", secondary_y=None, logy=False, legend=False, dataType="",
start=None,end=None):
# start = df.index[0]
plt.rcParams['axes.ymargin'] = 0
plt.rcParams['axes.xmargin'] = 0
fig, ax1 = plt.subplots(figsize=(24,12))
vlineYear = datetime.datetime(2008, 10, 31)
plt.axvline(vlineYear,color="k",ls="--")
#plot data synced to different y-axes
lns=[]
if secondary_y != None:
ax2 = ax1.twinx()
for i in range(len(names)):
name = names[i]
if name != secondary_y:
lns.append(ax1.plot(df[name])[0])
else:
## if "Divisia" in name:
## lns.append(ax2.plot(df[name], ls="--", linewidth=3)[0])
# else:
lns.append(ax2.plot(df[name], ls="--",color="C2", label=name + (" (right)"))[0])
if "Divisia" in name:
plt.axhline(max(df[name]), color = "k", ls = "--", linewidth = .5)
plt.axhline(min(df[name]), color = "k", ls = "--", linewidth = .5)
#Rotate date labels
ax1.tick_params(axis='x', rotation=90)
labs = [ln.get_label() for ln in lns]
# for i in range(len(labs)):
# print(labs[i])
# if labs[i] == secondary_y:
# labs[i] = labs[i] + " (right)"
plt.rcParams.update({'legend.fontsize': 22,'legend.handlelength': 2})
# ax1.legend(lns, labs, bbox_to_anchor=(.73 ,1.025 + .05 * len(labs)), loc=2)
ax1.legend(lns, labs, bbox_to_anchor=(.0, 0 - .05 * len(labs)), loc=2)
# ax1.legend(lns, labs, bbox_to_anchor=(.815, 1.025 + .05 * len(labs)), loc=2)
plt.title(title)
# ax1.legend(lns, labs, bbox_to_anchor=(.64, 1.025 + .05 * len(labs)), loc=2)
# df[keys].plot.line(figsize=(24,12), legend=False, secondary_y = keys[0])
# plt.title(str(keys).replace("[","").replace("]",""), fontsize=40)
# fig = df[names][start:end].plot.line(logy=logy, secondary_y=secondary_y, legend=legend,figsize=(10,6), color=['k', 'C3','C0'], fontsize=14).get_figure()
# if any([("Rate" in name) for name in names]) : plt.axhline(0, color="k", ls="--", linewidth=1)
# plt.xticks(rotation=90)
# plt.xlabel(df.index.name, fontsize=18)
# if title == "":
# if len(names) < 2:
# plt.title(names[0], fontsize=24)
# else:
# plt.title(title)
# plt.gcf()
if start == None:start=""
if end == None: end = ""
plt.savefig(dataType + " " + str(start).replace(":","") + "-" + str(end).replace(":","")+ " " +
str(names).replace('[',' ').replace(']',' Secondary Y = ' + str(secondary_y) + '.png'),bbox_inches="tight")
plt.show()
pp.savefig(fig, bbox_inches="tight")
plt.close()
def scatterPlot(df, key1,key2,pp,dataType, title=""):
fig,ax = plt.subplots(figsize=(24,12))
plt.scatter(x=df[key1], y=df[key2], s = 10**2)
plt.axhline(0, ls = "--", color="k", linewidth = 1)
plt.axvline(0, ls="--", color="k", linewidth = 1)
ax.set_xlabel(key1)
ax.set_ylabel(key2)
dfk1 = df[key1].dropna()
minDFK1 = min(dfk1)
maxDFK1 = max(dfk1)
dfk2 = df[key2].dropna()
minDFK2 = min(dfk2)
maxDFK2 = max(dfk2)
ax.set_xlim(minDFK1 - .01 * abs(minDFK1), maxDFK1 + .01 * abs(maxDFK1))
ax.set_ylim(minDFK2 - .01 * abs(minDFK2),maxDFK2 + .01 * abs(maxDFK2))
plt.title(title)
plt.savefig(dataType + " " + str(start).replace(":","") + "-" + str(end).replace(":","")+ " " +
key1 + " " + key2 +'scatter.png', bbox_inches="tight")
plt.show()
pp.savefig(fig, bbox_inches="tight")
plt.close()
def buildSummaryCSV(fullPredictResults, csvName, folder):
try:
os.mkdir(folder)
except:
print(folder, "already exists")
predictorModelResults= str(fullPredictResults.summary())
    with open(os.path.join(folder, csvName + ".csv"), "w") as csv_file:
        csv_file.write(predictorModelResults)
plt.rcParams.update({'font.size': 22})
pp = PdfPages("FedPlots.pdf")
start = datetime.datetime(1948, 1, 1)
end = datetime.datetime(2018, 10, 1)
dfDict = {}
dfDict["Monthly"] = web.DataReader("UNRATE", "fred",start, end).resample("M").first() / 100
dfDict["Monthly"] = dfDict["Monthly"].rename(columns = {"UNRATE":"Unemployment Rate"})
dfDict["Monthly"]["Natural Rate of Unemployment"] = web.DataReader("NROU", "fred",start, end).resample("M").first() / 100
dfDict["Monthly"]["Labor Force Participation Rate"] = web.DataReader("CIVPART", "fred",start, end).resample("M").first() / 100
dfDict["Monthly"]["Labor Force (Nonfarm)"] = web.DataReader("PAYEMS", "fred",start, end).resample("M").first()
dfDict["Quarterly"] = web.DataReader("GDPC1", "fred", start, end).resample("Q").first()
dfDict["Quarterly"] = dfDict["Quarterly"].rename(columns = {"GDPC1":"Real GDP"})
dfDict["Quarterly"]["Nominal GDP"] = web.DataReader("GDP", "fred", start, end).resample("Q").first()
dfDict["Quarterly"]["GDP Deflator"] = web.DataReader("GDPDEF", "fred", start, end).resample("Q").first()
for key in dfDict["Monthly"]:
dfDict["Quarterly"][key]= dfDict["Monthly"][key].resample("Q").first()
# dfDict["Yearly"][key] = dfDict["Monthly"][key].resample("A").first()
#for key in dfDict["Quarterly"]:
# dfDict["Yearly"][key] = dfDict["Quarterly"][key].resample("A").first()
#dfDict["Yearly"].dropna()
dfDict["Year Over Year Rate (Monthly)"] = web.DataReader("CIVPART", "fred", start, end).resample("M").first()
dfDict["Year Over Year Rate (Monthly)"] = dfDict["Year Over Year Rate (Monthly)"].rename(columns = {"CIVPART":"Labor Force Participation Rate"})
dfDict["Year Over Year Rate (Quarterly)"] = web.DataReader("GDPC1", "fred", start, end).resample("Q").first()
dfDict["Year Over Year Rate (Quarterly)"] = dfDict["Year Over Year Rate (Quarterly)"].rename(columns = {"GDPC1":"Real GDP"})
for key in dfDict["Monthly"]:
if "Rate" not in key:
dfDict["Year Over Year Rate (Monthly)"][key] = dfDict["Quarterly"][key].pct_change(periods=12)
else:
print("Rate is in this key:", key)
dfDict["Year Over Year Rate (Monthly)"][key] = dfDict["Quarterly"][key]
for key in dfDict["Quarterly"]:
if "Rate" not in key:
dfDict["Year Over Year Rate (Quarterly)"][key] = dfDict["Quarterly"][key].pct_change(periods=4)
else:
print("Rate is in this key:", key)
dfDict["Year Over Year Rate (Quarterly)"][key] = dfDict["Quarterly"][key]
dfDict["Year Over Year Rate (Monthly)"] = dfDict["Year Over Year Rate (Monthly)"].dropna()
dfDict["Year Over Year Rate (Quarterly)"] = dfDict["Year Over Year Rate (Quarterly)"].dropna()
for key in dfDict:
print(dfDict[key])
namesDict = {}
namesDict[0] = ["Unemployment Rate", "Natural Rate of Unemployment"]
namesDict[1] = ["Unemployment Rate", "Labor Force Participation Rate"]
namesDict[2] = ["Real GDP", "Unemployment Rate"]
namesDict[3] = ["Nominal GDP", "Unemployment Rate"]
namesDict[4] = ["GDP Deflator", "Unemployment Rate"]
for dataType in dfDict:
# if "Year" in dataType and "Monthly" in dataType:
for key in namesDict:
names = namesDict[key]
if names[0] in dfDict[dataType].keys() and names[1] in dfDict[dataType].keys():
plotDF(dfDict[dataType], names,pp, title=dataType, secondary_y=None, logy=False, legend=True, dataType=dataType, start=start,end=end)
plotDF(dfDict[dataType], names,pp, title=dataType, secondary_y=names[1], logy=False, legend=True, dataType=dataType, start=start,end=end)
scatterPlot(dfDict[dataType], names[0],names[1], pp, dataType = dataType, title=dataType)
pp.close()
```
#### File: Macroeconomics-Growth-and-Monetary-Equilibrium/Chapter 8/Chapter 8.py
```python
import pandas as pd
pd.core.common.is_list_like = pd.api.types.is_list_like
import pandas_datareader.data as web
import datetime
import matplotlib.pyplot as plt
import numpy as np
import os
from matplotlib.backends.backend_pdf import PdfPages
from statsmodels.tsa.api import VAR
def plotDF(df, names,pp, title="", secondary_y=None, logy=False, legend=False, dataType="",
start=None,end=None, importantYears = None):
plt.rcParams['axes.ymargin'] = 0
plt.rcParams['axes.xmargin'] = 0
plotDict = {}
color = ["k", "r", "C0","C2"]
fig, ax1 = plt.subplots(figsize=(24,12))
# ax2 = ax1.twinx() # mirror them
for i in range(len(names)):
if i + 1 < len(names):
plotDict["ln" + str(i)] = ax1.plot(df[names[i]][start:end], color = color[i])
else:
plotDict["ln" + str(i)] = ax1.plot(df[names[i]][start:end], color = color[i])
ax1.tick_params(axis='x', rotation=90)
for i in range(len(names)):
if i == 0:
lns = plotDict["ln" + str(i)]
else:
lns += plotDict["ln" + str(i)]
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs, bbox_to_anchor=(.6,1.15), loc=2)
if importantYears != None:
for year in importantYears:
plt.axvline(year, color="k",ls= "--")
plt.axhline(0, color = "k", ls = "--", linewidth = .5)
# plt.xticks(rotation=90)
# plt.xlabel(df.index.name, fontsize=18)
if title == "":
if len(names) < 2:
plt.title(names[0], fontsize=24)
else:
plt.title(title)
plt.gcf()
if start == None:start=""
if end == None: end = ""
plt.savefig(str(names).replace('[',' ').replace(']',' ')+ dataType + " " +\
str(start).replace(":","") + "-" + str(end).replace(":","")+ '.png'
,bbox_inches="tight")
plt.show()
pp.savefig(fig)
plt.close()
start = datetime.datetime(1950, 1, 1)
end = datetime.datetime(2018, 6, 1)
pp = PdfPages("Fisher Equation Data.pdf")
dfDict = {}
goldDict = {}
dfDict["Data"] = web.DataReader("CPIAUCNS", "fred", start, end).resample("M").first()
dfDict["Data"] = dfDict["Data"].rename(columns = {"CPIAUCNS":"Consumer Price Index"})
dfDict["Data"]["3 Month Treasury Interest Rate"] = web.DataReader("TB3MS", "fred", start, end).resample("M").first()
dfDict["Data"]["Annualized Inflation Rate (CPI, Monthly)"] = np.log(dfDict["Data"]["Consumer Price Index"]).diff(12) * 100 #(1 + (dfDict["Data"]["Consumer Price Index"].diff() / dfDict["Data"]["Consumer Price Index"]).dropna())**12 - 1
plotDF(dfDict["Data"], ["Annualized Inflation Rate (CPI, Monthly)","3 Month Treasury Interest Rate"],pp=pp, secondary_y=None)
pp.close()
``` |
{
"source": "jlchamaa/CLImer",
"score": 3
} |
#### File: CLImer/resources/windowManager.py
```python
import curses
from resources.digits import bigDigits,bigDigitsIndexes
NumericWidth = 142
class windowManager:
def __init__(self,stdscr):
curses.curs_set(0)
self.initializeColors()
self.initializeWindows(stdscr)
self.resizeWindows()
        self.blinking = False
def initializeColors(self):
if curses.can_change_color():
curses.init_color(1,100,100,100) #color 1 is grey
curses.init_pair(1,curses.COLOR_CYAN,1) #timer
curses.init_pair(2,curses.COLOR_WHITE,1) # background
curses.init_pair(3,1,curses.COLOR_CYAN) # scramble
else:
curses.init_pair(1,curses.COLOR_WHITE,curses.COLOR_BLACK)
curses.init_pair(2,curses.COLOR_WHITE,curses.COLOR_BLACK)
def initializeWindows(self,stdscr):
self.mainScreen = stdscr
self.winTimer = curses.newwin(1,1,0,0)
self.winLog = curses.newwin(1,1,0,0)
self.winOptions = curses.newwin(1,1,0,0)
self.winScramble = curses.newwin(1,1,0,0)
self.winStats = curses.newwin(1,1,0,0)
def resizeWindows(self):
(maxY,maxX) = self.mainScreen.getmaxyx()
self.mainScreen.bkgd(' ',curses.color_pair(1))
self.mainScreen.refresh()
if(maxX>NumericWidth):
self.winTimer.mvwin(1,int((maxX-NumericWidth)/2))
self.winTimer.resize(16,NumericWidth+1)
self.winTimer.bkgd(' ',curses.color_pair(1))
self.winScramble.mvwin(17,0)
self.winScramble.resize(3,maxX)
self.winScramble.bkgd(' ',curses.color_pair(1))
self.winOptions.mvwin(21,0)
self.winOptions.resize(7,maxX)
self.winOptions.bkgd(' ',curses.color_pair(1))
self.winLog.mvwin(30,2)
self.winLog.resize(30,60)
self.winLog.bkgd(' ',curses.color_pair(1))
else:
raise ValueError('toosmall')
curses.doupdate()
def centerTime(self):
(maxY,maxX) = self.mainScreen.getmaxyx()
self.winTimer.mvwin(int((maxY-16)/3),int((maxX-NumericWidth)/3))
self.mainScreen.bkgd(' ',curses.color_pair(1))
self.mainScreen.refresh()
def showScramble(self,scramble):
#self.winScramble.erase()
(maxY,maxX)=self.winScramble.getmaxyx()
startXCoord = int((maxX-len(scramble))/2)
startYCoord = maxY-1
self.winScramble.erase()
self.winScramble.border()
self.winScramble.addstr(1,startXCoord,scramble)
self.winScramble.refresh()
def showLog(self,dataObj):
self.winLog.clear()
self.winLog.border()
line = 1
for i in dataObj:
stringToWrite = str(i[1])+ ". "
time=i[0]
if time == None:
stringToWrite += " DNF"
else:
mins = int(time / 60)
sex = time % 60
timeToWrite=""
if mins > 0:
timeToWrite += str(mins) + ":"
timeToWrite += "{:0>5.2f}".format(sex)
else:
timeToWrite += "{0:.2f}".format(sex)
stringToWrite += timeToWrite.rjust(8)
if i[2]:
stringToWrite+="+"
self.winLog.addstr(line,2,stringToWrite)
line +=1
self.winLog.refresh()
def showSessions(self,names,current):
self.winOptions.clear()
self.winOptions.border()
self.winOptions.addstr(4,1,"(Q)uit , (P)lus 2 , (D)NF , (E)rase Session , (R)emove Time, (space) Start")
column = 10
for curNum,curName in sorted(names.items()):
attributes = curses.A_NORMAL
if curNum == str(current):
attributes = curses.A_REVERSE
strToWrite = '{:^30}'.format(curNum +'. ' + curName)
self.winOptions.addstr(2,column,strToWrite,attributes)
column += len(strToWrite)
self.winOptions.refresh()
def ask(self,question,context):
if question == 'add':
strToWrite = "Do you want to create a new session? (y/n): "
self.winOptions.clear()
self.winOptions.border()
self.winOptions.addstr(2,7,strToWrite)
self.winOptions.refresh()
response = self.winOptions.getkey()
if response.lower() == 'y':
curses.echo()
curses.curs_set(1)
self.winOptions.addstr(" Name: ")
seshName = self.winOptions.getstr()
curses.curs_set(0)
curses.noecho()
return seshName
else:
return None
if question == 'removeSession':
strToWrite = "Do you want to delete this session and all of its times? (y/n): "
self.winOptions.clear()
self.winOptions.border()
self.winOptions.addstr(2,7,strToWrite)
self.winOptions.refresh()
response = self.winOptions.getkey()
if response.lower() == 'y':
return True
else:
return False
def drawTime(self,time,positive):
if not positive:
if int(time) == 3 or int(time) == 2 or int(time) == 4:
if not self.blinking:
if (int(time*10) % 10) == 1:
self.mainScreen.bkgd(' ',curses.color_pair(3))
self.mainScreen.refresh()
self.blinking = not self.blinking
if self.blinking:
if (int(time*10) % 10) == 0:
self.mainScreen.bkgd(' ',curses.color_pair(1))
self.mainScreen.refresh()
self.blinking = not self.blinking
if int(time) == 1 or int(time) == 0:
if not self.blinking:
if (int(time*10) % 4) == 2:
self.mainScreen.bkgd(' ',curses.color_pair(3))
self.mainScreen.refresh()
self.blinking = not self.blinking
if self.blinking:
if (int(time*10) % 4) == 0:
self.mainScreen.bkgd(' ',curses.color_pair(1))
self.mainScreen.refresh()
self.blinking = not self.blinking
if positive and self.blinking:
self.mainScreen.bkgd(' ',curses.color_pair(1))
self.mainScreen.refresh()
self.blinking = not self.blinking
digits = self.secondsToDigits(time)
i=0
for digitsLine in bigDigits:
lineToWrite = ""
lineToWrite += self.fetchDigitChunk(digitsLine,digits['tenmins'],time>600) #tens place of mins
lineToWrite += self.fetchDigitChunk(digitsLine,digits['minutes'],time>60) #singles of mins
lineToWrite += self.fetchDigitChunk(digitsLine,11,time>60) # add colon
lineToWrite += self.fetchDigitChunk(digitsLine,digits['tensPlace'],time>10) # add tensPlace
lineToWrite += self.fetchDigitChunk(digitsLine,digits['onesPlace'],True) # add onesPlace
lineToWrite += self.fetchDigitChunk(digitsLine,10,True) # add decimal
lineToWrite += self.fetchDigitChunk(digitsLine,digits['tenths'],True) # add tenths
lineToWrite += self.fetchDigitChunk(digitsLine,digits['hundredths'],positive) # add hundredths
indentation = (NumericWidth - len(lineToWrite))//2
self.winTimer.addstr(i,indentation,lineToWrite)
i += 1
def secondsToDigits(self,time):
timeDigits = {}
timeDigits['tenmins'] = int(time/600)
timeDigits['minutes'] = int(time/60) % 10
seconds = time%60
timeDigits['tensPlace'] = int(seconds/10)
timeDigits['onesPlace'] = int(seconds%10)
jiffies = seconds % 1
timeDigits['tenths'] = int(jiffies*10)
timeDigits['hundredths'] = int(jiffies*100 % 10)
return timeDigits
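    # Example (added for clarity): secondsToDigits(75.5) returns
    # {'tenmins': 0, 'minutes': 1, 'tensPlace': 1, 'onesPlace': 5,
    #  'tenths': 5, 'hundredths': 0}, which drawTime renders as 1:15.50.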
def fetchDigitChunk(self,line,number,show):
# 10 gets . 11 get :
if show:
return line[bigDigitsIndexes[number]:bigDigitsIndexes[number+1]]
else:
size = bigDigitsIndexes[number+1]-bigDigitsIndexes[number]
space = ""
for i in range(0,size):
space += " "
return space
def getKey(self):
return self.winTimer.getkey() # wait for ch input from user
def getCh(self):
return self.winTimer.getch() # wait for ch input from user
def noDelayOn(self,onSwitch):
self.winTimer.nodelay(onSwitch)
``` |
{
"source": "jlchamaa/mapPi",
"score": 2
} |
#### File: jlchamaa/mapPi/pysockets.py
```python
import asyncio
import json
import logging
import serial
import re
import time
import websockets
from teams import teams, gamma
logging.basicConfig(
level="INFO",
format='%(asctime)-15s %(message)s',
)
log = logging.getLogger("map")
ser = serial.Serial('/dev/ttyACM0', 38400)
class ScoreBoard:
def __init__(self):
self.mlb = {}
self.nba = {}
self.nfl = {}
self.games = set()
def clear_games(self):
self.games = set()
def blink_map(self, league, team, delta):
points = int(delta)
cityNum = int(teams[league][team]['lednum'])
temp = teams[league][team]['color1']
col1r = int(temp[1:3], 16)
col1g = int(temp[3:5], 16)
col1b = int(temp[5:7], 16)
temp = teams[league][team]['color2']
col2r = int(temp[1:3], 16)
col2g = int(temp[3:5], 16)
col2b = int(temp[5:7], 16)
ba = bytearray()
ba[0:8] = [cityNum, points, gamma[col1r], gamma[col1g], gamma[col1b], gamma[col2r], gamma[col2g], gamma[col2b], 0]
for index, value in enumerate(ba):
# ensures zerobyte is the sole zero. Adjust values back on Arduino Side!
ba[index] = min(255, value + 1)
ba[8] = int(0)
ser.write(ba)
def record_score(self, league, team, new_score):
scores = getattr(self, league)
old_score = scores.get(team, 0)
scores[team] = new_score
delta = new_score - old_score
if delta > 0 and delta < 10:
self.blink_map(league, team, new_score - old_score)
log.info("({}) {} scores {} -> {}".format(league, team, old_score, new_score))
sb = ScoreBoard()
def destring(obj):
for _ in range(2):
obj = json.loads(obj)
return obj
def tostring(obj):
for _ in range(2):
obj = json.dumps(obj)
return obj
async def subscribe_scoreboard(ws):
req = {"cmd": "subscribe", "topics": ["/mlb/scoreboard", "/nfl/scoreboard", "/nba/scoreboard"]}
await ws.send(tostring(req))
async def subscribe_to_game_topic(ws, game_topic):
req = {"cmd": "subscribe", "topics": game_topic}
await ws.send(tostring(req))
async def auth(ws):
req = {"cmd": "login", "access_token": "64d1553ce024ab863adf69cff277b1f2ed75d961"}
await ws.send(tostring(req))
def parse_nba_update(data):
try:
eventType = data["eventType"]
if eventType == "setState":
teams = data["body"]["ts"]
elif eventType == "update":
teams = data["body"]
for team_data in teams:
team_id = team_data["abbr"]
team_score = int(team_data["stats"]["points"])
sb.record_score("nba", team_id, team_score)
except Exception as e:
log.info(json.dumps(data, indent=2, sort_keys=True))
log.warning("Problem in the NBA")
log.info(e)
def parse_nfl_update(data):
try:
teams = re.search(r"_(\w{2,3})@(\w{2,3})", data["topic"]).groups()
if "scores" in data["body"]:
score_list = data["body"]["scores"]
if len(score_list) > 0:
score_obj = score_list[-1]
else:
return
else:
score_obj = data["body"][0]
sb.record_score("nfl", teams[0], int(score_obj["away_score"]))
sb.record_score("nfl", teams[1], int(score_obj["home_score"]))
except Exception as e:
log.info(json.dumps(data, indent=2, sort_keys=True))
log.warning("Problem in the NFL")
log.info(e)
def parse_mlb_update(data):
try:
et = data["eventType"]
if et != "update":
return
for entry in data["body"]:
team_id = entry["abbr"]
score = int(entry["batting"]["runs"])
sb.record_score("mlb", team_id, score)
except Exception as e:
log.info("Problem in the MLB")
print(json.dumps(data, indent=2))
def parse_update(data):
league = data["topic"][1:4]
if league == "nba":
parse_nba_update(data)
elif league == "nfl":
parse_nfl_update(data)
elif league == "mlb":
parse_mlb_update(data)
else:
log.info(f"Can't pase update with topic {data['topic']}")
async def handle(message, ws):
if message == "o":
log.info("auth time")
await auth(ws)
elif message == "h":
return
elif message[0] == "a":
data = (destring(message[2:-1]))
topic = data.get("topic")
if data.get("authorized", None) == "ok":
log.info("authorized. getting_scoreboard")
await subscribe_scoreboard(ws)
# top-line scoreboard update
elif "scoreboard" in topic and data.get("eventType") == "setState":
league = topic[1:4]
for game_info in data["body"]["games"]:
game_id = game_info["abbr"]
if league == "nfl":
game_topic = "/{}/gametracker/{}/scores".format(league, game_id)
if league == "nba":
game_topic = "/{}/gametracker/{}/ts".format(league, game_id)
if league == "mlb":
game_topic = "/{}/gametracker/{}/ts".format(league, game_id)
if game_topic in sb.games:
continue
sb.games.add(game_topic)
log.info("subscribing to {}".format(game_topic))
await subscribe_to_game_topic(ws, game_topic)
# per-game update
elif topic in sb.games and data.get("body", False):
parse_update(data)
else:
log.debug("Funny new message")
else:
log.info(message)
async def try_map():
uri = "wss://torq.cbssports.com/torq/handler/117/7v5ku21t/websocket"
try:
async with websockets.connect(uri, ssl=True) as ws:
while True:
message = await ws.recv()
await handle(message, ws)
except (websockets.exceptions.InvalidStatusCode, websockets.exceptions.ConnectionClosedError) as e:
log.warning(e)
def main():
while True:
try:
el = asyncio.get_event_loop()
el.run_until_complete(try_map())
log.warning("died for some reason")
sb.clear_games()
time.sleep(5)
except Exception as e:
log.warning("died for some very bad reason")
log.warning(e)
def cycle():
sb = ScoreBoard()
while True:
sb.blink_map("nba", "LAL", 3)
time.sleep(5)
if __name__ == "__main__":
# cycle()
main()
``` |
{
"source": "jlchamaa/Pymer",
"score": 3
} |
#### File: jlchamaa/Pymer/pymer.py
```python
from pymer.session import session
import Tkinter as tk
def main():
root = tk.Tk()
sesh = session(root)
root.mainloop()
try:
main()
except ValueError as ex:
if str(ex) == 'toosmall':
print("Window too narrow. Try resizing!")
else:
raise
``` |
{
"source": "jlchamaa/wf-cli",
"score": 3
} |
#### File: test/model/test_node_store.py
```python
import unittest
from unittest.mock import Mock, patch
from model.model_node import Node
from model.node_store import NodeStore
class Test_Node_Store(unittest.TestCase):
def setUp(self):
self.nodes = [
Node(pa="0", id="A"),
Node(pa="0", id="B"),
Node(pa="0", id="C"),
]
self.ns = NodeStore()
for node in self.nodes[0:2]:
self.ns.add_node(node)
def test_add_node(self):
# adding is done by the setup for the sake of the other tests.
# But here's where we test the basic sanity of that setUp
self.assertIs(self.ns.nodes["A"], self.nodes[0])
self.assertIs(self.ns.nodes["B"], self.nodes[1])
def test_get_node(self):
self.assertIs(self.ns.get_node("A"), self.nodes[0])
self.assertIs(self.ns.get_node("B"), self.nodes[1])
with self.assertRaises(KeyError):
self.ns.get_node("C")
def test_contains(self):
self.assertTrue("A" in self.ns)
self.assertTrue("B" in self.ns)
self.assertFalse("C" in self.ns)
def test_delete(self):
self.assertTrue("A" in self.ns)
del self.ns["A"]
self.assertFalse("A" in self.ns)
def test_len(self):
self.assertEqual(len(self.ns), 2)
# wait until the node digest is stable
# def test_digest(self):
# self.assertEqual(3381629315008440964, self.ns.digest)
if __name__ == "__main__":
unittest.main()
```
#### File: wf-cli/test/test_integration.py
```python
import unittest
from unittest.mock import Mock, patch
from model.model_node import Node
class Test_All(unittest.TestCase):
def test_reality(self):
self.assertEqual(1, 1)
if __name__ == "__main__":
unittest.main()
```
#### File: wf-cli/view_model/view_model.py
```python
import logging
from model.file_based import UserFile, ModelException
from view.view import View
log = logging.getLogger("wfcli")
class ViewModel:
# SETUP METHODS
def __init__(self):
self.m = UserFile()
def run(self):
try:
with View() as self.v:
self.render()
self.recieve_commands()
except BaseException as be:
log.error("Exception {} raised. Shut down".format(be))
raise be
finally:
self.m.save()
def recieve_commands(self):
for payload in self.v.send_command():
try:
if len(payload) == 2: # payload has a kwargs
getattr(self, payload[0])(**payload[1])
elif len(payload) == 1: # payload is just a command
getattr(self, payload[0])()
else:
raise ValueError("Payload of len {}, {}".format(
len(payload),
payload,
))
except ModelException as ae:
log.error("Command: {}\nError:{}".format(payload[0], ae))
def quit_app(self, **kwargs):
self.v.open = False
log.error("Closing App Legitimately")
# PRINTING METHODS
def render(self, **kwargs):
self.v.render_content(
self.visible_nodes,
self.cursor_y,
)
# COMMIT AND SAVE METHODS
def commit_data(self, **kwargs):
self.m.commit()
def save_data(self, **kwargs):
self.m.save()
log.info("saved")
def undo(self, content={}):
self.m.undo()
self.m.nav_up()
self.m.nav_down()
self.render()
def redo(self, content={}):
self.m.redo()
self.m.nav_up()
self.m.nav_down()
self.render()
# STATE METHODS
@property
def visible_nodes(self):
return self.m.visible
@property
def current_node(self):
return self.m.current_node(depth=True)
@property
def cursor_x(self):
return self.v.cursor_x(self.current_node)
@property
def cursor_y(self):
return self.m.cursor_y
# NAVIGATION METHODS
def nav_left(self, **kwargs):
self.v.nav_left(self.current_node)
self.render()
def nav_right(self, **kwargs):
self.v.nav_right(self.current_node)
self.render()
def nav_up(self, **kwargs):
self.m.nav_up()
self.render()
def nav_down(self, **kwargs):
self.m.nav_down()
self.render()
def zero(self, **kwargs):
self.v.lc.zero()
self.render()
def dollar_sign(self, **kwargs):
self.v.lc.dollar_sign()
self.render()
def top(self, **kwargs):
self.m.top()
self.render()
def bottom(self, **kwargs):
self.m.bottom()
self.render()
# EDIT NODE OBJECTS
def indent(self, **kwargs):
self.m.indent()
self.commit_data()
self.render()
def unindent(self, **kwargs):
self.m.unindent()
self.commit_data()
self.render()
def expand_node(self, **kwargs):
self.m.expand_node()
self.commit_data()
self.render()
def collapse_node(self, **kwargs):
self.m.collapse_node()
self.commit_data()
self.save_data()
self.render()
def open_above(self, **kwargs):
self.edit_mode()
self.m.open_above()
self.render()
def open_below(self, **kwargs):
self.edit_mode()
self.m.open_below()
self.nav_down()
self.render()
def move_up(self, **kwargs):
self.m.move_up()
self.commit_data()
self.save_data()
self.render()
def move_down(self, **kwargs):
self.m.move_down()
self.commit_data()
self.save_data()
self.render()
def complete(self, **kwargs):
self.m.complete()
self.commit_data()
self.save_data()
self.render()
def delete_item(self, **kwargs):
log.info("Delete Item")
self.m.delete_item()
self.commit_data()
self.save_data()
self.render()
# EDIT TEXT
def add_char(self, char="", **kwargs):
log.info("I'm adding a '{}' here".format(char))
self.m.add_char(char, self.cursor_x)
self.nav_right()
self.render()
def delete_char(self, num=1, **kwargs):
log.info("I'm deleting here")
self.m.delete_char(num, self.cursor_x)
if not self.v.align_cursor(self.current_node):
self.nav_left()
self.render()
# MODE CHANGING
def normal_mode(self, **kwargs):
log.info("Changing mode to normal")
self.v.change_mode("normal")
if not self.v.align_cursor(self.current_node):
self.nav_left()
self.commit_data()
self.save_data()
self.render()
def edit_mode(self, **kwargs):
log.info("Changing mode to edit")
self.v.align_cursor(self.current_node)
self.v.change_mode("edit")
self.render()
def edit_EOL(self, **kwargs):
self.edit_mode()
self.dollar_sign()
self.render()
```
#### File: view/modes/edit.py
```python
import curses
from view.modes import NormalMode
class EditMode(NormalMode):
key_mapping = {
27: "normal_mode", # RETURN TO NORMAL MODE
127: "delete_char", # BACKSPACE
curses.KEY_RESIZE: "render",
9: "indent", # TAB
10: "open_below", # ENTER
(27, 91, 90): "unindent", # SHIFT-TAB
(27, 91, 65): "nav_up", # UP ARROW
(27, 91, 66): "nav_down", # DOWN ARROW
(27, 91, 67): "nav_right", # RIGHT ARROW
(27, 91, 68): "nav_left", # LEFT ARROW
}
@property
def border_attr(self):
return curses.color_pair(3)
@property
def eol_offset(self):
return 1
@property
def note(self):
return "Edit Mode"
def get_command(self, keygen):
dict_to_inspect = self.key_mapping
while True:
try:
keypress = next(keygen)
result = dict_to_inspect[keypress]
if isinstance(result, dict):
dict_to_inspect = result
elif isinstance(result, str):
return (result, {})
except KeyError:
if isinstance(keypress, int):
return ("add_char", {"char": chr(keypress)})
dict_to_inspect = self.key_mapping
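    # Behaviour sketch (added annotation): a printable key such as ord('a')
    # is not in key_mapping, so get_command returns ("add_char", {"char": "a"});
    # the escape sequence (27, 91, 65) maps directly to ("nav_up", {}).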
```
#### File: wf-cli/view/view.py
```python
from view.modes import NormalMode, EditMode
import curses
import logging
log = logging.getLogger("wfcli")
class LateralCursor:
def __init__(self):
self._index = float("-Inf")
self.allowed_offset = 0
def align_cursor(self, current_node):
if self._index != self.in_line(current_node):
self._index = self.in_line(current_node)
return True
return False
def in_line(self, node_pair):
current_node = node_pair[0]
linelength = max(
len(current_node.name) - 1 + self.allowed_offset,
0,
)
current = max(0, self._index)
res = min(linelength, current)
return res
def nav_left(self, current_node):
found_x = self.in_line(current_node)
if found_x > 0:
self._index = found_x - 1
else:
self._index = 0
def nav_right(self, current_node):
found_x = self.in_line(current_node)
log.info("Nav_right found_x: {}".format(found_x))
max_allowed = len(current_node[0].name) - 1 + self.allowed_offset
log.info("Nav_right max: {}".format(max_allowed))
if found_x < max_allowed:
self._index = found_x + 1
else:
self._index = max_allowed
def dollar_sign(self):
self._index = float("Inf")
def zero(self):
self._index = float("-Inf")
class View:
# SETUP METHODS
def __init__(self):
self.lc = LateralCursor()
self.indent_size = 2
self.inset = 1
self.downset = 1
self.mode_map = {
"normal": NormalMode(),
"edit": EditMode(),
}
self.change_mode("normal")
@staticmethod
def init_colors():
curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_CYAN)
curses.init_pair(3, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
def __enter__(self):
self.sc = curses.initscr()
curses.start_color()
self.init_colors()
curses.noecho()
curses.curs_set(False)
curses.cbreak()
self.sc.timeout(10)
self.open = True
self.keygen = self.get_keypress_wait()
return self
def __exit__(self, *args):
curses.echo()
curses.nocbreak()
curses.endwin()
def send_command(self):
while self.open:
yield self.mode.get_command(self.keygen)
# MODE METHODS
def change_mode(self, mode):
if mode in self.mode_map:
self.mode = self.mode_map[mode]
self.lc.allowed_offset = self.mode.eol_offset
else:
raise ValueError("There isn't a {} mode".format(mode))
# CURSOR METHODS
def align_cursor(self, current_node):
return self.lc.align_cursor(current_node)
def cursor_x(self, current_node):
return self.lc.in_line(current_node)
def nav_left(self, current_node):
self.lc.nav_left(current_node)
def nav_right(self, current_node):
self.lc.nav_right(current_node)
# KEYPRESS METHODS
def get_keypress_no_wait(self):
return self.sc.getch()
def get_keypress_wait(self):
while True:
keypress = self.sc.getch()
if keypress < 0:
continue
# 27 is a special case, because it could mean I pressed
# the escape key, or it could mean it's an escape code
if keypress == 27:
a = self.get_keypress_no_wait()
if a == -1:
yield 27
else:
b = self.get_keypress_no_wait()
if b == -1:
yield 27
yield a
else:
yield (27, a, b)
else:
yield keypress
# PRINTING METHODS
def generate_lines(self, text, text_width):
if text == "":
return [""]
res = []
lead_index = 0
while lead_index < len(text):
res.append(text[lead_index:lead_index + text_width])
lead_index += text_width
return res
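    # Example (added for clarity): generate_lines("abcdefgh", 3) returns
    # ["abc", "def", "gh"], and generate_lines("", 3) returns [""] so an
    # empty node still occupies one display line.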
def render_content(self, content, curs_y):
additional_lines = 0
rows, cols = self.sc.getmaxyx()
for height, node_tuple in enumerate(content):
node, depth = node_tuple
indent_width = self.indent_size * depth + 3
text_width = cols - indent_width - 2
lines = self.generate_lines(node.name, text_width)
attribute = self.mode.selection_attr if height == curs_y else curses.A_NORMAL
if height + self.downset + additional_lines + len(lines) >= rows - 1:
break # stop us from going past the end of the screen!
new_additional_lines = -1
# Actual text
for line in lines:
new_additional_lines += 1
# indent space
self.sc.addstr(height + self.downset + additional_lines + new_additional_lines,
self.inset,
indent_width * " ",
attribute)
# indicator
if new_additional_lines == 0:
self.sc.addstr(height + self.downset + additional_lines,
self.inset + indent_width - 2,
self.mode.indicators[node.state],
attribute)
# real content
self.sc.addstr(height + self.downset + additional_lines + new_additional_lines,
self.inset + indent_width,
line,
attribute)
self.sc.clrtoeol()
# Cursor block
if height == curs_y:
simple_position = self.cursor_x(node_tuple)
extra_downset = simple_position // text_width
extra_inset = simple_position % text_width
cursor_x = self.inset + indent_width + extra_inset
cursor_y = self.downset + height + additional_lines + extra_downset
self.sc.chgat(cursor_y, cursor_x, 1, self.mode.cursor_attr)
additional_lines += new_additional_lines
# CLEAR EVERYTHING BELOW
y_to_delete_from = height + additional_lines + 2
if y_to_delete_from < rows:
self.sc.move(y_to_delete_from, 0)
self.sc.clrtobot()
# MAKE COLORED BORDER
self.sc.attrset(self.mode.border_attr)
self.sc.border()
self.sc.attrset(0)
# DRAW TO SCREEN
self.sc.refresh()
``` |
{
"source": "jlcmoore/MotherMayfly",
"score": 3
} |
#### File: MotherMayfly/hafez/hafez.py
```python
import json
import urllib2
import urllib
import re
from poetrydata import poem
BASE_URL = "http://vivaldi.isi.edu:8080/api/poem_check?"
BREAK_DEL = r'\s*<br\/\/>'
def query(topic):
"""
Queries the Hafez system at BASE_URL with certain parameters
and returns the lines of the poem generated on the topic given.
"""
# most integer parameters can range from -5 to 5 integers?
parameters = {'topic' : topic,
'k' : 1,
'model' : 0,
'nline' : 4,
'encourage_words' : '',
'disencourage_words' : '',
'enc_weight' : 0,
'cword' : -5,
'reps' : 0,
'allit' : 0,
'topical' : 1,
'wordlen' : 0,
'mono' : -5,
'sentiment' : 0,
'concrete' : 0,
'is_default' : 1,
'source' : "auto"}
web_params = urllib.urlencode(parameters)
full_url = BASE_URL + web_params
webpage = urllib2.urlopen(full_url)
results = webpage.read()
poem_info = json.loads(results)
poem_lines = re.split(BREAK_DEL, poem_info["poem"])
return [line for line in poem_lines if line]
def hafez_poem(topic):
"""
Returns a poem as generated by the Hafez system
"""
# this is very blocking
return poem.Poem(title=topic, lines=query(topic), author="computer")
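# Example usage (assumes the ISI endpoint above is reachable):
#   lines = query("autumn")       # list of generated lines (nline is set to 4)
#   p = hafez_poem("autumn")      # poem.Poem titled "autumn", author "computer"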
``` |
{
"source": "jlcmoore/vuExposed",
"score": 2
} |
#### File: vuExposed/src/display_script.py
```python
import argparse
import datetime
import logging
import logging.handlers
import os
import Queue
import random
import re
import signal
import socket
import sqlite3
import ssl
import subprocess
import sys
import threading
import time
import urllib
import urllib2
from adblockparser import AdblockRules
import httpagentparser
from pyrepl import Mozrepl, DEFAULT_PORT
from daemon import Daemon
TMP_DIR = '/home/listen/Documents/vuExposed/display/'
ACCEPTABLE_HTTP_STATUSES = [200, 201]
BLOCK_FILES = ["block_lists/easylist.txt", "block_lists/unified_hosts_and_porn.txt"]
DEFAULT_PAGE_DIR = "file:///home/listen/Documents/vuExposed/docs/"
DEFAULT_PAGE = DEFAULT_PAGE_DIR + "monitor.html"
DEFAULT_PAGE_VIDEO = DEFAULT_PAGE_DIR + "video.html"
DISPLAY_SLEEP = 5
DISPLAY_TIME_NO_NEW_REQUESTS = 120
DISPLAY_CYCLES_NO_NEW_REQUESTS = int(DISPLAY_TIME_NO_NEW_REQUESTS / DISPLAY_SLEEP)
DOCTYPE_PATTERN = re.compile(r"!doctype html", re.IGNORECASE)
FILTER_LOAD_URL_TIMEOUT = 2
HTML_MIME = "text/html"
IGNORE_USER_AGENTS = [r"Python-urllib\/\d+\.\d+",
(r"Mozilla\/\d+\.\d+ \(X11; Ubuntu; Linux x86_64; rv:\d+\.\d+\) "
r"Gecko\/20100101 Firefox\/\d+\.\d+")]
INIT_SLEEP = 3
IS_PROXY = True
LOCAL_IP_PATTERN = re.compile(r"192\.168\.1\.\d{1,3}")
LOG_FILENAME = "listen.log"
LOG_LEVEL = logging.DEBUG # Could be e.g. "DEBUG" or "WARNING
MACHINE_IP = '192.168.1.125'
MACHINE_PROXY_PORT = 10000
MAX_MONITOR_LIST_URLS = 5
NUM_DISPLAYS = 3
PID_FILE = 'display_daemon.pid'
PORT_MIRRORING = True
SQL_DATABASE = "/var/db/httptosql.sqlite"
TABLE_NAME = "http"
QUERY_NO_END = ("select ts, source, host, uri, user_agent,referrer, source_port, "
"dest_port from " + TABLE_NAME + " WHERE ts > ?")
QUERY = QUERY_NO_END + ";"
DELETE_QUERY = "delete from " + TABLE_NAME + " where ts < ?;"
RULE_PATTERN = re.compile(r"(\d{1,3}\.){3}\d{1,3}\W([\w\-\d]+\.)+[\w\-\d]+")
WAIT_AFTER_BOOT = 15
WAIT_BETWEEN_FIREFOX_FAILS = 1
TIME_FORMAT = "%Y/%m/%d %H:%M:%S"
FILE_TIME_FORMAT = "%Y-%m-%d_%H:%M:%S"
VIDEO_LIKLIHOOD = .66
TEST_MODE = False
TEST_DISPLAY_SLEEP = DISPLAY_SLEEP
TEST_QUERY = QUERY_NO_END + " and ts < ?;"
TEST_SQL_DATABASE = "assets/test.sqlite"
TEST_TABLE_NAME = "requests"
TEST_TIME_START = "2017/08/01 15:48:01"
class MonitorInfo(object):
"""
A class to represent information about each monitor on a linux system
"""
def __init__(self, w, h, x, y):
self.width = w
self.height = h
self.x_offset = x
self.y_offset = y
def get_window_id(pid):
"""
    Return the window id (using the `wmctrl` command) for the process with pid pid
"""
window_info = subprocess.check_output(['wmctrl', '-lp']).split('\n')
for window in window_info:
cols = re.split(r'\s+', window)
if len(cols) > 2 and cols[2] == str(pid):
return cols[0]
return None
def move_windows(monitor_list, procs, logger):
"""
For each process in procs, move the window handled by the process
to a different monitor as defined in monitor_list
"""
monitor_counter = 0
for proc in procs:
# 'g,x,y,w,h'
# the hack here to 10 10 allows the windows to resize to the monitor
# in which they are placed
monitor = monitor_list[monitor_counter]
geometry = "0,%s,%s,%s,%s" % (monitor.x_offset, monitor.y_offset, 10, 10)
logger.info("firefox geometry " + geometry)
pid = proc.pid
window_id = get_window_id(pid)
if not window_id:
logger.error("Could not get window id of firefox instance; trying again")
return False
monitor_counter = monitor_counter + 1
logger.info("Moving %s to %s", window_id, geometry)
subprocess.Popen(['wmctrl', '-ir', window_id, '-e', geometry])
return True
def get_monitor_info(logger):
"""
Determine the current monitor configureation of the system using
`xrandr`. Returns a list of MonitorInfo objects
"""
monitor_list = []
line_pattern = r"^\w+-\d\Wconnected"
geometry_pattern = r"\d+x\d+\+\d+\+\d+"
r_line = re.compile(line_pattern)
r_geo = re.compile(geometry_pattern)
xrandr_out = subprocess.check_output(['xrandr', '-q']).split('\n')
monitors = filter(r_line.match, xrandr_out)
for line in monitors:
geometry = r_geo.findall(line)[0]
whxy = re.split(r"[+x]", geometry)
monitor = MonitorInfo(*whxy)
monitor_list.append(monitor)
logger.info("monitor at w %s h %s x %s y %s", *whxy)
return monitor_list
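# Example (illustrative): an xrandr line such as
#   "HDMI-1 connected 1920x1080+1920+0 ..."
# matches both patterns and yields MonitorInfo('1920', '1080', '1920', '0').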
def create_logger(log_name):
"""
Create a logger with log_name and set it up to rotate at midnight and keep
the last five days of data
"""
if os.path.isfile(log_name):
now = datetime.datetime.now().strftime(FILE_TIME_FORMAT)
os.rename(log_name, log_name + "." + now + ".old")
log = logging.getLogger(__name__)
log.setLevel(LOG_LEVEL)
handler = logging.handlers.TimedRotatingFileHandler(log_name,
when="midnight",
backupCount=3)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
sys.stdout = LoggerWriter(log, logging.INFO)
sys.stderr = LoggerWriter(log, logging.ERROR)
return log
def get_init_time():
"""
Get the current time, or if test mode get the test start time
"""
init_time = datetime.datetime.now().strftime(TIME_FORMAT)
if TEST_MODE:
init_time = TEST_TIME_START
return init_time
def get_rules():
"""
Return an AdblockRules object representing the rules
expressed in BLOCK_FILES
"""
raw_rules = []
for filename in BLOCK_FILES:
file_rules = []
with open(filename) as rule_file:
for line in rule_file:
if '#' not in line and re.search(RULE_PATTERN, line):
ipandrule = re.split(r"\s", line)
if len(ipandrule) > 1:
rule = ipandrule[1]
file_rules.append(rule)
raw_rules = raw_rules + file_rules
return AdblockRules(raw_rules, use_re2=True)
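# Example (illustrative): a hosts-style line such as "0.0.0.0 doubleclick.net"
# matches RULE_PATTERN, and its second whitespace-separated field
# ("doubleclick.net") is added to the AdblockRules rule set.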
def start_display(init_sleep):
"""
Main thread method for the display script. Spins off child
processes for each firefox instance and sends urls to them
    from the sqlite database.
"""
logger = create_logger(TMP_DIR + LOG_FILENAME)
dead = threading.Event()
restart = threading.Event()
killer = GracefulKiller(dead)
logger.info('Started')
monitor_list = get_monitor_info(logger)
firefox_procs = []
threads = []
num_firefox = len(monitor_list)
time.sleep(init_sleep)
try:
last_time = get_init_time()
rules = get_rules()
logger.info("Starting up firefox instances")
firefox_to_queue = dict()
for i in range(num_firefox):
firefox_to_queue[i] = Queue.Queue()
firefox_procs = setup_browsers(num_firefox, firefox_procs, monitor_list, dead, logger)
if not dead.is_set():
logger.info("Browsers setup")
# spawn a thread for each display
for i in range(num_firefox):
thread = threading.Thread(target=display_main, args=(i, firefox_to_queue[i],
dead, restart, logger))
thread.start()
threads.append(thread)
while not dead.is_set():
last_time = requests_to_queues(last_time, num_firefox, firefox_to_queue,
rules, logger)
time.sleep(sleep_time())
# how do we delete things from the sqlite database?
finally:
dead.set()
logger.info("Terminated main loop")
for thread in threads:
thread.join()
kill_firefox(firefox_procs)
logger.info("Finished waiting for threads")
logger.info("Threads finished")
logger.info('Finished')
logging.shutdown()
if restart.is_set():
os.execvp("display_start.sh", ["display_start.sh"])
def setup_browsers(firefox_num, firefox_procs, monitor_list, dead, logger):
"""
    Create a firefox process for each of the monitors in monitor_list
up to firefox_num and move the window to the assigned monitor. Tries
until windows are successfully moved.
Returns the process ids in firefox_procs
"""
in_position = False
while not in_position and not dead.is_set():
logger.info("Trying to create firefox instances and move them")
for i in range(firefox_num):
firefox_procs.append(subprocess.Popen(["firefox", "-no-remote", "-P",
("display_%d" % i)],
preexec_fn=os.setsid,
stdout=subprocess.PIPE))
# let firefox start
time.sleep(INIT_SLEEP)
in_position = move_windows(monitor_list, firefox_procs, logger)
if not in_position:
kill_firefox(firefox_procs)
firefox_procs = []
time.sleep(WAIT_BETWEEN_FIREFOX_FAILS)
return firefox_procs
def kill_firefox(firefox_procs):
"""
    Kills the processes corresponding to those in firefox_procs
"""
for firefox in firefox_procs:
os.killpg(os.getpgid(firefox.pid), signal.SIGTERM)
def query_for_requests(last_time, new_last_time, logger):
"""
Return the rows from the sqlite database after last_time
(and before new_last_time if test mode)
"""
database = SQL_DATABASE
if TEST_MODE:
database = TEST_SQL_DATABASE
conn = sqlite3.connect(database)
rows = []
try:
with conn:
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
if TEST_MODE:
cursor.execute(TEST_QUERY, (last_time, new_last_time,))
else:
cursor.execute(QUERY, (last_time,))
rows = cursor.fetchall()
except sqlite3.Error as err:
logger.debug("Sqlite error: %s", err)
return rows
def get_test_last_time(last_time):
"""
Computes a new last time, adding a delta to it
"""
last_time_dt = datetime.datetime.strptime(last_time, TIME_FORMAT)
delta = datetime.timedelta(seconds=(DISPLAY_SLEEP + 2))
up_to_time = last_time_dt + delta
new_last_time = up_to_time.strftime(TIME_FORMAT)
return new_last_time
def requests_to_queues(last_time, num_firefox, firefox_to_queue, rules, logger):
"""
Part of main thread.
Queries database for new entries, filters, and sends them off to the queues
for the firefox threads
"""
logger.debug("last time %s", last_time)
new_last_time = last_time
if TEST_MODE:
new_last_time = get_test_last_time(last_time)
logger.info("Querying for requests newer than %s", last_time)
rows = query_for_requests(last_time, new_last_time, logger)
last_time = new_last_time
# create intermediate lists for each display
inter_lists = dict()
for i in range(num_firefox):
inter_lists[i] = []
# populate lists
for request in rows:
if request['ts'] > last_time:
last_time = request['ts']
url = get_url(request)
logger.debug("Potential request for %s", url)
if is_wifi_request(request) and not rules.should_block(url):
logger.debug("Valid wifi and non-blocked request for %s from src ip %s",
url, request['source'])
to_firefox = hash(request['user_agent']) % num_firefox
if len(inter_lists[to_firefox]) < MAX_MONITOR_LIST_URLS and can_show_url(url, logger):
inter_lists[to_firefox].append(request)
logger.info("Added request for %s to browswer %d", url, to_firefox)
# send lists
for i in range(num_firefox):
firefox_to_queue[i].put(inter_lists[i])
# need to delete old entries in table, but database is read only..
# c.execute(DELETE_QUERY, (last_time,))
return last_time
def get_url(request):
"""
For the sqlite request with 'host' and 'uri' returns url
"""
url = request['host'] + urllib.quote(request['uri'].encode('utf-8'))
# hack to deal with mitmf
if url.startswith('wwww.'):
url = url[1:]
return "http://" + url
def can_show_url(url, logger):
"""
Returns true if the given url is a loadable full html document
"""
try:
res = urllib2.urlopen(url, timeout=FILTER_LOAD_URL_TIMEOUT)
http_message = res.info()
full = http_message.type # 'text/plain'
code = res.getcode()
# make sure there was not a redirect, that it is html, and the page was accepted
if res.geturl() == url and full == HTML_MIME and code in ACCEPTABLE_HTTP_STATUSES:
data = res.read()
return re.search(DOCTYPE_PATTERN, data)
except (urllib2.HTTPError, urllib2.URLError,
socket.timeout, socket.error, ssl.SSLError) as error:
logger.debug('url open error for %s, error: %s', url, error)
return False
def is_wifi_request(request):
"""
Returns True if the sqlite object request comes from a machine
on the wifi network
"""
ip = request['source']
if PORT_MIRRORING:
result = (not re.search(MACHINE_IP, ip))
for agent in IGNORE_USER_AGENTS:
result &= not re.search(agent, request['user_agent'])
else:
result = (re.search(MACHINE_IP, ip) and
request['source_port'] == MACHINE_PROXY_PORT)
return result
def sleep_time():
"""
Returns the current sleep time
"""
if TEST_MODE:
return TEST_DISPLAY_SLEEP
return DISPLAY_SLEEP
def choose_default_page():
"""
Randomly returns either DEFAULT_PAGE_VIDEO or DEFAULT_PAGE
with a VIDEO_LIKLIHOOD bias towards DEFAULT_PAGE_VIDEO
"""
val = random.random()
if val < VIDEO_LIKLIHOOD:
return DEFAULT_PAGE_VIDEO
return DEFAULT_PAGE
# thread main
def display_main(firefox_num, queue, dead, restart, logger):
"""
Thread main for the firefox handling threads.
Waits to receive well formatted urls from the main process
through queue and sends them the handled firefox instance
Finishes when dead.is_set()
"""
try:
logger.info("Thread %d starting", firefox_num)
port = DEFAULT_PORT + firefox_num
# Create repl to control firefox instance
with Mozrepl(port=port) as mozrepl:
current_entry = None
change_url(mozrepl, choose_default_page())
logger.info(mozrepl.js("repl.whereAmI()"))
cycles_without_new = 0
while True:
new_entry = None
try:
# get urls from the main thread
requests = queue.get_nowait()
logger.debug("thread %d with %d requests", firefox_num, len(requests))
new_entry = find_best_entry(requests, current_entry)
except Queue.Empty:
new_entry = None
logger.debug("thread %d queue empty", firefox_num)
time_slept = 0
# if the url should be changed
if not (new_entry is None and current_entry is None):
if new_entry:
# change to requested page
url = get_url(new_entry)
user_agent = get_nice_user_agent(new_entry['user_agent'])
logger.debug("thread %d new entry source: %s", firefox_num,
new_entry['source'])
logger.debug("thread %d new entry user agent: %s", firefox_num,
user_agent)
cycles_without_new = 0
else:
# change to default page
url = choose_default_page()
user_agent = None
cycles_without_new = cycles_without_new + 1
logger.debug("Thread %d %d cycles without new", firefox_num, cycles_without_new)
# if we should change
# (that is, if we have waited enough cycles to go back to default)
if cycles_without_new == 0 or cycles_without_new > DISPLAY_CYCLES_NO_NEW_REQUESTS:
logger.info("Thread %d changing url to %s", firefox_num, url)
change_url(mozrepl, url)
current_entry = new_entry
if user_agent:
# try to add the user agent to the page, waiting for when the page loads
logger.debug(page_complete(mozrepl))
while (not dead.is_set() and (time_slept < sleep_time()) and
(not page_complete(mozrepl))):
logger.debug("thread %d url not ready", firefox_num)
delta = 1
time_slept = time_slept + delta
time.sleep(delta)
add_user_agent(mozrepl, user_agent)
delta = 2
time_slept = time_slept + delta
if dead.is_set():
break
time.sleep(delta)
add_user_agent(mozrepl, user_agent)
logger.debug("Thread %d added user agent %s", firefox_num, user_agent)
if dead.is_set():
break
time_to_sleep = sleep_time() - time_slept
if time_to_sleep > 0:
time.sleep(time_to_sleep)
except Exception as err:
logger.exception(err)
dead.set()
restart.set()
finally:
logger.info("Thread %d ending", firefox_num)
def find_best_entry(requests, old):
"""
Return the best entry in the list of sqlite objects requests given the old entry
Best is defined as most recent request from the same browser as old or the newest
"""
    requests = sorted(requests, key=lambda request: request['ts'], reverse=True)
for request in requests:
if (old and request['user_agent'] == old['user_agent'] and
get_url(request) != get_url(old)):
return request
if requests:
return requests[0]
return None
def get_nice_user_agent(user_agent):
"""
Return the os and browser from user_agent as a string, if present
"""
uaobj = httpagentparser.detect(user_agent)
res = ""
if uaobj:
if uaobj['os'] and uaobj['os']['name']:
res = res + uaobj['os']['name'] + " "
if uaobj['browser'] and uaobj['browser']['name']:
res = res + uaobj['browser']['name']
return res
def page_complete(mozrepl):
"""
Return if the page in the firefox instance handled by mozrepl is ready
"""
return 'complete' in mozrepl.js("document.readyState")
def change_url(mozrepl, url):
"""
Change the url of the firefox instance handled by mozrepl to url
"""
mozrepl.js("content.location.href = '%s'" % url)
def add_user_agent(mozrepl, user_agent):
"""
Add an element to the current page of the firefox instance handled by
mozrepl to display the given user_agent
"""
mozrepl.js("body = content.document.body")
mozrepl.js("div = document.createElement('div')")
mozrepl.js("div.style.all = 'initial'")
mozrepl.js("div.style.backgroundColor = 'white'")
mozrepl.js("div.style.zIndex = '99999999'")
mozrepl.js("div.style.float = 'left'")
mozrepl.js("div.style.position = 'absolute'")
mozrepl.js("div.style.top = '20px'")
mozrepl.js("div.style.left = '20px'")
mozrepl.js("div.style.backgroundColor = 'black'")
mozrepl.js("h1 = document.createElement('h1')")
mozrepl.js("h1.style.all = 'initial'")
mozrepl.js("h1.style.fontSize = '4vw'")
mozrepl.js("h1.style.fontFamily = 'Arial'")
mozrepl.js("h1.style.color = 'white'")
mozrepl.js("h1.innerHTML = '%s'" % user_agent)
mozrepl.js("div.appendChild(h1)")
mozrepl.js("body.insertBefore(div, body.firstChild)")
class GracefulKiller(object):
"""
A class to signal when the SIGINT or SIGTERM signals are received
Originally from
https://stackoverflow.com/questions/18499497/how-to-process-sigterm-signal-gracefully
"""
def __init__(self, event):
self.dead = event
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, *_):
"""
Signal received, flag death
"""
self.dead.set()
class DisplayDaemon(Daemon):
"""
    A Daemon subclass that allows the display script to be run
    like a daemon process
"""
def run(self, *args, **kwargs):
if args:
start_display(args[0])
else:
print "Init sleep not provided"
sys.exit(1)
class LoggerWriter(object):
"""
    A file-like writer used to redirect stdout and stderr to the logger
"""
def __init__(self, log, level):
self.logger = log
self.level = level
def write(self, message):
"""
Write message to self.logger
"""
if message != '\n':
self.logger.log(self.level, message)
def flush(self):
"""
        No-op; allows flush() to be called on the redirected streams
"""
pass
def main():
"""
Main method
"""
parser = argparse.ArgumentParser()
parser.add_argument("command", choices=("start", "stop", "restart", "run"))
parser.add_argument("-i", "--init_sleep", type=int, help="initial sleep",
required=False)
args = parser.parse_args()
if args.init_sleep is None:
args.init_sleep = WAIT_AFTER_BOOT
if not os.path.isdir(TMP_DIR):
os.mkdir(TMP_DIR)
daemon = DisplayDaemon(TMP_DIR + PID_FILE)
if args.command == 'start':
daemon.start(args.init_sleep)
elif args.command == 'stop':
daemon.stop()
elif args.command == 'restart':
daemon.restart()
elif args.command == 'run':
daemon.run(args.init_sleep)
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
if __name__ == "__main__":
main()
``` |
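The window placement above depends on two regular expressions applied to `xrandr -q` output in `get_monitor_info`. As a hedged aside (the sample line below is made up, not taken from the original project), this is how the geometry pattern decomposes one connected-output line into width, height and offsets:
```python
import re

# Illustrative xrandr line; real output varies by driver and monitor.
sample = "HDMI-1 connected 1920x1080+1920+0 (normal left inverted right) 531mm x 299mm"
geometry = re.findall(r"\d+x\d+\+\d+\+\d+", sample)[0]      # "1920x1080+1920+0"
width, height, x_offset, y_offset = re.split(r"[+x]", geometry)
print(" ".join([width, height, x_offset, y_offset]))        # "1920 1080 1920 0"
```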
{
"source": "jlconlin/PhDThesis",
"score": 2
} |
#### File: Geometry/OneDMesh/Mesh.py
```python
import _Mesh
import new
new_instancemethod = new.instancemethod
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'PySwigObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError,name
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
def _swig_setattr_nondynamic_method(set):
def set_attr(self,name,value):
if (name == "thisown"): return self.this.own(value)
if hasattr(self,name) or (name == "this"):
set(self,name,value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
return set_attr
class PySwigIterator(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self): raise AttributeError, "No constructor defined"
__repr__ = _swig_repr
__swig_destroy__ = _Mesh.delete_PySwigIterator
__del__ = lambda self : None;
def value(*args): return _Mesh.PySwigIterator_value(*args)
def incr(*args): return _Mesh.PySwigIterator_incr(*args)
def decr(*args): return _Mesh.PySwigIterator_decr(*args)
def distance(*args): return _Mesh.PySwigIterator_distance(*args)
def equal(*args): return _Mesh.PySwigIterator_equal(*args)
def copy(*args): return _Mesh.PySwigIterator_copy(*args)
def next(*args): return _Mesh.PySwigIterator_next(*args)
def previous(*args): return _Mesh.PySwigIterator_previous(*args)
def advance(*args): return _Mesh.PySwigIterator_advance(*args)
def __eq__(*args): return _Mesh.PySwigIterator___eq__(*args)
def __ne__(*args): return _Mesh.PySwigIterator___ne__(*args)
def __iadd__(*args): return _Mesh.PySwigIterator___iadd__(*args)
def __isub__(*args): return _Mesh.PySwigIterator___isub__(*args)
def __add__(*args): return _Mesh.PySwigIterator___add__(*args)
def __sub__(*args): return _Mesh.PySwigIterator___sub__(*args)
def __iter__(self): return self
PySwigIterator_swigregister = _Mesh.PySwigIterator_swigregister
PySwigIterator_swigregister(PySwigIterator)
class DVector(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def iterator(*args): return _Mesh.DVector_iterator(*args)
def __iter__(self): return self.iterator()
def __nonzero__(*args): return _Mesh.DVector___nonzero__(*args)
def __len__(*args): return _Mesh.DVector___len__(*args)
def pop(*args): return _Mesh.DVector_pop(*args)
def __getslice__(*args): return _Mesh.DVector___getslice__(*args)
def __setslice__(*args): return _Mesh.DVector___setslice__(*args)
def __delslice__(*args): return _Mesh.DVector___delslice__(*args)
def __delitem__(*args): return _Mesh.DVector___delitem__(*args)
def __getitem__(*args): return _Mesh.DVector___getitem__(*args)
def __setitem__(*args): return _Mesh.DVector___setitem__(*args)
def append(*args): return _Mesh.DVector_append(*args)
def empty(*args): return _Mesh.DVector_empty(*args)
def size(*args): return _Mesh.DVector_size(*args)
def clear(*args): return _Mesh.DVector_clear(*args)
def swap(*args): return _Mesh.DVector_swap(*args)
def get_allocator(*args): return _Mesh.DVector_get_allocator(*args)
def begin(*args): return _Mesh.DVector_begin(*args)
def end(*args): return _Mesh.DVector_end(*args)
def rbegin(*args): return _Mesh.DVector_rbegin(*args)
def rend(*args): return _Mesh.DVector_rend(*args)
def pop_back(*args): return _Mesh.DVector_pop_back(*args)
def erase(*args): return _Mesh.DVector_erase(*args)
def __init__(self, *args):
this = _Mesh.new_DVector(*args)
try: self.this.append(this)
except: self.this = this
def push_back(*args): return _Mesh.DVector_push_back(*args)
def front(*args): return _Mesh.DVector_front(*args)
def back(*args): return _Mesh.DVector_back(*args)
def assign(*args): return _Mesh.DVector_assign(*args)
def resize(*args): return _Mesh.DVector_resize(*args)
def insert(*args): return _Mesh.DVector_insert(*args)
def reserve(*args): return _Mesh.DVector_reserve(*args)
def capacity(*args): return _Mesh.DVector_capacity(*args)
__swig_destroy__ = _Mesh.delete_DVector
__del__ = lambda self : None;
DVector_swigregister = _Mesh.DVector_swigregister
DVector_swigregister(DVector)
class ULongVector(object):
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def iterator(*args): return _Mesh.ULongVector_iterator(*args)
def __iter__(self): return self.iterator()
def __nonzero__(*args): return _Mesh.ULongVector___nonzero__(*args)
def __len__(*args): return _Mesh.ULongVector___len__(*args)
def pop(*args): return _Mesh.ULongVector_pop(*args)
def __getslice__(*args): return _Mesh.ULongVector___getslice__(*args)
def __setslice__(*args): return _Mesh.ULongVector___setslice__(*args)
def __delslice__(*args): return _Mesh.ULongVector___delslice__(*args)
def __delitem__(*args): return _Mesh.ULongVector___delitem__(*args)
def __getitem__(*args): return _Mesh.ULongVector___getitem__(*args)
def __setitem__(*args): return _Mesh.ULongVector___setitem__(*args)
def append(*args): return _Mesh.ULongVector_append(*args)
def empty(*args): return _Mesh.ULongVector_empty(*args)
def size(*args): return _Mesh.ULongVector_size(*args)
def clear(*args): return _Mesh.ULongVector_clear(*args)
def swap(*args): return _Mesh.ULongVector_swap(*args)
def get_allocator(*args): return _Mesh.ULongVector_get_allocator(*args)
def begin(*args): return _Mesh.ULongVector_begin(*args)
def end(*args): return _Mesh.ULongVector_end(*args)
def rbegin(*args): return _Mesh.ULongVector_rbegin(*args)
def rend(*args): return _Mesh.ULongVector_rend(*args)
def pop_back(*args): return _Mesh.ULongVector_pop_back(*args)
def erase(*args): return _Mesh.ULongVector_erase(*args)
def __init__(self, *args):
this = _Mesh.new_ULongVector(*args)
try: self.this.append(this)
except: self.this = this
def push_back(*args): return _Mesh.ULongVector_push_back(*args)
def front(*args): return _Mesh.ULongVector_front(*args)
def back(*args): return _Mesh.ULongVector_back(*args)
def assign(*args): return _Mesh.ULongVector_assign(*args)
def resize(*args): return _Mesh.ULongVector_resize(*args)
def insert(*args): return _Mesh.ULongVector_insert(*args)
def reserve(*args): return _Mesh.ULongVector_reserve(*args)
def capacity(*args): return _Mesh.ULongVector_capacity(*args)
__swig_destroy__ = _Mesh.delete_ULongVector
__del__ = lambda self : None;
ULongVector_swigregister = _Mesh.ULongVector_swigregister
ULongVector_swigregister(ULongVector)
class Mesh(object):
"""Proxy of C++ Mesh class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(self, double length, double area, unsigned int num_zones) -> Mesh
__init__(self, DVector zone_lengths, double area=1.0) -> Mesh
__init__(self, DVector zone_lengths) -> Mesh
"""
this = _Mesh.new_Mesh(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _Mesh.delete_Mesh
__del__ = lambda self : None;
def length(*args):
"""length(self) -> double"""
return _Mesh.Mesh_length(*args)
def numZones(*args):
"""numZones(self) -> unsigned int"""
return _Mesh.Mesh_numZones(*args)
def numNodes(*args):
"""numNodes(self) -> unsigned int"""
return _Mesh.Mesh_numNodes(*args)
def numCorners(*args):
"""numCorners(self) -> unsigned int"""
return _Mesh.Mesh_numCorners(*args)
Mesh_swigregister = _Mesh.Mesh_swigregister
Mesh_swigregister(Mesh)
```
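A brief, hedged usage sketch of the generated proxy class (it assumes the compiled `_Mesh` SWIG extension is importable; the numbers are illustrative only):
```python
# Hedged sketch -- requires the compiled _Mesh extension on the import path.
from Mesh import Mesh

m = Mesh(10.0, 1.0, 5)     # first overload above: length, area, num_zones
print(m.length())          # 10.0
print(m.numZones())        # 5
print(m.numNodes())        # node/corner counts come from the underlying C++ class
```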
#### File: cpp/Markov/Power.py
```python
__id__ = "$Id: Power.py 234 2008-01-03 15:09:32Z jlconlin $"
__author__ = "$Author: jlconlin $"
__version__ = " $Revision: 234 $"
__date__ = "$Date: 2008-01-03 08:09:32 -0700 (Thu, 03 Jan 2008) $"
import time
import math
import scipy
from Transport.Geometry import Field
from Transport.FissionSource import BankSource
import Transport.Markov.PowerBase
class Power(Transport.Markov.PowerBase.PowerBase):
"""
Power is the base class for power method algorithms. Eventually it will
be inherited to provide functionality for both Monte Carlo and
Determinstic calculations
"""
def __init__(self, seed, F):
"""
seed: Random number generator seed
F: Field.FieldZoneMat
"""
Transport.Markov.PowerBase.PowerBase.__init__(self, seed, F)
def power(self, currentSource, active, inactive, histories, discMesh):
"""
power is the main method for the power method
currentSource: Initial source for power method
active: Number of active iterations
inactive: Number of inactive iterations
histories: Number histories per iteration
discMesh: Mesh for discretization of FissionSource
"""
self.k = 1
self.eigEstI = [] # Estimate of eigenvalue from inactive iterations
self.meanEigI = [] # Mean of the eigenvalues from inactive iterations
self.varEigI = [] # Variance of the eigenvalues from inactive iterations
self.eigEst = [] # Estimate of eigenvalue from active iterations
self.meanEig = [] # Mean of the eigenvalues from active iterations
self.varEig = [] # Variance of the eigenvalues from active iterations
self.eigVector = [] # Eigenvector estimate for active iterations
start = time.time()
for i in xrange(inactive):
nextSource = self.Markov_Transport(currentSource, histories)
self.k = self.k*(len(currentSource)/float(histories))
self.eigEstI.append(self.k)
            self.meanEigI.append(scipy.mean(self.eigEstI)) # Mean eigenvalue
            self.varEigI.append(scipy.var(self.eigEstI)) # Variance eigenvalue
print "I: %5i, eigenvalue = %8.6f," %(i, self.k),
print " time: %8.3f sec" %(time.time() - start)
currentSource = nextSource
print "------------------ACTIVE ITERATIONS------------------"
for self.i in xrange(active):
nextSource = self.Markov_Transport(currentSource, histories)
self.k = self.k*(len(currentSource)/float(histories))
self.eigEst.append(self.k)
self.meanEig.append(scipy.mean(self.eigEst)) # Mean eigenvalue
            self.varEig.append(scipy.var(self.eigEst)) # Variance eigenvalue
print "A: %5i, eigenvalue = %8.6f," %(self.i, self.k),
print " mean = %6.4f, std.dev = %6.4f, time: %8.3f sec" %(
self.meanEig[-1], math.sqrt(self.varEig[-1]),
(time.time() - start))
# Discretized fissionSource
discSource = nextSource.discretized(discMesh)
discSource = discSource/sum(discSource)
self.eigVector.append(discSource)
currentSource = nextSource
```
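The eigenvalue bookkeeping above (scale the running estimate by the ratio of banked fission sites to histories, then average over active iterations) is ordinary power iteration. A hedged, transport-free sketch of the same idea on a small matrix, using numpy only and illustrative numbers:
```python
import numpy

A = numpy.array([[4.0, 1.0], [2.0, 3.0]])    # dominant eigenvalue is 5
source = numpy.ones(2)
estimates = []
for i in range(20):
    new_source = A.dot(source)
    k = numpy.linalg.norm(new_source) / numpy.linalg.norm(source)
    source = new_source / numpy.linalg.norm(new_source)
    if i >= 5:                               # treat the first cycles as inactive
        estimates.append(k)
print(numpy.mean(estimates))                 # approaches 5.0
```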
#### File: Arnoldi/Deterministic/arnoldiDTM.py
```python
_id__ = "$Id: arnoldiDTM.py 397 2008-10-13 21:06:07Z jlconlin $"
__author__ = "$Author: jlconlin $"
__version__ = " $Revision: 397 $"
__date__ = "$Date: 2008-10-13 15:06:07 -0600 (Mon, 13 Oct 2008) $"
import math
import copy
import time
import os
import scipy
import scipy.linalg
import scipy.stats
import Errors
"""This module is a deterministic implementation of Arnoldi's method. It can be
used as comparison to the Monte Carlo implementation."""
class arnoldiDTM(object):
"""
This class is a demonstration of a deterministic Arnoldi's method.
"""
def __init__(self, A, Noise = 0.0, NoiseShape = 'normal',
storeVectors=False, verbose = False):
"""
A: Matrix
Noise: The amount of noise added to the matrix-vector product Aq
NoiseShape: The shape of the pdf of the noise
storeVectors: Whether the dominant eigenvector should be stored between
iterations
"""
self.A = A
if storeVectors:
self.vectorStorage = []
else:
self.vectorStorage = None
self.residual = []
self.Noise = Noise
self.NoiseShape = NoiseShape
self.verbose = verbose
self.convergence = [] # List of eigenvalues at each iteration
object.__init__(self)
def ERAM(self, q, R, I):
"""
ERAM is Explicitly Restarted Arnoldi's Method. ERAM returns the
estimated eigenvalues and eigenvector.
q: Initial vector.
R: Number of Arnoldi Restarts
I: Number of Arnoldi Iterations per restart
"""
self.eValues = []
self.cycleVectors = []
for self.r in xrange(1, R + 1):
if self.verbose:
print "Restart #: %4i" %(self.r)
Values, Vectors = self.arnoldi(q, I)
self.eValues.append(Values)
self.cycleVectors.append(Vectors[:,-1])
q = Vectors[:,-1]
if self.H[self.k,self.k-1] < 1e-6 or self.residual[-1] < 1e-6:
break
return Values, Vectors
def arnoldi(self, q, I):
"""
arnoldi is the primary method. arnoldi returns the estimated
eigenvalues and eigenvectors of the linear operator A.
q: Initial vector.
I: Number of Arnoldi Iterations per restart
"""
self.H = scipy.zeros((I+1, I))
q = q/scipy.linalg.norm(q,2)
self.Q = [q]
for self.k in xrange(1, I + 1):
self.iteration()
if self.verbose:
print "\titeration #: %3i" %(self.k)
Values, vectors = self._EigenPairs(self.H[:self.k, :self.k])
self.convergence.append(Values[-1])
# Calculate Residual
res = vectors[-1][-1]*self.H[self.k, self.k-1]
self.residual.append(abs(res))
Vectors = self._calc_eVectors(self.Q, vectors)
if self.H[self.k,self.k-1] < 1e-12:
if self.verbose:
print "I finished because H[%i,%i] = %8.4E < 1E-12" %(
self.k+1, self.k, self.H[self.k,self.k-1])
break
elif self.residual[-1] < 1e-12:
if self.verbose:
print "I finished because residual = %8.4E < 1E-12" %(
self.residual[-1])
break
return Values, Vectors
def iteration(self):
"""
        iteration performs one Arnoldi iteration. This step includes the
matrix-vector product (Aq), orthogonalization, and normalization.
"""
q = self.Aq(self.A, self.Q[-1])
# Orthogonalize
for j in xrange(1, self.k+1):
self.H[j-1, self.k-1] = scipy.dot(self.Q[j-1], q)
q = q - self.H[j-1, self.k-1]*self.Q[j-1]
# Normalize
self.H[self.k, self.k-1] = scipy.linalg.norm(q,2)
q = q/self.H[self.k, self.k-1]
self.Q.append(q)
def Aq(self, A, q):
"""
Aq simply returns the product of A on q. This method is included to
simplify customizing this feature. It currently adds a noise
proportional to self.Noise.
"""
if self.Noise != 0:
if self.NoiseShape == 'normal':
NoiseVector = scipy.stats.norm(0,self.Noise).rvs(len(q))
elif self.NoiseShape == 'uniform':
NoiseVector = scipy.stats.uniform(-self.Noise/2.0, self.Noise).rvs(len(q))
else:
raise Errors.NoiseShapeError(
"NoiseShape unknown. Please add NoiseShape to code.")
else:
NoiseVector = 0.0
return scipy.inner(A, q) + NoiseVector
def _EigenPairs(self, M):
"""
_EigenPairs calculates the eigenpairs of M.
It returns the sorted eigenpairs with the first element
being the smallest eigenvalue. The eigenvectors are column vectors.
M: Matrix whose eigenpairs _EigenPairs will sort
"""
values, vectors = scipy.linalg.eig(M)
sortedIndex = values.argsort()
sortedValues = values[sortedIndex]
sortedVectors = vectors[:,sortedIndex]
return (sortedValues, sortedVectors)
def _calc_eVectors(self, Q, V):
"""
        _calc_eVectors will calculate the eigenvectors. The
        eigenvector is a linear combination of the vectors of Q with the elements
        of an eigenvector of H as the expansion coefficients. _calc_eVectors
        returns a matrix whose columns are the eigenvectors.
Q: List of orthornormal basis vectors
V: Matrix of column vectors
"""
n = len(V)
m = len(Q[0])
Vectors = scipy.zeros((m,0))
for j in xrange(n):
Vector = scipy.zeros(m)
for i in xrange(n):
Vector = Vector + V[i,j]*Q[i]
Vectors = scipy.column_stack((Vectors,Vector))
return Vectors
if __name__ == "__main__":
n = 5
R = 5
I = 4
q = scipy.ones(n)
# A = scipy.diag(scipy.arange(1,n+1))
# A[1,0] = 1.0
A = scipy.zeros((n,n))
k = 0
for i in xrange(n):
for j in xrange(i-1, n):
A[i,j] = k
k += 1
print "A: \n%s" %A
print "q: %s" %q
arn = arnoldiDTM(A, verbose=True)
arn.ERAM(q, R, I)
```
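As a hedged sanity check (not part of the original module), the `Q` and `H` built by `arnoldi` above should satisfy the Arnoldi relation A Q_k = Q_{k+1} Hbar_k exactly when no noise is added; the matrix below is illustrative:
```python
import numpy

n, I = 6, 4
A = numpy.diag(numpy.arange(1.0, n + 1))
A[2, 1] = 1.0                                # small non-symmetric test matrix
arn = arnoldiDTM(A)
arn.arnoldi(numpy.ones(n), I)
k = arn.k
Qk   = numpy.column_stack(arn.Q[:k])         # n x k orthonormal basis
Qk1  = numpy.column_stack(arn.Q[:k + 1])     # n x (k+1) basis
Hbar = arn.H[:k + 1, :k]                     # (k+1) x k Hessenberg block
print(numpy.allclose(A.dot(Qk), Qk1.dot(Hbar)))   # expected: True
```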
#### File: MonteCarlo/investigate/amcNoRestartConvergence.py
```python
__id__ = "$Id: amcNoRestartConvergence.py 168 2007-10-25 19:25:39Z jlconlin $"
__author__ = "$Author: jlconlin $"
__version__ = " $Revision: 168 $"
__date__ = "$Date: 2007-10-25 13:25:39 -0600 (Thu, 25 Oct 2007) $"
"""This module will investigate how the eigenvector converges without restarts. """
import optparse
import os
import sys
import string
import scipy
import pylab
import arnoldiMC
import Geometry
import CrossSection
import fissionSource
import gnuplotFile
import procOUTSN
def Movie():
"""
Movie will generate images for making a movie from the convergent eigenvectors
"""
repeat = True
while repeat:
try:
os.mkdir(options.movie)
os.chdir(options.movie)
repeat = False
except OSError:
print "Unable to make movie in that directory"
again = raw_input("Would you like to try again? [y/n]")
if again == "y":
options.movie = raw_input("Please enter a new folder name:")
repeat = True
else:
print "Stopping this script without creating movie."
return
gnuData = {}
evalueErrors = [scipy.std(amc.convergence[:i+1]) for i in
xrange(len(amc.convergence))]
gnuData['Eigenvalue'] = (scipy.arange(len(amc.convergence)),
amc.convergence, evalueErrors)
k = [sn3.k for i in xrange(len(amc.convergence))]
pylab.clf()
pylab.xlabel("Iteration")
pylab.ylabel("Eigenvalue estimate")
pylab.title("%s" %(string.join(sys.argv)), size=10)
pylab.errorbar(range(len(amc.convergence)), amc.convergence, evalueErrors)
pylab.plot(k, 'k-')
pylab.savefig('../%s.png' %options.filename)
for vec, i in zip(amc.iterVectors, xrange(len(amc.iterVectors))):
pylab.clf()
pylab.xlabel("Slab Width (mfp)")
pylab.plot(geo.edges[:-1], csHeight, 'k--', linestyle='steps')
pylab.plot(geo.edges[:-1], vec, 'r--', linestyle='steps')
pylab.title("Iteration %i" %i)
fname = "Iteration%i.png" %i
print "Saving %s" %fname
pylab.savefig(fname, dpi=150, orientation='landscape')
gnuData['Iter-%i' %i] = (geo.edges[:-1], vec)
os.chdir("..")
g = gnuplotFile.gnuplotFile(options.filename, gnuData)
usage = 'usage: %prog [options] args'
parser = optparse.OptionParser()
parser.add_option("-f", "--file", dest="filename", type='string',
default=None, help="gnuplot data filename")
parser.add_option("-b", "--bins", dest="bins", type="int", default="50",
help="Number of spatial bins.")
parser.add_option("-w", "--width", dest="width", type="float", default="20",
help="Width of slab.")
parser.add_option("-I", "--iterations", dest="I", type="int",
default="53", help="How many Arnoldi Iterations.")
parser.add_option("--histories", dest="H", type="int",
default="1000", help="How many histories per iteration")
parser.add_option("-s", "--source", dest="source", type="string",
default='uniform',
help="""Defines source distribution. Available sources:
'uniform' --- uniform distrbution
'random' --- random distribution
'pleft' --- point source in left most bin
'pright' --- point source in right most bin
'pcenter' --- point source in center bin""")
parser.add_option("-r", "--run", dest="run", action="store_true",
default=False, help="Perform calculation.")
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
default=False, help="Verbosity of ArnoldiMC output.")
parser.add_option("-M", "--movie", dest="movie", type="string",
default=None, help="Directory for convergence of eigenvector.")
options, args = parser.parse_args()
geo = Geometry.Geometry(options.bins, [[0,options.width]])
xs = CrossSection.CrossSection(xS=0.5, nu=1.0, xF=0.5, xG=0)
if options.source == 'uniform':
s = scipy.ones(options.bins)
elif options.source == 'random':
    s = scipy.rand(options.bins)
elif options.source == 'pleft':
s = scipy.zeros(options.bins)
s[0] = 1
elif options.source == 'pright':
s = scipy.zeros(options.bins)
s[-1] = 1
elif options.source == 'pcenter':
mid = int(options.bins/2.0)
s = scipy.zeros(options.bins)
s[mid] = 1
else:
s = eval(options.source)
try:
source = fissionSource.histogramSource(s, geo)
except:
raise ValueError, "Unsupported source distribution: %s" %options.source
for key in options.__dict__:
print "%10s: %s" %(key, options.__dict__[key])
print "source %s" %source
if options.run:
# SN3
sn3 = procOUTSN.procOutsn("LarsenComparison/HW%.0f.0.OUTSN" %(options.width/2))
csHeight, csCenters = sn3.coursenSNFS(geo)
amc = arnoldiMC.arnoldiMC(geo, xs, options.H, verbose=options.verbose,
storeVectors=True)
Values, Vectors = amc.arnoldi(source, options.I)
Movie()
```
#### File: MonteCarlo/test/test_objects.py
```python
__id__ = "$Id: test_objects.py 38 2007-04-03 16:53:35Z jlconlin $"
__author__ = "$Author: jlconlin $"
__version__ = " $Revision: 38 $"
__date__ = "$Date: 2007-04-03 10:53:35 -0600 (Tue, 03 Apr 2007) $"
import math
import time
import scipy
import Geometry
import fissionSource
def main():
N = int(1E6)
geo = Geometry.Geometry(10,[[-0.5,0,5]])
uni = scipy.ones(10)
uSource = fissionSource.histogramSource(uni, geo)
start = time.time()
uSource.sample(N)
end = time.time()
elapsedTime = end - start
print "Elapsedtime to sample %i neutrons: %s" %(N, elapsedTime)
main()
```
#### File: python/Power/powerMC.py
```python
__id__ = "$Id: powerMC.py 163 2007-10-05 12:35:38Z jlconlin $"
__author__ = "$Author: jlconlin $"
__version__ = " $Revision: 163 $"
__date__ = "$Date: 2007-10-05 06:35:38 -0600 (Fri, 05 Oct 2007) $"
import random
import math
import time
import Gnuplot
import scipy.stats
import Markov
import fissionBank
import fissionSource
class powerMC(Markov.Markov):
"""
powerMC performs a simple Monte Carlo Power Method to find the dominant
eigenvalue.
"""
def __init__(self, geo, xs, inactive, active, histories=1000):
"""
geo: Geometry of the simulation
xs: Cross sections for the simulation
inactive: Number of Monte Carlo generations to skip
active: Number of active Monte Carlo generations
histories: Number of histories to run in each cycle
storeVectors: Whether the dominant eigenvector should be stored between
iterations
"""
self.active = active
self.inactive = inactive
Markov.Markov.__init__(self, geo, xs, histories)
def power(self, source):
"""
power is the main method for this algorithm
source: Initial guess of fission source
"""
# Initialize
self.k = 1
self.cycle_k = [] # list of eigenvalues per iteration
self.convergence = []
self.sd = [] # list of standard deviaiton per iterations
self.k_inactive = []
self.vectorStorage = []
self.source = source
start = time.time()
elapsed = 0
totaltime = 0
for i in xrange(1, self.inactive+1):
self.nextBank = fissionBank.fissionBank()
self.transport(self.source)
self.k = self.k*len(self.nextBank)/float(self.histories)
self.k_inactive.append(self.k)
totaltime = time.time()-start
print "iteration: %5i, eigenvalue = %8.6f," %(i, self.k),
print " time: %8.3f sec" %(totaltime)
self.source = self.nextBank
print "------- Starting active cycles -------"
for self.i in xrange(1, self.active+1):
self.nextBank = fissionBank.fissionBank()
self.transport(self.source)
self.k = (self.k*len(self.nextBank)/float(self.histories))
self.cycle_k.append(self.k)
self.convergence.append(scipy.mean(self.cycle_k))
self.sd.append((1/math.sqrt(self.i))*scipy.std(self.cycle_k))
totaltime = time.time()-start
print "iteration: %5i, eigenvalue = %8.6f," %(self.i, self.k),
print " std.dev = %6.4f, time: %8.3f sec" %(
scipy.std(self.convergence), totaltime)
self.source = self.nextBank
Y = fissionSource.histogramSource(self.source,self.geo)
Y = Y/sum(Y)
self.vectorStorage.append(Y)
def _estVar(self):
"""
"""
if self.i > 1:
self.vark = scipy.stats.var(self.convergence)
else:
self.vark = 0.0
def score(self, history):
"""
score provides the functionality for scoring tallies in a Markov
process. This is meant to be overridden by subclasses.
history: Particle to be tracked.
bank: fissionBank where particles are added for next generation
k: Estimate of Multiplication factor
"""
ran = random.random()
N = math.floor(history.weight*((1.0/self.k)*(self.xs.nu*self.xs.xF/self.xs.xT)) + ran)
self.nextBank.append(history, N) # Contribute to fission source
```
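The `score` method above banks an integer number of fission sites per collision by adding a uniform random number before truncating, so the expected yield per history is weight*(nu*xF/xT)/k. A hedged numerical check with illustrative cross-section numbers:
```python
import math
import random

weight, k, nu_xF_over_xT = 1.0, 1.0, 0.5     # illustrative values
sites = [math.floor(weight * (1.0 / k) * nu_xF_over_xT + random.random())
         for _ in range(100000)]
print(sum(sites) / float(len(sites)))        # close to 0.5 sites per history
```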
#### File: python/SourceFiles/Geometry.py
```python
__id__ = "$Id: Geometry.py 51 2007-04-25 20:43:07Z jlconlin $"
__author__ = "$Author: jlconlin $"
__version__ = " $Revision: 51 $"
__date__ = "$Date: 2007-04-25 14:43:07 -0600 (Wed, 25 Apr 2007) $"
import scipy
import Errors
class Geometry(object):
"""
Geometry is a class to hold information about the geometry of the problem.
"""
def __init__(self, bins, range):
"""
        bins: A tuple; each entry is how many spatial bins in that dimension (up
        to 3)
range: A list of [min, max] pairs; the limits of the spatial geometry in
each dimension.
"""
try:
self.dimension = len(bins)
except TypeError:
self.dimension = 1
if self.dimension != 1:
raise Errors.GeometryError(
"Geometry currently only suppors 1-D geometry")
elif self.dimension != len(range):
raise Errors.GeometryError(
"Bins and Range must have same degree")
else:
self.bins = bins
self.range = range
self.edges = scipy.zeros(self.bins+1)
self.centers = scipy.zeros(self.bins) # Bin centers
width = self.max - self.min
for i in xrange(self.bins+1):
edge = self.min + i*(width/float(self.bins))
self.edges[i] = edge
for i in xrange(len(self.centers)):
self.centers[i] = self.edges[i] + (self.edges[i+1] - self.edges[i])/2.0
def __repr__(self):
"""
"""
return "bins: %s, range: %s" %(self.bins, self.range)
def _getMinX(self):
return min(self.range[0])
def _getMaxX(self):
return max(self.range[0])
min = property(fget=_getMinX)
max = property(fget=_getMaxX)
```
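A hedged usage sketch of the class above (the numbers are illustrative): four bins on [0, 2] give evenly spaced edges and mid-point centers.
```python
geo = Geometry(4, [[0.0, 2.0]])
print(geo)             # bins: 4, range: [[0.0, 2.0]]
print(geo.edges)       # 0.0, 0.5, 1.0, 1.5, 2.0
print(geo.centers)     # 0.25, 0.75, 1.25, 1.75
```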
#### File: python/SourceFiles/Matrix.py
```python
__id__ = "$Id: Matrix.py 98 2007-07-18 19:41:04Z jlconlin $"
__author__ = "$Author: jlconlin $"
__version__ = " $Revision: 98 $"
__date__ = "$Date: 2007-07-18 13:41:04 -0600 (Wed, 18 Jul 2007) $"
import scipy
"""Matrix contains several methods that return a 2-D scipy.array with various
properties. Most of these come from the matrix market deli."""
def Diagonal(diag):
"""
DiagonalMatrix makes a diagonal matrix (clearly) with elements equal to the
elements of diag.
diag: array of elements for diagonal
"""
return scipy.diagflat(diag)
def DingDong(N):
"""
This function will generate an (N x N) matrix whose eigenvalues cluster around
pi/2 and -pi/2. See:
http://math.nist.gov/MatrixMarket/deli/DingDong/
References:
This generator is adapted from <NAME>'s Test Matrix Toolbox.
<NAME>, Compact Numerical Methods for Computers: Linear Algebra and Function
Minimisation, second edition, <NAME>, Bristol, 1990 (Appendix 1).
"""
A = scipy.zeros((N,N))
for i in xrange(N):
for j in xrange(N):
A[i,j] = 1.0/(2*(10-i-j+(3/2.0)))
return A
```
#### File: cpp/Misc/qsub.py
```python
__id__ = "$Id: qsub.py 666 2009-07-01 14:34:38Z jlconlin $"
__author__ = "$Author: jlconlin $"
__version__ = " $Revision: 666 $"
__date__ = "$Date: 2009-07-01 08:34:38 -0600 (Wed, 01 Jul 2009) $"
"""
qsub.py will facilitate the creation of qsub job files and submit those jobs.
"""
import os
import optparse
class qsub(object):
def __init__(self, Filename, Jobname, path, cmd, time=10, memory='1000mb'):
"""
name: The name of the job as well as the output file name
path: Directory from which to begin command
cmd: Command to run
time: How many hours should be reserved
"""
FileLines = """#! /bin/sh
# Name of job
#PBS -N %s
# Resource list
#PBS -l nodes=1:ppn=1:opt2356
#PBS -l walltime=%s:00:00
#PBS -l qos=preempt
#PBS -l pmem=%s
# Is batch requeued if terminated prematurely
#PBS -r y
# Job working directory; leave blank for current directory
#PBS -d %s
# Output file path
#PBS -o %s
# Define a queue
#PBS -q route
# email communication to:
#PBS -M <EMAIL>
# email options
#PBS -m abe
# Join output and error streams
#PBS -joe
# Export qsub environment variables to batch job
#PBS -V
#this is for helping you if a problem happens always include it
echo "I ran on: " $PBS_NODEFILE
#run job
./%s
"""
self.filename = os.path.abspath(Filename)
self.path = os.path.abspath(path)
# Need to make sure directory exists
directory = os.path.dirname(self.filename)
if directory == '':
directory = self.path
if not os.path.isdir(directory):
os.mkdir(directory)
Screenname = os.path.splitext(self.filename)[0] + '.screen'
FileContents = FileLines %(Jobname, time, memory, self.path, Screenname,
cmd)
self.queFile = open(self.filename, 'w')
self.queFile.write(FileContents)
self.queFile.close()
def submit(self):
"""
submit job for queuing.
"""
os.system('qsub %s' %self.filename)
if __name__ == '__main__':
print "q-subbing"
parser = optparse.OptionParser()
parser.add_option('-f', '--filename', dest='filename',
default='data.q', help='Filename for job submission file')
parser.add_option('--name', dest='name', default='Test', help='Jobname')
parser.add_option('-p', dest='path', default=os.getcwd(),
                      help='Directory from which to begin command')
parser.add_option('--cmd', dest='cmd', default='ls',
help='Which command to run')
parser.add_option('-t', '--time', dest='jobtime', default=10,
help='How many hours for job')
parser.add_option('-m', '--mem', dest='memory', default='1000mb',
help='How much memory should I request')
parser.add_option('--sub', dest='submit', action='store_true',
help='Submit jobs to qsub', default=False)
options, args = parser.parse_args()
Qsub = qsub(options.filename, options.name, options.path,
options.cmd, time=options.jobtime, memory=options.memory)
if options.submit:
Qsub.submit()
```
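A hedged programmatic usage sketch of the helper above (file name, job name and command are illustrative, and actually submitting requires a PBS `qsub` binary on the path):
```python
job = qsub("jobs/arnoldi.q", "arnoldi_run", ".", "arnoldi.sh",
           time=2, memory="2000mb")
job.submit()           # shells out to: qsub <abs path>/jobs/arnoldi.q
```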
#### File: Multimedia/Tracks/GetData.py
```python
__id__ = "$Id: GetData.py 704 2009-07-13 19:14:28Z jlconlin $"
__author__ = "$Author: jlconlin $"
__version__ = " $Revision: 704 $"
__date__ = "$Date: 2009-07-13 13:14:28 -0600 (Mon, 13 Jul 2009) $"
"""
This module is used to extract the data from the output files used in this
parameter study.
"""
import os
import sys
import numpy
import string
import re
p=os.path.join(os.path.expanduser('~'), 'Code/Misc')
try:
sys.path.index(p)
except ValueError:
sys.path.append(p)
import gnuFile
def main():
EntropyFile = "EntropyConvergence.dat"
ValuesFile = "ValuesConvergence.dat"
# Get files in directory
Files = os.listdir( os.getcwd() )
ScreenFiles = []
ArnoldiDatFiles = []
PowerDatFiles = []
for f in Files:
root, ext = os.path.splitext(f) # Get root and extension of file
if ext == '.screen':
ScreenFiles.append(f)
elif ext == '.dat':
parts = string.split(f, '.')
if( parts[0] == 'ArnoldiEntropy'):
ArnoldiDatFiles.append(f)
elif( parts[0] == 'PowerEntropy'):
PowerDatFiles.append(f)
EntropyData = {}
ValuesData = {}
PowerEigen = {}
ArnoldiEigen = {}
# Process Arnoldi files
for f in ArnoldiDatFiles:
gF = gnuFile.gnuFile(f)
parts = string.split(f, '.')
tracks = float(parts[1][1:]) # parts[1] looks like 't1000'
t = '%.1G' %tracks
EI = gF.Data['Entropy-Iterations']
ER = gF.Data['Entropy-Restarts']
RE = gF.Data['raw value-00-Real']
ME = gF.Data['RAM eigenvalue-0-Real']
EntropyData['Ent-Arn-Iter-%s' %t] = (EI[:,0], EI[:,1])
EntropyData['Ent-Arn-Rest-%s' %t] = (ER[:,0], ER[:,1])
ValuesData['Arn Raw Value-%s' %t] = (RE[:,0], RE[:,1])
ValuesData['Arn Mean Value-%s' %t] = (ME[:,0], ME[:,1], ME[:,2])
ArnoldiEigen[tracks] = ME[-1]
for f in PowerDatFiles:
gF = gnuFile.gnuFile(f)
parts = string.split(f, '.')
tracks = float(parts[1][1:]) # parts[1] looks like 't1000'
t = '%.1G' %tracks
E = gF.Data['Entropy']
V = gF.Data['Power values']
R = gF.Data['Power raw values']
EntropyData['Ent-Pow-%s' %t] = (E[:,0],E[:,1])
ValuesData['Pow Mean Value-%s' %t] = (V[:,0],V[:,1], V[:,2])
ValuesData['Pow Raw Value-%s' %t] = (R[:,0],R[:,1])
PowerEigen[tracks] = V[-1]
Powerkeys = PowerEigen.keys()
Arnoldikeys = ArnoldiEigen.keys()
Powerkeys.sort()
Arnoldikeys.sort()
ArnoldiValues = []
PowerValues = []
ArnoldiSD = []
PowerSD = []
for a,p in zip(Arnoldikeys, Powerkeys):
ArnoldiValues.append(ArnoldiEigen[a][1])
ArnoldiSD.append(ArnoldiEigen[a][2])
PowerValues.append(PowerEigen[a][1])
PowerSD.append(PowerEigen[a][2])
ValuesData['Arnoldi Values'] = (Arnoldikeys, ArnoldiValues, ArnoldiSD)
ValuesData['Power Values'] = (Powerkeys, PowerValues, PowerSD)
gnuFile.Write_gnuFile(EntropyFile, EntropyData)
gnuFile.Write_gnuFile(ValuesFile, ValuesData)
if __name__ == "__main__":
main()
```
#### File: PhDThesis/Notes/ArnoldiCompare.py
```python
import numpy
import math
"""
This file is merely a prototype and a checker for my C++ code.
"""
def Diagonal(n):
A = numpy.diag(numpy.arange(1,n+1),0)
return A
def Standard(n):
A = Diagonal(n)
A[2,1] = 1
return A
class Arnoldi():
def __init__(self, A):
self.A = A
def IRAM(self, q):
print "I'm in Arnoldi.IRAM\n"
n,n = A.shape # Assume square matrix
self.H = numpy.zeros((n+1,n), dtype='complex')
q = q/numpy.linalg.norm(q,2)
self.Q = [q]
# First Iteration
self.k = 0
new_q = numpy.dot(self.A, self.Q[0])
self.H[0,self.k] = numpy.dot(new_q, self.Q[0])
new_q = new_q - self.H[0, self.k]*self.Q[0]
self.H[self.k+1, self.k] = numpy.linalg.norm(new_q,2)
new_q = new_q/self.H[self.k+1, self.k]
self.Q.append(new_q)
print "k = %s, new_q: %s" %(self.k, new_q)
# Second Iteration
self.k += 1
new_q = numpy.dot(self.A, self.Q[self.k])
self.H[0,self.k] = numpy.dot(new_q, self.Q[0])
self.H[1,self.k] = numpy.dot(new_q, self.Q[1])
new_q = new_q - self.H[0, self.k]*self.Q[0]
new_q = new_q - self.H[1, self.k]*self.Q[1]
self.H[self.k+1, self.k] = numpy.linalg.norm(new_q,2)
new_q = new_q/self.H[self.k+1, self.k]
self.Q.append(new_q)
print "k = %s, new_q: %s" %(self.k, new_q)
# Third Iteration (First Inner Iteration)
self.k += 1
new_q = numpy.dot(self.A, self.Q[self.k])
self.H[0,self.k] = numpy.dot(new_q, self.Q[0])
self.H[1,self.k] = numpy.dot(new_q, self.Q[1])
self.H[2,self.k] = numpy.dot(new_q, self.Q[2])
new_q = new_q - self.H[0, self.k]*self.Q[0]
new_q = new_q - self.H[1, self.k]*self.Q[1]
new_q = new_q - self.H[2, self.k]*self.Q[2]
self.H[self.k+1, self.k] = numpy.linalg.norm(new_q,2)
new_q = new_q/self.H[self.k+1, self.k]
self.Q.append(new_q)
print "k = %s, new_q: %s" %(self.k, new_q)
# Fourth Iteration (Second Inner Iteration)
self.k += 1
new_q = numpy.dot(self.A, self.Q[self.k])
self.H[0,self.k] = numpy.dot(new_q, self.Q[0])
self.H[1,self.k] = numpy.dot(new_q, self.Q[1])
self.H[2,self.k] = numpy.dot(new_q, self.Q[2])
self.H[3,self.k] = numpy.dot(new_q, self.Q[3])
new_q = new_q - self.H[0, self.k]*self.Q[0]
new_q = new_q - self.H[1, self.k]*self.Q[1]
new_q = new_q - self.H[2, self.k]*self.Q[2]
new_q = new_q - self.H[3, self.k]*self.Q[3]
self.H[self.k+1, self.k] = numpy.linalg.norm(new_q,2)
new_q = new_q/self.H[self.k+1, self.k]
self.Q.append(new_q)
print "k = %s, new_q: %s" %(self.k, new_q)
# QR portion
# Shifts
self.Hs = self.H[:self.k+1,:self.k+1]
self.values = numpy.linalg.eigvals(self.Hs)
self.values.sort()
self.shifts = self.values[:2]
print "\nEigenvalues: %s, \nShifts: %s" %(self.values, self.shifts)
# First Shift
self.Hs = self.Hs - self.shifts[0]*numpy.eye(n-1)
print "Hs:\n%s" %(self.Hs)
v,r = numpy.linalg.qr(self.Hs)
self.Hs = numpy.dot(r,v) + self.shifts[0]*numpy.eye(n-1)
print "v:\n%s\nr:\n%s\nHs:\n%s" %(v,r,self.Hs)
# Second Shift
self.Hs = self.Hs - self.shifts[1]*numpy.eye(n-1)
print "Hs:\n%s" %(self.Hs)
v,r = numpy.linalg.qr(self.Hs)
self.Hs = numpy.dot(r,v) + self.shifts[1]*numpy.eye(n-1)
print "v:\n%s\nr:\n%s\nHs:\n%s" %(v,r,self.Hs)
if __name__ == "__main__":
n = 5
A = Standard(n)
arn = Arnoldi(A)
q = numpy.ones(n)
arn.IRAM(q)
```
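The shifted QR sweep at the end of `IRAM` relies on RQ + mu*I being a similarity transform of the Hessenberg block, so each shift leaves the Ritz values unchanged. A hedged numerical check using the `Standard` matrix defined above and an illustrative shift:
```python
import numpy

H = Standard(4).astype(float)                 # eigenvalues 1, 2, 3, 4
mu = 2.5                                      # illustrative shift
q, r = numpy.linalg.qr(H - mu * numpy.eye(4))
H_new = numpy.dot(r, q) + mu * numpy.eye(4)
print(numpy.sort(numpy.linalg.eigvals(H)))      # [1. 2. 3. 4.]
print(numpy.sort(numpy.linalg.eigvals(H_new)))  # same spectrum after the sweep
```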
#### File: Notes/LinearSpace/LinearSampling.py
```python
import numpy
import sys
import math
import matplotlib.pyplot as plt
"""
This file is used to test how sampling of a linear function should happen.
"""
class LinearSampler(object):
"""
LinearSampler is used to investigate different ways of sampling from a
linear function.
"""
def __init__(self, xmin = 0, xmax = 1, slope = (2.0/3.0), intercept = 1.0):
self._xmin = xmin
self._xmax = xmax
self._m = slope
self._b = intercept
self._slope = 0.0 # slope for scoring
self._intercept = 0.0 # intercept for scoring
object.__init__(self)
self._Normalize()
def __repr__(self):
return "%6.4f*x + %6.4f" %(self._m, self._b)
def _Normalize(self):
"""
_Normalize will normalize the slope and the intercept such that the
absolute value of the area under the curve will be 1 between self._xmin
and self._xmax.
"""
self._a = self.area()
self._b /= self._a
self._m /= self._a
def NewSampler(self):
"""
NewSampler will return a new LinearSampler object with
slope = self._slope and intercept = self._intercept. This is the
result of scoring multiple samples. It sets self._slope and
self._intercept to zero.
"""
NS = LinearSampler(self._xmin, self._xmax, self._slope, self._intercept)
self._slope = 0.0
self._intercept = 0.0
return NS
def Sample(self, N):
"""
Sample is just syntax sugar for calling Forrest's sampling scheme
"""
return self.Forrest(N)
def Score(self, xS):
"""
Score will 'score' for the linear function using a Legendre Expansion
with n = 2.
"""
pass
def area(self):
"""
area will return the absolute value of the area under the curve between
self._xmin and self._xmax
"""
ymin = self.f(self._xmin)
ymax = self.f(self._xmax)
if ymin*ymax < 0:
a1 = (1.0/2.0)*((-self._b/self._m) - self._xmin)*(2.0*self._b +
self._m*((-self._b/self._m) + self._xmin) )
a2 = (1.0/2.0)*(self._xmax - (-self._b/self._m))*(2.0*self._b +
self._m*(self._xmax + (-self._b/self._m)) )
return abs(a1) + abs(a2)
else: return abs( (1.0/2.0)*(self._xmax - self._xmin)*(2.0*self._b +
self._m*(self._xmax + self._xmin) ) )
def f(self, x, m = None, b = None):
if not m: m = self._m
if not b: b = self._b
return m*x + b
def Plot(self, xmin = None, xmax = None):
"""
Plot will plot the linear function on the interval [xmin, xmax].
"""
if not xmin: xmin = self._xmin
if not xmax: xmax = self._xmax
bins = 100
x = numpy.linspace(xmin, xmax, bins)
y = numpy.zeros(bins)
for i in xrange(bins):
y[i] = self.f(x[i])
y /= sum(abs(y))
plt.plot(x,y, lw=2, label="y=%s" %self)
return (x,y)
def PlotWithSampled(self, xS, title, bins = 100):
"""
PlotWithSampled will plot the function along with the sampled points
"""
h, edges = PlotSampled(xS, title, bins)
y = numpy.zeros(len(edges))
for n in xrange(len(y)):
y[n] = self.f(edges[n])
# plt.plot(edges, y, label="y=%s" %self)
plt.plot(edges, y)
def Forrest(self, N):
"""
This function samples according to Forrest's notes. It returns an
array of sample points of length N.
"""
ymin = self.f(self._xmin)
ymax = self.f(self._xmax)
yRatio = ymin/(ymax+ymin)
# Sampling
diff = self._xmax-self._xmin
xS = numpy.zeros(N)
for n in xrange(N):
r = numpy.random.random()
if r < yRatio:
xS[n] = self._xmax - (self._xmax-self._xmin)* \
math.sqrt( numpy.random.random() )
else:
xS[n] = self._xmin + (self._xmax-self._xmin)* \
math.sqrt( numpy.random.random() )
return xS
def Holloway(self, N):
"""
Holloway will sample from the linear function according to Holloway's
derivation. It returns an array of sample points of length N.
"""
ymin = self.f(self._xmin)
ymax = self.f(self._xmax)
# These variables are created to avoid recalculating them many times
two_m = 2.0*self._m
inner = self._b + self._m*self._xmin
inner *= inner
xS = numpy.zeros(N)
if not self._m: # do simple linear sampling
for n in xrange(N):
r = numpy.random.random()
xS[n] = self._xmin + (self._xmax - self._xmin)*r
else:
for n in xrange(N):
r = numpy.random.random()
xS[n] = ( -self._b + math.sqrt(inner + two_m*r) )/self._m
return xS
def PlotSampled(x, title, bins = 100):
"""
PlotSampled will plot an array of sampled points by binning them into a
    histogram
"""
# h, edges, p = numpy.histogram(x, bins=bins, normed=True)
h, e, p = plt.hist(x, bins = bins, label=title, histtype='step',
normed=True)
return h, e
def SampleExamples(N):
LS = LinearSampler(xmin = 0, xmax = 3, slope = 1, intercept = 10.0)
plt.ylim(ymin=0)
xS = LS.Forrest(N)
LS.PlotWithSampled(xS, "F", bins=100)
xS = LS.Holloway(N)
LS.PlotWithSampled(xS, "H", bins=100)
if __name__ == "__main__":
if len(sys.argv) < 2:
N = 100000
else:
N = int(sys.argv[1])
plt.clf()
plt.title("N = %6.4G" %N)
SampleExamples(N)
plt.legend()
``` |
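The `Holloway` sampler above inverts the CDF of the normalized linear pdf analytically, which is where the square root of (b + m*xmin)**2 + 2*m*r comes from. A hedged numerical check (parameters are illustrative, and the `LinearSampler` class above is assumed to be in scope): the sample mean should match the analytic mean of x under the normalized p(x) = m*x + b on [0, 3].
```python
import numpy

ls = LinearSampler(xmin=0.0, xmax=3.0, slope=1.0, intercept=10.0)
samples = ls.Holloway(200000)
m, b = ls._m, ls._b                            # already normalized to unit area
analytic_mean = m * 3.0**3 / 3.0 + b * 3.0**2 / 2.0
print(abs(numpy.mean(samples) - analytic_mean) < 0.01)   # expected: True
```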
{
"source": "jlcordeiro/letterboxd_crawler",
"score": 3
} |
#### File: jlcordeiro/letterboxd_crawler/crawl.py
```python
import sys
import json
import time
import argparse
import threading
from requests import session
from lmatch import profile_crawler, parse, dao, film
class MovieFacade:
"""
Class to bridge movies between crawler and database layer.
Inserts into DB if a movie has never been seen before.
Caches the url -> id mapping.
"""
def __init__(self):
self.lock_ = threading.Lock()
self.hash_table_ = {}
def cacheOne(m):
self.hash_table_[m.url] = int(m.id)
print("Loaded ", m.url, " from db.")
self.db_ = dao.MovieDao()
self.db_.fetchAllMovies(lambda m: cacheOne(m))
def getId(self, movie_url):
with self.lock_:
# if the url hasn't been found yet, get it.
if movie_url not in self.hash_table_:
film = parse.parse_film(get_page('/film/' + movie_url))
self.db_.updateMovie(film)
self.hash_table_[movie_url] = int(film.id)
return self.hash_table_[movie_url]
movie_facade = MovieFacade()
s = session()
def get_page(path):
print(":: ", path)
base_url = "https://letterboxd.com/"
return s.get(base_url + path).text
def crawl(profiles, profile, first_page, parser):
result = []
page_next = first_page
while page_next is not None:
if profiles.keep_parsing is False:
return None
try:
page_text = get_page(page_next)
except:
return None
result.extend(parser(page_text))
page_next = parse.next_page(page_text)
return result
def crawl_profile(profiles, source_profile):
following = crawl(profiles, source_profile.username,
source_profile.username + "/following/page/1",
parse.following)
movies = crawl(profiles, source_profile.username,
source_profile.username + "/films/page/1",
parse.movies_watched)
if following and movies:
movie_id_to_rating = {}
for (url, rating) in movies:
try:
this_id = movie_facade.getId(url)
movie_id_to_rating[this_id] = rating
except:
print("Failed to get: {}.".format(movie_url))
profiles.on_parsed(source_profile.username, source_profile.depth,
following, movie_id_to_rating)
class LbThread (threading.Thread):
def __init__(self, profiles, thread_id, max_depth = None):
threading.Thread.__init__(self)
self.profiles = profiles
self.thread_id = thread_id
self.max_depth = max_depth
def run(self):
while self.profiles.keep_parsing is True:
profile = self.profiles.next_job()
if profile is None:
time.sleep(.5)
elif profile.depth > self.max_depth:
return
else:
crawl_profile(self.profiles, profile)
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument(
"first_profile",
metavar="LETTERBOXD_PROFILE",
help="username of the profile to use as the top of the crawl tree",
)
args = parser.parse_args(argv)
crawler = profile_crawler.ProfileCrawler()
dump_filename = 'dump.lmatch'
try:
with open(dump_filename, 'r') as infile:
crawler.loads(infile.read())
infile.close()
except FileNotFoundError:
crawler.enqueue(args.first_profile)
threads = []
for i in range(40):
thread = LbThread(crawler, i + 1, 3)
thread.start()
threads.append(thread)
try:
while True:
print("{} parsed. {} ongoing. {} queued.".format(len(crawler.parsed_),
len(crawler.ongoing_),
len(crawler.queued_)))
time.sleep(10)
# all threads stopped
if not any([t.is_alive() for t in threads]):
print("Ended successfully.")
break
except KeyboardInterrupt:
crawler.stop_parsing()
print("Waiting for ongoing threads")
for t in threads:
t.join()
print("Moving ongoing jobs back to queued")
crawler.cancel_ongoing_jobs()
print("Saving state to persistence layer")
with open(dump_filename, 'w') as outfile:
json.dump(crawler.dump(), outfile)
print ("Exiting Main Thread")
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: letterboxd_crawler/tests/test_film.py
```python
import unittest
from lmatch import film
class TestFilm(unittest.TestCase):
def setUp(self):
self.sample_film = film.Film(412, "path_that", "name_this", 5.21)
def test_ctor(self):
self.assertEqual(self.sample_film.id, 412)
self.assertEqual(self.sample_film.name, "name_this")
self.assertEqual(self.sample_film.url, "path_that")
self.assertEqual(self.sample_film.avg_rate, 5.21)
def test_hash(self):
self.assertEqual(hash(self.sample_film), 412)
def test_eq(self):
f1 = film.Film(412, "a", "b", 1.0)
f2 = film.Film(411, "a", "b", 1.0)
self.assertEqual(True, f1 == self.sample_film)
self.assertEqual(False, f2 == self.sample_film)
def test_repr(self):
self.assertEqual(repr(self.sample_film), "name_this")
``` |
{
"source": "jlcoto/elections_lambda_api",
"score": 3
} |
#### File: jlcoto/elections_lambda_api/response_adapter.py
```python
class Adapter:
def adapt_party_data(self, data):
items = data["Items"]
return [
{"party": item["party"]["S"], "votes": int(item["votes"]["N"])}
for item in items
]
``` |
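A hedged usage sketch (the payload below is illustrative of the DynamoDB-style items the adapter expects):
```python
data = {"Items": [{"party": {"S": "Party A"}, "votes": {"N": "1234"}},
                  {"party": {"S": "Party B"}, "votes": {"N": "987"}}]}
print(Adapter().adapt_party_data(data))
# [{'party': 'Party A', 'votes': 1234}, {'party': 'Party B', 'votes': 987}]
```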
{
"source": "JLCoulin/Cloudnet-TOSCA-toolbox",
"score": 2
} |
#### File: cloudnet/tosca/tosca_diagrams.py
```python
import logging # for logging purposes.
import cloudnet.tosca.configuration as configuration
import cloudnet.tosca.syntax as syntax
from cloudnet.tosca.processors import Generator
from cloudnet.tosca.utils import normalize_name, short_type_name
TOSCA_DIAGRAMS = "tosca_diagrams"
configuration.DEFAULT_CONFIGURATION[TOSCA_DIAGRAMS] = {
# Target directory where network diagrams are generated.
Generator.TARGET_DIRECTORY: "Results/ToscaDiagrams"
}
configuration.DEFAULT_CONFIGURATION["logging"]["loggers"][__name__] = {
"level": "INFO",
}
LOGGER = logging.getLogger(__name__)
class ToscaDiagramGenerator(Generator):
"""
This is the generator of TOSCA diagrams.
"""
def generator_configuration_id(self):
return TOSCA_DIAGRAMS
def get_node_name_id(self, node_name):
node_name_id = normalize_name(node_name)
if node_name_id == "node": # 'node' is a dot keyword
node_name_id = "node_node" # rename to 'node_node' to avoid dot error.
return node_name_id
def generation(self):
self.info("TOSCA diagram generation")
topology_template = syntax.get_topology_template(
self.tosca_service_template.get_yaml()
)
# Generate only for TOSCA topology template.
if topology_template is None:
return
# Generate the TOSCA diagram.
self.open_file(".dot")
self.generate("graph ToscaDiagram {")
self.generate(' rankdir="LR"')
target_capability_ids = {} # map<requirement_assignment_id,capability_id>
show_feature_capabilities = set() # set<node_name>
show_dependency_requirements = set() # set<node_name>
substitution_mappings = syntax.get_substitution_mappings(topology_template)
if substitution_mappings is not None:
for capability_name, capability_yaml in syntax.get_capabilities(
substitution_mappings
).items():
if capability_yaml:
if not isinstance(capability_yaml, list):
continue # TODO something when capability_yaml is not a list
capability_name_id = normalize_name(capability_name)
self.generate(
" ",
capability_name_id,
'[label="',
capability_name,
'" shape=cds style=filled fillcolor=orange]',
sep="",
)
self.generate(
" ",
capability_name_id,
" -- ",
normalize_name(capability_yaml[0]),
"_capability_",
normalize_name(capability_yaml[1]),
"[style=dotted]",
sep="",
)
if capability_yaml[1] == "feature":
show_feature_capabilities.add(capability_yaml[0])
substitution_mappings_node_type = syntax.get_node_type(
substitution_mappings
)
self.generate(" subgraph clusterSubstitutionMappings {")
self.generate(' label="', substitution_mappings_node_type, '"', sep="")
node_templates = syntax.get_node_templates(topology_template)
for node_name, node_yaml in node_templates.items():
node_type_requirements = syntax.get_requirements_dict(
self.type_system.merge_type(syntax.get_type(node_yaml))
)
for requirement in syntax.get_requirements_list(node_yaml):
for requirement_name, requirement_yaml in requirement.items():
# ACK for Alien4Cloud
requirement_name = syntax.get_type_requirement(
requirement_yaml, requirement_name
)
if requirement_yaml:
requirement_capability = syntax.get_requirement_capability(
node_type_requirements.get(requirement_name)
)
if requirement_capability is None:
self.error(
requirement_name + ": capability undefined",
requirement_name,
)
continue
requirement_node = syntax.get_requirement_node_template(
requirement_yaml
)
if requirement_node is None:
continue
capability_found = False
requirement_node_template = node_templates.get(requirement_node)
if requirement_node_template is None:
self.error(
requirement_node + " node template undefined",
requirement_node,
)
continue
for capability_name, capability_yaml in syntax.get_capabilities(
self.type_system.merge_node_type(
syntax.get_type(requirement_node_template)
)
).items():
if self.type_system.is_derived_from(
syntax.get_capability_type(capability_yaml),
requirement_capability,
):
capability_found = True
break
if capability_found:
target_capability_ids[id(requirement)] = (
self.get_node_name_id(requirement_node)
+ "_capability_"
+ normalize_name(capability_name)
)
if capability_name == "feature":
show_feature_capabilities.add(requirement_node)
if requirement_name == "dependency":
show_dependency_requirements.add(node_name)
else:
self.error(
' capability of type "'
+ requirement_capability
+ '" not found',
requirement_node_template,
)
for node_name, node_yaml in node_templates.items():
node_name_id = self.get_node_name_id(node_name)
node_type = syntax.get_type(node_yaml)
merged_node_type = self.type_system.merge_type(node_type)
self.generate(" subgraph cluster", node_name_id, " {", sep="")
self.generate(" color=white")
self.generate(' label=""')
self.generate(
" ",
node_name_id,
'[label="',
node_name,
": ",
short_type_name(node_type),
'|\l\l\l\l" shape=record style=rounded]',
sep="",
)
for capability_name, capability_yaml in syntax.get_capabilities(
merged_node_type
).items():
if (
capability_name != "feature"
or node_name in show_feature_capabilities
):
self.generate(
" ",
node_name_id,
"_capability_",
normalize_name(capability_name),
'[label="',
capability_name,
'" shape=cds style=filled fillcolor=orange]',
sep="",
)
self.generate(
" ",
node_name_id,
"_capability_",
normalize_name(capability_name),
" -- ",
node_name_id,
sep="",
)
for requirement_name, requirement_yaml in syntax.get_requirements_dict(
merged_node_type
).items():
if (
requirement_name != "dependency"
or node_name in show_dependency_requirements
):
self.generate(
" ",
node_name_id,
"_requirement_",
normalize_name(requirement_name),
'[label="',
requirement_name,
'" shape=cds style=filled fillcolor=turquoise]',
sep="",
)
self.generate(
" ",
node_name_id,
" -- ",
node_name_id,
"_requirement_",
normalize_name(requirement_name),
sep="",
)
self.generate(" }")
for node_name, node_yaml in node_templates.items():
node_name_id = self.get_node_name_id(node_name)
for requirement in syntax.get_requirements_list(node_yaml):
for requirement_name, requirement_yaml in requirement.items():
# ACK for Alien4Cloud
requirement_name = syntax.get_type_requirement(
requirement_yaml, requirement_name
)
capability_id = target_capability_ids.get(id(requirement))
if capability_id is not None:
self.generate(
" ",
node_name_id,
"_requirement_",
normalize_name(requirement_name),
" -- ",
capability_id,
"[style=dotted]",
sep="",
)
if substitution_mappings is not None:
self.generate(" }")
for (
requirement_name,
requirement_yaml,
) in syntax.get_substitution_mappings_requirements(
substitution_mappings
).items():
if requirement_yaml:
requirement_name_id = normalize_name(requirement_name)
self.generate(
" ",
requirement_name_id,
'[label="',
requirement_name,
'" shape=cds style=filled fillcolor=turquoise]',
sep="",
)
self.generate(
" ",
normalize_name(requirement_yaml[0]),
"_requirement_",
normalize_name(requirement_yaml[1]),
" -- ",
requirement_name_id,
"[style=dotted]",
sep="",
)
self.generate("}")
self.close_file()
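# Illustrative sketch (added for this write-up, not part of the original
# module): for a topology with a single "server: Compute" node template, the
# generated ".dot" file has roughly this shape (names invented for illustration):
#
#   graph ToscaDiagram {
#     rankdir="LR"
#     subgraph clusterserver {
#       color=white
#       label=""
#       server[label="server: Compute|\l\l\l\l" shape=record style=rounded]
#       server_capability_host[label="host" shape=cds style=filled fillcolor=orange]
#       server_capability_host -- server
#     }
#   }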
``` |
{
"source": "jlcouto/3d-interaction-evaluation",
"score": 3
} |
#### File: SteamVR instructions/triad_openvr-master/triad_openvr.py
```python
import time
import sys
import openvr
import math
import json
# Function to print out text but instead of starting a new line it will overwrite the existing line
def update_text(txt):
sys.stdout.write('\r'+txt)
sys.stdout.flush()
#Convert the standard 3x4 position/rotation matrix to an x,y,z location and the appropriate Euler angles (in degrees)
def convert_to_euler(pose_mat):
yaw = 180 / math.pi * math.atan2(pose_mat[1][0], pose_mat[0][0])
pitch = 180 / math.pi * math.atan2(pose_mat[2][0], pose_mat[0][0])
roll = 180 / math.pi * math.atan2(pose_mat[2][1], pose_mat[2][2])
x = pose_mat[0][3]
y = pose_mat[1][3]
z = pose_mat[2][3]
return [x,y,z,yaw,pitch,roll]
#Convert the standard 3x4 position/rotation matrix to an x,y,z location and the appropriate Quaternion
def convert_to_quaternion(pose_mat):
r_w = math.sqrt(max(0, 1 + pose_mat[0][0] + pose_mat[1][1] + pose_mat[2][2])) * 0.5;
r_x = math.sqrt(max(0, 1 + pose_mat[0][0] - pose_mat[1][1] - pose_mat[2][2])) * 0.5;
r_y = math.sqrt(max(0, 1 - pose_mat[0][0] + pose_mat[1][1] - pose_mat[2][2])) * 0.5;
r_z = math.sqrt(max(0, 1 - pose_mat[0][0] - pose_mat[1][1] + pose_mat[2][2])) * 0.5;
r_x *= 1 if ((r_x * (pose_mat[2][1] - pose_mat[1][2])) >= 0) else -1;
r_y *= 1 if ((r_y * (pose_mat[0][2] - pose_mat[2][0])) >= 0) else -1;
r_z *= 1 if ((r_z * (pose_mat[1][0] - pose_mat[0][1])) >= 0) else -1;
    ## Per issue #2, adding an abs() so that sqrt only results in real numbers
#r_w = math.sqrt(abs(1+pose_mat[0][0]+pose_mat[1][1]+pose_mat[2][2]))/2
#r_x = (pose_mat[2][1]-pose_mat[1][2])/(4*r_w)
#r_y = (pose_mat[0][2]-pose_mat[2][0])/(4*r_w)
#r_z = (pose_mat[1][0]-pose_mat[0][1])/(4*r_w)
x = pose_mat[0][3]
y = pose_mat[1][3]
z = pose_mat[2][3]
return [x,y,z,r_w,r_x,r_y,r_z]
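# Worked example (added for illustration, not in the original module): the
# identity 3x4 pose should map to the origin with no rotation under both helpers.
_IDENTITY_POSE = [[1.0, 0.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0, 0.0],
                  [0.0, 0.0, 1.0, 0.0]]
assert convert_to_euler(_IDENTITY_POSE) == [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
assert convert_to_quaternion(_IDENTITY_POSE) == [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]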
#Define a class to make it easy to append pose matricies and convert to both Euler and Quaternion for plotting
class pose_sample_buffer():
def __init__(self):
self.i = 0
self.index = []
self.time = []
self.x = []
self.y = []
self.z = []
self.yaw = []
self.pitch = []
self.roll = []
self.r_w = []
self.r_x = []
self.r_y = []
self.r_z = []
def append(self,pose_mat,t):
self.time.append(t)
self.x.append(pose_mat[0][3])
self.y.append(pose_mat[1][3])
self.z.append(pose_mat[2][3])
self.yaw.append(180 / math.pi * math.atan(pose_mat[1][0] /pose_mat[0][0]))
self.pitch.append(180 / math.pi * math.atan(-1 * pose_mat[2][0] / math.sqrt(pow(pose_mat[2][1], 2) + math.pow(pose_mat[2][2], 2))))
self.roll.append(180 / math.pi * math.atan(pose_mat[2][1] /pose_mat[2][2]))
r_w = math.sqrt(abs(1+pose_mat[0][0]+pose_mat[1][1]+pose_mat[2][2]))/2
self.r_w.append(r_w)
self.r_x.append((pose_mat[2][1]-pose_mat[1][2])/(4*r_w))
self.r_y.append((pose_mat[0][2]-pose_mat[2][0])/(4*r_w))
self.r_z.append((pose_mat[1][0]-pose_mat[0][1])/(4*r_w))
class vr_tracked_device():
def __init__(self,vr_obj,index,device_class):
self.device_class = device_class
self.index = index
self.vr = vr_obj
def get_serial(self):
return self.vr.getStringTrackedDeviceProperty(self.index,openvr.Prop_SerialNumber_String).decode('utf-8')
def get_model(self):
return self.vr.getStringTrackedDeviceProperty(self.index,openvr.Prop_ModelNumber_String).decode('utf-8')
def sample(self,num_samples,sample_rate):
interval = 1/sample_rate
rtn = pose_sample_buffer()
sample_start = time.time()
for i in range(num_samples):
start = time.time()
pose = self.vr.getDeviceToAbsoluteTrackingPose(openvr.TrackingUniverseStanding, 0,openvr.k_unMaxTrackedDeviceCount)
rtn.append(pose[self.index].mDeviceToAbsoluteTracking,time.time()-sample_start)
sleep_time = interval- (time.time()-start)
if sleep_time>0:
time.sleep(sleep_time)
return rtn
def get_pose_euler(self):
pose = self.vr.getDeviceToAbsoluteTrackingPose(openvr.TrackingUniverseStanding, 0,openvr.k_unMaxTrackedDeviceCount)
return convert_to_euler(pose[self.index].mDeviceToAbsoluteTracking)
def get_pose_quaternion(self):
pose = self.vr.getDeviceToAbsoluteTrackingPose(openvr.TrackingUniverseStanding, 0,openvr.k_unMaxTrackedDeviceCount)
return convert_to_quaternion(pose[self.index].mDeviceToAbsoluteTracking)
def controller_state_to_dict(self, pControllerState):
# This function is graciously borrowed from https://gist.github.com/awesomebytes/75daab3adb62b331f21ecf3a03b3ab46
# docs: https://github.com/ValveSoftware/openvr/wiki/IVRSystem::GetControllerState
d = {}
d['unPacketNum'] = pControllerState.unPacketNum
# on trigger .y is always 0.0 says the docs
d['trigger'] = pControllerState.rAxis[1].x
# 0.0 on trigger is fully released
# -1.0 to 1.0 on joystick and trackpads
d['trackpad_x'] = pControllerState.rAxis[0].x
d['trackpad_y'] = pControllerState.rAxis[0].y
# These are published and always 0.0
# for i in range(2, 5):
# d['unknowns_' + str(i) + '_x'] = pControllerState.rAxis[i].x
# d['unknowns_' + str(i) + '_y'] = pControllerState.rAxis[i].y
d['ulButtonPressed'] = pControllerState.ulButtonPressed
d['ulButtonTouched'] = pControllerState.ulButtonTouched
# To make easier to understand what is going on
# Second bit marks menu button
d['menu_button'] = bool(pControllerState.ulButtonPressed >> 1 & 1)
# 32 bit marks trackpad
d['trackpad_pressed'] = bool(pControllerState.ulButtonPressed >> 32 & 1)
d['trackpad_touched'] = bool(pControllerState.ulButtonTouched >> 32 & 1)
# third bit marks grip button
d['grip_button'] = bool(pControllerState.ulButtonPressed >> 2 & 1)
# System button can't be read, if you press it
# the controllers stop reporting
return d
def get_controller_inputs(self):
result, state = self.vr.getControllerState(self.index)
return self.controller_state_to_dict(state)
class vr_tracking_reference(vr_tracked_device):
def get_mode(self):
return self.vr.getStringTrackedDeviceProperty(self.index,openvr.Prop_ModeLabel_String).decode('utf-8').upper()
def sample(self,num_samples,sample_rate):
print("Warning: Tracking References do not move, sample isn't much use...")
class triad_openvr():
def __init__(self):
        # Initialize the OpenVR system for this application
self.vr = openvr.init(openvr.VRApplication_Other)
# Initializing object to hold indexes for various tracked objects
self.object_names = {"Tracking Reference":[],"HMD":[],"Controller":[],"Tracker":[]}
self.devices = {}
poses = self.vr.getDeviceToAbsoluteTrackingPose(openvr.TrackingUniverseStanding, 0,
openvr.k_unMaxTrackedDeviceCount)
# Loading config file
self.config = None
try:
with open('config.json') as json_data:
self.config = json.load(json_data)
except EnvironmentError: # parent of IOError, OSError *and* WindowsError where available
print('config.json not found, arbitrary id will be chosen.')
if self.config != None:
# Iterate through the pose list to find the active devices and determine their type
for i in range(openvr.k_unMaxTrackedDeviceCount):
if poses[i].bPoseIsValid:
device_serial = self.vr.getStringTrackedDeviceProperty(i,openvr.Prop_SerialNumber_String).decode('utf-8')
for device in self.config['devices']:
if device_serial == device['serial']:
device_name = device['name']
self.object_names[device['type']].append(device_name)
self.devices[device_name] = vr_tracked_device(self.vr,i,device['type'])
else:
# Iterate through the pose list to find the active devices and determine their type
for i in range(openvr.k_unMaxTrackedDeviceCount):
if poses[i].bPoseIsValid:
device_class = self.vr.getTrackedDeviceClass(i)
if (device_class == openvr.TrackedDeviceClass_Controller):
device_name = "controller_"+str(len(self.object_names["Controller"])+1)
self.object_names["Controller"].append(device_name)
self.devices[device_name] = vr_tracked_device(self.vr,i,"Controller")
elif (device_class == openvr.TrackedDeviceClass_HMD):
device_name = "hmd_"+str(len(self.object_names["HMD"])+1)
self.object_names["HMD"].append(device_name)
self.devices[device_name] = vr_tracked_device(self.vr,i,"HMD")
elif (device_class == openvr.TrackedDeviceClass_GenericTracker):
device_name = "tracker_"+str(len(self.object_names["Tracker"])+1)
self.object_names["Tracker"].append(device_name)
self.devices[device_name] = vr_tracked_device(self.vr,i,"Tracker")
elif (device_class == openvr.TrackedDeviceClass_TrackingReference):
device_name = "tracking_reference_"+str(len(self.object_names["Tracking Reference"])+1)
self.object_names["Tracking Reference"].append(device_name)
self.devices[device_name] = vr_tracking_reference(self.vr,i,"Tracking Reference")
def rename_device(self,old_device_name,new_device_name):
self.devices[new_device_name] = self.devices.pop(old_device_name)
for i in range(len(self.object_names[self.devices[new_device_name].device_class])):
if self.object_names[self.devices[new_device_name].device_class][i] == old_device_name:
self.object_names[self.devices[new_device_name].device_class][i] = new_device_name
def print_discovered_objects(self):
for device_type in self.object_names:
plural = device_type
if len(self.object_names[device_type])!=1:
plural+="s"
print("Found "+str(len(self.object_names[device_type]))+" "+plural)
for device in self.object_names[device_type]:
if device_type == "Tracking Reference":
print(" "+device+" ("+self.devices[device].get_serial()+
", Mode "+self.devices[device].get_model()+
", "+self.devices[device].get_model()+
")")
else:
print(" "+device+" ("+self.devices[device].get_serial()+
", "+self.devices[device].get_model()+")")
``` |
{
"source": "jlcrodrigues/Hangman",
"score": 3
} |
#### File: Hangman/src/bar.py
```python
import pygame
from config import *
class Bar():
def __init__(self, coords, length, pos):
'''
        @coords - Bar's position. [x,y]
        @length - Bar's length.
        @pos - The initial relative position of the slider button on the bar (0.0 to 1.0).
'''
self.length = length
self.pos = pos
self.coords = coords
self.pointing = False
self.held = False
self.hitbox = [self.coords[0], self.coords[0] + length, self.coords[1], self.coords[1] + 40]
self.volume = pos
def render(self, win, dark_theme):
        '''Renders the bar on the screen.
        @win - The game window.
        @dark_theme - True if dark theme is on.'''
if dark_theme:
bar = pygame.image.load("../assets/images/bar.png")
if not self.pointing: bar_button = pygame.image.load("../assets/images/bar_button.png")
else: bar_button = pygame.image.load("../assets/images/bar_button_point.png")
else:
bar = pygame.image.load("../assets/images/bar_light.png")
if not self.pointing: bar_button = pygame.image.load("../assets/images/bar_button_light.png")
else: bar_button = pygame.image.load("../assets/images/bar_button_point.png")
win.blit(bar, self.coords)
win.blit(bar_button, (self.coords[0] + int(self.pos * self.length), self.coords[1]))
def allign_right(self, distance, width):
        '''Aligns the bar to the right.
@distance - Distance to the right border.
@width - The window's width.'''
self.coords[0] = width - distance - self.length
self.hitbox[0] = self.coords[0]
self.hitbox[1] = self.hitbox[0] + self.length
def set_volume(self, volume):
        '''Sets the bar's sound volume.
@volume - The new volume.
'''
self.volume = volume
def drag(self, mouse_pos, mouse_down):
'''Holds the logic for when the button is dragged.
        @mouse_pos - Mouse's coordinates.
        @mouse_down - True if the mouse is being pressed.
'''
pygame.mixer.init()
button_point = pygame.mixer.Sound("../assets/sounds/button_point.mp3")
button_point.set_volume(self.volume)
button_click = pygame.mixer.Sound("../assets/sounds/button_click.mp3")
button_click.set_volume(self.volume)
if mouse_pos[0] > self.hitbox[0] and mouse_pos[0] < self.hitbox[1]: #clicked in the button
if mouse_pos[1] > self.hitbox[2] and mouse_pos[1] < self.hitbox[3]:
if not self.pointing and not self.held: pygame.mixer.Sound.play(button_point)
self.pointing = True
else: self.pointing = False
else: self.pointing = False
if self.pointing and mouse_down:
if not self.held: pygame.mixer.Sound.play(button_click)
self.held = True
self.held = mouse_down
if self.held and self.pointing:
if mouse_pos[0] <= self.coords[0]: self.pos = 0.0
elif mouse_pos[0] >= self.coords[0] + self.length: self.pos = 1.0
else: self.pos = (mouse_pos[0] - self.coords[0]) / self.length
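# Minimal sketch of the drag-to-position mapping used above (added for
# illustration; importing this module still needs pygame, but the mapping
# itself is plain arithmetic): a mouse x inside the bar maps linearly onto
# a 0.0-1.0 position, clamped at both ends.
if __name__ == '__main__':
    coords, length = [400, 500], 150
    for mouse_x in (380, 400, 475, 550, 600):
        pos = min(1.0, max(0.0, (mouse_x - coords[0]) / length))
        print(mouse_x, round(pos, 2))   # 0.0, 0.0, 0.5, 1.0, 1.0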
```
#### File: Hangman/src/game.py
```python
import pygame
from word import Word
from hangman import Hangman
from config import *
from button import Button
from bar import Bar
class Game:
def __init__(self):
#Current displaying window booleans
self.menu = True
self.settings = False
self.playing = False
self.help = False
self.pre_play = False
#Game logic
self.player_text = "" # current input from player
self.used_letters = []
self.over = False
self.streak = 0
with open("../assets/stats/balance.txt", "r") as input_file:
self.balance = int(input_file.readlines()[0])
#Window and display
self.width = SCREEN_WIDTH
self.height = SCREEN_HEIGHT
self.volume_sfx = 0.5
self.volume_music = 0.5
self.dark_theme = True
self.language = "english"
self.key_words = EN_DIC # holds all key words depending on the active language
self.theme = "all"
self.themes = ["all", "animals", "capitals", "countries", "hardw"]
self.difficulty = "normal"
self.difficulties = ["easy", "normal", "hard"]
self.images = {}
#Buttons and bars
self.play_button = Button("play", [200, 400], LETTER_SIZE)
self.start_button = Button("start", [200, 500], LETTER_SIZE)
self.return_button = Button(" <", [0, 0], LETTER_SIZE)
self.restart_button = Button("../assets/images/restart.png", [50, 5], LETTER_SIZE, True)
self.settings_button = Button("settings", [200, 450], LETTER_SIZE)
self.help_button = Button("help", [200, 500], LETTER_SIZE)
self.pt_button = Button("PT", [self.width - 100, 200], LETTER_SIZE2)
self.en_button = Button("EN", [self.width - 200, 200], LETTER_SIZE2)
self.theme_button = Button("ON", [self.width - 100, 300], LETTER_SIZE2)
self.right_button1 = Button(">", [0, 0], LETTER_SIZE)
self.right_button2 = Button(">", [0, 0], LETTER_SIZE)
self.left_button1 = Button("<", [0, 0], LETTER_SIZE)
self.left_button2 = Button("<", [0, 0], LETTER_SIZE)
self.aid_button = Button("?", [0, 0], LETTER_SIZE)
self.sfx_bar = Bar([400, 500], 150, 0.5)
self.music_bar = Bar([400, 400], 150, 0.5)
self.buttons = [self.play_button, self.return_button, self.restart_button,
self.settings_button, self.help_button, self.pt_button,
self.en_button, self.start_button, self.right_button1,
self.right_button2, self.left_button1, self.left_button2,
self.aid_button]
self.en_button.press()
#Sounds
pygame.mixer.init()
self.music_playing = False
self.winning_sound = pygame.mixer.Sound("../assets/sounds/win.mp3")
self.lose_sound = pygame.mixer.Sound("../assets/sounds/lose.mp3")
pygame.mixer.music.load("../assets/sounds/menu.mp3")
self.game_over_played = False
def start(self):
'''Starts the game.'''
self.word = Word(self.theme, self.language, self.themes)
self.menu = False
self.playing = True
self.hangman = Hangman()
self.over = False
self.game_over_played = False
if self.difficulty == "easy":
for _ in range(int(self.word.length / 5)):
self.word.solve_letter()
#print(self.word.letters) #print the solution
def update_buttons(self):
'''Changes all the buttons display to the current language (if needed).'''
self.buttons = [self.play_button, self.return_button, self.restart_button,
self.settings_button, self.help_button, self.pt_button,
self.en_button, self.start_button, self.right_button1,
self.right_button2, self.left_button1, self.left_button2,
self.aid_button]
for i in self.buttons:
i.set_text(self.key_words[i.text], self.dark_theme)
i.set_volume(self.volume_sfx)
def write_stats(self):
'''Updates the current balance to memory.'''
with open("../assets/stats/balance.txt", "w") as input_file:
input_file.write(str(self.balance))
def get_images(self):
'''Updates the images according to the selected theme.'''
if self.dark_theme:
self.images["menu"] = pygame.image.load(
"../assets/images/menu.png")
self.images["1"] = pygame.image.load(
"../assets/images/hangman1.png")
self.images["2"] = pygame.image.load(
"../assets/images/hangman2.png")
self.images["3"] = pygame.image.load(
"../assets/images/hangman3.png")
self.images["4"] = pygame.image.load(
"../assets/images/hangman4.png")
self.images["5"] = pygame.image.load(
"../assets/images/hangman5.png")
self.images["6"] = pygame.image.load(
"../assets/images/hangman6.png")
self.images["7"] = pygame.image.load(
"../assets/images/hangman7.png")
self.images["8"] = pygame.image.load(
"../assets/images/hangman8.png")
self.images["help_english"] = pygame.image.load(
"../assets/images/help_english.png")
self.images["help_portuguese"] = pygame.image.load(
"../assets/images/help_portuguese.png")
else:
self.images["menu"] = pygame.image.load(
"../assets/images/menu_light.png")
self.images["1"] = pygame.image.load(
"../assets/images/hangman1_light.png")
self.images["2"] = pygame.image.load(
"../assets/images/hangman2_light.png")
self.images["3"] = pygame.image.load(
"../assets/images/hangman3_light.png")
self.images["4"] = pygame.image.load(
"../assets/images/hangman4_light.png")
self.images["5"] = pygame.image.load(
"../assets/images/hangman5_light.png")
self.images["6"] = pygame.image.load(
"../assets/images/hangman6_light.png")
self.images["7"] = pygame.image.load(
"../assets/images/hangman7_light.png")
self.images["8"] = pygame.image.load(
"../assets/images/hangman8_light.png")
self.images["help_english"] = pygame.image.load(
"../assets/images/help_english_light.png")
self.images["help_portuguese"] = pygame.image.load(
"../assets/images/help_portuguese_light.png")
def render_menu(self, win):
'''Renders the menu tab.
@win - The game window.
'''
menu_img = self.images["menu"]
win.blit(menu_img, (self.width / 2 - 300, self.height / 2 - 300))
# menu play button
self.play_button.center(self.width)
self.play_button.set_y(self.height / 2 + 100)
self.play_button.render(win)
# menu settings button
self.settings_button.center(self.width)
self.settings_button.set_y(self.height / 2 + 150)
self.settings_button.render(win)
# menu help button
self.help_button.center(self.width)
self.help_button.set_y(self.height / 2 + 200)
self.help_button.render(win)
def render_help(self, win):
'''Renders the help tab.
@win - The game window.
'''
font = pygame.font.Font(FONT_NAME, LETTER_SIZE)
image = self.images["help_%s" % (str(self.language))]
win.blit(image, (self.width / 2 - 300, self.height / 2 - 400))
self.return_button.render(win)
# Render the tab title
text = font.render(self.key_words["help"], True, WHITE)
win.blit(text, (self.width / 2 - text.get_width() / 2, 0))
def render_settings(self, win):
'''Renders the settings tab.
@win - The game window.
'''
font = pygame.font.Font(FONT_NAME, LETTER_SIZE)
font2 = pygame.font.Font(FONT_NAME, LETTER_SIZE2)
# Render the tab title
if self.dark_theme: text = font.render(self.key_words["settings"], True, WHITE)
else: text = font.render(self.key_words["settings"], True, BLACK)
win.blit(text, (self.width / 2 - text.get_width() / 2, 0))
# Render the language options
if self.dark_theme: text = font2.render(self.key_words["language"], True, WHITE)
else: text = font2.render(self.key_words["language"], True, BLACK)
win.blit(text, (50, 200))
self.pt_button.allign_right(50, self.width)
self.pt_button.render(win)
self.en_button.set_x(self.pt_button.coords[0] - 100)
self.en_button.render(win)
if self.dark_theme: text = font2.render(self.key_words["dark mode"], True, WHITE)
else: text = font2.render(self.key_words["dark mode"], True, BLACK)
win.blit(text, (50, 300))
self.theme_button.allign_right(50, self.width)
self.theme_button.render(win)
if self.dark_theme: text = font2.render(self.key_words["music"], True, WHITE)
else: text = font2.render(self.key_words["music"], True, BLACK)
win.blit(text, (50, 400))
self.music_bar.allign_right(50, self.width)
self.music_bar.render(win, self.dark_theme)
if self.dark_theme: text = font2.render(self.key_words["sfx"], True, WHITE)
else: text = font2.render(self.key_words["sfx"], True, BLACK)
win.blit(text, (50, 500))
self.sfx_bar.allign_right(50, self.width)
self.sfx_bar.render(win, self.dark_theme)
def render_playing(self, win):
'''Renders the playing tab.
@win - The game window.
'''
font = pygame.font.Font(FONT_NAME, LETTER_SIZE)
#####Render the alphabet in the bottom#####
pos_x = self.width / 2 - LETTER_SIZE * (len(ALPHABET) / 4) + 20
#the extra 20 are added because each letter only fills half of the
#available size which would leave space to the right
pos_y = self.height - 100
for i in ALPHABET:
if self.dark_theme: text = font.render(i, 1, WHITE)
else: text = font.render(i, 1, BLACK)
if i in self.word.used_letters or i in self.word.filled_letters:
if self.dark_theme: text = font.render(i, 1, BLACK)
else: text = font.render(i, 1, WHITE)
win.blit(text, (pos_x - (text.get_width() / 2), pos_y))
pos_x += LETTER_SIZE
if i == 'm':
pos_y += LETTER_SIZE + 1
pos_x = self.width / 2 - \
LETTER_SIZE * (len(ALPHABET) / 4) + 20
######Draw the hangman#####
self.hangman.draw(win, self.images, self.width)
#####Draw the playing word#####
self.word.draw(win, self.dark_theme, self.width, self.height)
#####Display game over messages#####
if self.hangman.state == 8:
if self.dark_theme: text = font.render(self.key_words["lost"], True, WHITE)
else: text = font.render(self.key_words["lost"], True, BLACK)
win.blit(text, (self.width / 2 - text.get_width() / 2, self.height / 2 + 60))
elif not '_' in self.word.filled_letters:
if self.dark_theme: text = font.render(self.key_words["won"], 1, WHITE)
else: text = font.render(self.key_words["won"], 1, BLACK)
win.blit(text, (self.width / 2 - text.get_width() / 2, self.height / 2 + 60))
#####Render buttons#####
self.restart_button.render(win)
self.aid_button.allign_right(20, self.width)
self.aid_button.set_y(self.height / 2 - 160)
self.aid_button.render(win)
#####Render the streak#####
if self.streak > 0:
if self.dark_theme: text = font.render(str(self.streak), True, WHITE)
else: text = font.render(str(self.streak), True, BLACK)
win.blit(text, (20, self.height / 2 - 160))
#####Render the balance#####
if self.dark_theme: text = font.render(str(self.balance), True, WHITE)
else: text = font.render(str(self.balance), True, BLACK)
win.blit(text, (self.width - len(str(self.balance)) * 20 - 20, 0))
def render_pre_play(self, win):
'''Renders the pre_play tab.
@win - The game window.
'''
font = pygame.font.Font(FONT_NAME, LETTER_SIZE)
font2 = pygame.font.Font(FONT_NAME, LETTER_SIZE2)
# Render the theme options
if self.dark_theme: text = font.render(self.key_words["theme"], True, WHITE)
else: text = font.render(self.key_words["theme"], True, BLACK)
win.blit(text, (self.width / 2 - text.get_width() / 2, self.height / 2 - 200))
if self.dark_theme: text = font2.render(self.key_words[self.theme], True, WHITE)
else: text = font2.render(self.key_words[self.theme], True, BLACK)
win.blit(text, (self.width / 2 - text.get_width() / 2, self.height / 2 - 140))
self.right_button1.set_x(self.width / 2 + 150)
self.right_button1.set_y(self.height / 2 - 140)
self.right_button1.render(win)
self.left_button1.set_x(self.width / 2 - 150)
self.left_button1.set_y(self.height / 2 - 140)
self.left_button1.render(win)
# Render the difficulty options
if self.dark_theme: text = font.render(self.key_words["difficulty"], True, WHITE)
else: text = font.render(self.key_words["difficulty"], True, BLACK)
win.blit(text, (self.width / 2 - text.get_width() / 2, self.height / 2 - 40))
if self.dark_theme: text = font2.render(self.key_words[self.difficulty], True, WHITE)
else: text = font2.render(self.key_words[self.difficulty], True, BLACK)
win.blit(text, (self.width / 2 - text.get_width() / 2, self.height / 2 + 20))
self.right_button2.set_x(self.width / 2 + 150)
self.right_button2.set_y(self.height / 2 + 20)
self.right_button2.render(win)
self.left_button2.set_x(self.width / 2 - 150)
self.left_button2.set_y(self.height / 2 + 20)
self.left_button2.render(win)
self.start_button.center(self.width)
self.start_button.set_y(self.height - 100)
self.start_button.render(win)
def render(self, win):
'''Renders the current tab.
@win - The game window.
'''
self.width = pygame.display.get_surface().get_width()
self.height = pygame.display.get_surface().get_height()
self.update_buttons()
self.get_images()
win.fill(BLACK)
if not self.dark_theme: win.fill(WHITE)
if self.menu:
self.render_menu(win)
elif self.help:
self.render_help(win)
else:
win.fill(BLACK)
if not self.dark_theme: win.fill(WHITE)
self.return_button.render(win)
if self.settings:
self.render_settings(win)
elif self.playing:
self.render_playing(win)
elif self.pre_play:
self.render_pre_play(win)
pygame.display.update()
    def handle_events(self):
'''Handles key and mouse presses.
@return - False if the exit button was pressed else True.
'''
mouse = pygame.mouse.get_pos()
# print(mouse)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.write_stats()
return False
elif event.type == pygame.VIDEORESIZE:
width, height = event.size
if width < 600:
width = 600
if height < 600:
height = 600
win = pygame.display.set_mode((width,height), pygame.RESIZABLE)
click = pygame.mouse.get_pressed()
if self.menu:
self.play_button.click(mouse, click[0])
self.settings_button.click(mouse, click[0])
self.help_button.click(mouse, click[0])
else:
self.return_button.click(mouse, click[0])
if self.settings:
self.pt_button.click(mouse, click[0])
self.en_button.click(mouse, click[0])
self.theme_button.click(mouse, click[0])
self.sfx_bar.drag(mouse, click[0])
self.music_bar.drag(mouse, click[0])
if self.playing:
self.restart_button.click(mouse, click[0])
self.aid_button.click(mouse, click[0])
if self.pre_play:
self.start_button.click(mouse, click[0])
self.right_button1.click(mouse, click[0])
self.right_button2.click(mouse, click[0])
self.left_button1.click(mouse, click[0])
self.left_button2.click(mouse, click[0])
if event.type == pygame.KEYDOWN:
if self.playing:
if len(self.player_text) < 1:
self.player_text = pygame.key.name(event.key).lower()
if self.player_text not in ALPHABET:
self.player_text = ""
if event.key == pygame.K_RETURN:
if not self.over: self.streak = 0
self.start()
return True
def run_logic(self):
'''Executes the game logic.'''
if self.menu:
# start the game button
if self.play_button.clicked:
self.play_button.clicked = False
self.menu = False
self.pre_play = True
# settings button
if self.settings_button.clicked:
self.settings_button.clicked = False
self.menu = False
self.settings = True
if self.help_button.clicked:
self.help_button.clicked = False
self.menu = False
self.help = True
else:
# return to menu button
if self.return_button.clicked:
if self.playing: pygame.mixer.music.stop()
self.return_button.clicked = False
self.menu = True
self.playing = False
self.settings = False
self.help = False
if self.settings:
if self.language == "english":
self.en_button.press()
else:
self.pt_button.press()
if self.pt_button.clicked:
self.language = "portuguese"
self.key_words = PT_DIC
if self.en_button.clicked:
self.language = "english"
self.key_words = EN_DIC
if self.theme_button.check():
if self.dark_theme:
self.theme_button.set_text("OFF", self.dark_theme)
self.dark_theme = False
else:
self.theme_button.set_text("ON", self.dark_theme)
self.dark_theme = True
self.volume_sfx = self.sfx_bar.pos
self.sfx_bar.set_volume(self.sfx_bar.pos)
self.volume_music = self.music_bar.pos
if self.playing:
# restart button
if self.restart_button.check():
if not self.over: self.streak = 0
self.start()
if self.aid_button.check():
if self.balance >= 5:
self.balance -= 5
self.word.solve_letter()
# handling the guessing
if len(self.player_text) == 1 and not self.over:
if not self.word.fill(self.player_text):
self.used_letters.append(self.player_text)
if self.hangman.state < 8:
self.hangman.state += 1
if self.difficulty == "hard": self.hangman.state += 1
self.player_text = ""
if self.hangman.state == 8 :
self.over = True
self.streak = 0
self.word.solve()
if not '_' in self.word.filled_letters:
self.over = True
if not self.game_over_played:
self.streak += 1
self.balance += int(self.streak * 0.5)
if self.pre_play:
if self.start_button.check():
self.pre_play = False
pygame.mixer.music.stop()
self.start()
if self.right_button1.check():
self.theme = self.themes[min(len(self.themes) - 1, self.themes.index(self.theme) + 1)]
if self.left_button1.check():
self.theme = self.themes[max(0, self.themes.index(self.theme) - 1)]
if self.right_button2.check():
self.difficulty = self.difficulties[min(len(self.difficulties) - 1, self.difficulties.index(self.difficulty) + 1)]
if self.left_button2.check():
self.difficulty = self.difficulties[max(0, self.difficulties.index(self.difficulty) - 1)]
def play_sounds(self):
'''Plays the game's sounds.'''
pygame.mixer.music.set_volume(self.volume_music)
if self.playing:
if not pygame.mixer.music.get_busy():
pygame.mixer.music.load("../assets/sounds/play.mp3")
pygame.mixer.music.play(-1)
if not self.game_over_played:
                if self.hangman.state == 8:
                    # state 8 means the hangman drawing is complete, i.e. the round was lost
                    self.lose_sound.set_volume(self.volume_sfx)
                    pygame.mixer.Sound.play(self.lose_sound)
                    self.game_over_played = True
                elif not '_' in self.word.filled_letters:
                    # no blanks left, i.e. the word was guessed and the round was won
                    self.winning_sound.set_volume(self.volume_sfx)
                    pygame.mixer.Sound.play(self.winning_sound)
                    self.game_over_played = True
else:
if not pygame.mixer.music.get_busy():
pygame.mixer.music.load("../assets/sounds/menu.mp3")
pygame.mixer.music.play(-1)
```
#### File: Hangman/src/main.py
```python
from game import Game
from config import *
import pygame
def main():
pygame.init()
win = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), pygame.RESIZABLE)
pygame.display.set_caption("Hangman")
icon = pygame.image.load("../assets/images/icon.png")
pygame.display.set_icon(icon)
clock = pygame.time.Clock()
game = Game()
run = True
while run:
clock.tick(60)
# Render everything to the screen
game.render(win)
# Handle key presses and check if the window was closed
        run = game.handle_events()
# Execute all the game logic
game.run_logic()
#Play the sounds
game.play_sounds()
pygame.quit()
if __name__ == '__main__':
main()
``` |
{
"source": "jld23/mediapipe",
"score": 3
} |
#### File: mediapipe/tests/cv_cuda.py
```python
import timeit
from pathlib import Path
import cv2
count = cv2.cuda.getCudaEnabledDeviceCount()
print("GPUs found:{}".format(count))
video = Path('AV9_14.MOV')
def run():
video_capture = cv2.VideoCapture(str(video))
last_ret = None
last_frame = None
while video_capture.get(cv2.CAP_PROP_POS_FRAMES) < video_capture.get(cv2.CAP_PROP_FRAME_COUNT):
ret, frame = video_capture.read()
if last_ret is None or last_ret != ret:
last_ret = ret
print("Return code:{}".format(ret))
if last_frame is None or last_frame.shape != frame.shape:
last_frame = frame
print("Frame Shape:{}".format(frame.shape))
timer = timeit.timeit('run()', number=10, setup="from __main__ import run")
print("time to read video 10x :{}".format(timer))
``` |
{
"source": "jld23/python-dlpy",
"score": 2
} |
#### File: dlpy/model_conversion/onnx_graph.py
```python
import onnx
from onnx import helper, numpy_helper, mapping
from onnx import NodeProto
def _convert_onnx_attribute_proto(attr_proto):
'''
Convert ONNX AttributeProto into Python object
'''
if attr_proto.HasField('f'):
return attr_proto.f
elif attr_proto.HasField('i'):
return attr_proto.i
elif attr_proto.HasField('s'):
return str(attr_proto.s, 'utf-8')
elif attr_proto.HasField('t'):
return attr_proto.t # this is a proto!
elif attr_proto.floats:
return list(attr_proto.floats)
elif attr_proto.ints:
return list(attr_proto.ints)
elif attr_proto.strings:
str_list = list(attr_proto.strings)
str_list = list(map(lambda x: str(x, 'utf-8'), str_list))
return str_list
else:
raise ValueError("Unsupported ONNX attribute: {}".format(attr_proto))
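# Quick illustration (added for this write-up, not in the original module):
# helper.make_attribute builds an AttributeProto, and this function unwraps it
# back into a plain Python value, e.g. an INTS attribute becomes a list of ints.
_example_attr = helper.make_attribute('kernel_shape', [3, 3])
assert _convert_onnx_attribute_proto(_example_attr) == [3, 3]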
class OnnxNode(object):
'''
Reimplementation of NodeProto from ONNX, but in a form
more convenient to work with from Python.
'''
def __init__(self, node):
'''
Create OnnxNode from NodeProto
Parameters
----------
node : NodeProto
Returns
-------
:class:`OnnxNode` object
'''
self.name = str(node.name)
self.op_type = str(node.op_type)
self.domain = str(node.domain)
self.attrs = dict([(attr.name,
_convert_onnx_attribute_proto(attr))
for attr in node.attribute])
self.input = list(node.input)
self.output = list(node.output)
self.node_proto = node
self.parents = []
self.children = []
self.tensors = {}
def add_child(self, child):
'''
Add child node
Parameters
----------
child : :class:`OnnxNode` object
'''
if not isinstance(child, (tuple, list)):
child = [child]
child = list(filter(lambda x: x not in self.children, child))
self.children.extend(child)
for c in child:
if self not in c.parents:
c.add_parent(self)
def add_parent(self, parent):
'''
Add OnnxNode parent
Parameters
----------
parent : :class:`OnnxNode` object
'''
if not isinstance(parent, (tuple, list)):
parent = [parent]
parent = list(filter(lambda x: x not in self.parents, parent))
self.parents.extend(parent)
for p in parent:
if self not in p.children:
p.add_child(self)
class OnnxGraph(object):
'''
Helper class for holding ONNX graph
Parameters
----------
graph_def : GraphProto
Returns
-------
:class:`OnnxGraph` object
'''
def __init__(self, graph_def):
self.name = graph_def.name
self.node = [OnnxNode(n) for n in graph_def.node]
self.value_info = list(graph_def.value_info)
self.input = list(graph_def.input)
self.output = list(graph_def.output)
self.initializer = list(graph_def.initializer)
self.tensor_dict = dict([(init.name, numpy_helper.to_array(init))
for init in graph_def.initializer])
self.uninitialized = [i for i in graph_def.input
if i.name not in self.tensor_dict]
def get_node(self, name):
'''
Get node by name
Parameters
----------
name : str
Name of the node.
Returns
-------
:class:`OnnxNode` object if node is in graph, otherwise None
'''
for n in self.node:
if n.name == name:
return n
return None
def get_node_index(self, name):
'''
Get index of node
Parameters
----------
name : str
Name of the node.
Returns
-------
int if node is in graph, otherwise None
'''
for idx, n in enumerate(self.node):
if n.name == name:
return idx
return None
def remove_node(self, name):
'''
Remove node from graph
Parameters
----------
name : str
Name of node to be removed.
'''
self.node = list(filter(lambda x: x.name != name, self.node))
self.connect_nodes()
def replace_node(self, name, node):
'''
Replace node in graph
Parameters
----------
name : str
Name of node to be replaced.
node : :class:`OnnxNode` object
The replacement node.
'''
idx = self.get_node_index(name)
if idx is not None:
self.node[idx] = node
self.connect_nodes()
def insert_node(self, name, node):
'''
Insert node in graph after named node
Parameters
----------
name : str
Name of the node to insert `node` after.
node : :class:`OnnxNode` object
The node to insert.
'''
idx = self.get_node_index(name)
if idx is not None:
self.node.insert(idx+1, node)
self.connect_nodes()
def get_input(self, name):
'''
Get graph input ValueInfoProto
Parameters
----------
name : str
Name of the ValueInfoProto.
Returns
-------
:class:`ValueInfoProto` object, or None if not present.
'''
for i in self.input:
if i.name == name:
return i
return None
def add_input(self, value_info):
'''
Add new graph input ValueInfoProto
Parameters
----------
value_info : :class:`ValueInfoProto` object
ValueInfoProto to add to graph input.
'''
if not isinstance(value_info, (list, tuple)):
value_info = [value_info]
self.input.extend(value_info)
def replace_input(self, name, value_info):
'''
Replace a graph input ValueInfoProto
Parameters
----------
name : str
Name of ValueInfoProto to be replaced.
value_info : :class:`ValueInfoProto` object
The replacement ValueInfoProto.
'''
for idx, proto in enumerate(self.input):
if proto.name == name:
self.input[idx] = value_info
def get_initializer(self, name):
'''
Get TensorProto from initializer
Parameters
----------
name : str
Name of the TensorProto.
Returns
-------
:class:`TensorProto` object, or None if not present.
'''
for i in self.initializer:
if i.name == name:
return i
return None
def add_initializer(self, init):
'''
Add TensorProto to initializer
Parameters
----------
init : :class:`TensorProto` object
TensorProto to add to initializer.
'''
if not isinstance(init, (list, tuple)):
init = [init]
self.initializer.extend(init)
def replace_initializer(self, name, init):
'''
Replace TensorProto in initializer
Parameters
----------
name : str
Name of TensorProto to be replaced.
init : :class:`TensorProto` object
The replacement TensorProto.
'''
for idx, proto in enumerate(self.initializer):
if proto.name == name:
self.initializer[idx] = init
def clean_init(self):
''' Remove inputs, initializers which are not part of graph '''
all_inputs = [i for n in self.node for i in n.input]
self.input = list(filter(lambda x: x.name in all_inputs,
self.input))
self.initializer = list(filter(lambda x: x.name in all_inputs,
self.initializer))
self.tensor_dict = {k:v for k,v in self.tensor_dict.items()
if k in all_inputs}
def connect_nodes(self):
''' Add parents and children for each node '''
# mapping from input to nodes
input_to_node = {}
for node in self.node:
# reset any existing links
node.parents = []
node.children = []
for input_ in node.input:
if input_to_node.get(input_) is None:
input_to_node[input_] = []
if node not in input_to_node[input_]:
input_to_node[input_].append(node)
for node in self.node:
for output_ in node.output:
if not input_to_node.get(output_):
continue
node.add_child(input_to_node[output_])
def make_onnx(self):
''' Generate ONNX model from current graph '''
self.clean_init()
nodes = []
for node in self.node:
n = NodeProto()
n.input.extend(node.input)
n.output.extend(node.output)
n.name = node.name
n.op_type = node.op_type
n.attribute.extend(
helper.make_attribute(key, value)
for key, value in sorted(node.attrs.items())
)
nodes.append(n)
inputs = []
initializer = []
for k,v in self.tensor_dict.items():
init = numpy_helper.from_array(v, name=k)
initializer.append(init)
value_info = helper.make_tensor_value_info(
name=k,
elem_type=mapping.NP_TYPE_TO_TENSOR_TYPE[v.dtype],
shape=list(v.shape)
)
inputs.append(value_info)
graph_ = helper.make_graph(
nodes=nodes,
name='dlpy_graph',
inputs=inputs+self.uninitialized,
outputs=self.output,
initializer=initializer
)
model = helper.make_model(graph_)
return model
@classmethod
def from_onnx(cls, graph):
''' Create a OnnxGraph object from ONNX GraphProto '''
graph_ = cls(graph)
# generate names for nodes
for idx, node in enumerate(graph_.node):
if not node.name:
node.name = '{}_{}'.format(node.op_type, idx)
elif '/' in node.name:
                node.name = node.name.replace('/', '_')
graph_.connect_nodes()
# add initialized tensors to nodes
for node in graph_.node:
for input_ in node.input:
if input_ in graph_.tensor_dict:
node.tensors[input_] = graph_.tensor_dict[input_]
return graph_
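# Hedged usage sketch (added for illustration, not part of the original module);
# 'model.onnx' is a placeholder path for any ONNX file on disk.
if __name__ == '__main__':
    model_proto = onnx.load('model.onnx')
    graph = OnnxGraph.from_onnx(model_proto.graph)
    for node in graph.node:
        print(node.name, node.op_type, '->', node.output)
    rebuilt = graph.make_onnx()  # regenerate an ONNX ModelProto after any edits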
```
#### File: python-dlpy/dlpy/model.py
```python
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import collections
import sys
from .utils import image_blocksize, unify_keys, input_table_check, random_name, check_caslib, caslibify
from .utils import filter_by_image_id, filter_by_filename, isnotebook
from dlpy.timeseries import TimeseriesTable
from dlpy.timeseries import _get_first_obs, _get_last_obs, _combine_table, _prepare_next_input
from dlpy.utils import DLPyError, Box, DLPyDict
from dlpy.lr_scheduler import _LRScheduler, FixedLR, StepLR, FCMPLR
from dlpy.network import Network
class Model(Network):
valid_res = None
feature_maps = None
valid_conf_mat = None
valid_score = None
n_epochs = 0
training_history = None
model_explain_table = None
valid_res_tbl = None
model_ever_trained = False
train_tbl = None
valid_tbl = None
score_message_level = 'note'
def change_labels(self, label_file, id_column, label_column):
'''
Overrides the labels already in the model
The label_file should be a csv file that has two columns: 1) id
column that contains ids starting from 0 and 2) label column that
contains the labels. This file should also have header columns
and those should be passed to this function (i.e., id_column and
label_column)
Parameters
----------
label_file : string
Specifies the name of the file that contains the new labels.
id_column : string
Specifies the name of the id column in label_file.
label_column : string
Specifies the name of the label column in label file.
'''
if self.model_weights is not None:
temp_name = random_name('new_label_table', 6)
temp_model_name = random_name('new_weights_table', 6)
labels = pd.read_csv(label_file, skipinitialspace=True, index_col=False)
self.conn.upload_frame(labels, casout=dict(name=temp_name, replace=True),
importoptions={'vars':[
{'name': id_column, 'type': 'int64'},
{'name': label_column, 'type': 'char', 'length': 20}
]})
rt = self._retrieve_('deeplearn.dllabeltarget', initWeights=self.model_weights,
modelTable=self.model_table, modelWeights=temp_model_name,
labelTable=temp_name)
if rt.severity == 0:
self.model_weights = self.conn.CASTable(temp_model_name)
else:
for m in rt.messages:
print(m)
                raise DLPyError('Something went wrong while changing the labels')
else:
raise DLPyError('We do not have any weights yet')
def get_model_info(self):
'''
Return the information about the model table
Returns
-------
:class:`CASResults`
'''
return self._retrieve_('deeplearn.modelinfo', modelTable=self.model_table)
def fit(self, data, inputs=None, target=None, data_specs=None, mini_batch_size=1, max_epochs=5, log_level=3,
lr=0.01, optimizer=None, nominals=None, texts=None, target_sequence=None, sequence=None, text_parms=None,
valid_table=None, valid_freq=1, gpu=None, attributes=None, weight=None, seed=0, record_seed=0,
missing='mean', target_missing='mean', repeat_weight_table=False, force_equal_padding=None,
save_best_weights=False, n_threads=None, target_order='ascending'):
"""
        Fit a deep learning model.
        Note that this function surfaces several parameters from nested parameter objects. For
        example, although the learning rate is a parameter of Solver (which is itself a parameter
        of Optimizer), it is exposed here so that users can easily set the learning rate without
        building a custom optimizer and solver. If a non-default solver or optimizer is passed,
        these surfaced parameters are ignored, even if they are set, and the values from the
        custom solver and optimizer are used instead. Besides learning_rate (lr), max_epochs and
        log_level are other examples of such parameters.
Parameters
----------
data : string
This is the input data. It might be a string that is the
name of a cas table. Alternatively, this might be a cas table.
inputs : string or list-of-strings, optional
Specifies the input variables to use in the analysis.
target : string or list-of-strings, optional
Specifies the target sequence variables to use in the analysis.
data_specs : :class:`DataSpec`, optional
Specifies the parameters for the multiple input cases.
mini_batch_size : int, optional
Specifies the number of observations per thread in a
mini-batch. You can use this parameter to control the number of
observations that the action uses on each worker for each thread
to compute the gradient prior to updating the weights. Larger
values use more memory. When synchronous SGD is used (the
default), the total mini-batch size is equal to
miniBatchSize * number of threads * number of workers. When
asynchronous SGD is used (by specifying the elasticSyncFreq
parameter), each worker trains its own local model. In this case,
the total mini-batch size for each worker is
miniBatchSize * number of threads.
max_epochs : int, optional
specifies the maximum number of epochs. For SGD with a
single-machine server or a session that uses one worker on a
distributed server, one epoch is reached when the action passes
through the data one time. For a session that uses more than one
worker, one epoch is reached when all the workers exchange the
weights with the controller one time. The syncFreq parameter
specifies the number of times each worker passes through the
data before exchanging weights with the controller. For L-BFGS
with full batch, each L-BFGS iteration might process more than
one epoch, and final number of epochs might exceed the maximum
number of epochs.
log_level : int, optional
Specifies how progress messages are sent to the client. The
default value, 0, indicates that no messages are sent. Specify 1
to receive start and end messages. Specify 2 to include the
iteration history.
lr : double, optional
Specifies the learning rate.
optimizer : :class:`Optimizer`, optional
Specifies the parameters for the optimizer.
nominals : string or list-of-strings, optional
Specifies the nominal input variables to use in the analysis.
texts : string or list-of-strings, optional
Specifies the character variables to treat as raw text.
These variables must be specified in the inputs parameter.
target_sequence : string or list-of-strings, optional
Specifies the target sequence variables to use in the analysis.
sequence : :class:`Sequence`, optional
Specifies the settings for sequence data.
text_parms : :class:`TextParms`, optional
Specifies the parameters for the text inputs.
valid_table : string or CASTable, optional
Specifies the table with the validation data. The validation
table must have the same columns and data types as the training table.
valid_freq : int, optional
Specifies the frequency for scoring the validation table.
gpu : :class:`Gpu`, optional
When specified, the action uses graphical processing unit hardware.
The simplest way to use GPU processing is to specify "gpu=1".
In this case, the default values of other GPU parameters are used.
Setting gpu=1 enables all available GPU devices for use. Setting
gpu=0 disables GPU processing.
attributes : string or list-of-strings, optional
Specifies temporary attributes, such as a format, to apply to
input variables.
weight : string, optional
Specifies the variable/column name in the input table containing the
prior weights for the observation.
seed : double, optional
specifies the random number seed for the random number generator
in SGD. The default value, 0, and negative values indicate to use
random number streams based on the computer clock. Specify a value
that is greater than 0 for a reproducible random number sequence.
record_seed : double, optional
specifies the random number seed for the random record selection
within a worker. The default value 0 disables random record selection.
Records are read as they are laid out in memory.
Negative values indicate to use random number streams based on the
computer clock.
missing : string, optional
Specifies the policy for replacing missing values with imputed values.
Valid Values: MAX, MIN, MEAN, NONE
Default: MEAN
target_missing : string, optional
Specifies the policy for replacing target missing values with
imputed values.
Valid Values: MAX, MIN, MEAN, NONE
Default: MEAN
repeat_weight_table : bool, optional
Replicates the entire weight table on each worker node when saving
weights.
Default: False
force_equal_padding : bool, optional
For convolution or pooling layers, this setting forces left padding
to equal right padding, and top padding to equal bottom padding.
This setting might result in an output image that is
larger than the input image.
Default: False
save_best_weights : bool, optional
When set to True, it keeps the weights that provide the smallest
loss error.
n_threads : int, optional
Specifies the number of threads to use. If nothing is set then
all of the cores available in the machine(s) will be used.
target_order : string, optional
Specifies the order of the labels. It can follow the natural order
            of the labels or order them in the order they are received with
training data samples.
Valid Values: 'ascending', 'descending', 'hash'
Default: 'ascending'
Returns
--------
:class:`CASResults`
"""
# set reference to the training and validation table
self.train_tbl = data
self.valid_tbl = valid_table
input_tbl_opts = input_table_check(data)
input_table = self.conn.CASTable(**input_tbl_opts)
if data_specs is None and inputs is None:
from dlpy.images import ImageTable
if isinstance(input_table, ImageTable):
inputs = input_table.running_image_column
elif '_image_' in input_table.columns.tolist():
print('NOTE: Inputs=_image_ is used')
inputs = '_image_'
else:
raise DLPyError('either dataspecs or inputs need to be non-None')
if optimizer is None:
optimizer = Optimizer(algorithm=VanillaSolver(learning_rate=lr), mini_batch_size=mini_batch_size,
max_epochs=max_epochs, log_level=log_level)
else:
if not isinstance(optimizer, Optimizer):
raise DLPyError('optimizer should be an Optimizer object')
max_epochs = optimizer['maxepochs']
if target is None and '_label_' in input_table.columns.tolist():
target = '_label_'
# check whether the field is none or not
if self.model_weights is not None and self.model_weights.to_table_params()['name'].upper() in \
list(self._retrieve_('table.tableinfo').TableInfo.Name):
print('NOTE: Training based on existing weights.')
init_weights = self.model_weights
else:
print('NOTE: Training from scratch.')
init_weights = None
# when model_weights is none, reset it
if self.model_weights is None:
self.model_weights = self.conn.CASTable('{}_weights'.format(self.model_name))
if save_best_weights and self.best_weights is None:
self.best_weights = random_name('model_best_weights', 6)
r = self.train(table=input_tbl_opts, inputs=inputs, target=target, data_specs=data_specs,
optimizer=optimizer, nominals=nominals, texts=texts, target_sequence=target_sequence,
sequence=sequence, text_parms=text_parms, valid_table=valid_table, valid_freq=valid_freq,
gpu=gpu, attributes=attributes, weight=weight, seed=seed, record_seed=record_seed,
missing=missing, target_missing=target_missing, repeat_weight_table=repeat_weight_table,
force_equal_padding=force_equal_padding, init_weights=init_weights, target_order=target_order,
best_weights=self.best_weights, model=self.model_table, n_threads=n_threads,
model_weights=dict(replace=True, **self.model_weights.to_table_params()))
try:
temp = r.OptIterHistory
temp.Epoch += 1 # Epochs should start from 1
temp.Epoch = temp.Epoch.astype('int64') # Epochs should be integers
if self.n_epochs == 0:
self.n_epochs = max_epochs
self.training_history = temp
else:
temp.Epoch += self.n_epochs
self.training_history = self.training_history.append(temp)
self.n_epochs += max_epochs
self.training_history.index = range(0, self.n_epochs)
except:
pass
if r.severity < 2:
self.target = target
return r
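    # Hedged usage sketch (added for illustration; `model`, `sess`, and
    # `my_images` are placeholder names, not defined here):
    #
    #     model.fit(data=my_images, inputs='_image_', target='_label_',
    #               mini_batch_size=64, max_epochs=10, lr=0.001,
    #               valid_table=my_valid_images)
    #
    # With the default optimizer, `lr`, `mini_batch_size`, `max_epochs`, and
    # `log_level` are wrapped into a VanillaSolver-based Optimizer, exactly as
    # the code above does.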
def fit_and_visualize(self, data, inputs=None, target=None, data_specs=None, mini_batch_size=1, max_epochs=5,
lr=0.01, optimizer=None, nominals=None, texts=None, target_sequence=None, sequence=None,
text_parms=None, valid_table=None, valid_freq=1, gpu=None, attributes=None, weight=None,
seed=0, record_seed=0, missing='mean', target_missing='mean', repeat_weight_table=False,
force_equal_padding=None, save_best_weights=False, n_threads=None, target_order='ascending',
visualize_freq=100):
"""
Fit a deep learning model while visualizing the fit error and loss at each iteration.
This behaves the same as the "fit()" function, except that the training history (fit error and loss)
at the iteration level is visualized with a line chart. This setting overrides the log level and sets it
to 3, as that is the only level that reports iteration-level training history. A point is added to the
chart every visualize_freq iterations (default=100).
NOTE: this function is experimental; it relies on several work-arounds that only work
in Jupyter notebooks.
Parameters
----------
data : string or CASTable
This is the input data. It might be a string that is the
name of a cas table. Alternatively, this might be a cas table.
inputs : string or list-of-strings, optional
Specifies the input variables to use in the analysis.
target : string or list-of-strings, optional
Specifies the target sequence variables to use in the analysis.
data_specs : :class:`DataSpec`, optional
Specifies the parameters for the multiple input cases.
mini_batch_size : int, optional
Specifies the number of observations per thread in a
mini-batch. You can use this parameter to control the number of
observations that the action uses on each worker for each thread
to compute the gradient prior to updating the weights. Larger
values use more memory. When synchronous SGD is used (the
default), the total mini-batch size is equal to
miniBatchSize * number of threads * number of workers. When
asynchronous SGD is used (by specifying the elasticSyncFreq
parameter), each worker trains its own local model. In this case,
the total mini-batch size for each worker is
miniBatchSize * number of threads.
max_epochs : int, optional
specifies the maximum number of epochs. For SGD with a
single-machine server or a session that uses one worker on a
distributed server, one epoch is reached when the action passes
through the data one time. For a session that uses more than one
worker, one epoch is reached when all the workers exchange the
weights with the controller one time. The syncFreq parameter
specifies the number of times each worker passes through the
data before exchanging weights with the controller. For L-BFGS
with full batch, each L-BFGS iteration might process more than
one epoch, and final number of epochs might exceed the maximum
number of epochs.
log_level : int, optional
Specifies how progress messages are sent to the client. The
default value, 0, indicates that no messages are sent. Specify 1
to receive start and end messages. Specify 2 to include the
iteration history.
lr : double, optional
Specifies the learning rate.
optimizer : :class:`Optimizer`, optional
Specifies the parameters for the optimizer.
nominals : string or list-of-strings, optional
Specifies the nominal input variables to use in the analysis.
texts : string or list-of-strings, optional
Specifies the character variables to treat as raw text.
These variables must be specified in the inputs parameter.
target_sequence : string or list-of-strings, optional
Specifies the target sequence variables to use in the analysis.
sequence : :class:`Sequence`, optional
Specifies the settings for sequence data.
text_parms : :class:`TextParms`, optional
Specifies the parameters for the text inputs.
valid_table : string or CASTable, optional
Specifies the table with the validation data. The validation
table must have the same columns and data types as the training table.
valid_freq : int, optional
Specifies the frequency for scoring the validation table.
gpu : :class:`Gpu`, optional
When specified, the action uses graphical processing unit hardware.
The simplest way to use GPU processing is to specify "gpu=1".
In this case, the default values of other GPU parameters are used.
Setting gpu=1 enables all available GPU devices for use. Setting
gpu=0 disables GPU processing.
attributes : string or list-of-strings, optional
Specifies temporary attributes, such as a format, to apply to
input variables.
weight : string, optional
Specifies the variable/column name in the input table containing the
prior weights for the observation.
seed : double, optional
specifies the random number seed for the random number generator
in SGD. The default value, 0, and negative values indicate to use
random number streams based on the computer clock. Specify a value
that is greater than 0 for a reproducible random number sequence.
record_seed : double, optional
specifies the random number seed for the random record selection
within a worker. The default value 0 disables random record selection.
Records are read as they are laid out in memory.
Negative values indicate to use random number streams based on the
computer clock.
missing : string, optional
Specifies the policy for replacing missing values with imputed values.
Valid Values: MAX, MIN, MEAN, NONE
Default: MEAN
target_missing : string, optional
Specifies the policy for replacing target missing values with
imputed values.
Valid Values: MAX, MIN, MEAN, NONE
Default: MEAN
repeat_weight_table : bool, optional
Replicates the entire weight table on each worker node when saving
weights.
Default: False
force_equal_padding : bool, optional
For convolution or pooling layers, this setting forces left padding
to equal right padding, and top padding to equal bottom padding.
This setting might result in an output image that is
larger than the input image.
Default: False
save_best_weights : bool, optional
When set to True, it keeps the weights that provide the smallest
loss error.
n_threads : int, optional
Specifies the number of threads to use. If nothing is set then
all of the cores available in the machine(s) will be used.
target_order : string, optional
Specifies the order of the labels. It can follow the natural order
of the labels or order them in the order they are received with
training data samples.
Valid Values: 'ascending', 'descending', 'hash'
Default: 'ascending'
visualize_freq : int, optional
Specifies the frequency of the points in the visualization history. Note that the chart will
get crowded, and possibly get slower, with more points.
Default: 100
Returns
--------
:class:`CASResults`
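Examples
--------
A minimal usage sketch, assuming the call is made from a Jupyter
notebook with a compiled DLPy model ``model`` and a CAS table named
'train_data' (the table name is illustrative); a point is added to the
chart every 50 iterations:

>>> res = model.fit_and_visualize(data='train_data', lr=0.001,
...                               mini_batch_size=8, max_epochs=5,
...                               visualize_freq=50)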
"""
# set reference to the training and validation table
self.train_tbl = data
self.valid_tbl = valid_table
input_tbl_opts = input_table_check(data)
input_table = self.conn.CASTable(**input_tbl_opts)
if data_specs is None and inputs is None:
from dlpy.images import ImageTable
if isinstance(input_table, ImageTable):
inputs = input_table.running_image_column
elif '_image_' in input_table.columns.tolist():
print('NOTE: Inputs=_image_ is used')
inputs = '_image_'
else:
raise DLPyError('either dataspecs or inputs need to be non-None')
if optimizer is None:
optimizer = Optimizer(algorithm=VanillaSolver(learning_rate=lr), mini_batch_size=mini_batch_size,
max_epochs=max_epochs, log_level=3)
else:
if not isinstance(optimizer, Optimizer):
raise DLPyError('optimizer should be an Optimizer object')
max_epochs = optimizer['maxepochs']
if target is None and '_label_' in input_table.columns.tolist():
target = '_label_'
if self.model_weights.to_table_params()['name'].upper() in \
list(self._retrieve_('table.tableinfo').TableInfo.Name):
print('NOTE: Training based on existing weights.')
init_weights = self.model_weights
else:
print('NOTE: Training from scratch.')
init_weights = None
if save_best_weights and self.best_weights is None:
self.best_weights = random_name('model_best_weights', 6)
if isnotebook() is True:
# prep work for visualization
freq=[]
freq.append(visualize_freq)
x = []
y = []
y_loss = []
e = []
total_sample_size = []
iter_history = []
status = []
status.append(0)
self._train_visualize(table=input_tbl_opts, inputs=inputs, target=target, data_specs=data_specs,
optimizer=optimizer, nominals=nominals, texts=texts, target_sequence=target_sequence,
sequence=sequence, text_parms=text_parms, valid_table=valid_table,
valid_freq=valid_freq, gpu=gpu, attributes=attributes, weight=weight, seed=seed,
record_seed=record_seed, missing=missing, target_missing=target_missing,
repeat_weight_table=repeat_weight_table, force_equal_padding=force_equal_padding,
init_weights=init_weights, target_order=target_order, best_weights=self.best_weights,
model=self.model_table, n_threads=n_threads,
model_weights=dict(replace=True, **self.model_weights.to_table_params()),
x=x, y=y, y_loss=y_loss, total_sample_size=total_sample_size, e=e,
iter_history=iter_history, freq=freq, status=status)
if status[0] == 0:
try:
temp = iter_history[0]
temp.Epoch += 1 # Epochs should start from 1
temp.Epoch = temp.Epoch.astype('int64') # Epochs should be integers
if self.n_epochs == 0:
self.n_epochs = max_epochs
self.training_history = temp
else:
temp.Epoch += self.n_epochs
self.training_history = self.training_history.append(temp)
self.n_epochs += max_epochs
self.training_history.index = range(0, self.n_epochs)
except:
pass
else:
print('Could not train the model')
else:
print('DLPy supports training history visualization only in Jupyter notebooks. '
'Calling the fit method without visualization instead.')
r = self.train(table=input_tbl_opts, inputs=inputs, target=target, data_specs=data_specs,
optimizer=optimizer, nominals=nominals, texts=texts, target_sequence=target_sequence,
sequence=sequence, text_parms=text_parms, valid_table=valid_table, valid_freq=valid_freq,
gpu=gpu, attributes=attributes, weight=weight, seed=seed, record_seed=record_seed,
missing=missing, target_missing=target_missing, repeat_weight_table=repeat_weight_table,
force_equal_padding=force_equal_padding, init_weights=init_weights,
target_order=target_order, best_weights=self.best_weights, model=self.model_table,
n_threads=n_threads,
model_weights=dict(replace=True, **self.model_weights.to_table_params()))
try:
temp = r.OptIterHistory
temp.Epoch += 1 # Epochs should start from 1
temp.Epoch = temp.Epoch.astype('int64') # Epochs should be integers
if self.n_epochs == 0:
self.n_epochs = max_epochs
self.training_history = temp
else:
temp.Epoch += self.n_epochs
self.training_history = self.training_history.append(temp)
self.n_epochs += max_epochs
self.training_history.index = range(0, self.n_epochs)
except:
pass
if r.severity < 2:
self.target = target
return r
def train(self, table, attributes=None, inputs=None, nominals=None, texts=None, valid_table=None, valid_freq=1,
model=None, init_weights=None, model_weights=None, target=None, target_sequence=None,
sequence=None, text_parms=None, weight=None, gpu=None, seed=0, record_seed=None, missing='mean',
optimizer=None, target_missing='mean', best_weights=None, repeat_weight_table=False,
force_equal_padding=None, data_specs=None, n_threads=None, target_order='ascending'):
"""
Trains a deep learning model.
Parameters
----------
table : string or CASTable
Specifies the input data.
attributes : string or list-of-strings, optional
Specifies temporary attributes, such as a format, to apply
to input variables.
inputs : string or list-of-strings, optional
Specifies the input variables to use in the analysis.
nominals : string or list-of-strings
Specifies the nominal input variables to use in the analysis.
texts : string or list-of-strings, optional
Specifies the character variables to treat as raw text.
These variables must be specified in the inputs parameter.
valid_table : string or CASTable, optional
Specifies the table with the validation data. The validation
table must have the same columns and data types as the
training table.
valid_freq : int, optional
Specifies the frequency for scoring the validation table.
model : string or CASTable, optional
Specifies the in-memory table that is the model.
init_weights : string or CASTable, optional
Specifies an in-memory table that contains the model weights.
These weights are used to initialize the model.
model_weights : string or CASTable, optional
Specifies an in-memory table that is used to store the
model weights.
target : string or list-of-strings, optional
Specifies the target variables to use in the analysis.
target_sequence : string or list-of-strings, optional
Specifies the target sequence variables to use in the analysis.
sequence : string or list-of-strings, optional
Specifies the settings for sequence data.
text_parms : TextParms, optional
Specifies the parameters for the text inputs.
weight : string, optional
Specifies the variable/column name in the input table
containing the prior weights for the observation.
gpu : GPU, optional
When specified, the action uses graphical processing unit hardware.
The simplest way to use GPU processing is to specify "gpu=1".
In this case, the default values of other GPU parameters are used.
Setting gpu=1 enables all available GPU devices for use. Setting
gpu=0 disables GPU processing.
seed : double, optional
specifies the random number seed for the random number
generator in SGD. The default value, 0, and negative values
indicate to use random number streams based on the computer
clock. Specify a value that is greater than 0 for a reproducible
random number sequence.
record_seed : double, optional
specifies the random number seed for the random record
selection within a worker. The default value 0 disables random
record selection. Records are read as they are laid out in memory.
Negative values indicate to use random number streams based
on the computer clock.
missing : string, optional
Specifies the policy for replacing missing values with imputed values.
Valid Values: MAX, MIN, MEAN, NONE
Default: MEAN
optimizer : Optimizer, optional
Specifies the parameters for the optimizer.
target_missing : string, optional
Specifies the policy for replacing target missing values with
imputed values.
Valid Values: MAX, MIN, MEAN, NONE
Default: MEAN
best_weights : string or CASTable, optional
Specifies that the weights with the smallest loss error will be
saved to a CAS table.
repeat_weight_table : bool, optional
Replicates the entire weight table on each worker node when
saving weights.
Default: False
force_equal_padding : bool, optional
For convolutional or pooling layers, this setting forces left padding
to equal right padding, and top padding to equal bottom padding.
This setting might result in an output image that is larger than the
input image. Default: False
data_specs : DataSpec, optional
Specifies the parameters for the multiple input cases.
n_threads : int, optional
Specifies the number of threads to use. If nothing is set then all
of the cores available in the machine(s) will be used.
target_order : string, optional
Specifies the order of the labels. It can follow the natural order
of the labels or order them in the order they are received with the training data samples.
Valid Values: 'ascending', 'descending', 'hash'
Default: 'ascending'
Returns
-------
:class:`CASResults`
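Examples
--------
A minimal usage sketch of the low-level action wrapper; ``fit`` is the
usual entry point. It assumes a compiled DLPy model ``model`` and a CAS
table named 'train_data' with '_image_' and '_label_' columns (the
table and weights names are illustrative):

>>> opt = Optimizer(algorithm=VanillaSolver(learning_rate=0.001),
...                 mini_batch_size=8, max_epochs=5)
>>> rt = model.train(table='train_data', inputs='_image_',
...                  target='_label_', model=model.model_table,
...                  model_weights=dict(replace=True, name='my_weights'),
...                  optimizer=opt)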
"""
b_w = None
if best_weights is not None:
b_w = dict(replace=True, name=best_weights)
parameters = DLPyDict(table=table, attributes=attributes, inputs=inputs, nominals=nominals, texts=texts,
valid_table=valid_table, valid_freq=valid_freq, model=model, init_weights=init_weights,
model_weights=model_weights, target=target, target_sequence=target_sequence,
sequence=sequence, text_parms=text_parms, weight=weight, gpu=gpu, seed=seed,
record_seed=record_seed, missing=missing, optimizer=optimizer,
target_missing=target_missing, best_weights=b_w, repeat_weight_table=repeat_weight_table,
force_equal_padding=force_equal_padding, data_specs=data_specs, n_threads=n_threads,
target_order=target_order)
rt = self._retrieve_('deeplearn.dltrain', message_level='note', **parameters)
if rt.severity < 2:
self.model_ever_trained = True
return rt
def tune(self, data, inputs='_image_', target='_label_', **kwargs):
'''
Tunes hyperparameters for the deep learning model.
Parameters
----------
data : CASTable or string or dict
Specifies the CAS table containing the training data for the model
inputs : string, optional
Specifies the name of the variable in the input table that is the
input of the deep learning model.
Default : '_image_'
target : string, optional
Specifies the name of the variable in the input table that is the
response of the deep learning model.
Default : '_label_'
**kwargs : keyword arguments, optional
Specifies the optional arguments for the dltune action.
Returns
----------
:class:`CASResults`
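Examples
--------
A minimal usage sketch, assuming a compiled DLPy model ``model`` and a
CAS table named 'train_data' with the default '_image_' and '_label_'
columns (names are illustrative); any tuning options for the dltune
action would be passed through **kwargs:

>>> r = model.tune(data='train_data', inputs='_image_', target='_label_')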
'''
r = self._retrieve_('deeplearn.dltune',
message_level='note', model=self.model_table,
table=data,
inputs=inputs,
target=target,
**kwargs)
return r
def plot_training_history(self, items=['Loss', 'FitError'], fig_size=(12, 5), tick_frequency=1):
'''
Display the training iteration history. If using in Jupyter,
suppress the return object with a semicolon - plot_training_history();
Parameters
----------
items : list, optional
Specifies the items to be displayed.
Default : ['Loss', 'FitError']
fig_size : tuple, optional
Specifies the size of the figure.
Default : (12, 5)
tick_frequency : int, optional
Specifies the frequency of the ticks visible on the x-axis.
Default : 1
Returns
-------
:class:`matplotlib.axes.Axes`
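Examples
--------
A minimal usage sketch, assuming ``model.fit`` has already been run so
that ``model.training_history`` is populated:

>>> model.plot_training_history(items=['Loss'], fig_size=(10, 4),
...                             tick_frequency=2);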
'''
items_not_in_results = [x for x in items if x not in self.training_history.columns]
if items_not_in_results:
raise DLPyError('Columns {} are not in results'.format(items_not_in_results))
if self.training_history is not None:
if tick_frequency > 1 and tick_frequency <= self.n_epochs:
x_ticks = np.array([1] + list(range(tick_frequency,
len(self.training_history.Epoch) + 1, tick_frequency)))
else:
x_ticks = self.training_history.Epoch.values
return self.training_history.plot(x='Epoch', y=items,
figsize=fig_size,
xticks=x_ticks)
else:
raise DLPyError('model.fit should be run before calling plot_training_history')
def evaluate(self, data, text_parms=None, layer_out=None, layers=None, gpu=None, buffer_size=None,
mini_batch_buf_size=None, top_probs=None, use_best_weights=False):
"""
Evaluate the deep learning model on a specified validation data set.
After the inference, a confusion matrix is created from the results.
This method is intended for classification tasks.
Parameters
----------
data : string or CASTable
Specifies the input data.
text_parms : TextParms, optional
Specifies the parameters for the text inputs.
layer_out : string, optional
Specifies the settings for an output table that includes
layer output values. By default, all layers are included.
You can filter the list with the layers parameter.
layers : list of strings
Specifies the names of the layers to include in the
output layers table.
gpu : GPU, optional
When specified, the action uses graphical processing
unit hardware. The simplest way to use GPU processing is
to specify "gpu=1". In this case, the default values of
other GPU parameters are used. Setting gpu=1 enables all
available GPU devices for use. Setting gpu=0 disables GPU
processing.
buffer_size : int, optional
Specifies the number of observations to score in a single
batch. Larger values use more memory.
Default: 10
mini_batch_buf_size : int, optional
Specifies the size of a buffer that is used to save input data
and intermediate calculations. By default, each layer allocates
an input buffer that is equal to the number of input channels
multiplied by the input feature map size multiplied by the
bufferSize value. You can reduce memory usage by specifying a
value that is smaller than the bufferSize. The only disadvantage
to specifying a small value is that run time can increase because
multiple smaller matrices must be multiplied instead of a single
large matrix multiply.
top_probs : int, optional
Specifies to include the predicted probabilities along with
the corresponding labels in the results. For example, if you
specify 5, then the top 5 predicted probabilities are shown in
the results along with the corresponding labels.
use_best_weights : bool, optional
When set to True, the weights that provide the smallest loss
error, saved during a previous training, are used for scoring the
input data rather than the final weights from the training.
Default: False
Returns
-------
:class:`CASResults`
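Examples
--------
A minimal usage sketch, assuming a trained classification model
``model`` and a CAS table named 'test_data' with the same columns as
the training table (the table name is illustrative):

>>> res = model.evaluate(data='test_data', use_best_weights=True)
>>> model.valid_score       # fit statistics from the scoring action
>>> model.valid_conf_mat    # confusion matrix built from the results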
"""
input_tbl_opts = input_table_check(data)
input_table = self.conn.CASTable(**input_tbl_opts)
copy_vars = input_table.columns.tolist()
if self.valid_res_tbl is None:
valid_res_tbl = random_name('Valid_Res')
else:
valid_res_tbl = self.valid_res_tbl.name
lo = None
if layer_out is not None:
from swat import CASTable
if type(layer_out) is CASTable:
lo = layer_out
else:
lo = dict(replace=True, name=layer_out)
en = True
if self.model_type == 'RNN':
en = False
if use_best_weights and self.best_weights is not None:
print('NOTE: Using the weights providing the smallest loss error.')
res = self.score(table=input_table, model=self.model_table, init_weights=self.best_weights,
copy_vars=copy_vars, casout=dict(replace=True, name=valid_res_tbl),
encode_name=en, text_parms=text_parms, layer_out=lo,
layers=layers, gpu=gpu, mini_batch_buf_size=mini_batch_buf_size,
top_probs=top_probs, buffer_size=buffer_size)
else:
if self.model_weights is None:
raise DLPyError('We need some weights to do scoring.')
else:
res = self.score(table=input_table, model=self.model_table, init_weights=self.model_weights,
copy_vars=copy_vars, casout=dict(replace=True, name=valid_res_tbl),
encode_name=en, text_parms=text_parms, layer_out=lo,
layers=layers, gpu=gpu, mini_batch_buf_size=mini_batch_buf_size,
buffer_size=buffer_size, top_probs=top_probs)
if res.severity > 1:
raise DLPyError('Something went wrong while scoring the input data with the model.')
if res.ScoreInfo is not None:
self.valid_score = res.ScoreInfo
# TODO work on here to make it more user friendly and remove assumptions
if self.target is not None:
self.valid_conf_mat = self.conn.crosstab(table=valid_res_tbl, row=self.target, col='I_' + self.target)
else:
v = self.conn.CASTable(valid_res_tbl)
temp_columns = v.columns.tolist()
output_names = [name for name in temp_columns if (name.startswith('I_'))]
if len(output_names) > 0:
self.target = output_names[0][2:]
self.valid_conf_mat = self.conn.crosstab(table=valid_res_tbl, row=self.target, col='I_' + self.target)
if self.model_type == 'CNN':
if not self.conn.has_actionset('image'):
self.conn.loadactionset(actionSet='image', _messagelevel='error')
self.valid_res_tbl = self.conn.CASTable(valid_res_tbl)
temp_columns = self.valid_res_tbl.columns.tolist()
columns = [item for item in temp_columns if item.startswith('P_' + self.target) or item == 'I_' + self.target]
img_table = self._retrieve_('image.fetchimages', fetchimagesvars=columns, imagetable=self.valid_res_tbl, to=1000)
img_table = img_table.Images
self.valid_res = img_table
else:
self.valid_res = res
return res
def evaluate_object_detection(self, ground_truth, coord_type, detection_data=None, classes=None,
iou_thresholds=np.linspace(0.5, 0.95, 10, endpoint=True)):
"""
Evaluate the deep learning model on a specified validation data set.
Parameters
----------
ground_truth : string or CASTable
Specifies the ground truth table used to evaluate the corresponding
prediction results.
coord_type : string
Specifies the format that the ground_truth table uses to represent
bounding boxes.
Valid Values: 'yolo', 'coco'
detection_data : string or CASTable, optional
Perform evaluation on the table. If the parameter is not specified,
the function evaluates the last prediction performed
by the model.
classes : string or list-of-strings, optional
Specifies the classes to be evaluated. If not set, the evaluation
is performed on all of the classes in the ground_truth table
and the detection_data table.
iou_thresholds : float or list-of-floats, optional
Specifies an IoU threshold or a list of IoU thresholds that
determines what is counted as a model-predicted positive
detection of the classes defined by the classes parameter.
Returns
-------
list containing calculated results.
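Examples
--------
A minimal usage sketch, assuming a trained object detection model
``model`` whose ``predict`` has already been run, and a ground truth
CAS table named 'gt_data' in YOLO coordinate format (the table name is
illustrative):

>>> res = model.evaluate_object_detection(ground_truth='gt_data',
...                                       coord_type='yolo',
...                                       iou_thresholds=0.5)
>>> res[0]['AP']    # mean average precision at IoU 0.5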
"""
if coord_type.lower() not in ['yolo', 'coco']:
raise ValueError('coord_type, {}, is not supported'.format(coord_type))
#self.conn.update(table=dict(name = self.model_name, where='_DLChrVal_ eq "iouThreshold"'),
# set=[{'var':'_DLNumVal_', 'value':'0.5'}])
if detection_data is not None:
input_tbl_opts = input_table_check(detection_data)
det_tbl = self.conn.CASTable(**input_tbl_opts)
elif self.valid_res_tbl is not None:
det_tbl = self.valid_res_tbl
else:
raise DLPyError('Specify the detection_data option or run predict() before calling this function')
det_bb_list = []
if '_image_' in det_tbl.columns.tolist():
det_tbl.drop(['_image_'], axis=1, inplace=1)
freq_variable = []
max_num_det = int(det_tbl.max(axis = 1, numeric_only = True)['_nObjects_'])
if max_num_det == 0:
print('NOTE: Cannot find any object in detection_data or predict() cannot detect any object.')
return
for i in range(max_num_det):
freq_variable.append('_Object{}_'.format(i))
use_all_class = False
if classes is None:
use_all_class = True
classes = set(self.conn.freq(det_tbl, inputs = freq_variable).Frequency['FmtVar'])
classes = sorted(classes)
classes = [x for x in classes if not (x == '' or x.startswith('NoObject'))]
elif isinstance(classes, str):
classes = [classes]
nrof_classes = len(classes)
for idx, row in det_tbl.iterrows():
if coord_type.lower() == 'yolo':
[det_bb_list.append(Box(row.loc['_Object{}_x'.format(i)],
row.loc['_Object{}_y'.format(i)],
row.loc['_Object{}_width'.format(i)],
row.loc['_Object{}_height'.format(i)],
row.loc['_Object{}_'.format(i)],
row.loc['_P_Object{}_'.format(i)],
row.loc['idjoin'])) for i in range(int(row.loc['_nObjects_']))]
elif coord_type.lower() == 'coco':
[det_bb_list.append(Box(row.loc['_Object{}_xmin'.format(i)],
row.loc['_Object{}_ymin'.format(i)],
row.loc['_Object{}_xmax'.format(i)],
row.loc['_Object{}_ymax'.format(i)],
row.loc['_Object{}_'.format(i)],
row.loc['_P_Object{}_'.format(i)],
row.loc['idjoin'], 'xyxy')) for i in range(int(row.loc['_nObjects_']))]
input_tbl_opts = input_table_check(ground_truth)
gt_tbl = self.conn.CASTable(**input_tbl_opts)
gt_bb_list = []
if '_image_' in gt_tbl.columns.tolist():
gt_tbl.drop(['_image_'], axis=1, inplace=1)
freq_variable = []
max_num_gt = int(gt_tbl.max(axis = 1, numeric_only = True)['_nObjects_'])
if max_num_gt == 0:
print('NOTE: Cannot find any object in ground_truth.')
return
for i in range(max_num_gt):
freq_variable.append('_Object{}_'.format(i))
classes_gt = set(self.conn.freq(gt_tbl, inputs = freq_variable).Frequency['FmtVar'])
classes_gt = sorted(classes_gt)
classes_gt = [x for x in classes_gt if not (x == '' or x.startswith('NoObject'))]
for idx, row in gt_tbl.iterrows():
if coord_type.lower() == 'yolo':
[gt_bb_list.append(Box(row.loc['_Object{}_x'.format(i)],
row.loc['_Object{}_y'.format(i)],
row.loc['_Object{}_width'.format(i)],
row.loc['_Object{}_height'.format(i)],
row.loc['_Object{}_'.format(i)],
1.0,
row.loc['idjoin'])) for i in range(int(row.loc['_nObjects_']))]
elif coord_type.lower() == 'coco':
[gt_bb_list.append(Box(row.loc['_Object{}_xmin'.format(i)],
row.loc['_Object{}_ymin'.format(i)],
row.loc['_Object{}_xmax'.format(i)],
row.loc['_Object{}_ymax'.format(i)],
row.loc['_Object{}_'.format(i)],
1.0,
row.loc['idjoin'], 'xyxy')) for i in range(int(row.loc['_nObjects_']))]
classes_not_detected = [x for x in classes_gt if x not in classes]
if not isinstance(iou_thresholds, collections.abc.Iterable):
iou_thresholds = [iou_thresholds]
results = []
for iou_threshold in iou_thresholds:
results_iou = []
for i, cls in enumerate(classes):
if cls not in classes_gt:
print('Predictions contain the class, {}, that is not in ground truth'.format(cls))
continue
det_bb_cls_list = []
[det_bb_cls_list.append(bb) for bb in det_bb_list if bb.class_type == cls] # all of detections of the class
gt_bb_cls_list = []
[gt_bb_cls_list.append(bb) for bb in gt_bb_list if bb.class_type == cls]
det_bb_cls_list = sorted(det_bb_cls_list, key=lambda bb: bb.confidence, reverse=True)
tp = np.zeros(len(det_bb_cls_list)) # the detections of the class
fp = np.zeros(len(det_bb_cls_list))
gt_image_index_list = collections.Counter([bb.image_name for bb in gt_bb_cls_list])
for key, val in gt_image_index_list.items():
gt_image_index_list[key] = np.zeros(val)
print("Evaluating class: %s (%d detections)" % (str(cls), len(det_bb_cls_list)))
for idx, det_bb in enumerate(det_bb_cls_list):
gt_cls_image_list = [bb for bb in gt_bb_cls_list if bb.image_name == det_bb.image_name]
iou_max = sys.float_info.min
for j, gt_bb in enumerate(gt_cls_image_list):
if Box.iou(det_bb, gt_bb) > iou_max:
match_idx = j
iou_max = Box.iou(det_bb, gt_bb)
if iou_max >= iou_threshold:
if gt_image_index_list[det_bb.image_name][match_idx] == 0:
tp[idx] = 1
gt_image_index_list[det_bb.image_name][match_idx] = 1
else:
fp[idx] = 1
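# accumulate TP/FP over the confidence-sorted detections to build the
# precision-recall curve, then compute an interpolated average precision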
acc_tp = np.cumsum(tp)
acc_fp = np.cumsum(fp)
precision = np.divide(acc_tp, (acc_tp + acc_fp))
recall = np.divide(acc_tp, len(gt_bb_cls_list))
interpolated_precision = [0]
[interpolated_precision.append(i) for i in precision]
interpolated_precision.append(0)
for i in range(len(interpolated_precision) - 1, 0, -1):
interpolated_precision[i - 1] = max(interpolated_precision[i - 1], interpolated_precision[i])
interpolated_precision = interpolated_precision[1:-1]
recall_level = [i / 10.0 for i in range(10)]
interpolated_ap = np.interp([i for i in recall_level if i < recall[-1]], recall, interpolated_precision)
ap_cls = np.sum(interpolated_ap) / 11
results_class = {
'class': cls,
'precision': precision,
'recall': recall,
'AP': ap_cls,
'interpolated precision': interpolated_ap,
'interpolated recall': recall_level,
'total positives': len(gt_bb_cls_list),
'total TP': np.sum(tp),
'total FP': np.sum(fp)
}
results_iou.append(results_class)
ap_sum = 0
for i in results_iou:
ap_sum += i['AP']
if use_all_class:
mean_ap = ap_sum / (nrof_classes + len(classes_not_detected))
else:
mean_ap = ap_sum / nrof_classes
results.append({'IoU Threshold': iou_threshold, 'Class Evaluation': results_iou, 'AP': mean_ap})
return results
def predict(self, data, text_parms=None, layer_out=None, layers=None, gpu=None, buffer_size=10,
mini_batch_buf_size=None, top_probs=None, use_best_weights=False, n_threads=None,
layer_image_type=None, log_level=0):
"""
Evaluate the deep learning model on a specified validation data set.
Unlike the `evaluate` function, this function only runs the
inference and does not do any further analysis. It is
well suited to non-classification tasks.
Parameters
----------
data : string or CASTable
Specifies the input data.
text_parms : :class:`TextParms`, optional
Specifies the parameters for the text inputs.
layer_out : string, optional
Specifies the settings for an output table that includes
layer output values. By default, all layers are included.
You can filter the list with the layers parameter.
layers : list of strings
Specifies the names of the layers to include in the output
layers table.
gpu : :class:`Gpu`, optional
When specified, the action uses graphical processing
unit hardware. The simplest way to use GPU processing is
to specify "gpu=1". In this case, the default values of
other GPU parameters are used. Setting gpu=1 enables all
available GPU devices for use. Setting gpu=0 disables GPU
processing.
buffer_size : int, optional
Specifies the number of observations to score in a single
batch. Larger values use more memory.
Default: 10
mini_batch_buf_size : int, optional
Specifies the size of a buffer that is used to save input
data and intermediate calculations. By default, each layer
allocates an input buffer that is equal to the number of
input channels multiplied by the input feature map size
multiplied by the bufferSize value. You can reduce memory
usage by specifying a value that is smaller than the
bufferSize. The only disadvantage to specifying a small
value is that run time can increase because multiple smaller
matrices must be multiplied instead of a single large
matrix multiply.
top_probs : int, optional
Specifies to include the predicted probabilities along with
the corresponding labels in the results. For example, if you
specify 5, then the top 5 predicted probabilities are shown
in the results along with the corresponding labels.
use_best_weights : bool, optional
When set to True, the weights that provide the smallest loss
error, saved during a previous training, are used for scoring the
input data rather than the final weights from the training.
default: False
n_threads : int, optional
Specifies the number of threads to use. If nothing is set then
all of the cores available in the machine(s) will be used.
layer_image_type : string, optional
Specifies the image type to store in the output layers table.
JPG means a compressed image (e.g., jpg, png, and tiff)
WIDE means a pixel per column
Default: jpg
Valid Values: JPG, WIDE
log_level : int, optional
specifies the reporting level for progress messages sent to the client.
The default level 0 indicates that no messages are sent.
Setting the value to 1 sends start and end messages.
Setting the value to 2 adds the iteration history to the client messaging.
default: 0
Returns
-------
:class:`CASResults`
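Examples
--------
A minimal usage sketch, assuming a trained model ``model`` and a CAS
table named 'test_data' (the table name is illustrative); the scored
table is also kept in ``model.valid_res_tbl``:

>>> res = model.predict(data='test_data', top_probs=3, n_threads=4)
>>> model.valid_res_tbl.head()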
"""
input_tbl_opts = input_table_check(data)
input_table = self.conn.CASTable(**input_tbl_opts)
copy_vars = input_table.columns.tolist()
copy_vars = [x for x in copy_vars if not (x.startswith('_Object') or x.startswith('_nObject'))]
if self.valid_res_tbl is None:
valid_res_tbl = random_name('Valid_Res')
else:
valid_res_tbl = self.valid_res_tbl.name
lo = None
if layer_out is not None:
lo = dict(replace=True, name=layer_out)
en = True
if self.model_type == 'RNN':
en = False
if use_best_weights and self.best_weights is not None:
print('NOTE: Using the weights providing the smallest loss error.')
res = self.score(table=input_table, model=self.model_table, init_weights=self.best_weights,
copy_vars=copy_vars, casout=dict(replace=True, name=valid_res_tbl), encode_name=en,
text_parms=text_parms, layer_out=lo, layers=layers, gpu=gpu,
mini_batch_buf_size=mini_batch_buf_size, top_probs=top_probs, buffer_size=buffer_size,
n_threads=n_threads, layer_image_type=layer_image_type, log_level=log_level)
self.valid_res_tbl = self.conn.CASTable(valid_res_tbl)
return res
else:
res = self.score(table=input_table, model=self.model_table, init_weights=self.model_weights,
copy_vars=copy_vars, casout=dict(replace=True, name=valid_res_tbl), encode_name=en,
text_parms=text_parms, layer_out=lo, layers=layers, gpu=gpu,
mini_batch_buf_size=mini_batch_buf_size, top_probs=top_probs, buffer_size=buffer_size,
n_threads=n_threads, layer_image_type=layer_image_type, log_level=log_level)
self.valid_res_tbl = self.conn.CASTable(valid_res_tbl)
return res
def forecast(self, test_table=None, horizon=1, train_table=None, layer_out=None,
layers=None, gpu=None, buffer_size=10, mini_batch_buf_size=None,
use_best_weights=False, n_threads=None, casout=None):
"""
Make forecasts based on deep learning models trained on `TimeseriesTable`.
This method performs either one-step-ahead forecasting or multi-step-ahead
forecasting determined by the `horizon` parameter. If the model is autoregressive
(the value of the response variable depends on its values at earlier time steps),
it performs one-step-ahead forecasting recursively to achieve multi-step-ahead
forecasting. More specifically, the predicted value at the previous
time step is inserted into the input vector for predicting the next time step.
Parameters
----------
test_table : string or :class:`CASTable`, optional
Specifies the test table. If `test_table=None`, the model cannot have
additional static covariates or predictor timeseries, and can only
be an autoregressive model. In this case, the forecast extends the
timeseries from the last timestamp found in the training/validation set.
If the model contains additional static covariates or predictor
timeseries (that are available for predicting the target timeseries),
the test table has to be provided, and the forecast starts from the
first timestamp in the test data. If the model is autoregressive, and
the test data columns do not include all the required preceding
time points of the target series (the lagged target variables),
the forecast will be extended from the last timestamp in
training/validation set and only use the static covariates or
predictor timeseries information from the test data if they are
available for the corresponding time points.
Default : `None`
horizon : int, optional.
Specifies the forecasting horizon. If `horizon=1` and test data
is provided, it will make one-step-ahead forecasts for all timestamps
in the test data (given that the test data has all the columns required
to make the prediction). Otherwise, it will only make one forecasted series
per BY group, with the length specified by the `horizon` parameter.
Default : 1
train_table : :class:`TimeseriesTable`, optional.
If the model has been fitted with a TimeseriesTable, this argument is ignored.
Otherwise, this argument is required and should reference the TimeseriesTable
used for model training, as it contains information such as the timestamp
to extend the forecast from and the sequence length.
layer_out : string, optional
Specifies the settings for an output table that includes
layer output values. By default, all layers are included.
You can filter the list with the layers parameter.
layers : list of strings
Specifies the names of the layers to include in the output
layers table.
gpu : :class:`Gpu`, optional
When specified, the action uses graphical processing
unit hardware. The simplest way to use GPU processing is
to specify "gpu=1". In this case, the default values of
other GPU parameters are used. Setting gpu=1 enables all
available GPU devices for use. Setting gpu=0 disables GPU
processing.
buffer_size : int, optional
Specifies the number of observations to score in a single
batch. Larger values use more memory.
Default: 10
mini_batch_buf_size : int, optional
Specifies the size of a buffer that is used to save input
data and intermediate calculations. By default, each layer
allocates an input buffer that is equal to the number of
input channels multiplied by the input feature map size
multiplied by the bufferSize value. You can reduce memory
usage by specifying a value that is smaller than the
bufferSize. The only disadvantage to specifying a small
value is that run time can increase because multiple smaller
matrices must be multiplied instead of a single large
matrix multiply.
use_best_weights : bool, optional
When set to True, the weights that provide the smallest loss
error, saved during a previous training, are used for scoring the
input data rather than the final weights from the training.
default: False
n_threads : int, optional
Specifies the number of threads to use. If nothing is set then
all of the cores available in the machine(s) will be used.
casout : dict or :class:`CASTable`, optional
If it is a dict, it specifies the output CASTable parameters.
If it is a CASTable, it is the CASTable that will be overwritten.
None means a new CASTable with a random name will be generated.
Default: None
Returns
-------
:class:`CASTable`
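Examples
--------
A minimal usage sketch, assuming an autoregressive model ``model``
fitted on a TimeseriesTable and a test CAS table named 'test_ts' that
holds the covariates for the forecast horizon (names are illustrative);
a 12-step-ahead forecast is built recursively, one step at a time:

>>> out = model.forecast(test_table='test_ts', horizon=12,
...                      casout=dict(name='my_forecast'))
>>> out.head()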
"""
if horizon > 1:
self.score_message_level = 'error' #prevent multiple notes in multistep forecast
if self.train_tbl is None:
self.train_tbl = train_table
if not isinstance(self.train_tbl, TimeseriesTable):
raise RuntimeError('If the model is not fitted with a TimeseriesTable '
'(such as being imported from other sources), '
'please consider using the train_table argument '
'to pass a reference to the TimeseriesTable used for training, '
'since model.forecast requires information '
'including the last timestamp to extend from and subsequence length etc, '
'which is stored in preprocessed TimeseriesTable. '
'If this information is not available, consider using model.predict '
'for non-timeseries prediction.')
if test_table is None:
print('NOTE: test_table is None, extending forecast from training/validation data')
if isinstance(self.valid_tbl, str):
self.valid_tbl = self.conn.CASTable(self.valid_tbl)
train_valid_tbl = _combine_table(self.train_tbl, self.valid_tbl)
cur_results = _get_last_obs(train_valid_tbl, self.train_tbl.timeid,
groupby=self.train_tbl.groupby_var)
self.conn.retrieve('table.droptable', _messagelevel='error', name=train_valid_tbl.name)
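# recursive multi-step forecasting: the prediction from the previous step
# is fed back in as the lagged target value when preparing the next input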
for i in range(horizon):
if i == 0:
autoregressive_series = self.train_tbl.autoregressive_sequence + [self.train_tbl.target]
else:
autoregressive_series = self.train_tbl.autoregressive_sequence + ['_DL_Pred_']
cur_input = _prepare_next_input(cur_results, timeid=self.train_tbl.timeid,
timeid_interval=self.train_tbl.acc_interval,
autoregressive_series=autoregressive_series,
sequence_opt=self.train_tbl.sequence_opt,
groupby=self.train_tbl.groupby_var)
if i == 0:
self.conn.retrieve('table.droptable', _messagelevel='error', name=cur_results.name)
self.predict(cur_input, layer_out=layer_out, layers=layers,
gpu=gpu, buffer_size=buffer_size,
mini_batch_buf_size=mini_batch_buf_size,
use_best_weights=use_best_weights,
n_threads=n_threads)
self.conn.retrieve('table.droptable', _messagelevel='error', name=cur_input.name)
cur_results = self.valid_res_tbl
if i == 0:
output_tbl = cur_results
if casout is None:
casout={'name': random_name('forecast_output', 6)}
# Use _combine_table here to serve as a renaming
output_tbl = _combine_table(output_tbl, casout=casout)
else:
output_tbl = _combine_table(output_tbl, cur_results, casout=output_tbl)
else:
if isinstance(test_table, str):
test_table = self.conn.CASTable(test_table)
if set(self.train_tbl.autoregressive_sequence).issubset(test_table.columns.tolist()):
if horizon == 1:
self.predict(test_table, layer_out=layer_out, layers=layers,
gpu=gpu, buffer_size=buffer_size,
mini_batch_buf_size=mini_batch_buf_size,
use_best_weights=use_best_weights,
n_threads=n_threads)
output_tbl = self.valid_res_tbl
if casout is None:
casout={'name': random_name('forecast_output', 6)}
# Use _combine_table here to serve as a renaming
output_tbl = _combine_table(output_tbl, casout=casout)
else:
cur_input = _get_first_obs(test_table, self.train_tbl.timeid,
groupby=self.train_tbl.groupby_var)
for i in range(horizon):
if i > 0:
autoregressive_series = self.train_tbl.autoregressive_sequence + ['_DL_Pred_']
cur_input = _prepare_next_input(cur_results, timeid=self.train_tbl.timeid,
timeid_interval=self.train_tbl.acc_interval,
autoregressive_series=autoregressive_series,
sequence_opt=self.train_tbl.sequence_opt,
covar_tbl = test_table,
groupby=self.train_tbl.groupby_var)
self.predict(cur_input, layer_out=layer_out, layers=layers,
gpu=gpu, buffer_size=buffer_size,
mini_batch_buf_size=mini_batch_buf_size,
use_best_weights=use_best_weights,
n_threads=n_threads)
self.conn.retrieve('table.droptable', _messagelevel='error', name=cur_input.name)
cur_results = self.valid_res_tbl
if i == 0:
output_tbl = cur_results
if casout is None:
casout={'name': random_name('forecast_output', 6)}
# Use _combine_table here to serve as a renaming
output_tbl = _combine_table(output_tbl, casout=casout)
else:
output_tbl = _combine_table(output_tbl, cur_results, casout=output_tbl)
else:
if isinstance(self.valid_tbl, str):
self.valid_tbl = self.conn.CASTable(self.valid_tbl)
train_valid_tbl = _combine_table(self.train_tbl, self.valid_tbl)
cur_results = _get_last_obs(train_valid_tbl, self.train_tbl.timeid,
groupby=self.train_tbl.groupby_var)
self.conn.retrieve('table.droptable', _messagelevel='error', name=train_valid_tbl.name)
for i in range(horizon):
if i == 0:
autoregressive_series = self.train_tbl.autoregressive_sequence + [self.train_tbl.target]
else:
autoregressive_series = self.train_tbl.autoregressive_sequence + ['_DL_Pred_']
cur_input = _prepare_next_input(cur_results, timeid=self.train_tbl.timeid,
timeid_interval=self.train_tbl.acc_interval,
autoregressive_series=autoregressive_series,
sequence_opt=self.train_tbl.sequence_opt,
covar_tbl = test_table,
groupby=self.train_tbl.groupby_var)
if i == 0:
self.conn.retrieve('table.droptable', _messagelevel='error', name=cur_results.name)
if cur_input.shape[0] == 0:
raise RuntimeError('Input test data does not have all the required autoregressive ' +
'lag variables that appeared in the training set. ' +
'In this case, it has to have the timestamp that succeeds ' +
'the last time point in training/validation set.')
self.predict(cur_input, layer_out=layer_out, layers=layers,
gpu=gpu, buffer_size=buffer_size,
mini_batch_buf_size=mini_batch_buf_size,
use_best_weights=use_best_weights,
n_threads=n_threads)
self.conn.retrieve('table.droptable', _messagelevel='error', name=cur_input.name)
cur_results = self.valid_res_tbl
if i == 0:
output_tbl = cur_results
if casout is None:
casout={'name': random_name('forecast_output', 6)}
# Use _combine_table here to serve as a renaming
output_tbl = _combine_table(output_tbl, casout=casout)
else:
output_tbl = _combine_table(output_tbl, cur_results, casout=output_tbl)
self.score_message_level = 'note'
return output_tbl
def score(self, table, model=None, init_weights=None, text_parms=None, layer_out=None,
layer_image_type='jpg', layers=None, copy_vars=None, casout=None, gpu=None, buffer_size=10,
mini_batch_buf_size=None, encode_name=False, random_flip='none', random_crop='none', top_probs=None,
random_mutation='none', n_threads=None, has_output_term_ids=False, init_output_embeddings=None,
log_level=None):
"""
Run inference on the input data with the trained deep learning model.
Parameters
----------
table : string or CASTable
Specifies the input data.
model : string or CASTable, optional
Specifies the in-memory table that is the model.
init_weights : string or CASTable, optional
Specifies an in-memory table that contains the model weights.
text_parms : TextParms, optional
Specifies the parameters for the text inputs.
layer_out : string, optional
Specifies the settings for an output table that includes layer
output values. By default, all layers are included. You can
filter the list with the layers parameter.
layer_image_type : string, optional
Specifies the image type to store in the output layers table.
JPG means a compressed image (e.g., jpg, png, and tiff)
WIDE means a pixel per column
Default: jpg
Valid Values: JPG, WIDE
layers : list-of-strings, optional
Specifies the names of the layers to include in the output
layers table.
copy_vars : list-of-strings, optional
Specifies the variables to transfer from the input table to
the output table.
casout : string or dict, optional
Specifies the name of the output table.
gpu : GPU, optional
When specified, the action uses graphical processing unit hardware.
The simplest way to use GPU processing is to specify "gpu=1".
In this case, the default values of other GPU parameters are used.
Setting gpu=1 enables all available GPU devices for use. Setting
gpu=0 disables GPU processing.
buffer_size : int, optional
Specifies the number of observations to score in a single
batch. Larger values use more memory.
Default: 10
mini_batch_buf_size : int, optional
Specifies the size of a buffer that is used to save input data
and intermediate calculations. By default, each layer allocates
an input buffer that is equal to the number of input channels
multiplied by the input feature map size multiplied by the
bufferSize value. You can reduce memory usage by specifying a
value that is smaller than the bufferSize. The only disadvantage
to specifying a small value is that run time can increase because
multiple smaller matrices must be multiplied instead of a single
large matrix multiply.
encode_name : bool, optional
Specifies whether to encode the variable names in the generated
casout table, such as the predicted probabilities of each
response variable level.
Default: False
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is used.
H stands for horizontal
V stands for vertical
HV stands for horizontal and vertical
Approximately half of the input data is subject to flipping.
Default: NONE
Valid Values: NONE, H, V, HV
random_crop : string, optional
Specifies how to crop the data in the input layer when image
data is used. Images are cropped to the values that are specified
in the width and height parameters. Only the images with one or
both dimensions that are larger than those sizes are cropped.
UNIQUE: specifies to crop images to the size specified in the
height and width parameters. Images that are less than or equal
to the size are not modified. For images that are larger, the
cropping begins at a random offset for x and y.
Default: NONE
Valid Values: NONE, UNIQUE
top_probs : int, optional
Specifies to include the predicted probabilities along with
the corresponding labels in the results. For example, if you
specify 5, then the top 5 predicted probabilities are shown in
the results along with the corresponding labels.
random_mutation : string, optional
Specifies how to mutate images.
Default: NONE
Valid Values: NONE, RANDOM
n_threads : int, optional
Specifies the number of threads to use. If nothing is set then
all of the cores available in the machine(s) will be used.
log_level : int, optional
specifies the reporting level for progress messages sent to the client.
The default level 0 indicates that no messages are sent.
Setting the value to 1 sends start and end messages.
Setting the value to 2 adds the iteration history to the client messaging.
default: 0
Returns
-------
:class:`CASResults`
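Examples
--------
A minimal usage sketch of the low-level scoring wrapper; ``predict``
and ``evaluate`` are the usual entry points. It assumes a trained
model ``model``, a CAS table named 'test_data', and an output table
name (all illustrative):

>>> res = model.score(table='test_data', model=model.model_table,
...                   init_weights=model.model_weights,
...                   copy_vars=['_label_'],
...                   casout=dict(replace=True, name='score_out'))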
"""
if self.model_type == 'CNN':
parameters = DLPyDict(table=table, model=model, init_weights=init_weights, text_parms=text_parms,
layer_image_type=layer_image_type, layers=layers, copy_vars=copy_vars, casout=casout,
gpu=gpu, mini_batch_buf_size=mini_batch_buf_size, buffer_size=buffer_size,
layer_out=layer_out, encode_name=encode_name, n_threads=n_threads,
random_flip=random_flip, random_crop=random_crop, top_probs=top_probs,
random_mutation=random_mutation, log_level=log_level)
else:
parameters = DLPyDict(table=table, model=model, init_weights=init_weights, text_parms=text_parms,
layers=layers, copy_vars=copy_vars, casout=casout,
gpu=gpu, mini_batch_buf_size=mini_batch_buf_size, buffer_size=buffer_size,
layer_out=layer_out, encode_name=encode_name, n_threads=n_threads,
random_flip=random_flip, random_crop=random_crop, top_probs=top_probs,
random_mutation=random_mutation, log_level=log_level)
return self._retrieve_('deeplearn.dlscore', message_level=self.score_message_level, **parameters)
def _train_visualize(self, table, attributes=None, inputs=None, nominals=None, texts=None, valid_table=None,
valid_freq=1, model=None, init_weights=None, model_weights=None, target=None,
target_sequence=None, sequence=None, text_parms=None, weight=None, gpu=None, seed=0,
record_seed=None, missing='mean', optimizer=None, target_missing='mean', best_weights=None,
repeat_weight_table=False, force_equal_padding=None, data_specs=None, n_threads=None,
target_order='ascending', x=None, y=None, y_loss=None, total_sample_size=None, e=None,
iter_history=None, freq=None, status=None):
"""
Function that calls the training action enriched with training history visualization.
This is an internal private function and for documentation, please refer to fit() or train()
"""
self._train_visualization()
b_w = None
if best_weights is not None:
b_w = dict(replace=True, name=best_weights)
if optimizer is not None:
optimizer['log_level'] = 3
else:
optimizer=Optimizer(log_level=3)
parameters = DLPyDict(table=table, attributes=attributes, inputs=inputs, nominals=nominals, texts=texts,
valid_table=valid_table, valid_freq=valid_freq, model=model, init_weights=init_weights,
model_weights=model_weights, target=target, target_sequence=target_sequence,
sequence=sequence, text_parms=text_parms, weight=weight, gpu=gpu, seed=seed,
record_seed=record_seed, missing=missing, optimizer=optimizer,
target_missing=target_missing, best_weights=b_w, repeat_weight_table=repeat_weight_table,
force_equal_padding=force_equal_padding, data_specs=data_specs, n_threads=n_threads,
target_order=target_order)
import swat
from ipykernel.comm import Comm
comm = Comm(target_name='%(plot_id)s_comm' % dict(plot_id='foo'))
with swat.option_context(print_messages=False):
self._retrieve_('deeplearn.dltrain', message_level='note',
responsefunc=_pre_parse_results(x, y, y_loss, total_sample_size,
e, comm, iter_history, freq, status),
**parameters)
if status[0] == 0:
self.model_ever_trained = True
return iter_history[0]
else:
return None
def _train_visualization(self):
from IPython.display import display, HTML
display(HTML('''
<canvas id='%(plot_id)s_canvas' style='width: %(plot_width)spx; height: %(plot_height)spx'></canvas>
<script language='javascript'>
<!--
requirejs.config({
paths: {
Chart: ['//cdnjs.cloudflare.com/ajax/libs/Chart.js/2.7.0/Chart.min']
}
});
require(['jquery', 'Chart'], function($, Chart) {
var comm = Jupyter.notebook.kernel.comm_manager.new_comm('%(plot_id)s_comm')
var ctx = document.getElementById('%(plot_id)s_canvas').getContext('2d');
var chart = new Chart(ctx, {
type: 'line',
data: {
labels: [],
datasets: [{
label: 'FitError',
borderColor: '#FF0000',
backgroundColor: '#FF0000',
fill: false,
data: [],
yAxisID: 'y-axis-1'
}, {
label: 'Loss',
borderColor: '#0000FF',
backgroundColor: '#0000FF',
fill: false,
data: [],
yAxisID: 'y-axis-2'
}],
},
options: {
stacked: false,
scales: {
yAxes: [{
type: 'linear',
display: true,
position: 'left',
id: 'y-axis-1',
data: []
}, {
type: 'linear',
display: true,
position: 'right',
id: 'y-axis-2',
data: [],
// grid line settings
gridLines: {
drawOnChartArea: false, // only want the grid lines for one axis to show up
},
}],
}
}
});
Jupyter.notebook.kernel.comm_manager.register_target('%(plot_id)s_comm',
function(comm, msg) {
comm.on_msg(function(msg) {
var data = msg.content.data;
chart.data.labels.push(data.label);
for ( var i = 0; i < chart.data.datasets.length; i++ ) {
chart.data.datasets[i].data.push(data.data[i]);
}
chart.update(0);
})
comm.on_close(function() {
comm.send({'command': 'stop'});
})
// Send message when plot is removed
$.event.special.destroyed = {
remove: function(o) {
if (o.handler) {
o.handler()
}
}
}
$('#%(plot_id)s_canvas').bind('destroyed', function() {
comm.send({'command': 'stop'});
});
}
);
});
//-->
</script>''' % dict(plot_id='foo', plot_width='950', plot_height='400')))
def plot_evaluate_res(self, cas_table=None, img_type='A', image_id=None, filename=None, n_images=5,
target='_label_', predicted_class=None, label_class=None, randomize=False,
seed=-1):
'''
Plot the bar chart of the classification predictions
Parameters
----------
cas_table : CASTable, optional
If None results from model.evaluate are used
Can pass in another table that has the same
prediction column names as in model.valid_res_tbl
img_type : str, optional
Specifies the type of classification results to plot
* A - All type of results
* C - Correctly classified results
* M - Misclassified results
image_id : list or int, optional
Specifies the image by '_id_' column to be displayed
filename : list of strings or string, optional
The name of a file in '_filename_0' or '_path_'; if the name is
not unique, multiple images are returned
n_images : int, optional
Number of images to evaluate
target : string, optional
Name of the column that contains the correct label
predicted_class : string, optional
Name of desired prediction class to plot results
label_class : string, optional
Actual target label of desired class to plot results
randomize : bool, optional
If True, randomize the results
seed : int, optional
Random seed used if randomize is true
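Examples
--------
A minimal usage sketch, assuming ``model.evaluate`` has already been
run so that ``model.valid_res_tbl`` holds the scored images; this plots
up to five misclassified images with their top predicted probabilities:

>>> model.plot_evaluate_res(img_type='M', n_images=5, randomize=True)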
'''
from .utils import plot_predict_res
# create copy of cas_table so can be dropped after filtering
if not cas_table:
if self.valid_res_tbl:
cas_table = self.valid_res_tbl.partition(casout=dict(name='temp_plot', replace=True))['casTable']
else:
raise DLPyError("Need to run model.evaluate()")
else:
cas_table = cas_table.partition(casout=dict(name='temp_plot', replace=True))['casTable']
if target not in cas_table.columns:
if 'Label' in cas_table.columns:
target = 'Label'
else:
raise DLPyError("target column {} not found in cas_table {}".format(target, cas_table.name))
if 'I__label_' not in cas_table.columns:
raise DLPyError("cas_table must contain prediction column named 'I__lable_'."
"i.e. model.valid_res_tbl can be used after running model.evaluate")
filtered = None
if filename or image_id:
if '_id_' not in cas_table.columns.tolist():
print("'_id_' column not in cas_table, processing complete table")
else:
if filename and image_id:
print(" image_id supersedes filename, image_id being used")
if image_id:
filtered = filter_by_image_id(cas_table, image_id)
elif filename:
filtered = filter_by_filename(cas_table, filename)
if filtered:
if filtered.numrows == 0:
raise DLPyError(" image_id or filename not found in CASTable {}".format(cas_table.name))
self.conn.droptable(cas_table)
cas_table = filtered
if img_type == 'A':
if cas_table.numrows().numrows == 0:
raise DLPyError("No images to plot")
elif img_type == 'C':
cas_table = cas_table[cas_table[target] == cas_table['I__label_']]
cas_table = cas_table.partition(casout=dict(name=cas_table.name, replace=True))['casTable']
if cas_table.numrows().numrows == 0:
raise DLPyError("No correct labels to plot")
elif img_type == 'M':
cas_table = cas_table[cas_table[target] != cas_table['I__label_']]
cas_table.partition(casout=dict(name=cas_table.name, replace=True))['casTable']
if cas_table.numrows().numrows == 0:
raise DLPyError("No misclassified labels to plot")
else:
raise DLPyError('img_type must be one of the following:\n'
'A: for all the images\n'
'C: for correctly classified images\n'
'M: for misclassified images\n')
if label_class:
unique_labels = list(set(cas_table[target].tolist()))
cas_table = cas_table[cas_table['_label_'] == label_class]
cas_table.partition(casout=dict(name=cas_table.name, replace=True))['casTable']
if cas_table.numrows().numrows == 0:
raise DLPyError("There are no labels of {}. The labels consist of {}". \
format(label_class, unique_labels))
if predicted_class:
unique_predictions = list(set(cas_table['I__label_'].tolist()))
cas_table = cas_table[cas_table['I__label_'] == predicted_class]
cas_table.partition(casout=dict(name=cas_table.name, replace=True))['casTable']
if cas_table.numrows().numrows == 0:
raise DLPyError("There are no predicted labels of {}. The predicted labels consist of {}". \
format(predicted_class, unique_predictions))
columns_for_pred = [item for item in cas_table.columns
if item[0:9] == 'P__label_']
if len(columns_for_pred) == 0:
raise DLPyError("Input table has no columns for predictions. "
"Run model.predict the predictions are stored "
"in the attribute model.valid_res_tbl.")
fetch_cols = columns_for_pred + ['_id_']
if randomize:
cas_table.append_computedvars(['random_index'])
            cas_table.append_computedvarsprogram('call streaminit({});random_index=rand("UNIFORM")'.format(seed))
img_table = cas_table.retrieve('image.fetchimages', _messagelevel='error',
table=dict(**cas_table.to_table_params()),
fetchVars=fetch_cols,
sortby='random_index', to=n_images)
else:
img_table = cas_table.retrieve('image.fetchimages', fetchVars=fetch_cols, to=n_images,
sortBy=[{'name': '_id_', 'order': 'ASCENDING'}])
self.conn.droptable(cas_table)
img_table = img_table['Images']
for im_idx in range(len(img_table)):
image = img_table['Image'][im_idx]
label = 'Correct Label for image {} : {}'.format(img_table['_id_'][im_idx], img_table['Label'][im_idx])
labels = [item[9:].title() for item in columns_for_pred]
values = np.asarray(img_table[columns_for_pred].iloc[im_idx])
values, labels = zip(*sorted(zip(values, labels)))
values = values[-5:]
labels = labels[-5:]
labels = [item[:(item.find('__') > 0) * item.find('__') +
(item.find('__') < 0) * len(item)] for item in labels]
labels = [item.replace('_', '\n') for item in labels]
plot_predict_res(image, label, labels, values)
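    # Usage sketch (illustrative only, not executed): how plot_evaluate_res
    # might be called after scoring. The names `model` and `test_tbl` are
    # assumptions and are not defined in this module.
    #
    #     model.evaluate(test_tbl)                      # populates model.valid_res_tbl
    #     model.plot_evaluate_res(img_type='M',         # show misclassified images
    #                             n_images=3,
    #                             randomize=True, seed=1234)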
def get_feature_maps(self, data, label=None, idx=0, image_id=None, **kwargs):
"""
Extract the feature maps for a single image
Parameters
----------
data : ImageTable
Specifies the table containing the image data.
label : str, optional
            Specifies which class of image to use.
            Default: None
        idx : int, optional
            Specifies which row index of the feature map to get.
            Default: 0
image_id : list or int, optional
Filters data using '_id_' column
**kwargs : keyword arguments, optional
Specifies the optional arguments for the dlScore action.
"""
from .images import ImageTable
if image_id:
filtered = filter_by_image_id(data, image_id)
data = ImageTable.from_table(filtered)
self.conn.droptable(filtered)
try:
uid = data.uid
except:
raise TypeError("The input data should be an ImageTable.")
if label is None:
label = uid.iloc[0, 0]
uid = uid.loc[uid['_label_'] == label]
if len(uid) == 0:
raise DLPyError('No images were found. Please check input '
'table or label name.')
elif idx >= uid.shape[0]:
raise DLPyError('image_id should be an integer between 0'
' and {}.'.format(uid.shape[0] - 1))
uid_value = uid.iloc[idx, 1]
uid_name = uid.columns[1]
input_tbl = input_table_check(data)
feature_maps_tbl = random_name('Feature_Maps') + '_{}'.format(idx)
score_options = dict(model=self.model_table, initWeights=self.model_weights,
table=dict(where='{}="{}"'.format(uid_name,
uid_value), **input_tbl),
layerOut=dict(name=feature_maps_tbl),
randomflip='none',
randomcrop='none',
layerImageType='jpg',
encodeName=True)
score_options.update(kwargs)
self._retrieve_('deeplearn.dlscore', **score_options)
layer_out_jpg = self.conn.CASTable(feature_maps_tbl)
feature_maps_names = [i for i in layer_out_jpg.columninfo().ColumnInfo.Column]
feature_maps_structure = dict()
for feature_map_name in feature_maps_names:
feature_maps_structure[int(feature_map_name.split('_')[2])] = \
int(feature_map_name.split('_')[4]) + 1
self.feature_maps = FeatureMaps(self.conn, feature_maps_tbl,
structure=feature_maps_structure)
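    # Usage sketch (illustrative only, not executed): extracting and displaying
    # feature maps. `model`, `img_tbl`, and the label value are assumptions.
    #
    #     model.get_feature_maps(img_tbl, label='dog', idx=0)
    #     model.feature_maps.display(layer_id=1, filter_id=[0, 1, 2, 3])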
def get_features(self, data, dense_layer, target='_label_', **kwargs):
"""
Extract linear features for a data table from the layer specified by dense_layer
Parameters
----------
data : CASTable or string or dict
Specifies the table containing the image data
dense_layer : string
Specifies the name of the layer that is extracted
target : string, optional
Specifies the name of the column including the response variable
**kwargs : keyword arguments, optional
Specifies the optional arguments for the dlScore action.
Returns
-------
( nxp-ndarray, n-ndarray )
The first ndarray is of size n by p, where n is the sample size
and p is the number of features. The features extracted by the
model at the specified dense_layer. The second ndarray is of
size n and contains the response variable of the original data.
"""
input_tbl_opts = input_table_check(data)
input_table = self.conn.CASTable(**input_tbl_opts)
if target not in input_table.columns.tolist():
raise DLPyError('Column name "{}" not found in the data table.'.format(target))
feature_tbl = random_name('Features')
score_options = dict(model=self.model_table, initWeights=self.model_weights,
table=dict(**input_tbl_opts),
layerOut=dict(name=feature_tbl),
layerList=dense_layer,
layerImageType='wide',
randomflip='none',
randomcrop='none',
encodeName=True)
score_options.update(kwargs)
self._retrieve_('deeplearn.dlscore', **score_options)
x = self.conn.CASTable(feature_tbl).as_matrix()
y = self.conn.CASTable(**input_tbl_opts)[target].as_matrix().ravel()
return x, y
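    # Usage sketch (illustrative only, not executed): the extracted features can
    # be fed to a downstream classifier. The layer name 'fc1' and the use of
    # scikit-learn are assumptions for illustration.
    #
    #     x, y = model.get_features(img_tbl, dense_layer='fc1')
    #     from sklearn.linear_model import LogisticRegression
    #     clf = LogisticRegression(max_iter=1000).fit(x, y)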
def heat_map_analysis(self, data=None, mask_width=None, mask_height=None, step_size=None,
display=True, img_type='A', image_id=None, filename=None, inputs="_image_",
target="_label_", max_display=5, **kwargs):
"""
Conduct a heat map analysis on table of images
Parameters
----------
data : ImageTable, optional
If data is None then the results from model.predict are used.
data specifies the table containing the image data which must contain
the columns '_image_', '_label_', '_id_' and '_filename_0'.
mask_width : int, optional
            Specifies the width of the mask that covers the region of the image.
        mask_height : int, optional
            Specifies the height of the mask that covers the region of the image.
        step_size : int, optional
            Specifies the step size of the movement of the mask.
display : bool, optional
Specifies whether to display the results.
img_type : string, optional
Can be 'A' for all images, 'C' for only correctly classified images, or
'M' for misclassified images.
image_id : list or int, optional
A unique image id to get the heatmap. A standard column of ImageTable
filename : list of strings or string, optional
            The name of a file in '_filename_0'; if the name is not unique,
            multiple rows are returned
inputs : string, optional
Name of image column for the input into the model.predict function
target : string, optional
Name of column for the correct label
max_display : int, optional
            Maximum number of images to display. Heat map analysis takes a
            significant amount of time to run, so the default maximum is 5.
**kwargs : keyword arguments, optional
Specifies the optional arguments for the dlScore action.
Notes
-----
        The heat map indicates the regions of the image that are most important
        for the classification.
Details of the process can be found at: https://arxiv.org/pdf/1311.2901.pdf.
Returns
-------
:class:`pandas.DataFrame`
Contains Columns: ['I__label_', 'P__label_(for each label)', '_filename_0',
'_id_', '_image_', '_label_', 'heat_map']
"""
def get_predictions(data=data, inputs=inputs, target=target, kwargs=kwargs):
input_tbl_opts = input_table_check(data)
input_table = self.conn.CASTable(**input_tbl_opts)
if target not in input_table.columns.tolist():
raise DLPyError('Column name "{}" not found in the data table.'.format(target))
if inputs not in input_table.columns.tolist():
raise DLPyError('Column name "{}" not found in the data table.'.format(inputs))
input_table = self.conn.CASTable(**input_tbl_opts)
input_table = ImageTable.from_table(input_table)
copy_vars = input_table.columns.tolist()
valid_res_tbl_com = random_name('Valid_Res_Complete')
dlscore_options_com = dict(model=self.model_table, initweights=self.model_weights,
table=input_table,
copyvars=copy_vars,
randomflip='none',
randomcrop='none',
casout=dict(replace=True, name=valid_res_tbl_com),
encodename=True)
try:
kwargs = unify_keys(kwargs)
except:
pass
dlscore_options_com.update(kwargs)
self._retrieve_('deeplearn.dlscore', **dlscore_options_com)
return self.conn.CASTable(valid_res_tbl_com)
from .images import ImageTable
run_predict = True
if data is None and self.valid_res_tbl is None:
raise ValueError('No input data and model.predict() has not been run')
elif data is None:
print("Using results from model.predict()")
data = self.valid_res_tbl
run_predict = False
elif data.shape[0] == 0:
raise ValueError('Input table is empty.')
        data = data.partition(casout=dict(name='temp_annotated', replace=True))['casTable']
im_summary = data._retrieve('image.summarizeimages')['Summary']
output_width = int(im_summary.minWidth)
output_height = int(im_summary.minHeight)
if (int(im_summary.maxWidth) != output_width) or \
(int(im_summary.maxHeight) != output_height):
raise ValueError('Input images must have same size.')
if (mask_width is None) and (mask_height is None):
mask_width = max(int(output_width / 4), 1)
mask_height = max(int(output_height / 4), 1)
if mask_width is None:
mask_width = mask_height
if mask_height is None:
mask_height = mask_width
if step_size is None:
step_size = max(int(mask_width / 4), 1)
copy_vars = ImageTable.from_table(data).columns.tolist()
masked_image_table = random_name('MASKED_IMG')
blocksize = image_blocksize(output_width, output_height)
filtered = None
if filename or image_id:
print(" filtering by filename or _id_ ")
if '_id_' not in data.columns.tolist():
print("'_id_' column not in cas_table, processing complete table")
else:
if filename and image_id:
print(" image_id supersedes filename, image_id being used")
if image_id:
filtered = filter_by_image_id(data, image_id)
elif filename:
filtered = filter_by_filename(data, filename)
if filtered:
self.conn.droptable(data)
data = filtered
if run_predict:
print("Running prediction ...")
data = get_predictions(data)
print("... finished running prediction")
table_vars = data.columns.tolist()
if 'I__label_' in table_vars and img_type == 'C':
data_temp = data[data['_label_'] == data['I__label_']]
if data_temp.numrows().numrows != 0:
data = data_temp
else:
raise ValueError('No Correct Labels to Heatmap')
elif 'I__label_' in table_vars and img_type == 'M':
data_temp = data[data['_label_'] != data['I__label_']]
if data_temp.numrows().numrows != 0:
data = data_temp
else:
raise ValueError('No Misclassified Data to Heatmap')
if data.numrows().numrows > max_display:
print('NOTE: The number of images in the table is too large,'
' only {} randomly selected images are used in analysis.'.format(max_display))
te_rate = max_display / data.numrows().numrows * 100
if not self.conn.queryactionset('sampling')['sampling']:
self.conn.loadactionset('sampling', _messagelevel='error')
sample_tbl = random_name('SAMPLE_TBL')
self._retrieve_('sampling.srs',
table=data.to_table_params(),
output=dict(casout=dict(replace=True, name=sample_tbl,
blocksize=blocksize), copyvars='all'),
samppct=te_rate)
            data = self.conn.CASTable(sample_tbl)
self._retrieve_('image.augmentimages',
table=data.to_table_params(),
copyvars=copy_vars,
casout=dict(replace=True, name=masked_image_table,
blocksize=blocksize),
cropList=[dict(sweepImage=True, x=0, y=0,
width=mask_width, height=mask_height,
stepsize=step_size,
outputwidth=output_width,
outputheight=output_height,
mask=True)])
masked_image_table = self.conn.CASTable(masked_image_table)
copy_vars = masked_image_table.columns.tolist()
copy_vars.remove('_image_')
valid_res_tbl = random_name('Valid_Res')
dlscore_options = dict(model=self.model_table, initWeights=self.model_weights,
table=masked_image_table,
copyVars=copy_vars,
randomflip='none',
randomcrop='none',
casout=dict(replace=True, name=valid_res_tbl),
encodeName=True)
dlscore_options.update(kwargs)
self._retrieve_('deeplearn.dlscore', **dlscore_options)
valid_res_tbl = self.conn.CASTable(valid_res_tbl)
temp_table = valid_res_tbl.to_frame()
image_id_list = temp_table['_parentId_'].unique().tolist()
n_masks = len(temp_table['_id_'].unique())
prob_tensor = np.empty((output_height, output_width, n_masks))
prob_tensor[:] = np.nan
model_explain_table = dict()
count_for_subject = dict()
for name in image_id_list:
model_explain_table.update({'{}'.format(name): prob_tensor.copy()})
count_for_subject.update({'{}'.format(name): 0})
for row in temp_table.iterrows():
row = row[1]
name = str(row['_parentId_'])
x = int(row['x'])
y = int(row['y'])
x_step = int(row['width'])
y_step = int(row['height'])
true_class = row['_label_'].replace(' ', '_')
true_pred_prob_col = 'P__label_' + true_class
prob = row[true_pred_prob_col]
model_explain_table[name][y:min(y + y_step, output_height), x:min(x + x_step, output_width), count_for_subject[name]] = prob
count_for_subject[name] += 1
original_image_table = data.fetchimages(fetchVars=data.columns.tolist(),
to=data.numrows().numrows).Images
prob_cols = []
for col in data.columns:
if 'P__label' in col:
prob_cols.append(col)
output_table = []
for id_num in model_explain_table.keys():
temp_dict = dict()
temp_dict.update({'_id_': id_num})
index = original_image_table['_id_'] == int(id_num)
temp_dict.update({
'_filename_0': original_image_table['_filename_0'][index].tolist()[0],
'_image_': original_image_table['Image'][index].tolist()[0],
'_label_': original_image_table['Label'][index].tolist()[0],
'I__label_': original_image_table['I__label_'][index].tolist()[0],
'heat_map': np.nanmean(model_explain_table[id_num], axis=2)
})
index2 = data['_id_'] == id_num
for col_name in prob_cols:
temp_dict.update({'{}'.format(col_name): data[col_name][index2].tolist()[0]})
output_table.append(temp_dict)
self._retrieve_('table.droptable', name=masked_image_table)
self._retrieve_('table.droptable', name=valid_res_tbl)
output_table = pd.DataFrame(output_table)
self.model_explain_table = output_table
if display:
n_images = output_table.shape[0]
if n_images > max_display:
print('NOTE: Only the results from the first {} images are displayed.'.format(max_display))
n_images = max_display
fig, axs = plt.subplots(ncols=3, nrows=n_images, figsize=(12, 4 * n_images))
if n_images == 1:
axs = [axs]
for im_idx in range(n_images):
label = output_table['_label_'][im_idx]
pred_label = output_table['I__label_'][im_idx]
id_num = output_table['_id_'][im_idx]
filename = output_table['_filename_0'][im_idx]
img = output_table['_image_'][im_idx]
heat_map = output_table['heat_map'][im_idx]
img_size = heat_map.shape
extent = [0, img_size[0], 0, img_size[1]]
vmin = heat_map.min()
vmax = heat_map.max()
axs[im_idx][0].imshow(img, extent=extent)
axs[im_idx][0].axis('off')
axs[im_idx][0].set_title('Original Image: {}'.format(label))
color_bar = axs[im_idx][2].imshow(heat_map, vmax=vmax, vmin=vmin,
interpolation='none',
extent=extent, cmap='jet_r')
axs[im_idx][2].axis('off')
axs[im_idx][2].set_title('Heat Map')
axs[im_idx][1].imshow(img, extent=extent)
axs[im_idx][1].imshow(heat_map, vmax=vmax, vmin=vmin,
interpolation='none', alpha=0.5,
extent=extent, cmap='jet_r')
axs[im_idx][1].axis('off')
axs[im_idx][1].set_title('Overlayed Image')
box = axs[im_idx][2].get_position()
ax3 = fig.add_axes([box.x1 * 1.02, box.y0 + box.height * 0.06,
box.width * 0.05, box.height * 0.88])
plt.colorbar(color_bar, cax=ax3)
left, width = .0, 1.0
bottom, height = -.14, .2
top = bottom + height
output_str = 'Predicted Label: {}'.format(pred_label)
output_str += ', filename: {}'.format(filename)
output_str += ', image_id: {},'.format(id_num)
axs[im_idx][0].text(left, 0.5 * (bottom + top), output_str,
horizontalalignment='left',
verticalalignment='center',
fontsize=14, color='black',
transform=axs[im_idx][0].transAxes)
plt.show()
self.conn.droptable(data)
return output_table
def plot_heat_map(self, idx=0, alpha=.2):
"""
Display the heat maps analysis results
Displays plot of three images: original, overlayed image and heat map,
from left to right.
Parameters
----------
idx : int, optional
Specifies the image to be displayed, starting from 0.
alpha : double, optional
Specifies transparent ratio of the heat map in the overlayed image.
Must be a numeric between 0 and 1.
"""
label = self.model_explain_table['_label_'][idx]
img = self.model_explain_table['_image_'][idx]
heat_map = self.model_explain_table['heat_map'][idx]
img_size = heat_map.shape
extent = [0, img_size[0], 0, img_size[1]]
vmin = heat_map.min()
vmax = heat_map.max()
fig, (ax0, ax2, ax1) = plt.subplots(ncols=3, figsize=(12, 4))
ax0.imshow(img, extent=extent)
ax0.axis('off')
ax0.set_title('Original Image: {}'.format(label))
color_bar = ax1.imshow(heat_map, vmax=vmax, vmin=vmin,
interpolation='none', extent=extent, cmap='jet_r')
ax1.axis('off')
ax1.set_title('Heat Map')
ax2.imshow(img, extent=extent)
ax2.imshow(heat_map, vmax=vmax, vmin=vmin, interpolation='none',
alpha=alpha, extent=extent, cmap='jet_r')
ax2.axis('off')
ax2.set_title('Overlayed Image')
box = ax1.get_position()
ax3 = fig.add_axes([box.x1 * 1.02, box.y0 + box.height * 0.06,
box.width * 0.05, box.height * 0.88])
plt.colorbar(color_bar, cax=ax3)
plt.show()
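# Usage sketch (illustrative only, not executed): running a heat map analysis
# on a few misclassified images and replotting one result. `model` and
# `img_tbl` are assumptions and are not defined in this module.
#
#     heat_df = model.heat_map_analysis(data=img_tbl, mask_width=32,
#                                       mask_height=32, step_size=8,
#                                       img_type='M', max_display=3)
#     model.plot_heat_map(idx=0, alpha=0.3)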
class FeatureMaps(object):
'''
Feature Maps object
Parameters
----------
conn : CAS
Specifies the CAS connection object
feature_maps_tbl : CAS table
Specifies the CAS table to store the feature maps.
structure : dict, optional
Specifies the structure of the feature maps.
Returns
-------
:class:`FeatureMaps`
'''
def __init__(self, conn, feature_maps_tbl, structure=None):
self.conn = conn
self.tbl = feature_maps_tbl
self.structure = structure
def display(self, layer_id, filter_id=None):
'''
Display the feature maps
Parameters
----------
layer_id : int
Specifies the id of the layer to be displayed.
filter_id : list-of-ints, optional
Specifies the filters to be displayed.
Default: None
'''
if filter_id is None:
n_images = self.structure[layer_id]
filter_id = list(range(n_images))
if len(filter_id) > 64:
filter_id = filter_id[0:64]
print('NOTE: The maximum number of filters to be displayed is 64.\n'
'NOTE: Only the first 64 filters are displayed.')
n_images = len(filter_id)
n_col = min(n_images, 8)
n_row = int(np.ceil(n_images / n_col))
fig = plt.figure(figsize=(16, 16 // n_col * n_row))
title = 'Activation Maps for Layer_{}'.format(layer_id)
if layer_id == 0:
image = []
for i in range(3):
col_name = '_LayerAct_{}_IMG_{}_'.format(layer_id, i)
temp = self.conn.retrieve('image.fetchimages', _messagelevel='error',
table=self.tbl,
image=col_name).Images.Image[0]
image.append(np.asarray(temp))
image = np.dstack((image[2], image[1], image[0]))
plt.imshow(image)
plt.xticks([]), plt.yticks([])
else:
for i in range(n_images):
filter_num = filter_id[i]
col_name = '_LayerAct_{}_IMG_{}_'.format(layer_id, filter_num)
image = self.conn.retrieve('image.fetchimages', _messagelevel='error',
table=self.tbl,
image=col_name).Images.Image[0]
image = np.asarray(image)
fig.add_subplot(n_row, n_col, i + 1)
plt.imshow(image, cmap='gray')
plt.xticks([]), plt.yticks([])
plt.title('Filter {}'.format(filter_num))
plt.suptitle(title, fontsize=20)
plt.tight_layout(pad=2.5, rect=[0, 0.03, 1, 0.95])
plt.show()
class Solver(DLPyDict):
'''
Solver object
Parameters
----------
learning_rate : double, optional
Specifies the learning rate for the deep learning algorithm.
learning_rate_policy : string, optional
Specifies the learning rate policy for the deep learning algorithm.
Valid Values: FIXED, STEP, POLY, INV, MULTISTEP
Default: FIXED
gamma : double, optional
Specifies the gamma for the learning rate policy.
step_size : int, optional
Specifies the step size when the learning rate policy is set to STEP.
power : double, optional
Specifies the power for the learning rate policy.
use_locking : bool, optional
When it is false, the gradients are computed asynchronously with
multiple threads.
clip_grad_max : double, optional
Specifies the maximum gradient value. All gradients that are greater
than the specified value are set to the specified value.
clip_grad_min : double, optional
Specifies the minimum gradient value. All gradients that are less
than the specified value are set to the specified value.
steps : list-of-ints, optional
specifies a list of epoch counts. When the current epoch matches one
of the specified steps, the learning rate is multiplied by the value
of the gamma parameter. For example, if you specify {5, 9, 13}, then
the learning rate is multiplied by gamma after the fifth, ninth, and
thirteenth epochs.
fcmp_learning_rate : string, optional
specifies the FCMP learning rate function.
lr_scheduler : LRScheduler, optional
        Specifies the learning rate policy.
        DLPy provides several predefined learning rate policies:
        1. FixedLR
        2. StepLR
        3. MultiStepLR
        4. PolynomialLR
        5. ReduceLROnPlateau
        6. CyclicLR
        You can also define a custom learning rate policy; see the DLPy
        examples folder for more examples.
Returns
-------
:class:`Solver`
'''
def __init__(self, learning_rate=0.001, learning_rate_policy='fixed', gamma=0.1, step_size=10, power=0.75,
use_locking=True, clip_grad_max=None, clip_grad_min=None, steps=None,
fcmp_learning_rate=None, lr_scheduler=None):
DLPyDict.__init__(self, learning_rate=learning_rate, learning_rate_policy=learning_rate_policy, gamma=gamma,
step_size=step_size, power=power, use_locking=use_locking, clip_grad_max=clip_grad_max,
clip_grad_min=clip_grad_min, steps=steps, fcmp_learning_rate=fcmp_learning_rate)
# lr_scheduler default as None and if it is specified, it will overwrite lr option in _solver
if lr_scheduler is not None:
if not isinstance(lr_scheduler, _LRScheduler):
raise TypeError('{} is not an LRScheduler'.format(type(lr_scheduler).__name__))
if lr_scheduler.get('fcmp_learning_rate'):
self.pop('learning_rate_policy', 0)
args_wrapped_in_lr_scheduler = ['learning_rate', 'learning_rate_policy', 'gamma', 'step_size',
'power', 'steps', 'fcmp_learning_rate']
not_none_args = [i for i in args_wrapped_in_lr_scheduler if self.get(i) is not None]
if len(not_none_args) > 0:
                print('The following argument(s) {} are overwritten by the corresponding arguments '
                      'specified in lr_scheduler.'.format(', '.join(not_none_args)))
for key, value in lr_scheduler.items():
self.__setitem__(key, value)
def set_method(self, method):
'''
Sets the solver method in the parameters list.
Parameters
----------
method : string
Specifies the type of the solver method.
Possible values: ['vanilla', 'momentum', 'adam', 'lbfg', 'natgrad']
'''
self.add_parameter('method', method)
def add_parameter(self, key, value):
'''
Adds a parameter to the parameter list of a solver.
Parameters
        ----------
key : string
Specifies the name of the parameter to be added to the list
value : string
Specifies the actual values of the parameter to be added to the list
'''
self.__setitem__(key, value)
def __str__(self):
return super().__str__()
class VanillaSolver(Solver):
'''
Vanilla solver object
Parameters
----------
learning_rate : double, optional
Specifies the learning rate for the deep learning algorithm.
learning_rate_policy : string, optional
Specifies the learning rate policy for the deep learning algorithm.
Valid Values: FIXED, STEP, POLY, INV, MULTISTEP
Default: FIXED
gamma : double, optional
Specifies the gamma for the learning rate policy.
step_size : int, optional
Specifies the step size when the learning rate policy is set to STEP.
power : double, optional
Specifies the power for the learning rate policy.
use_locking : bool, optional
When it is false, the gradients are computed asynchronously with
multiple threads.
clip_grad_max : double, optional
Specifies the maximum gradient value. All gradients that are greater
than the specified value are set to the specified value.
clip_grad_min : double, optional
Specifies the minimum gradient value. All gradients that are less
than the specified value are set to the specified value.
steps : list-of-ints, optional
specifies a list of epoch counts. When the current epoch matches
one of the specified steps, the learning rate is multiplied by the
value of the gamma parameter. For example, if you specify {5, 9, 13},
then the learning rate is multiplied by gamma after the fifth, ninth,
and thirteenth epochs.
fcmp_learning_rate : string, optional
specifies the FCMP learning rate function.
lr_scheduler : LRScheduler, optional
        Specifies the learning rate policy.
        DLPy provides several predefined learning rate policies:
        1. FixedLR
        2. StepLR
        3. MultiStepLR
        4. PolynomialLR
        5. ReduceLROnPlateau
        6. CyclicLR
        You can also define a custom learning rate policy; see the DLPy
        examples folder for more examples.
Returns
-------
:class:`VanillaSolver`
'''
def __init__(self, learning_rate=0.001, learning_rate_policy='fixed', gamma=0.1, step_size=10, power=0.75,
use_locking=True, clip_grad_max=None, clip_grad_min=None, steps=None,
fcmp_learning_rate=None, lr_scheduler=None):
Solver.__init__(self, learning_rate, learning_rate_policy, gamma, step_size, power, use_locking,
clip_grad_max, clip_grad_min, steps, fcmp_learning_rate, lr_scheduler)
self.set_method('vanilla')
class MomentumSolver(Solver):
'''
Momentum solver object
Parameters
-----------
momentum : double, optional
Specifies the momentum for stochastic gradient descent.
learning_rate : double, optional
Specifies the learning rate for the deep learning algorithm.
learning_rate_policy : string, optional
Specifies the learning rate policy for the deep learning algorithm.
Valid Values: FIXED, STEP, POLY, INV, MULTISTEP
Default: FIXED
gamma : double, optional
Specifies the gamma for the learning rate policy.
step_size : int, optional
Specifies the step size when the learning rate policy is set to STEP.
power : double, optional
Specifies the power for the learning rate policy.
use_locking : bool, optional
When it is false, the gradients are computed asynchronously
with multiple threads.
clip_grad_max : double, optional
Specifies the maximum gradient value. All gradients that are greater
than the specified value are set to the specified value.
clip_grad_min : double, optional
Specifies the minimum gradient value. All gradients that are
less than the specified value are set to the specified value.
steps : list-of-ints, optional
specifies a list of epoch counts. When the current epoch matches
one of the specified steps, the learning rate is multiplied by the
value of the gamma parameter. For example, if you specify {5, 9, 13},
then the learning rate is multiplied by gamma after the fifth,
ninth, and thirteenth epochs.
fcmp_learning_rate : string, optional
specifies the FCMP learning rate function.
lr_scheduler : LRScheduler, optional
        Specifies the learning rate policy.
        DLPy provides several predefined learning rate policies:
        1. FixedLR
        2. StepLR
        3. MultiStepLR
        4. PolynomialLR
        5. ReduceLROnPlateau
        6. CyclicLR
        You can also define a custom learning rate policy; see the DLPy
        examples folder for more examples.
Returns
-------
:class:`MomentumSolver`
'''
def __init__(self, momentum=0.9, learning_rate=0.001, learning_rate_policy='fixed', gamma=0.1, step_size=10,
power=0.75, use_locking=True, clip_grad_max=None, clip_grad_min=None, steps=None,
fcmp_learning_rate=None, lr_scheduler=None):
Solver.__init__(self, learning_rate, learning_rate_policy, gamma, step_size, power, use_locking,
clip_grad_max, clip_grad_min, steps, fcmp_learning_rate, lr_scheduler)
self.set_method('momentum')
self.add_parameter('momentum', momentum)
class AdamSolver(Solver):
'''
Adam solver object
Parameters
----------
beta1 : double, optional
Specifies the exponential decay rate for the first moment in
the Adam learning algorithm.
beta2 : double, optional
Specifies the exponential decay rate for the second moment in
the Adam learning algorithm.
learning_rate : double, optional
Specifies the learning rate for the deep learning algorithm.
learning_rate_policy : string, optional
Specifies the learning rate policy for the deep learning algorithm.
Valid Values: FIXED, STEP, POLY, INV, MULTISTEP
Default: FIXED
gamma : double, optional
Specifies the gamma for the learning rate policy.
step_size: int, optional
Specifies the step size when the learning rate policy is set to STEP.
power : double, optional
Specifies the power for the learning rate policy.
use_locking : bool, optional
When it is false, the gradients are computed asynchronously with
multiple threads.
clip_grad_max : double, optional
Specifies the maximum gradient value. All gradients that are greater
than the specified value are set to the specified value.
clip_grad_min : double, optional
Specifies the minimum gradient value. All gradients that are less
than the specified value are set to the specified value.
steps : list-of-ints, optional
specifies a list of epoch counts. When the current epoch matches
one of the specified steps, the learning rate is multiplied by the
value of the gamma parameter. For example, if you specify {5, 9, 13},
then the learning rate is multiplied by gamma after the fifth, ninth,
and thirteenth epochs.
fcmp_learning_rate : string, optional
specifies the FCMP learning rate function.
lr_scheduler : LRScheduler, optional
        Specifies the learning rate policy.
        DLPy provides several predefined learning rate policies:
        1. FixedLR
        2. StepLR
        3. MultiStepLR
        4. PolynomialLR
        5. ReduceLROnPlateau
        6. CyclicLR
        You can also define a custom learning rate policy; see the DLPy
        examples folder for more examples.
Returns
-------
:class:`AdamSolver`
'''
def __init__(self, beta1=0.9, beta2=0.999, learning_rate=0.001, learning_rate_policy='fixed', gamma=0.1,
step_size=10, power=0.75, use_locking=True, clip_grad_max=None, clip_grad_min=None, steps=None,
fcmp_learning_rate=None, lr_scheduler=None):
Solver.__init__(self, learning_rate, learning_rate_policy, gamma, step_size, power, use_locking,
clip_grad_max, clip_grad_min, steps, fcmp_learning_rate, lr_scheduler)
self.set_method('adam')
self.add_parameter('beta1', beta1)
self.add_parameter('beta2', beta2)
class LBFGSolver(Solver):
'''
    L-BFGS solver object
Parameters
----------
m : int
Specifies the number of corrections used in the L-BFGS update.
max_line_search_iters : int
Specifies the maximum number of line search iterations for
L-BFGS solver.
max_iters : int
Specifies the maximum number of iterations for the L-BFGS solver.
When the miniBatchSize option is not specified, each iteration
goes through at least one epoch. When the miniBatchSize option is
specified, each L-BFGS iteration processes one mini-batch.
The L-BFGS solver stops when the iteration number reaches the value
of the maxIters= option or the epoch number reaches the value of
the maxEpochs= option.
backtrack_ratio : double
Specifies the backtrack ratio of line search iterations for L-BFGS solver.
learning_rate : double, optional
Specifies the learning rate for the deep learning algorithm.
learning_rate_policy : string, optional
Specifies the learning rate policy for the deep learning algorithm.
Valid Values: FIXED, STEP, POLY, INV, MULTISTEP
Default: FIXED
gamma : double, optional
Specifies the gamma for the learning rate policy.
step_size : int, optional
Specifies the step size when the learning rate policy is set to STEP.
power : double, optional
Specifies the power for the learning rate policy.
use_locking : bool, optional
When it is false, the gradients are computed asynchronously with
multiple threads.
clip_grad_max : double, optional
Specifies the maximum gradient value. All gradients that are greater
than the specified value are set to the specified value.
clip_grad_min : double, optional
Specifies the minimum gradient value. All gradients that are less
than the specified value are set to the specified value.
steps : list-of-ints, optional
specifies a list of epoch counts. When the current epoch matches one
of the specified steps, the learning rate is multiplied by the value
of the gamma parameter. For example, if you specify {5, 9, 13}, then
the learning rate is multiplied by gamma after the fifth, ninth, and
thirteenth epochs.
fcmp_learning_rate : string, optional
specifies the FCMP learning rate function.
lr_scheduler : LRScheduler, optional
        Specifies the learning rate policy.
        DLPy provides several predefined learning rate policies:
        1. FixedLR
        2. StepLR
        3. MultiStepLR
        4. PolynomialLR
        5. ReduceLROnPlateau
        6. CyclicLR
        You can also define a custom learning rate policy; see the DLPy
        examples folder for more examples.
Returns
-------
:class:`LBFGSolver`
'''
def __init__(self, m, max_line_search_iters, max_iters, backtrack_ratio, learning_rate=0.001,
learning_rate_policy='fixed', gamma=0.1, step_size=10, power=0.75, use_locking=True,
clip_grad_max=None, clip_grad_min=None, steps=None, fcmp_learning_rate=None, lr_scheduler=None):
Solver.__init__(self, learning_rate, learning_rate_policy, gamma, step_size, power, use_locking,
clip_grad_max, clip_grad_min, steps, fcmp_learning_rate, lr_scheduler)
self.set_method('lbfg')
        self.add_parameter('m', m)
        self.add_parameter('maxlinesearchiters', max_line_search_iters)
        self.add_parameter('maxiters', max_iters)
        self.add_parameter('backtrackratio', backtrack_ratio)
class NatGradSolver(Solver):
'''
Natural gradient solver object
Parameters
----------
approximation_type : int, optional
Specifies the approximate natural gradient type.
learning_rate : double, optional
Specifies the learning rate for the deep learning algorithm.
learning_rate_policy : string, optional
Specifies the learning rate policy for the deep learning algorithm.
Valid Values: FIXED, STEP, POLY, INV, MULTISTEP
Default: FIXED
gamma : double, optional
Specifies the gamma for the learning rate policy.
step_size : int, optional
Specifies the step size when the learning rate policy is set to STEP.
power : double, optional
Specifies the power for the learning rate policy.
use_locking : bool, optional
When it is false, the gradients are computed asynchronously with
multiple threads.
clip_grad_max : double, optional
Specifies the maximum gradient value. All gradients that are greater
than the specified value are set to the specified value.
clip_grad_min : double, optional
Specifies the minimum gradient value. All gradients that are less
than the specified value are set to the specified value.
steps : list-of-ints, optional
specifies a list of epoch counts. When the current epoch matches one
of the specified steps, the learning rate is multiplied by the value
of the gamma parameter. For example, if you specify {5, 9, 13}, then
the learning rate is multiplied by gamma after the fifth, ninth, and
thirteenth epochs.
fcmp_learning_rate : string, optional
specifies the FCMP learning rate function.
lr_scheduler : LRScheduler, optional
        Specifies the learning rate policy.
        DLPy provides several predefined learning rate policies:
        1. FixedLR
        2. StepLR
        3. MultiStepLR
        4. PolynomialLR
        5. ReduceLROnPlateau
        6. CyclicLR
        You can also define a custom learning rate policy; see the DLPy
        examples folder for more examples.
Returns
-------
:class:`NatGradSolver`
'''
def __init__(self, approximation_type=1, learning_rate=0.001, learning_rate_policy='fixed', gamma=0.1,
step_size=10, power=0.75, use_locking=True, clip_grad_max=None, clip_grad_min=None, steps=None,
fcmp_learning_rate=None, lr_scheduler=None):
Solver.__init__(self, learning_rate, learning_rate_policy, gamma, step_size, power, use_locking,
clip_grad_max, clip_grad_min, steps, fcmp_learning_rate, lr_scheduler)
self.set_method('natgrad')
self.add_parameter('approximationtype', approximation_type)
class Optimizer(DLPyDict):
'''
Optimizer object
Parameters
----------
algorithm : Algorithm, optional
Specifies the deep learning algorithm.
mini_batch_size : int, optional
Specifies the number of observations per thread in a mini-batch.
You can use this parameter to control the number of observations
that the action uses on each worker for each thread to compute
the gradient prior to updating the weights. Larger values use more
memory. When synchronous SGD is used (the default), the total
mini-batch size is equal to miniBatchSize * number of threads *
number of workers. When asynchronous SGD is used (by specifying
the elasticSyncFreq parameter), each worker trains its own local
model. In this case, the total mini-batch size for each worker is
miniBatchSize * number of threads.
seed : double, optional
Specifies the random number seed for the random number generator
in SGD. The default value, 0, and negative values indicate to use
random number streams based on the computer clock. Specify a value
that is greater than 0 for a reproducible random number sequence.
max_epochs : int, optional
Specifies the maximum number of epochs. For SGD with a single-machine
server or a session that uses one worker on a distributed server,
one epoch is reached when the action passes through the data one time.
For a session that uses more than one worker, one epoch is reached
when all the workers exchange the weights with the controller one time.
The syncFreq parameter specifies the number of times each worker
passes through the data before exchanging weights with the controller.
For L-BFGS with full batch, each L-BFGS iteration might process more
than one epoch, and final number of epochs might exceed the maximum
number of epochs.
reg_l1 : double, optional
Specifies the weight for the L1 regularization term. By default,
L1 regularization is not performed and a value of 0 also disables the
regularization. Begin with small values such as 1e-6. L1 regularization
can be combined with L2 regularization.
reg_l2 : double, optional
Specifies the weight for the L2 regularization term. By default,
L2 regularization is not performed and a value of 0 also disables the
regularization. Begin with small
values such as 1e-3. L1 regularization can be combined with
L2 regularization.
dropout : double, optional
Specifies the probability that the output of a neuron in a fully
connected layer will be set to zero during training. The specified
probability is recalculated each time an observation is processed.
dropout_input : double, optional
Specifies the probability that an input variable will be set to zero
during training. The specified probability is recalculated each time
an observation is processed.
dropout_type : string, optional
Specifies what type of dropout to use.
Valid Values: STANDARD, INVERTED
Default: STANDARD
stagnation : int, optional
Specifies the number of successive iterations without improvement
before stopping the optimization early. When the validTable parameter
is not specified, the loss error is monitored for stagnation. When
the validTable parameter is specified, the validation scores are
monitored for stagnation.
threshold : double, optional
Specifies the threshold that is used to determine whether the loss
error or validation score is improving or is stagnating. When
abs(current_score - previous_score) <= abs(current_score)*threshold,
the current iteration does not improve the optimization and the
stagnation counter is incremented. Otherwise, the stagnation counter
is set to zero.
f_conv : double, optional
Specifies the relative function convergence criterion. If the relative
loss error abs(previous_loss - current_loss) / abs(previous_loss) does
not result in a change in the objective function, then the optimization
is stopped. By default, the relative function convergence is not checked.
snapshot_freq : int, optional
Specifies the frequency for generating snapshots of the neural weights
and storing the weights in a weight table during the training process.
When asynchronous SGD is used, the action synchronizes all the weights
before writing out the weights.
log_level : int, optional
Specifies how progress messages are sent to the client. The default
value, 0, indicates that no messages are sent. Specify 1 to receive
start and end messages. Specify 2 to include the iteration history.
bn_src_layer_warnings : bool, optional
Turns warning on or off, if batch normalization source layer has
an atypical type, activation, or include_bias setting. Default: False
freeze_layers_to : string
Specifies a layer name to freeze this layer and all the layers before
this layer.
total_mini_batch_size : int, optional
specifies the number of observations in a mini-batch. You can use
this parameter to control the number of observations that the action
uses to compute the gradient prior to updating the weights. Larger
values use more memory. If the specified size cannot be evenly divided
by the number of threads (if using asynchronous SGD), or the number of
threads * number of workers (if using synchronous SGD), then the action
will terminate with an error unless the round parameter was specified
to be TRUE, in which case, the total mini-batch size will be rounded
up so that it will be evenly divided.
flush_weights : bool, optional
        Specifies whether to flush the weight table to disk.
Default: False
mini_batch_buf_size : int, optional
specifies the size of a buffer that is used to save input data and
intermediate calculations. By default, each layer allocates an input
buffer that is equal to the number of input channels multiplied by
the input feature map size multiplied by the bufferSize value. You
can reduce memory usage by specifying a value that is smaller than
the bufferSize. The only disadvantage to specifying a small value is
that run time can increase because multiple smaller matrices must be
multiplied instead of a single large matrix multiply.
freeze_batch_norm_stats : Boolean
When set to True, freezes the statistics of all batch normalization layers.
Default : False
freeze_layers : list of string
Specifies a list of layer names whose trainable parameters will be frozen.
Returns
-------
:class:`Optimizer`
'''
def __init__(self, algorithm=VanillaSolver(), mini_batch_size=1, seed=0, max_epochs=1, reg_l1=0, reg_l2=0,
dropout=0, dropout_input=0, dropout_type='standard', stagnation=0, threshold=0.00000001, f_conv=0,
snapshot_freq=0, log_level=0, bn_src_layer_warnings=True, freeze_layers_to=None, flush_weights=False,
total_mini_batch_size=None, mini_batch_buf_size=None,
freeze_layers=None, freeze_batch_norm_stats=False):
DLPyDict.__init__(self, algorithm=algorithm, mini_batch_size=mini_batch_size, seed=seed, max_epochs=max_epochs,
reg_l1=reg_l1, reg_l2=reg_l2, dropout=dropout, dropout_input=dropout_input,
dropout_type=dropout_type, stagnation=stagnation, threshold=threshold, f_conv=f_conv,
snapshot_freq=snapshot_freq, log_level=log_level,
bn_src_layer_warnings=bn_src_layer_warnings, freeze_layers_to=freeze_layers_to,
flush_weights=flush_weights, total_mini_batch_size=total_mini_batch_size,
mini_batch_buf_size=mini_batch_buf_size,
freeze_layers=freeze_layers, freeze_batch_norm_stats=freeze_batch_norm_stats)
def add_optimizer_mode(self, solver_mode_type='sync', sync_freq=None, alpha=None, damping=None):
'''
Sets the mode of the solver.
Parameters
----------
solver_mode_type : string
Specifies the mode of the solver.
sync_freq : int
Specifies the synchronization frequency
This parameter has different details for different solver types:
For solver_mode_type='sync' and 'downpour'
specifies the synchronization frequency for SGD in terms of epochs. Set this value to
0 to use asynchronous SGD.
For solver_mode_type='elastic'
Specifies the frequency for communication between the workers and controller for exchanging weights.
You can exchange weights more often than once each epoch by setting a value that is less than the number of
batches in an epoch. If this value is greater than the number of batches in an epoch, then the weights
are exchanged once for each epoch.
alpha : double
This parameter should be set only when solver_mode_type='elastic'.
Specifies the significance level that is used for elastic SGD. When each worker exchanges weights with
the controller, this value is used to adjust the weights.
damping : double
This parameter should be set only when solver_mode_type='elastic'.
Specifies the damping factor that is used with asynchronous SGD. When each worker exchanges the weights
with the controller, the weights are combined with this damping factor.
'''
mode = {}
if solver_mode_type == 'downpour':
mode['type'] = 'downpour'
elif solver_mode_type == 'elastic':
mode['type'] = 'elastic'
if alpha is None:
mode['alpha'] = 0
else:
mode['alpha'] = alpha
if sync_freq is None:
mode['syncfreq'] = 0
else:
mode['syncfreq'] = sync_freq
if damping is None:
mode['damping'] = 0.1
else:
mode['damping'] = damping
else:
mode['type'] = 'synchronous'
if sync_freq is None:
mode['syncfreq'] = 1
else:
mode['syncfreq'] = sync_freq
self.__setitem__('mode', mode)
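# Usage sketch (illustrative only, not executed): composing a solver and an
# optimizer for distributed training. All numeric values are assumptions.
#
#     solver = MomentumSolver(learning_rate=0.01, momentum=0.9,
#                             learning_rate_policy='step', gamma=0.5, step_size=10)
#     optimizer = Optimizer(algorithm=solver, mini_batch_size=4,
#                           max_epochs=20, reg_l2=0.0005, log_level=2)
#     optimizer.add_optimizer_mode(solver_mode_type='elastic',
#                                  sync_freq=2, alpha=0.5, damping=0.1)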
class TextParms(DLPyDict):
'''
Text parameters object
Parameters
----------
init_input_embeddings : string or CASTable, optional
specifies an in-memory table that contains the word embeddings.
By default, the first column is expected to be the terms and
the rest of the columns are the embedded content.
init_output_embeddings : string or CASTable, optional
specifies an in-memory table that contains the word embeddings.
By default, the first column is expected to be the terms and
the rest of the columns are the embedded content.
has_input_term_ids : bool, optional
Specifies whether the second column of the initial input embedding
table contains term IDs.
has_output_term_ids : bool, optional
Specifies whether the second column of the initial output embedding
table contains term IDs.
model_output_embeddings : string or CASTable, optional
Specifies the output embeddings model table.
language : string, optional
Specifies the language for text tokenization.
Valid Values: ENGLISH, GERMAN, FRENCH, SPANISH, CHINESE, DUTCH,
FINNISH, ITALIAN, KOREAN, PORTUGUESE, RUSSIAN, TURKISH, JAPANESE,
POLISH, NORWEGIAN, ARABIC, CZECH, DANISH, INDONESIAN, SWEDISH,
GREEK, SLOVAK, HEBREW, THAI, VIETNAMESE, SLOVENE, CROATIAN,
TAGALOG, FARSI, HINDI, HUNGARIAN, ROMANIAN
default: ENGLISH
Returns
-------
:class:`TextParms`
'''
def __init__(self, init_input_embeddings=None, init_output_embeddings=None, has_input_term_ids=False,
has_output_term_ids=False, model_output_embeddings=None, language='english'):
DLPyDict.__init__(self, init_input_embeddings=init_input_embeddings,
init_output_embeddings=init_output_embeddings,
has_input_term_ids=has_input_term_ids,
has_output_term_ids=has_output_term_ids,
model_output_embeddings=model_output_embeddings,
language=language)
class Sequence(DLPyDict):
'''
Sequence parameters object
Parameters
----------
input_length : string, optional
This should be a column in the input table.
Specifies the variable that stores the input sequence length
(number of tokens) of the row.
target_length : string, optional
        This should be a column / variable in the input table.
Specifies the variable that stores the target sequence length
(number of tokens) of the row.
token_size : int, optional
Specifies the number of variables that compose one token for
sequence input data.
Returns
-------
:class:`Sequence`
'''
def __init__(self, input_length=None, target_length=None, token_size=1):
DLPyDict.__init__(self, input_length=input_length, target_length=target_length, token_size=token_size)
class Gpu(DLPyDict):
'''
Gpu parameters object.
Parameters
----------
devices : list-of-ints, optional
Specifies a list of GPU devices to be used.
use_tensor_rt : bool, optional
Enables using TensorRT for fast inference.
Default: False.
precision : string, optional
Specifies the experimental option to incorporate lower computational
precision in forward-backward computations to potentially engage tensor cores.
Valid Values: FP32, FP16
Default: FP32
use_exclusive : bool, optional
Specifies exclusive use of GPU devices.
Default: False
Returns
-------
:class:`Gpu`
'''
def __init__(self, devices=None, use_tensor_rt=False, precision='fp32', use_exclusive=False):
DLPyDict.__init__(self, devices=devices, use_tensor_rt=use_tensor_rt, precision=precision,
use_exclusive=use_exclusive)
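# Usage sketch (illustrative only, not executed): requesting two GPU devices
# with reduced precision. The device ids are assumptions.
#
#     gpu = Gpu(devices=[0, 1], precision='fp16', use_exclusive=True)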
class DataSpecNumNomOpts(DLPyDict):
"""
Data spec numeric nominal parameters.
Parameters
----------
length : string, optional
Specifies the variable / column that contains the length of the
data spec input.
token_size : int, optional
        If positive, the data is treated as a sequence; otherwise it is
        treated as non-sequence data.
Returns
-------
:class:`DataSpecNumNomOpts`
"""
def __init__(self, length, token_size=0):
DLPyDict.__init__(self, length=length, token_size=token_size)
class DataSpec(DLPyDict):
"""
Data spec parameters.
Parameters
-----------
type_ : string
Specifies the type of the input data in the data spec.
Valid Values: NUMERICNOMINAL, NUMNOM, TEXT, IMAGE, OBJECTDETECTION
layer : string
Specifies the name of the layer to data spec.
data : list, optional
        Specifies the names of the columns/variables used as the data; these
        might be input or output depending on the layer type.
data_layer : string, optional
Specifies the name of the input layer that binds to the output layer.
nominals : list, optional
Specifies the nominal input variables to use in the analysis.
numeric_nominal_parms : :class:`DataSpecNumNomOpts`, optional
Specifies the parameters for the numeric nominal data spec inputs.
loss_scale_factor : double, optional
Specifies the value to scale the loss for a given task layer. This option only affects the task layers.
Returns
-------
:class:`DataSpec`
A dictionary of data spec parameters.
"""
def __init__(self, type_, layer, data=None, data_layer=None, nominals=None, numeric_nominal_parms=None,
loss_scale_factor=None):
DLPyDict.__init__(self, type=type_, layer=layer, data=data, data_layer=data_layer, nominals=nominals,
numeric_nominal_parms=numeric_nominal_parms, loss_scale_factor=loss_scale_factor)
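# Usage sketch (illustrative only, not executed): a data spec binding sequence
# input columns to an input layer. Layer and column names are assumptions.
#
#     seq_opts = DataSpecNumNomOpts(length='seq_len', token_size=1)
#     spec = DataSpec(type_='numericnominal', layer='input1',
#                     data=['x1', 'x2', 'x3'],
#                     numeric_nominal_parms=seq_opts)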
def _pre_parse_results(x, y, y_loss, total_sample_size, e, comm, iter_history, freq, status):
def parse_results(response, connection, userdata):
if len(response.messages) == 0:
for key, value in response:
if key == 'OptIterHistory':
iter_history.append(value)
elif len(response.messages) == 1:
line = response.messages[0].split(" ")
            numbers = []
            for l in line:
                if len(l.strip()) > 0 and l.strip().replace('.', '', 1).isdigit():
                    numbers.append(float(l.strip()))
            if len(numbers) == 5:
                # an epoch-history line was received; epoch-level values are
                # not plotted here
                # TODO: do something with epoch values, maybe another graph
                pass
elif len(numbers) >= 6:
batch_id = numbers[0]
sample_size = numbers[1]
learning_rate = numbers[2]
loss = numbers[3]
fit_error = numbers[4]
le = len(x)
if le == 0:
y.append( fit_error)
y_loss.append( loss)
x.append( len(x))
total_sample_size.append( sample_size)
else:
temp = (y[-1]*total_sample_size[0])
temp += (fit_error * sample_size)
temp2 = (y_loss[-1]*total_sample_size[0])
temp2 += (loss * sample_size)
total_sample_size[0] += sample_size
if total_sample_size[0] > 0:
y.append( temp / total_sample_size[0])
y_loss.append( temp2 / total_sample_size[0])
else:
y.append( y[-1])
y_loss.append( y_loss[-1])
x.append( len(x))
if le % freq[0] == 0:
comm.send({'label': x[-1], 'data': [y[-1], y_loss[-1]]})
if response.disposition.status_code != 0:
status[0] = response.disposition.status_code
print(response.disposition.status)
print(response.disposition.debug)
return parse_results
```
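The optimizer, solver, GPU, and data-spec helpers defined above are typically composed together and handed to a model's training call. The sketch below shows one plausible combination; the `model.fit` call and its argument names are assumptions for illustration and may differ from the actual training API defined elsewhere in this module.

```python
# Minimal sketch, assuming these classes are importable from dlpy.model as in
# the file above, and that a defined `model` and a CAS table `train_tbl`
# already exist (both are assumptions, not shown here).
from dlpy.model import AdamSolver, Optimizer, Gpu

solver = AdamSolver(learning_rate=0.001, beta1=0.9, beta2=0.999,
                    learning_rate_policy='step', gamma=0.1, step_size=15)
optimizer = Optimizer(algorithm=solver, mini_batch_size=8, max_epochs=30,
                      reg_l2=0.0005, log_level=2)
gpu = Gpu(devices=[0], precision='fp32')

# Hypothetical training call; the exact fit signature is defined elsewhere.
# model.fit(data=train_tbl, optimizer=optimizer, gpu=gpu)
```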
#### File: dlpy/tests/test_onnx_graph.py
```python
import unittest
import numpy as np
class TestGraph(unittest.TestCase):
def _generate_graph1(self):
try:
from onnx import helper, numpy_helper, TensorProto
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
input0 = helper.make_tensor_value_info('data0',
TensorProto.FLOAT,
[1, 3, 224, 224])
input1 = helper.make_tensor_value_info('conv0',
TensorProto.FLOAT,
[64, 3, 7, 7])
output0 = helper.make_tensor_value_info('output0',
TensorProto.FLOAT,
[1, 64, 122, 122])
conv_op = helper.make_node('Conv',
inputs=['data0', 'conv0'],
outputs=['output0'],
kernel_shape=[7, 7],
pads=[3, 3, 3, 3],
strides=[2, 2])
conv0 = np.random.rand(64, 3, 7, 7).astype('float32')
init0 = numpy_helper.from_array(conv0,
name='conv0')
graph = helper.make_graph(
nodes=[conv_op],
name='',
inputs=[input0, input1],
outputs=[output0],
initializer=[init0]
)
return graph
def test_graph1(self):
try:
import onnx
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
self.assertEqual(len(graph.node), 1)
self.assertEqual(len(graph.initializer), 1)
self.assertEqual(len(graph.input), 2)
self.assertEqual(len(graph.output), 1)
self.assertEqual(len(graph.uninitialized), 1)
self.assertEqual(graph.node[0].name, 'Conv_0')
self.assertTrue(not graph.node[0].parents)
self.assertTrue(not graph.node[0].children)
self.assertEqual(graph.initializer[0].name, 'conv0')
self.assertEqual(graph.input[0].name, 'data0')
self.assertEqual(graph.input[1].name, 'conv0')
self.assertEqual(graph.output[0].name, 'output0')
self.assertTrue('conv0' in graph.tensor_dict)
self.assertEqual(graph.uninitialized[0].name, 'data0')
def test_graph_connection(self):
try:
import onnx
from onnx import helper, numpy_helper, TensorProto
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
input0 = helper.make_tensor_value_info('data0',
TensorProto.FLOAT,
[1, 3, 224, 224])
input1 = helper.make_tensor_value_info('conv0',
TensorProto.FLOAT,
[64, 3, 7, 7])
output0 = helper.make_tensor_value_info('output0',
TensorProto.FLOAT,
[1, 64, 122, 122])
conv_op = helper.make_node('Conv',
inputs=['data0', 'conv0'],
outputs=['conv_out'],
kernel_shape=[7, 7],
pads=[3, 3, 3, 3],
strides=[2, 2])
identity_op = helper.make_node('Identity',
inputs=['conv_out'],
outputs=['output0'])
conv0 = np.random.rand(64, 3, 7, 7).astype('float32')
init0 = numpy_helper.from_array(conv0,
name='conv0')
graph_ = helper.make_graph(
nodes=[conv_op, identity_op],
name='',
inputs=[input0, input1],
outputs=[output0],
initializer=[init0]
)
graph = OnnxGraph.from_onnx(graph_)
self.assertEqual(len(graph.node), 2)
self.assertEqual(graph.node[0].name, 'Conv_0')
self.assertTrue(not graph.node[0].parents)
self.assertEqual(len(graph.node[0].children), 1)
self.assertEqual(graph.node[0].children[0].name, 'Identity_1')
self.assertTrue('conv0' in graph.node[0].tensors)
self.assertEqual(graph.node[1].name, 'Identity_1')
self.assertEqual(len(graph.node[1].parents), 1)
self.assertEqual(graph.node[1].parents[0].name, 'Conv_0')
self.assertTrue(not graph.node[1].children)
self.assertTrue(not graph.node[1].tensors)
def test_get_node(self):
try:
import onnx
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
self.assertEqual(graph.get_node('Conv_0').name, 'Conv_0')
self.assertEqual(graph.get_node_index('Conv_0'), 0)
self.assertEqual(graph.get_node('abcdef'), None)
self.assertEqual(graph.get_node_index('abcdef'), None)
def test_remove_node(self):
try:
import onnx
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
graph.remove_node('Conv_0')
self.assertTrue(not graph.node)
def test_remove_node1(self):
try:
import onnx
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
graph.remove_node('abcdef')
self.assertEqual(len(graph.node), 1)
def test_replace_node(self):
try:
import onnx
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
from onnx import helper
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
node_ = graph.node[0]
new_node = helper.make_node('Identity',
inputs=node_.input,
outputs=node_.output,
name='test_node')
new_node = OnnxNode(new_node)
graph.replace_node('Conv_0', new_node)
self.assertEqual(len(graph.node), 1)
self.assertEqual(graph.node[0].name, 'test_node')
def test_replace_node1(self):
try:
import onnx
from onnx import helper
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
node_ = graph.node[0]
new_node = helper.make_node('Identity',
inputs=node_.input,
outputs=node_.output,
name='test_node')
new_node = OnnxNode(new_node)
graph.replace_node('abcdef', new_node)
self.assertEqual(len(graph.node), 1)
self.assertEqual(graph.node[0].name, 'Conv_0')
def test_insert_node(self):
try:
import onnx
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
from onnx import helper
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
new_node = helper.make_node('Identity',
inputs=[],
outputs=[],
name='test_node')
new_node = OnnxNode(new_node)
graph.insert_node('Conv_0', new_node)
self.assertEqual(len(graph.node), 2)
self.assertEqual(graph.node[1].name, 'test_node')
def test_insert_node1(self):
try:
import onnx
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
from onnx import helper
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
new_node = helper.make_node('Identity',
inputs=[],
outputs=[],
name='test_node')
new_node = OnnxNode(new_node)
graph.insert_node('abcdef', new_node)
self.assertEqual(len(graph.node), 1)
self.assertEqual(graph.node[0].name, 'Conv_0')
def test_get_input(self):
try:
import onnx
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
i = graph.get_input('data0')
self.assertEqual(i.name, 'data0')
self.assertEqual([d.dim_value for d in i.type.tensor_type.shape.dim],
[1, 3, 224, 224])
def test_get_input1(self):
try:
import onnx
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
i = graph.get_input('abcdef')
self.assertEqual(i, None)
def test_add_input(self):
try:
import onnx
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
from onnx import helper, TensorProto
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
value_info = helper.make_tensor_value_info('data1',
TensorProto.FLOAT,
[1, 3, 299, 299])
graph.add_input(value_info)
self.assertEqual(len(graph.input), 3)
self.assertEqual(graph.input[-1], value_info)
def test_replace_input(self):
try:
import onnx
from onnx import helper, TensorProto
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
value_info = helper.make_tensor_value_info('data1',
TensorProto.FLOAT,
[1, 3, 299, 299])
graph.replace_input('data0', value_info)
self.assertEqual(len(graph.input), 2)
self.assertEqual(graph.input[0], value_info)
def test_get_initializer(self):
try:
import onnx
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
from onnx import numpy_helper
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
init = graph.get_initializer('conv0')
conv0 = numpy_helper.to_array(init)
self.assertEqual(init.name, 'conv0')
self.assertTrue(np.array_equal(conv0, graph.tensor_dict['conv0']))
def test_get_initializer1(self):
try:
import onnx
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
init = graph.get_initializer('abcdef')
self.assertEqual(init, None)
def test_add_initializer(self):
try:
import onnx
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
from onnx import numpy_helper
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
conv1 = np.random.rand(64, 3, 7, 7).astype('float32')
init1 = numpy_helper.from_array(conv1,
name='conv1')
graph.add_initializer(init1)
self.assertEqual(len(graph.initializer), 2)
self.assertEqual(graph.initializer[1], init1)
def test_replace_initializer(self):
try:
import onnx
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
from onnx import numpy_helper
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
conv1 = np.random.rand(64, 3, 7, 7).astype('float32')
init1 = numpy_helper.from_array(conv1,
name='conv1')
graph.replace_initializer('conv0', init1)
self.assertEqual(len(graph.initializer), 1)
self.assertEqual(graph.initializer[0], init1)
def test_clean_init(self):
try:
import onnx
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
graph.remove_node('Conv_0')
graph.clean_init()
self.assertTrue(not graph.input)
self.assertTrue(not graph.initializer)
self.assertTrue(not graph.tensor_dict)
def test_make_model(self):
try:
import onnx
from dlpy.model_conversion.onnx_graph import OnnxGraph, OnnxNode
except:
unittest.TestCase.skipTest(self, 'onnx package not found')
graph_ = self._generate_graph1()
graph = OnnxGraph.from_onnx(graph_)
model = graph.make_onnx()
g = model.graph
self.assertEqual(len(g.node), 1)
self.assertEqual(g.node[0].name, 'Conv_0')
self.assertEqual(len(g.input), 2)
``` |
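The tests above exercise `OnnxGraph` against graphs built directly with the `onnx.helper` API. As a reference, here is a minimal, self-contained sketch of that construction pattern (shapes and names are illustrative only and are not tied to the test fixtures above):

```python
import numpy as np
from onnx import helper, numpy_helper, TensorProto

# Declare the graph input, the weight tensor, and the output as value_info protos.
data0 = helper.make_tensor_value_info('data0', TensorProto.FLOAT, [1, 3, 224, 224])
conv0_info = helper.make_tensor_value_info('conv0', TensorProto.FLOAT, [64, 3, 7, 7])
output0 = helper.make_tensor_value_info('output0', TensorProto.FLOAT, [1, 64, 112, 112])

# A single Conv node consuming the input and the weight tensor.
conv_op = helper.make_node('Conv', inputs=['data0', 'conv0'], outputs=['output0'],
                           kernel_shape=[7, 7], pads=[3, 3, 3, 3], strides=[2, 2])

# Weights are provided as an initializer built from a numpy array.
init0 = numpy_helper.from_array(np.random.rand(64, 3, 7, 7).astype('float32'),
                                name='conv0')

graph_proto = helper.make_graph(nodes=[conv_op], name='toy_graph',
                                inputs=[data0, conv0_info], outputs=[output0],
                                initializer=[init0])
model_proto = helper.make_model(graph_proto)
```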
{
"source": "jld23/python-pipefitter",
"score": 2
} |
#### File: cas/estimator/neural_net.py
```python
from __future__ import print_function, division, absolute_import, unicode_literals
from . import _assess
from ....estimator import neural_net
from .base import EstimatorMixIn, ModelMixIn, ModelType, check_action
class NeuralNetwork(neural_net.NeuralNetwork, EstimatorMixIn):
''' Neural Network for CAS '''
def fit(self, table, **kwargs):
''' Fit function for neural network '''
params = self.remap_params(type(self).static_params, kwargs)
params['casout'] = self.create_model_table(table.get_connection(),
prefix='nnmodel')
        # The default tanh activation is wrong for regression targets; it
        # forces even regressions to predict classification-style scores.
        # Reset it to None so the neuralNet action picks the right one.
params['targetact'] = None
table.loadactionset('neuralnet', _apptag='UI', _messagelevel='error')
return NeuralNetworkModel(params['casout'], kwargs,
check_action(table.neuralnet.anntrain(**params)),
backend=self._get_backend(table))
class NeuralNetworkModel(neural_net.NeuralNetworkModel, ModelMixIn):
''' Neural Network model for CAS '''
def get_predicted_col_name(self, level_info):
if self._model_type == ModelType.classification:
return '_NN_P_', '_NN_LEVEL_', None
else:
return '_NN_Pred_', None, None
def get_default_event_level(self, n_levels):
return n_levels - 1
def score(self, table, event=None):
'''
Score function for neural network
Parameters
----------
table : CASTable
The CASTable to score
Returns
-------
:class:`pandas.DataFrame`
'''
self._check_backend(table)
self._model_type = self.get_model_type(table)
table.loadactionset('neuralnet', _apptag='UI', _messagelevel='error')
score_out = self.create_output_table(table.get_connection(), prefix='kscoreneuralnet')
if self._model_type == ModelType.classification:
check_action(table.neuralnet.annscore(modeltable=self.data,
copyvars=[self.params['target']],
casout=score_out,
assess=True, encodename = True,
assessonerow=True))
else:
check_action(table.neuralnet.annscore(modeltable=self.data,
copyvars=[self.params['target']],
casout=score_out))
assess_res, assess_info = _assess.assess_model(self, score_out, event)
return self.make_score_output(assess_res, assess_info)
def unload(self):
''' Drop the model table '''
if self.data is not None:
self.data.table.droptable(_messagelevel='error', _apptag='UI')
```
#### File: sas/estimator/regression.py
```python
from __future__ import print_function, division, absolute_import, unicode_literals
from ....estimator import regression
from .base import EstimatorMixIn, ModelMixIn
class LogisticRegression(regression.LogisticRegression, EstimatorMixIn):
''' Logistic Regression for SAS '''
def fit(self, table, **kwargs):
''' Fit function for Logistic Regression'''
params = kwargs.copy()
params.update(type(self).static_params)
params = self.remap_params('REG', params)
model = self.create_model_table(table.sas)
codename = model.get('path') + model.get('name')
method = params.pop('selection', {})
sl = params.pop('sig_level', 0.05)
criterion = params.pop('criterion', None)
selection = {}
if method == "backward":
selection['method'] = 'backward'
selection['select'] = criterion or 'aic'
selection['stop'] = criterion or 'aic'
selection['choose'] = criterion or 'aic'
elif method == "forward":
selection['method'] = 'forward'
selection['details'] = 'all'
selection['select'] = criterion or 'sbc'
selection['stop'] = criterion or 'sbc'
selection['choose'] = criterion or 'sbc'
if selection['select'] == 'sl':
selection['choose'] = 'sbc'
elif method == "stepwise":
selection['method'] = 'stepwise'
selection['select'] = criterion or 'sl'
selection['stop'] = criterion or 'sl'
selection['choose'] = criterion or 'sbc'
if selection['select'] == 'sl':
selection['slentry'] = sl
selection['slstay'] = sl
selection['choose'] = 'sbc'
try:
if params['maxeffects'] > 0 and selection['method'] != 'backward':
selection['maxeffects'] = params['maxeffects']
except:
pass
params['selection'] = selection
stat = table.sas.sasstat()
intercept = ''
if kwargs['intercept'] == False:
intercept = '/ noint'
modelstmt = str(kwargs['target'] + "=" + " ".join(kwargs['inputs']) + intercept)
return LogisticRegressionModel(model, kwargs,
stat.hplogistic(data=table, cls=kwargs['nominals'],
code=codename, model=modelstmt,
**params),
backend=self._get_backend(table))
class LogisticRegressionModel(regression.LogisticRegressionModel, ModelMixIn):
''' Regression trained model for SAS '''
def score(self, table):
''' Score function for Regression '''
self._check_backend(table)
df = self.commonScore(table, algo='LogisticRegression')
return df
class LinearRegression(regression.LinearRegression, EstimatorMixIn):
''' Linear Regression for SAS '''
def fit(self, table, **kwargs):
''' Fit function for decision tree '''
params = kwargs.copy()
params.update(type(self).static_params)
params = self.remap_params('REG', params)
model = self.create_model_table(table.sas)
codename = model.get('path') + model.get('name')
method = params.pop('selection', {})
sl = params.pop('sig_level', None)
criterion = params.pop('criterion', None)
selection = {}
if method == "backward":
selection['method'] = 'backward'
selection['select'] = criterion or 'aic'
selection['stop'] = criterion or 'aic'
selection['choose'] = criterion or 'aic'
elif method == "forward":
selection['method'] = 'forward'
selection['details'] = 'all'
selection['select'] = criterion or 'sbc'
selection['stop'] = criterion or 'sbc'
selection['choose'] = criterion or 'sbc'
elif method == 'lasso':
selection['method'] = 'lasso'
selection['stop'] = criterion or 'aicc'
selection['choose'] = criterion or 'aicc'
elif method == "stepwise":
selection['method'] = 'stepwise'
selection['select'] = criterion or 'sl'
selection['stop'] = criterion or 'sl'
selection['choose'] = criterion or 'sbc'
if sl is not None:
selection['slentry'] = sl
selection['slstay'] = sl
try:
if params['maxeffects'] > 0:
selection['maxeffects'] = params['maxeffects']
except:
pass
if 'method' in selection:
params['selection'] = selection
stat = table.sas.sasstat()
intercept = ''
if params['intercept'] == False:
intercept = '/ noint'
modelstmt = str(kwargs['target'] + "=" + " ".join(kwargs['inputs']) + intercept)
return LinearRegressionModel(model, kwargs,
stat.hpreg(data=table, cls=kwargs['nominals'],
code=codename, model=modelstmt, **params),
backend=self._get_backend(table))
class LinearRegressionModel(regression.LinearRegressionModel, ModelMixIn):
''' Linear Regression model for SAS '''
def score(self, table):
"""
:param table:
:return:
"""
self._check_backend(table)
df = self.commonScore(table, algo='LinearRegression')
return df
```
#### File: sas/transformer/imputer.py
```python
from __future__ import print_function, division, absolute_import, unicode_literals
import pandas as pd
import uuid
from .... import transformer
class Imputer(transformer.Imputer):
''' SAS Imputer Implementation '''
def transform(self, table, value=transformer.Imputer.MEAN):
'''
Fill data missing values with specified values
Parameters
----------
table : SASdata
The table to impute
        value : ImputerMethod or scalar or dict or Series, optional
Specifies the value to use in place of missing values.
* If an ImputerMethod is specified, that method is used for all
missing values.
* If a scalar is specified, that value is used to substitute for
all missings.
* If a dict is specified, the keys correspond to the columns and
the values are the substitution values (which may also be
ImputerMethod instances).
* If a Series is specified, the index corresponds to the columns
and the values are the substitution values.
Returns
-------
SASdata
'''
if (table.table.startswith('_imp_')):
tname = table.table
else:
tname = "_imp_"+table.sas._io._logcnt()+table.table[0:18]
sql = "proc sql;\n select\n"
ds1 = "data "+table.libref+"."+tname+"; set "+table.libref+"."+table.table+";\n"
sqlsel = ' %s(%s),\n'
dsmiss = ' if missing(%s) then do;\n %s = %s;\n end;\n'
sqlinto = ' into\n'
modesql = ''
modeq = "proc sql outobs=1;\n select %s, count(*) as freq into :imp_mode_%s, :imp_mode_freq\n"
modeq += " from %s where %s is not null group by %s order by freq desc, %s;\nquit;\n"
# get list of variables and types
code = "data _null_; d = open('"+table.libref+"."+table.table+"');\n"
code += "nvars = attrn(d, 'NVARS');\n"
code += "vn='VARNUMS='; vl='VARLIST='; vt='VARTYPE=';\n"
code += "put vn nvars; put vl;\n"
code += "do i = 1 to nvars; var = varname(d, i); put var; end;\n"
code += "put vt;\n"
code += "do i = 1 to nvars; var = vartype(d, i); put var; end;\n"
code += "run;"
ll = table.sas.submit(code, "text")
l2 = ll['LOG'].rpartition("VARNUMS= ")
l2 = l2[2].partition("\n")
nvars = int(l2[0])
l2 = l2[2].partition("\n")
varlist = l2[2].upper().split("\n", nvars)
del varlist[nvars]
l2 = l2[2].partition("VARTYPE=")
l2 = l2[2].partition("\n")
vartype = l2[2].split("\n", nvars)
del vartype[nvars]
vars = dict(zip(varlist, vartype))
# DataFrame
if isinstance(value, (pd.DataFrame, type(table))):
raise TypeError('DataFrame-like replacements are not supported')
# Convert Series / etc. to a dictionary
if hasattr(value, 'to_dict'):
value = value.to_dict()
for k, v in value.items():
if isinstance(v, (list, tuple)):
value[k] = v[0]
globalval = False
# Replace the missing values
if not isinstance(value, dict):
globalval = True
method = [value] * nvars
value = dict(zip(varlist, method))
if isinstance(value, dict):
for col, val in value.items():
if not isinstance(val, transformer.ImputerMethod):
if type(val) == str:
if vars.get(col.upper()) != 'N':
ds1 += dsmiss % (col, col, '"'+str(val)+'"')
else:
if not globalval:
raise TypeError("Column '%s' is numeric, but substitution value is character." % col)
else:
if vars.get(col.upper()) == 'N':
ds1 += dsmiss % (col, col, val)
else:
if not globalval:
raise TypeError("Column '%s' is character, but substitution is numeric." % col)
elif val == transformer.Imputer.MAX:
sql += sqlsel %('max', col)
sqlinto += ' :imp_max_'+col+',\n'
if vars.get(col.upper()) == 'N':
ds1 += dsmiss % (col, col, '&imp_max_'+col+'.')
else:
ds1 += dsmiss % (col, col, '"&imp_max_'+col+'."')
elif val == transformer.Imputer.MIN:
sql += sqlsel %('min', col)
sqlinto += ' :imp_min_'+col+',\n'
if vars.get(col.upper()) == 'N':
ds1 += dsmiss % (col, col, '&imp_min_'+col+'.')
else:
ds1 += dsmiss % (col, col, '"&imp_min_'+col+'."')
elif val == transformer.Imputer.MODE:
modesql += modeq %(col, col, table.libref+"."+table.table, col, col, col)
if vars.get(col.upper()) == 'N':
ds1 += dsmiss % (col, col, '&imp_mode_'+col+'.')
else:
ds1 += dsmiss % (col, col, '"&imp_mode_'+col+'."')
elif vars.get(col.upper()) != 'N':
continue
elif val == transformer.Imputer.MEAN:
sql += sqlsel %('mean', col)
sqlinto += ' :imp_mean_'+col+',\n'
ds1 += dsmiss % (col, col, '&imp_mean_'+col+'.')
elif val == transformer.Imputer.MEDIAN:
sql += sqlsel %('median', col)
sqlinto += ' :imp_median_'+col+',\n'
ds1 += dsmiss % (col, col, '&imp_median_'+col+'.')
elif val == transformer.Imputer.MIDRANGE:
sql += sqlsel %('max', col)
sqlinto += ' :imp_max_'+col+',\n'
sql += sqlsel %('min', col)
sqlinto += ' :imp_min_'+col+',\n'
ds1 += dsmiss % (col, col, '(&imp_min_'+col+'.'+' + '+'&imp_max_'+col+'.'+') / 2')
elif val == transformer.Imputer.RANDOM:
sql += sqlsel %('max', col)
sqlinto += ' :imp_max_'+col+',\n'
sql += sqlsel %('min', col)
sqlinto += ' :imp_min_'+col+',\n'
ds1 += dsmiss % (col, col, '&imp_min_'+col+'.'+' + (&imp_max_'+col+'.'+' - &imp_min_'+col+'.'+') * ranuni(0)')
else:
                    print("Unexpected imputation method for column '%s': %s" % (col, val))
if len(sql) > 20:
sql = sql.rstrip(', \n')+'\n'+sqlinto.rstrip(', \n')+'\n from '+table.libref+'.'+table.table+';\nquit;\n'
else:
sql = ''
ds1 += 'run;\n'
ll = table.sas.submit(modesql+sql+ds1)
outtable = table.sas.sasdata(tname, libref=table.libref, results=table.results, dsopts=table.dsopts)
return outtable
```
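As the `transform` docstring above notes, `value` may be an ImputerMethod applied to every column, a scalar, or a per-column mapping. A hedged sketch of the dict form follows; the `cars` table and column names are placeholders, and it assumes the per-column dict is accepted by `transform` exactly as in the backend signature above:

```python
from pipefitter.transformer import Imputer

imp = Imputer(Imputer.MODE)   # default method, as used elsewhere in the test suite
# Hypothetical per-column overrides: statistics for numeric columns and a
# literal replacement for a character column.
subs = {'MSRP': Imputer.MEDIAN, 'Cylinders': Imputer.MODE, 'Type': 'Sedan'}
imputed = imp.transform(cars, value=subs)   # cars: an existing SASdata table
```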
#### File: backends/sas/utils.py
```python
from __future__ import print_function, division, absolute_import, unicode_literals
import numbers
import warnings
from ... import base
class ResourceManager(base.ResourceManager):
def split_data(self, data, k=3, var=None, parallel=False):
if parallel:
warnings.warn('The SAS backend does not support parallel processing; '
'using a single session for all tables.',
RuntimeWarning)
if var is None:
var = ''
if isinstance(k, numbers.Integral):
return data.partition(kfold=k, var=var, singleOut=False)
return data.partition(fraction=k, var=var, singleOut=False)
```
#### File: pipefitter/estimator/regression.py
```python
from __future__ import print_function, division, absolute_import, unicode_literals
import functools
from ..base import BaseEstimator, BaseModel
from ..utils.params import (param_def, check_int, check_string, check_boolean,
check_float, check_variable, check_variable_list)
class LogisticRegression(BaseEstimator):
'''
Logistic Regression
Parameters
----------
intercept : bool, optional
Include the intercept term in the model?
max_effects : int, optional
Specifies the maximum number of effects in any model to consider
during the selection process
selection : string, optional
Specifies the selection method. Valid values are 'none', 'backward',
'forward', and 'stepwise'.
sig_level : float, optional
Specifies the significance level
criterion : string, optional
Specifies selection criterion. Valid values are 'sl', 'aic', 'aicc',
and 'sbc'.
target : string, optional
The target variable
nominals : string or list of strings, optional
The nominal variables
inputs : string or list of strings, optional
The input variables
Examples
--------
>>> log = LogisticRegression(target='Origin',
... inputs=['MPG_City', 'MPG_Highway', 'Length',
... 'Weight', 'Type', 'Cylinders'],
... nominals = ['Type', 'Cylinders', 'Origin'])
Returns
-------
:class:`LogisticRegression`
'''
param_defs = dict(
intercept=param_def(True, check_boolean),
max_effects=param_def(0, functools.partial(check_int, minimum=0)),
selection=param_def('none', functools.partial(check_string,
valid_values=['none', 'backward',
'forward', 'stepwise'])),
sig_level=param_def(0.05, functools.partial(check_float, minimum=0.0, maximum=1.0)),
criterion=param_def(None, functools.partial(check_string, allow_none=True,
valid_values=['sl', 'aic', 'aicc', 'sbc'])),
target=param_def(None, check_variable),
nominals=param_def(None, check_variable_list),
inputs=param_def(None, check_variable_list),
)
def __init__(self, intercept=True, max_effects=0, selection='none',
sig_level=0.05, criterion=None,
target=None, nominals=None, inputs=None):
BaseEstimator.__init__(self, intercept=intercept, max_effects=max_effects,
selection=selection, sig_level=sig_level, criterion=criterion,
target=target, nominals=nominals, inputs=inputs)
if self.params['criterion'] == 'sl' and \
self.params['selection'] in ['backward', 'lasso']:
raise ValueError("criterion='sl' is not valid with "
"selection='backward' | 'lasso'")
def fit(self, table, *args, **kwargs):
'''
Fit function for logistic regression
Parameters
----------
*args : dicts or two-element tuples or consecutive key/value pairs, optional
The following types are allowed:
* Dictionaries contain key/value pairs of parameters.
* Two-element tuples must contain the name of the parameter in the
first element and the value in the second element.
* Consecutive key/value pairs are also allowed.
**kwargs : keyword arguments, optional
These keyword arguments are the same as on the constructor.
Examples
--------
>>> log = LogisticRegression(target='Origin',
... inputs=['MPG_City', 'MPG_Highway', 'Length',
... 'Weight', 'Type', 'Cylinders'],
... nominals = ['Type', 'Cylinders', 'Origin'])
>>> model = log.fit(training_data)
Returns
-------
:class:`LogisticRegressionModel`
'''
params = self.get_combined_params(*args, **kwargs)
return self._get_super(table).fit(table, **params)
class LogisticRegressionModel(BaseModel):
    ''' LogisticRegression trained model '''
param_defs = LogisticRegression.param_defs
class LinearRegression(BaseEstimator):
'''
Linear Regression
Parameters
----------
intercept : bool, optional
Include the intercept term in the model?
max_effects : int, optional
Specifies the maximum number of effects in any model to consider
during the selection process
selection : string, optional
Specifies the selection method. Valid values are 'none', 'backward',
'forward', 'lasso', and 'stepwise'.
sig_level : float, optional
Specifies the significance level
criterion : string, optional
Specifies selection criterion. Valid values are 'sl', 'aic', 'aicc',
and 'sbc'.
target : string, optional
The target variable
nominals : string or list of strings, optional
The nominal variables
inputs : string or list of strings, optional
The input variables
Examples
--------
>>> lin = LinearRegression(target='MSRP',
... inputs=['MPG_City', 'MPG_Highway', 'Length',
... 'Weight', 'Type', 'Cylinders'],
... nominals = ['Type', 'Cylinders', 'Origin'])
Returns
-------
:class:`LinearRegression`
'''
param_defs = dict(
intercept=param_def(True, check_boolean),
max_effects=param_def(0, functools.partial(check_int, minimum=0)),
selection=param_def('none', functools.partial(check_string,
valid_values=['none', 'backward', 'forward',
'lasso', 'stepwise'])),
sig_level=param_def(0.05, functools.partial(check_float, minimum=0.0, maximum=1.0)),
criterion=param_def(None, functools.partial(check_string, allow_none=True,
valid_values=['sl', 'aic', 'aicc', 'sbc'])),
target=param_def(None, check_variable),
nominals=param_def(None, check_variable_list),
inputs=param_def(None, check_variable_list),
)
def __init__(self, intercept=True, max_effects=0, selection='none',
sig_level=0.05, criterion=None,
target=None, nominals=None, inputs=None):
BaseEstimator.__init__(self, intercept=intercept, max_effects=max_effects,
selection=selection, sig_level=sig_level, criterion=criterion,
target=target, nominals=nominals, inputs=inputs)
if self.params['criterion'] == 'sl' and \
self.params['selection'] in ['backward', 'lasso']:
raise ValueError("criterion='sl' is not valid with "
"selection='backward' | 'lasso'")
def fit(self, table, *args, **kwargs):
'''
Fit function for linear regression
Parameters
----------
*args : dicts or two-element tuples or consecutive key/value pairs, optional
The following types are allowed:
* Dictionaries contain key/value pairs of parameters.
* Two-element tuples must contain the name of the parameter in the
first element and the value in the second element.
* Consecutive key/value pairs are also allowed.
**kwargs : keyword arguments, optional
These keyword arguments are the same as on the constructor.
Examples
--------
>>> lin = LinearRegression(target='MSRP',
... inputs=['MPG_City', 'MPG_Highway', 'Length',
... 'Weight', 'Type', 'Cylinders'],
... nominals = ['Type', 'Cylinders', 'Origin'])
>>> model = lin.fit(training_data)
Returns
-------
:class:`LinearRegressionModel`
'''
params = self.get_combined_params(*args, **kwargs)
return self._get_super(table).fit(table, **params)
class LinearRegressionModel(BaseModel):
    ''' LinearRegression trained model '''
param_defs = LinearRegression.param_defs
```
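Both regression estimators validate the selection/criterion pairing at construction time: `criterion='sl'` cannot be combined with `selection='backward'` (or `'lasso'` for linear regression). A short sketch, assuming the classes are exported from `pipefitter.estimator` like the tree estimators used in the tests:

```python
from pipefitter.estimator import LogisticRegression

# Valid: stepwise selection driven by significance level.
log = LogisticRegression(target='Origin',
                         inputs=['MPG_City', 'MPG_Highway', 'Length'],
                         nominals=['Origin'],
                         selection='stepwise', criterion='sl', sig_level=0.05)

# Invalid: rejected by the check in __init__ above.
try:
    LogisticRegression(target='Origin', inputs=['MPG_City'],
                       selection='backward', criterion='sl')
except ValueError as err:
    print(err)   # criterion='sl' is not valid with selection='backward' | 'lasso'
```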
#### File: python-pipefitter/pipefitter/pipeline.py
```python
from __future__ import print_function, division, absolute_import, unicode_literals
import numpy as np
import pandas as pd
import re
import six
try:  # Python 3.3+ moved the ABCs to collections.abc
    from collections.abc import Sequence
except ImportError:  # Python 2 fallback
    from collections import Sequence
from .base import BaseTransformer, BaseEstimator, BaseModel
def tosequence(obj):
''' Cast an iterable to a sequence '''
if isinstance(obj, np.ndarray):
return np.asarray(obj)
elif isinstance(obj, Sequence):
return obj
return list(obj)
@six.python_2_unicode_compatible
class Pipeline(object):
'''
Execute a series of transformers and estimators
Parameters
----------
stages : one or more transformers/estimators
The stages of the pipeline to execute
Examples
--------
Basic pipeline of imputers and an estimator:
>>> mean_imp = Imputer(Imputer.MEAN)
>>> mode_imp = Imputer(Imputer.MODE)
>>> dtree = DecisionTree(target='Origin',
... nominals=['Type', 'Cylinders', 'Origin'],
... inputs=['MPG_City', 'MPG_Highway', 'Length',
... 'Weight', 'Type', 'Cylinders'])
>>> pipe = Pipeline([mean_imp, mode_imp, dtree])
Returns
-------
:class:`Pipeline`
'''
def __init__(self, stages):
self.stages = tosequence(stages)
self._extra_params = []
for item in self.stages:
if not isinstance(item, BaseTransformer):
raise TypeError('%s is not a transformer or estimator' % item)
def __str__(self):
return '%s([%s])' % (type(self).__name__,
', '.join(str(x) for x in self.stages))
def __repr__(self):
return str(self)
def set_params(self, *args, **kwargs):
'''
Set additional parameters for the estimators in the pipeline
Parameters
----------
*args : positional parameters, optional
Any valid parameters to the estimators' ``fit`` method
**kwargs : keyword parameters, optional
Any valid keyword parameters to the estimators' ``fit`` method
'''
self._extra_params.extend(list(args))
self._extra_params.append(kwargs)
def fit(self, table, *args, **kwargs):
'''
Train the models using the stages in the pipeline
Notes
-----
Parameters passed in on this method are not persisted on
the pipeline. They are only used during the scope of this method.
Parameters
----------
table : data set
Any data set object supported by the transformers and
estimators in the pipeline stages
*args : positional parameters, optional
Any valid parameters to the estimators' ``fit`` method
**kwargs : keyword parameters, optional
Any valid keyword parameters to the estimators' ``fit`` method
Examples
--------
Basic pipeline fit using imputers and an estimator:
>>> mean_imp = Imputer(Imputer.MEAN)
>>> mode_imp = Imputer(Imputer.MODE)
>>> dtree = DecisionTree(target='Origin',
... nominals=['Type', 'Cylinders', 'Origin'],
... inputs=['MPG_City', 'MPG_Highway', 'Length',
... 'Weight', 'Type', 'Cylinders'])
>>> pipe = Pipeline([mean_imp, mode_imp, dtree])
>>> model = pipe.fit(data)
Returns
-------
:class:`PipelineModel`
'''
out = []
last_idx = len(self.stages) - 1
extra_params = list(self._extra_params)
extra_params.extend(args)
extra_params.append(kwargs)
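        # Walk the stages in order: estimators are fit on the current table
        # (stopping after the final stage), while every stage's transform
        # output becomes the input table for the next stage.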
for i, stage in enumerate(self.stages):
params = stage.get_filtered_params(*extra_params)
if isinstance(stage, BaseEstimator):
out.append(stage.fit(table, **params))
if i == last_idx:
break
else:
out.append(stage)
table = out[-1].transform(table)
if out:
return PipelineModel(out)
def transform(self, table, *args, **kwargs):
'''
Execute the transformations in this pipeline only
Parameters
----------
table : data set
Any data set object supported by the transformers and
estimators in the pipeline stages
*args : positional parameters, optional
Any valid parameters to the transformers' ``transform`` method
**kwargs : keyword parameters, optional
Any valid keyword parameters to the transformers' ``transform`` method
Notes
-----
When the pipeline contains estimators, they typically just pass the
input table on to the next stage of the pipeline.
Examples
--------
Basic pipeline fit using imputers and an estimator:
>>> mean_imp = Imputer(Imputer.MEAN)
>>> mode_imp = Imputer(Imputer.MODE)
>>> dtree = DecisionTree(target='Origin',
... nominals=['Type', 'Cylinders', 'Origin'],
... inputs=['MPG_City', 'MPG_Highway', 'Length',
... 'Weight', 'Type', 'Cylinders'])
>>> pipe = Pipeline([mean_imp, mode_imp, dtree])
>>> new_table = pipe.transform(data)
Returns
-------
data set
The same type of data set as passed in `table`
'''
out = []
last_idx = len(self.stages) - 1
extra_params = list(self._extra_params)
extra_params.extend(args)
extra_params.append(kwargs)
for i, stage in enumerate(self.stages):
params = stage.get_filtered_params(*extra_params)
if isinstance(stage, BaseEstimator):
out.append(stage.fit(table, **params))
if i == last_idx:
break
else:
out.append(stage)
table = out[-1].transform(table)
return table
def __getitem__(self, idx):
return self.stages[idx]
@six.python_2_unicode_compatible
class PipelineModel(object):
'''
Trained model for a Pipeline
Notes
-----
This object is not instantiated directly. It is the result of
calling the ``fit`` method of the :class:`Pipeline` object.
Parameters
----------
    stages : list of transformers / models
A list of the elements of the fitted Pipeline.
Returns
-------
:class:`PipelineModel`
'''
def __init__(self, stages):
self.stages = tosequence(stages)
def __str__(self):
return '%s([%s])' % (type(self).__name__,
', '.join(str(x) for x in self.stages))
def __repr__(self):
return str(self)
def score(self, table, **kwargs):
'''
Apply transformations and score the data using the trained model
Parameters
----------
table : data set
A data set that is of the same type as the training data set
Examples
--------
Basic pipeline model transform using imputers and an estimator:
>>> mean_imp = Imputer(Imputer.MEAN)
>>> mode_imp = Imputer(Imputer.MODE)
>>> dtree = DecisionTree(target='Origin',
... nominals=['Type', 'Cylinders', 'Origin'],
... inputs=['MPG_City', 'MPG_Highway', 'Length',
... 'Weight', 'Type', 'Cylinders'])
>>> pipe = Pipeline([mean_imp, mode_imp, dtree])
>>> model = pipe.fit(training_data)
>>> score = model.score(data)
Returns
-------
:class:`pandas.DataFrame`
'''
scores = []
names = {}
for i, stage in enumerate(self.stages):
if isinstance(stage, BaseModel):
scores.append(stage.score(table, **kwargs))
name = re.sub(r'Model$', '', type(stage).__name__)
if name in names:
names[name] += 1
name = '%s%s' % (name, names[name])
else:
names[name] = 0
scores[-1].name = name
table = stage.transform(table)
if scores:
if len(scores) == 1:
return scores[0]
return pd.DataFrame(scores)
def transform(self, table):
'''
Run the transforms in the trained pipeline
Parameters
----------
table : data set
A data set that is of the same type as the training data set
Examples
--------
Basic pipeline model transform using imputers and an estimator:
>>> mean_imp = Imputer(Imputer.MEAN)
>>> mode_imp = Imputer(Imputer.MODE)
>>> dtree = DecisionTree(target='Origin',
... nominals=['Type', 'Cylinders', 'Origin'],
... inputs=['MPG_City', 'MPG_Highway', 'Length',
... 'Weight', 'Type', 'Cylinders'])
>>> pipe = Pipeline([mean_imp, mode_imp, dtree])
>>> model = pipe.fit(training_data)
>>> new_table = model.transform(data)
Returns
-------
data set
A data set of the same type that was passed in `table`
'''
for stage in self.stages:
table = stage.transform(table)
return table
def __getitem__(self, idx):
return self.stages[idx]
def unload(self):
''' Unload model resources '''
for stage in self.stages:
if isinstance(stage, BaseModel):
stage.unload()
```
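Parameters supplied to `fit` apply only to that call, while `set_params` persists them on the pipeline; both accept dictionaries keyed by an estimator's parameter references, as the CAS tests below also exercise. A brief sketch (the `training_data` table is assumed to exist and to be a backend data set such as a CASTable):

```python
from pipefitter.estimator import DecisionTree
from pipefitter.pipeline import Pipeline
from pipefitter.transformer import Imputer

dtree = DecisionTree(target='Origin',
                     inputs=['MPG_City', 'MPG_Highway', 'Length'],
                     nominals=['Origin'])
pipe = Pipeline([Imputer(Imputer.MODE), dtree])

# One-off override: only this fit call trains against MSRP.
model = pipe.fit(training_data, {dtree.target: 'MSRP'})

# Persistent override: stored on the pipeline and reused by later fit calls.
pipe.set_params({dtree.target: 'MSRP'})
model = pipe.fit(training_data)
```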
#### File: tests/cas/test_pipeline.py
```python
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import numpy as np
import pandas as pd
import swat
import swat.utils.testing as tm
import unittest
from pipefitter.estimator import DecisionTree, DecisionForest, GBTree
from pipefitter.pipeline import Pipeline, tosequence
from pipefitter.transformer import Imputer
from swat.utils.compat import patch_pandas_sort
from swat.utils.testing import UUID_RE, get_cas_host_type, load_data
patch_pandas_sort()
USER, PASSWD = tm.get_user_pass()
HOST, PORT, PROTOCOL = tm.get_host_port_proto()
# Classification
ctarget = 'Origin'
# Regression
rtarget = 'MSRP'
inputs = ['MPG_City', 'MPG_Highway', 'Length', 'Weight', 'Type', 'Cylinders']
nominals = ['Type', 'Cylinders', 'Origin']
class TestPipelineUtils(tm.TestCase):
def test_tosequence(self):
self.assertEqual(tosequence(('a', 'b', 'c')), ('a', 'b', 'c'))
self.assertEqual(tosequence(['a', 'b', 'c']), ['a', 'b', 'c'])
self.assertEqual(tosequence(iter(('a', 'b', 'c'))), ['a', 'b', 'c'])
self.assertEqual(tosequence('abc'), 'abc')
self.assertEqual(list(tosequence(np.array((1, 2, 3)))),
list(np.asarray(np.array((1, 2, 3)))))
with self.assertRaises(TypeError):
tosequence(4)
class TestPipeline(tm.TestCase):
server_type = None
def setUp(self):
swat.reset_option()
swat.options.cas.print_messages = True
swat.options.interactive_mode = True
self.s = swat.CAS(HOST, PORT, USER, PASSWD, protocol=PROTOCOL)
if type(self).server_type is None:
type(self).server_type = get_cas_host_type(self.s)
self.srcLib = tm.get_casout_lib(self.server_type)
r = tm.load_data(self.s, 'datasources/cars_single.sashdat', self.server_type)
self.table = r['casTable']
def tearDown(self):
# tear down tests
self.s.terminate()
del self.s
swat.reset_option()
def test_basic(self):
tbl = self.table
mean_imp = Imputer(Imputer.MEAN)
mode_imp = Imputer(Imputer.MODE)
dtree = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
pipe = Pipeline([mean_imp, mode_imp, dtree])
model = pipe.fit(tbl)
self.assertEqual(model.__class__.__name__, 'PipelineModel')
self.assertEqual(len(model.stages), 3)
self.assertTrue(model[0] is mean_imp)
self.assertTrue(model[1] is mode_imp)
self.assertEqual(model[2].__class__.__name__, 'DecisionTreeModel')
out = model.score(tbl)
self.assertEqual(set(list(out.index)),
set(['Target', 'Level', 'Var', 'NBins', 'NObsUsed',
'TargetCount', 'TargetMiss', 'PredCount', 'PredMiss',
'Event', 'EventCount', 'NonEventCount', 'EventMiss',
'AreaUnderROCCurve', 'CRCut', 'ClassificationCutOff',
'KS', 'KSCutOff', 'MisClassificationRate']))
# Bad item type
with self.assertRaises(TypeError):
Pipeline([mean_imp, mode_imp, 'foo', dtree])
def test_multiple_estimators(self):
tbl = self.table
mean_imp = Imputer(Imputer.MEAN)
mode_imp = Imputer(Imputer.MODE)
dtree1 = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
dtree2 = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
pipe = Pipeline([mean_imp, mode_imp, dtree1, dtree2])
model = pipe.fit(tbl)
self.assertEqual(model.__class__.__name__, 'PipelineModel')
self.assertEqual(len(model.stages), 4)
self.assertTrue(model[0] is mean_imp)
self.assertTrue(model[1] is mode_imp)
self.assertEqual(model[2].__class__.__name__, 'DecisionTreeModel')
self.assertEqual(model[3].__class__.__name__, 'DecisionTreeModel')
out = model.score(tbl)
self.assertEqual(set(list(out.index)),
set(['DecisionTree', 'DecisionTree1']))
def test_str(self):
mean_imp = Imputer(Imputer.MEAN)
mode_imp = Imputer(Imputer.MODE)
dtree = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
pipe = Pipeline([mean_imp, mode_imp, dtree])
out = "Pipeline([Imputer(MEAN), Imputer(MODE), " + \
"DecisionTree(alpha=0.0, cf_level=0.25, criterion=None, " + \
"inputs=['MPG_City', 'MPG_Highway', 'Length', 'Weight', " + \
"'Type', 'Cylinders'], leaf_size=5, max_branches=2, " + \
"max_depth=6, n_bins=20, nominals=['Type', 'Cylinders', " + \
"'Origin'], prune=False, target='Origin', var_importance=False)])"
self.assertEqual(str(pipe).replace("u'", "'"), out)
def test_repr(self):
mean_imp = Imputer(Imputer.MEAN)
mode_imp = Imputer(Imputer.MODE)
dtree = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
pipe = Pipeline([mean_imp, mode_imp, dtree])
out = "Pipeline([Imputer(MEAN), Imputer(MODE), " + \
"DecisionTree(alpha=0.0, cf_level=0.25, criterion=None, " + \
"inputs=['MPG_City', 'MPG_Highway', 'Length', 'Weight', " + \
"'Type', 'Cylinders'], leaf_size=5, max_branches=2, " + \
"max_depth=6, n_bins=20, nominals=['Type', 'Cylinders', " + \
"'Origin'], prune=False, target='Origin', var_importance=False)])"
self.assertEqual(repr(pipe).replace("u'", "'"), out)
def test_model_str(self):
tbl = self.table
mean_imp = Imputer(Imputer.MEAN)
mode_imp = Imputer(Imputer.MODE)
dtree = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
model = Pipeline([mean_imp, mode_imp, dtree]).fit(tbl)
out = "PipelineModel([Imputer(MEAN), Imputer(MODE), " + \
"DecisionTreeModel(alpha=0.0, cf_level=0.25, criterion=None, " + \
"inputs=['MPG_City', 'MPG_Highway', 'Length', 'Weight', " + \
"'Type', 'Cylinders'], leaf_size=5, max_branches=2, " + \
"max_depth=6, n_bins=20, nominals=['Type', 'Cylinders', " + \
"'Origin'], prune=False, target='Origin', var_importance=False)])"
self.assertEqual(str(model).replace("u'", "'"), out)
def test_model_repr(self):
tbl = self.table
mean_imp = Imputer(Imputer.MEAN)
mode_imp = Imputer(Imputer.MODE)
dtree = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
model = Pipeline([mean_imp, mode_imp, dtree]).fit(tbl)
out = "PipelineModel([Imputer(MEAN), Imputer(MODE), " + \
"DecisionTreeModel(alpha=0.0, cf_level=0.25, criterion=None, " + \
"inputs=['MPG_City', 'MPG_Highway', 'Length', 'Weight', " + \
"'Type', 'Cylinders'], leaf_size=5, max_branches=2, " + \
"max_depth=6, n_bins=20, nominals=['Type', 'Cylinders', " + \
"'Origin'], prune=False, target='Origin', var_importance=False)])"
self.assertEqual(repr(model).replace("u'", "'"), out)
def test_set_params(self):
tbl = self.table
mean_imp = Imputer(Imputer.MEAN)
mode_imp = Imputer(Imputer.MODE)
dtree = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
pipe = Pipeline([mean_imp, mode_imp, dtree])
out = pipe.fit(tbl).score(tbl)
self.assertEqual(out.loc['Target'], 'Origin')
# Set extra parameters on Pipeline (not on estimator)
pipe.set_params({dtree.target: 'MSRP'})
self.assertEqual(dtree.target, 'Origin')
out = pipe.fit(tbl).score(tbl)
self.assertEqual(out.loc['Target'], 'MSRP')
# Set parameters during fit
pipe = Pipeline([mean_imp, mode_imp, dtree])
out = pipe.fit(tbl).score(tbl)
self.assertEqual(out.loc['Target'], 'Origin')
out = pipe.fit(tbl, {dtree.target: 'MSRP'}).score(tbl)
self.assertEqual(out.loc['Target'], 'MSRP')
def test_transform(self):
tbl = self.table
mode_imp = Imputer(Imputer.MODE)
dtree = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
pipe = Pipeline([mode_imp, dtree])
self.assertEqual(tbl.nmiss().max(), 2)
out = pipe.transform(tbl)
self.assertEqual(out.__class__.__name__, 'CASTable')
self.assertEqual(tbl.nmiss().max(), 2)
self.assertEqual(out.nmiss().max(), 0)
def test_model_transform(self):
tbl = self.table
mode_imp = Imputer(Imputer.MODE)
dtree = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
pipe = Pipeline([mode_imp, dtree])
self.assertEqual(tbl.nmiss().max(), 2)
model = pipe.fit(tbl)
out = model.transform(tbl)
self.assertEqual(out.__class__.__name__, 'CASTable')
self.assertEqual(tbl.nmiss().max(), 2)
self.assertEqual(out.nmiss().max(), 0)
def test_getitem(self):
tbl = self.table
mode_imp = Imputer(Imputer.MODE)
dtree = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
pipe = Pipeline([mode_imp, dtree])
self.assertTrue(pipe[0] is mode_imp)
self.assertTrue(pipe[1] is dtree)
with self.assertRaises(IndexError):
pipe[2]
with self.assertRaises(TypeError):
pipe['foo']
def test_model_getitem(self):
tbl = self.table
mode_imp = Imputer(Imputer.MODE)
dtree = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
model = Pipeline([mode_imp, dtree]).fit(tbl)
self.assertTrue(model[0] is mode_imp)
self.assertTrue(model[1] is not dtree)
self.assertEqual(model[1].__class__.__name__, 'DecisionTreeModel')
with self.assertRaises(IndexError):
model[2]
with self.assertRaises(TypeError):
model['foo']
def test_classification_score(self):
tbl = self.table
mean_imp = Imputer(Imputer.MEAN)
mode_imp = Imputer(Imputer.MODE)
dtree = DecisionTree(target='Origin', nominals=nominals, inputs=inputs)
pipe = Pipeline([mean_imp, mode_imp, dtree])
model = pipe.fit(tbl)
score = model.score(tbl)
self.assertTrue(isinstance(score, pd.Series))
self.assertEqual(score.loc['Target'], 'Origin')
self.assertEqual(score.loc['Level'], 'CLASS')
self.assertEqual(score.loc['Event'], 'USA')
self.assertEqual(score.loc['NBins'], 100)
self.assertEqual(score.loc['NObsUsed'], 428)
self.assertTrue(isinstance(score.loc['AreaUnderROCCurve'], float))
self.assertTrue(isinstance(score.loc['CRCut'], float))
self.assertTrue(isinstance(score.loc['KS'], float))
self.assertTrue(isinstance(score.loc['KSCutOff'], float))
self.assertTrue(isinstance(score.loc['MisClassificationRate'], float))
def test_regression_score(self):
tbl = self.table
mean_imp = Imputer(Imputer.MEAN)
mode_imp = Imputer(Imputer.MODE)
dtree = DecisionTree(target='MSRP', nominals=nominals, inputs=inputs)
pipe = Pipeline([mean_imp, mode_imp, dtree])
model = pipe.fit(tbl)
score = model.score(tbl)
self.assertTrue(isinstance(score, pd.Series))
self.assertEqual(score.loc['Target'], 'MSRP')
self.assertEqual(score.loc['Level'], 'INTERVAL')
self.assertEqual(score.loc['NBins'], 100)
self.assertEqual(score.loc['NObsUsed'], 428)
self.assertTrue(isinstance(score.loc['AverageSquaredError'], float))
self.assertTrue(isinstance(score.loc['AverageAbsoluteError'], float))
self.assertTrue(isinstance(score.loc['AverageSquaredLogarithmicError'], float))
self.assertTrue(isinstance(score.loc['RootAverageSquaredError'], float))
self.assertTrue(isinstance(score.loc['RootAverageAbsoluteError'], float))
self.assertTrue(isinstance(score.loc['RootAverageSquaredLogarithmicError'], float))
def test_unload(self):
mean_imp = Imputer(Imputer.MEAN)
mode_imp = Imputer(Imputer.MODE)
dtree = DecisionTree(target='MSRP', nominals=nominals, inputs=inputs)
pipe = Pipeline([mean_imp, mode_imp, dtree])
model = pipe.fit(self.table)
self.assertEqual(model[-1].data.table.tableexists().exists, 1)
model.unload()
self.assertEqual(model[-1].data.table.tableexists().exists, 0)
if __name__ == '__main__':
tm.runtests()
```
#### File: pipefitter/utils/connection.py
```python
from __future__ import print_function, division, absolute_import, unicode_literals
import weakref
class ConnectionManager(object):
'''
Register a connection with an object
'''
def __init__(self):
self._connection = None
def set_connection(self, connection):
''' Set the connection object for estimator '''
if connection is None:
self._connection = None
else:
self._connection = weakref.ref(connection)
def get_connection(self):
'''
Get the connection session for estimator
Returns
-------
Connection object
'''
conn = None
if self._connection is not None:
try:
conn = self._connection()
except:
pass
if conn is None:
raise ValueError('No connection is currently registered')
return conn
``` |
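Because the registered connection is held through a `weakref`, `get_connection` raises once the session object is garbage collected; only an outside strong reference keeps it alive. A minimal sketch using a stand-in object instead of a real CAS/SAS session (immediate collection relies on CPython's reference counting):

```python
from pipefitter.utils.connection import ConnectionManager

class FakeSession(object):
    ''' Stand-in for a CAS/SAS session object '''
    pass

mgr = ConnectionManager()
session = FakeSession()
mgr.set_connection(session)
assert mgr.get_connection() is session   # strong reference still alive

del session                              # drop the last strong reference
try:
    mgr.get_connection()                 # weakref is now dead
except ValueError as err:
    print(err)                           # 'No connection is currently registered'
```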
{
"source": "jld23/sasoptpy",
"score": 2
} |
#### File: examples/client_side/decentralization.py
```python
import sasoptpy as so
import pandas as pd
def test(cas_conn):
m = so.Model(name='decentralization', session=cas_conn)
DEPTS = ['A', 'B', 'C', 'D', 'E']
CITIES = ['Bristol', 'Brighton', 'London']
benefit_data = pd.DataFrame([
['Bristol', 10, 15, 10, 20, 5],
['Brighton', 10, 20, 15, 15, 15]],
columns=['city'] + DEPTS).set_index('city')
comm_data = pd.DataFrame([
['A', 'B', 0.0],
['A', 'C', 1.0],
['A', 'D', 1.5],
['A', 'E', 0.0],
['B', 'C', 1.4],
['B', 'D', 1.2],
['B', 'E', 0.0],
['C', 'D', 0.0],
['C', 'E', 2.0],
['D', 'E', 0.7]], columns=['i', 'j', 'comm']).set_index(['i', 'j'])
cost_data = pd.DataFrame([
['Bristol', 'Bristol', 5],
['Bristol', 'Brighton', 14],
['Bristol', 'London', 13],
['Brighton', 'Brighton', 5],
['Brighton', 'London', 9],
['London', 'London', 10]], columns=['i', 'j', 'cost']).set_index(
['i', 'j'])
max_num_depts = 3
benefit = {}
for city in CITIES:
for dept in DEPTS:
try:
benefit[dept, city] = benefit_data.loc[city, dept]
except:
benefit[dept, city] = 0
comm = {}
for row in comm_data.iterrows():
(i, j) = row[0]
comm[i, j] = row[1]['comm']
comm[j, i] = comm[i, j]
cost = {}
for row in cost_data.iterrows():
(i, j) = row[0]
cost[i, j] = row[1]['cost']
cost[j, i] = cost[i, j]
assign = m.add_variables(DEPTS, CITIES, vartype=so.BIN, name='assign')
IJKL = [(i, j, k, l)
for i in DEPTS for j in CITIES for k in DEPTS for l in CITIES
if i < k]
product = m.add_variables(IJKL, vartype=so.BIN, name='product')
totalBenefit = so.expr_sum(benefit[i, j] * assign[i, j]
for i in DEPTS for j in CITIES)
totalCost = so.expr_sum(comm[i, k] * cost[j, l] * product[i, j, k, l]
for (i, j, k, l) in IJKL)
m.set_objective(totalBenefit-totalCost, name='netBenefit', sense=so.MAX)
m.add_constraints((so.expr_sum(assign[dept, city] for city in CITIES)
== 1 for dept in DEPTS), name='assign_dept')
m.add_constraints((so.expr_sum(assign[dept, city] for dept in DEPTS)
<= max_num_depts for city in CITIES), name='cardinality')
product_def1 = m.add_constraints((assign[i, j] + assign[k, l] - 1
<= product[i, j, k, l]
for (i, j, k, l) in IJKL),
name='pd1')
product_def2 = m.add_constraints((product[i, j, k, l] <= assign[i, j]
for (i, j, k, l) in IJKL),
name='pd2')
product_def3 = m.add_constraints((product[i, j, k, l] <= assign[k, l]
for (i, j, k, l) in IJKL),
name='pd3')
m.solve()
print(m.get_problem_summary())
m.drop_constraints(product_def1)
m.drop_constraints(product_def2)
m.drop_constraints(product_def3)
m.add_constraints((
so.expr_sum(product[i, j, k, l]
for j in CITIES if (i, j, k, l) in IJKL) == assign[k, l]
for i in DEPTS for k in DEPTS for l in CITIES if i < k),
name='pd4')
m.add_constraints((
so.expr_sum(product[i, j, k, l]
for l in CITIES if (i, j, k, l) in IJKL) == assign[i, j]
for k in DEPTS for i in DEPTS for j in CITIES if i < k),
name='pd5')
m.solve()
print(m.get_problem_summary())
totalBenefit.set_name('totalBenefit')
totalCost.set_name('totalCost')
print(so.get_solution_table(totalBenefit, totalCost))
print(so.get_solution_table(assign).unstack(level=-1))
return m.get_objective_value()
```
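The `pd1`-`pd3` constraints dropped and replaced above are the standard linearization of a product of binary variables: for binary x and y, z = x*y is enforced by z >= x + y - 1, z <= x, and z <= y. A stand-alone sketch of that device in sasoptpy (no session is attached, so the model is only built, not solved):

```python
import sasoptpy as so

m = so.Model(name='linearize_product')
x = m.add_variable(vartype=so.BIN, name='x')
y = m.add_variable(vartype=so.BIN, name='y')
z = m.add_variable(vartype=so.BIN, name='z')

# z behaves as the product x*y under these three linear constraints
m.add_constraint(x + y - 1 <= z, name='lower')
m.add_constraint(z <= x, name='upper_x')
m.add_constraint(z <= y, name='upper_y')
```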
#### File: examples/client_side/least_squares.py
```python
import sasoptpy as so
import pandas as pd
def test(cas_conn, data=None):
# Use default data if not passed
if data is None:
data = pd.DataFrame([
[4, 8, 43.71],
[62, 5, 351.29],
[81, 62, 2878.91],
[85, 75, 3591.59],
[65, 54, 2058.71],
[96, 84, 4487.87],
[98, 29, 1773.52],
[36, 33, 767.57],
[30, 91, 1637.66],
[3, 59, 215.28],
[62, 57, 2067.42],
[11, 48, 394.11],
[66, 21, 932.84],
[68, 24, 1069.21],
[95, 30, 1770.78],
[34, 14, 368.51],
[86, 81, 3902.27],
[37, 49, 1115.67],
[46, 80, 2136.92],
[87, 72, 3537.84],
], columns=['x1', 'x2', 'y'])
m = so.Model(name='least_squares', session=cas_conn)
# Regression model: L(a,b,c) = a * x1 + b * x2 + c * x1 * x2
a = m.add_variable(name='a')
b = m.add_variable(name='b')
c = m.add_variable(name='c')
x1 = data['x1']
x2 = data['x2']
y = data['y']
err = m.add_implicit_variable((
y[i] - (a * x1[i] + b * x2[i] + c * x1[i] * x2[i]) for i in data.index
), name='error')
m.set_objective(so.expr_sum(err[i]**2 for i in data.index),
sense=so.MIN,
name='total_error')
m.solve(verbose=True, options={'with': 'nlp'})
return m.get_objective_value()
```
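Because the regression function is linear in the coefficients a, b, and c, the same fit can be cross-checked on the client with an ordinary least-squares solve over the design matrix [x1, x2, x1*x2]. A small numpy sketch (using a few illustrative rows rather than the full default data):

```python
import numpy as np
import pandas as pd

data = pd.DataFrame([[4, 8, 43.71], [62, 5, 351.29], [81, 62, 2878.91],
                     [85, 75, 3591.59], [65, 54, 2058.71]],
                    columns=['x1', 'x2', 'y'])

# Design matrix for L(a, b, c) = a*x1 + b*x2 + c*x1*x2 (no intercept term).
A = np.column_stack([data['x1'], data['x2'], data['x1'] * data['x2']])
coeffs, residuals, rank, _ = np.linalg.lstsq(A, data['y'].values, rcond=None)
print(coeffs)      # estimates of (a, b, c)
print(residuals)   # sum of squared errors, comparable to total_error
```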
#### File: examples/client_side/nonlinear_1.py
```python
import sasoptpy as so
def test(cas_conn):
m = so.Model(name='nlpse01', session=cas_conn)
x = m.add_variables(range(1, 9), lb=0.1, ub=10, name='x')
f = so.Expression(0.4 * (x[1]/x[7]) ** 0.67 + 0.4 * (x[2]/x[8]) ** 0.67 + 10 - x[1] - x[2], name='f')
m.set_objective(f, sense=so.MIN, name='f1')
m.add_constraint(1 - 0.0588*x[5]*x[7] - 0.1*x[1] >= 0, name='c1')
m.add_constraint(1 - 0.0588*x[6]*x[8] - 0.1*x[1] - 0.1*x[2] >= 0, name='c2')
m.add_constraint(1 - 4*x[3]/x[5] - 2/(x[3]**0.71 * x[5]) - 0.0588*(x[7]/x[3]**1.3) >= 0, name='c3')
m.add_constraint(1 - 4*x[4]/x[6] - 2/(x[4]**0.71 * x[6]) - 0.0588*(x[8]/x[4]**1.3) >= 0, name='c4')
m.add_constraint(f == [0.1, 4.2], name='frange')
x[1].set_init(6)
x[2].set_init(3)
x[3].set_init(0.4)
x[4].set_init(0.2)
x[5].set_init(6)
x[6].set_init(6)
x[7].set_init(1)
x[8].set_init(0.5)
m.solve(verbose=True, options={'with': 'nlp', 'algorithm': 'activeset'})
print(m.get_problem_summary())
print(m.get_solution_summary())
if m.get_session_type() == 'CAS':
print(m.get_solution()[['var', 'value']])
return m.get_objective_value()
```
#### File: examples/client_side/refinery_optimization.py
```python
import sasoptpy as so
import pandas as pd
import numpy as np
def test(cas_conn, **kwargs):
m = so.Model(name='refinery_optimization', session=cas_conn)
crude_data = pd.DataFrame([
['crude1', 20000],
['crude2', 30000]
], columns=['crude', 'crude_ub']).set_index(['crude'])
arc_data = pd.DataFrame([
['source', 'crude1', 6],
['source', 'crude2', 6],
['crude1', 'light_naphtha', 0.1],
['crude1', 'medium_naphtha', 0.2],
['crude1', 'heavy_naphtha', 0.2],
['crude1', 'light_oil', 0.12],
['crude1', 'heavy_oil', 0.2],
['crude1', 'residuum', 0.13],
['crude2', 'light_naphtha', 0.15],
['crude2', 'medium_naphtha', 0.25],
['crude2', 'heavy_naphtha', 0.18],
['crude2', 'light_oil', 0.08],
['crude2', 'heavy_oil', 0.19],
['crude2', 'residuum', 0.12],
['light_naphtha', 'regular_petrol', np.nan],
['light_naphtha', 'premium_petrol', np.nan],
['medium_naphtha', 'regular_petrol', np.nan],
['medium_naphtha', 'premium_petrol', np.nan],
['heavy_naphtha', 'regular_petrol', np.nan],
['heavy_naphtha', 'premium_petrol', np.nan],
['light_naphtha', 'reformed_gasoline', 0.6],
['medium_naphtha', 'reformed_gasoline', 0.52],
['heavy_naphtha', 'reformed_gasoline', 0.45],
['light_oil', 'jet_fuel', np.nan],
['light_oil', 'fuel_oil', np.nan],
['heavy_oil', 'jet_fuel', np.nan],
['heavy_oil', 'fuel_oil', np.nan],
['light_oil', 'light_oil_cracked', 2],
['light_oil_cracked', 'cracked_oil', 0.68],
['light_oil_cracked', 'cracked_gasoline', 0.28],
['heavy_oil', 'heavy_oil_cracked', 2],
['heavy_oil_cracked', 'cracked_oil', 0.75],
['heavy_oil_cracked', 'cracked_gasoline', 0.2],
['cracked_oil', 'jet_fuel', np.nan],
['cracked_oil', 'fuel_oil', np.nan],
['reformed_gasoline', 'regular_petrol', np.nan],
['reformed_gasoline', 'premium_petrol', np.nan],
['cracked_gasoline', 'regular_petrol', np.nan],
['cracked_gasoline', 'premium_petrol', np.nan],
['residuum', 'lube_oil', 0.5],
['residuum', 'jet_fuel', np.nan],
['residuum', 'fuel_oil', np.nan],
], columns=['i', 'j', 'multiplier']).set_index(['i', 'j'])
octane_data = pd.DataFrame([
['light_naphtha', 90],
['medium_naphtha', 80],
['heavy_naphtha', 70],
['reformed_gasoline', 115],
['cracked_gasoline', 105],
], columns=['i', 'octane']).set_index(['i'])
petrol_data = pd.DataFrame([
['regular_petrol', 84],
['premium_petrol', 94],
], columns=['petrol', 'octane_lb']).set_index(['petrol'])
vapour_pressure_data = pd.DataFrame([
['light_oil', 1.0],
['heavy_oil', 0.6],
['cracked_oil', 1.5],
['residuum', 0.05],
], columns=['oil', 'vapour_pressure']).set_index(['oil'])
fuel_oil_ratio_data = pd.DataFrame([
['light_oil', 10],
['cracked_oil', 4],
['heavy_oil', 3],
['residuum', 1],
], columns=['oil', 'coefficient']).set_index(['oil'])
final_product_data = pd.DataFrame([
['premium_petrol', 700],
['regular_petrol', 600],
['jet_fuel', 400],
['fuel_oil', 350],
['lube_oil', 150],
], columns=['product', 'profit']).set_index(['product'])
vapour_pressure_ub = 1
crude_total_ub = 45000
naphtha_ub = 10000
cracked_oil_ub = 8000
lube_oil_lb = 500
lube_oil_ub = 1000
premium_ratio = 0.40
ARCS = arc_data.index.tolist()
arc_mult = arc_data['multiplier'].fillna(1)
FINAL_PRODUCTS = final_product_data.index.tolist()
final_product_data['profit'] = final_product_data['profit'] / 100
profit = final_product_data['profit']
ARCS = ARCS + [(i, 'sink') for i in FINAL_PRODUCTS]
flow = m.add_variables(ARCS, name='flow', lb=0)
NODES = np.unique([i for j in ARCS for i in j])
m.set_objective(so.expr_sum(profit[i] * flow[i, 'sink']
for i in FINAL_PRODUCTS
if (i, 'sink') in ARCS),
name='totalProfit', sense=so.MAX)
m.add_constraints((so.expr_sum(flow[a] for a in ARCS if a[0] == n) ==
so.expr_sum(arc_mult[a] * flow[a]
for a in ARCS if a[1] == n)
for n in NODES if n not in ['source', 'sink']),
name='flow_balance')
CRUDES = crude_data.index.tolist()
crudeDistilled = m.add_variables(CRUDES, name='crudesDistilled', lb=0)
crudeDistilled.set_bounds(ub=crude_data['crude_ub'])
m.add_constraints((flow[i, j] == crudeDistilled[i]
for (i, j) in ARCS if i in CRUDES), name='distillation')
OILS = ['light_oil', 'heavy_oil']
CRACKED_OILS = [i+'_cracked' for i in OILS]
oilCracked = m.add_variables(CRACKED_OILS, name='oilCracked', lb=0)
m.add_constraints((flow[i, j] == oilCracked[i] for (i, j) in ARCS
if i in CRACKED_OILS), name='cracking')
octane = octane_data['octane']
PETROLS = petrol_data.index.tolist()
octane_lb = petrol_data['octane_lb']
vapour_pressure = vapour_pressure_data['vapour_pressure']
m.add_constraints((so.expr_sum(octane[a[0]] * arc_mult[a] * flow[a]
for a in ARCS if a[1] == p)
>= octane_lb[p] *
so.expr_sum(arc_mult[a] * flow[a]
for a in ARCS if a[1] == p)
for p in PETROLS), name='blending_petrol')
m.add_constraint(so.expr_sum(vapour_pressure[a[0]] * arc_mult[a] * flow[a]
for a in ARCS if a[1] == 'jet_fuel') <=
vapour_pressure_ub *
so.expr_sum(arc_mult[a] * flow[a]
for a in ARCS if a[1] == 'jet_fuel'),
name='blending_jet_fuel')
fuel_oil_coefficient = fuel_oil_ratio_data['coefficient']
sum_fuel_oil_coefficient = sum(fuel_oil_coefficient)
m.add_constraints((sum_fuel_oil_coefficient * flow[a] ==
fuel_oil_coefficient[a[0]] * flow.sum('*', ['fuel_oil'])
for a in ARCS if a[1] == 'fuel_oil'),
name='blending_fuel_oil')
m.add_constraint(crudeDistilled.sum('*') <= crude_total_ub,
name='crude_total_ub')
m.add_constraint(so.expr_sum(flow[a] for a in ARCS
if a[0].find('naphtha') > -1 and
a[1] == 'reformed_gasoline')
                     <= naphtha_ub, name='naphtha_ub')
m.add_constraint(so.expr_sum(flow[a] for a in ARCS if a[1] ==
'cracked_oil') <=
cracked_oil_ub, name='cracked_oil_ub')
m.add_constraint(flow['lube_oil', 'sink'] == [lube_oil_lb, lube_oil_ub],
name='lube_oil_range')
m.add_constraint(flow.sum('premium_petrol', '*') >= premium_ratio *
flow.sum('regular_petrol', '*'), name='premium_ratio')
res = m.solve(**kwargs)
if res is not None:
print(so.get_solution_table(crudeDistilled))
print(so.get_solution_table(oilCracked))
print(so.get_solution_table(flow))
octane_sol = []
for p in PETROLS:
octane_sol.append(so.expr_sum(octane[a[0]] * arc_mult[a] *
flow[a].get_value() for a in ARCS
if a[1] == p) /
sum(arc_mult[a] * flow[a].get_value()
for a in ARCS if a[1] == p))
octane_sol = pd.Series(octane_sol, name='octane_sol', index=PETROLS)
print(so.get_solution_table(octane_sol, octane_lb))
print(so.get_solution_table(vapour_pressure))
vapour_pressure_sol = sum(vapour_pressure[a[0]] *
arc_mult[a] *
flow[a].get_value() for a in ARCS
if a[1] == 'jet_fuel') /\
sum(arc_mult[a] * flow[a].get_value() for a in ARCS
if a[1] == 'jet_fuel')
print('Vapour_pressure_sol: {:.4f}'.format(vapour_pressure_sol))
num_fuel_oil_ratio_sol = [arc_mult[a] * flow[a].get_value() /
sum(arc_mult[b] *
flow[b].get_value()
for b in ARCS if b[1] == 'fuel_oil')
for a in ARCS if a[1] == 'fuel_oil']
num_fuel_oil_ratio_sol = pd.Series(num_fuel_oil_ratio_sol,
name='num_fuel_oil_ratio_sol',
index=[a[0] for a in ARCS
if a[1] == 'fuel_oil'])
print(so.get_solution_table(fuel_oil_coefficient,
num_fuel_oil_ratio_sol))
return m.get_objective_value()
```
#### File: sasoptpy/abstract/parameter.py
```python
import sasoptpy
from sasoptpy.core import Expression
from sasoptpy.util.package_utils import _to_sas_string
class Parameter(Expression):
"""
Represents a problem input parameter
Parameters
----------
name : string
Name of the parameter
ptype : string, optional
Type of the parameter. Possible values are `sasoptpy.STR` and
`sasoptpy.NUM`
value : float, optional
Value of the parameter
init : float, optional
Initial value of the parameter
Examples
--------
>>> with so.Workspace('w') as w:
... p = so.Parameter(name='p', init=3)
... p.set_value(5)
...
<sasoptpy.abstract.statement.assignment.Assignment object at 0x7f7952e9bb38>
>>> print(so.to_optmodel(w))
proc optmodel;
num p init 3;
p = 5;
quit;
"""
@sasoptpy.class_containable
def __init__(self, name, ptype=None, value=None, init=None, **kwargs):
super().__init__(name=name)
if name is None:
name = sasoptpy.util.get_next_name()
if ptype is None:
if value is not None and isinstance(value, str):
ptype = sasoptpy.STR
elif init is not None and isinstance(init, str):
ptype = sasoptpy.STR
else:
ptype = sasoptpy.NUM
self._type = ptype
self._fix_value = value
self._init = init
self._parent = None
self._initialize_self_coef()
self._abstract = True
def set_parent(self, parent, key):
self._parent = parent
self._key = key
def _initialize_self_coef(self):
self.set_member(key=self._name, ref=self, val=1)
def set_init(self, value):
self._init = value
@sasoptpy.containable
def set_value(self, value):
self._fix_value = value
def get_value(self):
return self._fix_value
def _expr(self):
if self._parent:
return self._parent.get_element_name(self._key)
return self.get_name()
def _defn(self):
if self._parent:
return None
else:
s = '{} {}'.format(self._type, self.get_name())
if self._init:
#s += ' init {}'.format(_to_python_string(self._init))
s += ' init {}'.format(_to_sas_string(self._init))
elif self._fix_value is not None:
#s += ' = {}'.format(_to_python_string(self._fix_value))
s += ' = {}'.format(_to_sas_string(self._fix_value))
s += ';'
return s
def __str__(self):
return self._name
class ParameterValue(Expression):
"""
Represents a single value of a parameter
Parameters
----------
param : Parameter
Parameter that the value belongs to
key : tuple, optional
Key of the parameter value in the multi-index parameter
prefix : string
Prefix of the parameter
suffix : string
Suffix of the parameter, such as ``.lb`` and ``.ub``
Notes
-----
- Parameter values are mainly used in abstract expressions
"""
def __init__(self, param, key=None):
super().__init__()
self._param = param
tkey = sasoptpy.util.pack_to_tuple(key)
self._key = tkey
self._abstract = True
self.set_member(key=str(self), ref=self, val=1)
def __str__(self):
return \
sasoptpy.util.package_utils._insert_brackets(
self._param.get_name(), self._key)
def _expr(self):
return str(self)
```
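A compact usage sketch for `Parameter`, restating the workflow already shown in the class docstring (a `Workspace`, an `init` value, and a containable `set_value` call); no API beyond that docstring is assumed.

```python
# Sketch of declaring and updating a Parameter inside a Workspace,
# mirroring the Parameter docstring above.
import sasoptpy as so

with so.Workspace('w') as w:
    p = so.Parameter(name='p', init=3)   # rendered as: num p init 3;
    p.set_value(5)                       # rendered as: p = 5;

print(so.to_optmodel(w))
# proc optmodel;
#    num p init 3;
#    p = 5;
# quit;
```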
#### File: sasoptpy/abstract/set_iterator.py
```python
from collections import OrderedDict
import sasoptpy
from .condition import Conditional, Condition
class SetIterator(sasoptpy.Expression):
"""
Creates an iterator object for a given Set
Parameters
----------
initset : :class:`Set`
Set to be iterated on
name : string, optional
Name of the iterator
datatype : string, optional
Type of the iterator
Notes
-----
- :class:`abstract.SetIterator` objects are created automatically when
iterating over a :class:`abstract.Set` object
Examples
--------
>>> S = so.Set(name='S')
>>> for i in S:
... print(i.get_name(), type(i))
o19 <class 'sasoptpy.abstract.set_iterator.SetIterator'>
"""
def __init__(self, initset, name=None, datatype=None):
if name is None:
name = sasoptpy.util.get_next_name()
super().__init__(name=name)
self.set_member(key=name, ref=self, val=1.0)
self._set = initset
if datatype is None:
datatype = sasoptpy.NUM
self._type = datatype
self.sym = Conditional(self)
def get_set(self):
return self._set
def get_type(self):
return self._type
def __hash__(self):
return hash('{}'.format(id(self)))
def _get_for_expr(self):
return '{} in {}'.format(self._expr(),
sasoptpy.to_expression(self._set))
def _expr(self):
return self.get_name()
def __str__(self):
return self._name
def _defn(self):
return self._get_for_expr()
def __repr__(self):
s = 'sasoptpy.SetIterator({}, name=\'{}\')'.format(self._set, self.get_name())
return s
# def _cond_expr(self):
# return '<' + self._expr() + '>'
def __lt__(self, other):
return Condition(self, '<', other)
def __gt__(self, other):
return Condition(self, '>', other)
def __le__(self, other):
return Condition(self, '<=', other)
def __ge__(self, other):
return Condition(self, '>=', other)
def __eq__(self, other):
return Condition(self, 'EQ', other)
def __ne__(self, other):
return Condition(self, 'NE', other)
class SetIteratorGroup(OrderedDict, sasoptpy.Expression):
"""
Creates a group of set iterator objects for multi-dimensional sets
Parameters
----------
initset : :class:`Set`
Set to be iterated on
names : string, optional
Names of the iterators
datatype : string, optional
Types of the iterators
Examples
--------
>>> T = so.Set(name='T', settype=[so.STR, so.NUM])
>>> for j in T:
... print(j.get_name(), type(j))
... for k in j:
... print(k.get_name(), type(k))
o5 <class 'sasoptpy.abstract.set_iterator.SetIteratorGroup'>
o6 <class 'sasoptpy.abstract.set_iterator.SetIterator'>
o8 <class 'sasoptpy.abstract.set_iterator.SetIterator'>
"""
def __init__(self, initset, datatype=None, names=None):
super(SetIteratorGroup, self).__init__()
self._objorder = sasoptpy.util.get_creation_id()
self._name = sasoptpy.util.get_next_name()
self._set = initset
self._init_members(names, datatype)
self.sym = sasoptpy.abstract.Conditional(self)
def _init_members(self, names, datatype):
if names is not None:
for i, name in enumerate(names):
dt = datatype[i] if datatype is not None else None
it = SetIterator(None, name=name, datatype=dt)
self.append(it)
else:
for i in datatype:
it = SetIterator(None, datatype=i)
self.append(it)
def __getitem__(self, key):
try:
return OrderedDict.__getitem__(self, key)
except KeyError:
if isinstance(key, int):
return list(self.values())[key]
def get_name(self):
return self._name
def append(self, object):
name = object.get_name()
self[name] = object
def _get_for_expr(self):
#return '<{}> in {}'.format(self._expr(), self._set._name)
comb = '<' + ', '.join(str(i) for i in self.values()) + '>'
s = '{} in {}'.format(comb, sasoptpy.to_expression(self._set))
return s
def _expr(self):
return ', '.join(str(i) for i in self.values())
def _defn(self):
return self._get_for_expr()
def __iter__(self):
for i in self.values():
yield i
def __repr__(self):
return 'sasoptpy.SetIteratorGroup({}, datatype=[{}], names=[{}])'.format(
self._set,
', '.join('\'' + i.get_type() + '\'' for i in self.values()),
', '.join('\'' + i.get_name() + '\'' for i in self.values())
)
def __str__(self):
s = ', '.join(str(i) for i in self.values())
return '(' + s + ')'
def __hash__(self):
hashstr = ','.join(str(id(i)) for i in self.values())
return hash(hashstr)
```
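`SetIterator` objects are not constructed directly; they appear when an abstract `Set` is iterated, as the docstrings above show. A minimal sketch that uses the iterator to index a variable group inside a workspace follows; the variable and constraint names are illustrative.

```python
# Sketch: iterating an abstract Set yields SetIterator objects, which can
# index variable groups and end up in the generated OPTMODEL code.
import sasoptpy as so

with so.Workspace('w') as w:
    S = so.Set(name='S')                       # abstract index set
    x = so.VariableGroup(S, name='x', lb=0)    # variables indexed by S
    # Each `i` below is a SetIterator, not a concrete value.
    c = so.ConstraintGroup((x[i] <= 10 for i in S), name='c')

print(so.to_optmodel(w))
```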
#### File: sasoptpy/abstract/shadow.py
```python
import sasoptpy
from sasoptpy.core import Variable, Constraint
from abc import ABC, abstractmethod
class Shadow(ABC):
def __init__(self):
self._abstract = True
self._shadow = True
@abstractmethod
def _expr(self):
pass
class ShadowVariable(Shadow, Variable):
def __init__(self, name, **kwargs):
Variable.__init__(self, name=name, internal=True)
Shadow.__init__(self)
def _initialize_self_coef(self):
self.set_member(key=self._name + str(id(self)), ref=self, val=1)
def set_group_key(self, vg, key):
self._parent = vg
self._iterkey = key
if not sasoptpy.abstract.util.is_key_abstract(key):
self._abstract = False
def _expr(self):
keylist = sasoptpy.util.package_utils._to_iterator_expression(
self._iterkey)
key = ', '.join(keylist)
self_expr = '{}[{}]'.format(self._name, key)
return self_expr
class ShadowConstraint(Shadow, Constraint):
def __init__(self):
super(Constraint, self).__init__()
def _expr(self):
return self._name
```
#### File: abstract/statement/cofor_loop.py
```python
from .for_loop import ForLoopStatement
import sasoptpy
class CoForLoopStatement(ForLoopStatement):
def __init__(self, *args):
super().__init__(*args)
self.keyword = 'cofor'
@classmethod
def cofor_loop(cls, *args):
loop = CoForLoopStatement(*args)
return loop
```
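`CoForLoopStatement` only switches the keyword emitted by its `ForLoopStatement` parent from `for` to `cofor`, which lets OPTMODEL run the loop iterations concurrently. A hedged sketch is below; it assumes the helpers are exposed as `sasoptpy.actions.cofor_loop`, `fix`, and `solve`. If those helper names differ, the `cofor_loop` class method above returns the same object.

```python
# Hedged sketch of a concurrent loop; assumes cofor_loop, fix, and solve
# are available under sasoptpy.actions.
import sasoptpy as so
from sasoptpy.actions import cofor_loop, fix, solve

with so.Workspace('w') as w:
    x = so.Variable(name='x', lb=0)
    for i in cofor_loop(so.exp_range(1, 4)):
        fix(x, i)    # fix x=<iterator>; inside the loop body
        solve()      # each iteration may be solved concurrently

print(so.to_optmodel(w))   # the body appears as: cofor {...} do; ... end;
```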
#### File: abstract/statement/fix_unfix.py
```python
from .statement_base import Statement
import sasoptpy
class FixStatement(Statement):
def __init__(self, *elements):
super().__init__()
self.keyword = 'fix'
for i in elements:
self.append(i)
def append(self, element):
self.elements.append(element)
def _defn(self):
elems = []
for i in self.elements:
elems.append('{}={}'.format(
sasoptpy.to_expression(i[0]),
sasoptpy.to_expression(i[1])
))
s = self.keyword + ' ' + ' '.join(elems) + ';'
return s
@classmethod
def fix(cls, *items):
if len(items) == 2 and not any(isinstance(i, tuple) for i in items):
items = ((items[0], items[1]),)
fs = FixStatement(*items)
return fs
class UnfixStatement(Statement):
def __init__(self, *elements):
super().__init__()
self.keyword = 'unfix'
for i in elements:
self.append(i)
def append(self, element):
self.elements.append(element)
def _defn(self):
elems = []
for i in self.elements:
if isinstance(i, tuple):
elems.append('{}={}'.format(
sasoptpy.to_expression(i[0]),
sasoptpy.to_expression(i[1])
))
else:
elems.append(sasoptpy.to_expression(i))
s = self.keyword + ' ' + ' '.join(elems) + ';'
return s
@classmethod
def unfix(cls, *items):
fs = UnfixStatement(*items)
return fs
```
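`FixStatement` and `UnfixStatement` render `fix` and `unfix` statements and accept either a single variable/value pair or several `(variable, value)` tuples, as the `fix` class method shows. A hedged sketch, assuming the helpers are re-exported as `sasoptpy.actions.fix` and `sasoptpy.actions.unfix`:

```python
# Hedged sketch of fix/unfix inside a workspace; assumes the helpers are
# re-exported in sasoptpy.actions (the class methods above do the work).
import sasoptpy as so
from sasoptpy.actions import fix, unfix, solve

with so.Workspace('w') as w:
    x = so.Variable(name='x', lb=0)
    y = so.Variable(name='y', lb=0)
    fix(x, 1)              # single pair    -> fix x=1;
    fix((x, 1), (y, 2))    # multiple pairs -> fix x=1 y=2;
    solve()
    unfix(x)               # release        -> unfix x;

print(so.to_optmodel(w))
```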
#### File: abstract/statement/for_loop.py
```python
from contextlib import contextmanager
from .statement_base import Statement
import sasoptpy
class ForLoopStatement(Statement):
def __init__(self, *args):
super().__init__()
self.keyword = 'for'
self._sets = list(args)
@classmethod
def for_loop(cls, *args):
loop = ForLoopStatement(*args)
return loop
def __iter__(self):
iterators = []
for i in self._sets:
j = sasoptpy.abstract.SetIterator(i)
iterators.append(j)
self.iterators = iterators
self.original = sasoptpy.container
sasoptpy.container = self
if len(self.iterators) == 1:
yield self.iterators[0]
else:
yield iter(self.iterators)
sasoptpy.container = self.original
def append(self, element):
self.elements.append(element)
def _defn(self):
s = f'{self.keyword} '
s += '{'
loops = []
for i, it in enumerate(self.iterators):
loops.append('{} in {}'.format(
sasoptpy.util.package_utils._to_sas_string(it),
sasoptpy.util.package_utils._to_sas_string(self._sets[i])#._expr()
))
s += ', '.join(loops)
s += '} do;\n'
eldefs = []
for i in self.elements:
eldef = sasoptpy.to_definition(i)
if eldef is not None:
eldefs.append(eldef)
s += sasoptpy.util.addSpaces('\n'.join(eldefs), 3) + '\n'
s += 'end;'
return s
```
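`ForLoopStatement.__iter__` temporarily swaps `sasoptpy.container` so that any containable statement created in the Python loop body is appended to the loop instead of the enclosing workspace. A hedged sketch, assuming the helper is exposed as `sasoptpy.actions.for_loop` and reusing the containable `Parameter.set_value` shown earlier:

```python
# Hedged sketch: statements created inside the Python `for` body are
# captured by the loop because __iter__ swaps sasoptpy.container.
import sasoptpy as so
from sasoptpy.actions import for_loop

with so.Workspace('w') as w:
    p = so.Parameter(name='p', init=0)
    for i in for_loop(so.exp_range(1, 4)):
        p.set_value(p + i)   # recorded as an assignment inside the loop

print(so.to_optmodel(w))
# The loop is rendered roughly as: for {o1 in 1..3} do; p = p + o1; end;
```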
#### File: abstract/statement/objective.py
```python
from .statement_base import Statement
import sasoptpy
class ObjectiveStatement(Statement):
def __init__(self, expression, **kwargs):
super().__init__()
self.model = kwargs.get('model', None)
self.name = kwargs.get('name')
self.expr = expression
self.sense = kwargs.get('sense')
def append(self):
pass
def _defn(self):
return '{} {} = {};'.format(self.sense, self.name, self.expr._expr())
@classmethod
def set_objective(cls, expression, name, sense):
st = ObjectiveStatement(expression, name=name, sense=sense)
return st
```
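`ObjectiveStatement` simply renders `min name = expression;` or `max name = expression;`. A small sketch that builds one directly through the `set_objective` class method above; the spacing of the printed expression may differ slightly from the comment.

```python
# Sketch: building an objective statement directly via the class method above.
import sasoptpy as so
from sasoptpy.abstract.statement.objective import ObjectiveStatement

x = so.Variable(name='x')
st = ObjectiveStatement.set_objective(2 * x + 1, name='obj', sense='min')
print(st._defn())   # roughly: min obj = 2 * x + 1;
```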
#### File: sasoptpy/core/model.py
```python
from collections import OrderedDict
import inspect
from math import inf
from types import GeneratorType
import warnings
import numpy as np
import pandas as pd
import sasoptpy
import sasoptpy.util
from sasoptpy.core import (Expression, Objective, Variable, VariableGroup,
Constraint, ConstraintGroup)
class Model:
"""
Creates an optimization model
Parameters
----------
name : string
Name of the model
session : :class:`swat.cas.connection.CAS` or \
:class:`saspy.SASsession`, optional
CAS or SAS Session object
Examples
--------
>>> from swat import CAS
>>> import sasoptpy as so
>>> s = CAS('cas.server.address', port=12345)
>>> m = so.Model(name='my_model', session=s)
NOTE: Initialized model my_model
>>> mip = so.Model(name='mip')
NOTE: Initialized model mip
"""
@sasoptpy.class_containable
def __init__(self, name=None, session=None):
self._name = name
self._objorder = sasoptpy.util.get_creation_id()
self._session = session
self._members = {}
self._variableDict = OrderedDict()
self._constraintDict = OrderedDict()
self._objectiveDict = OrderedDict()
self._setDict = OrderedDict()
self._parameterDict = OrderedDict()
self._impvarDict = OrderedDict()
self._statementDict = OrderedDict()
self._postSolveDict = OrderedDict()
self._soltime = 0
self._objval = None
self._status = ''
self._castablename = None
self._mpsmode = 0
self._problemSummary = None
self._solutionSummary = None
self._primalSolution = None
self._dualSolution = None
self._tunerResults = None
self._milp_opts = {}
self._lp_opts = {}
self.response = None
self._droppedCons = OrderedDict()
self._droppedVars = OrderedDict()
self._objective = Objective(0, name=name + '_obj', default=True,
internal=True)
print('NOTE: Initialized model {}.'.format(name))
def __eq__(self, other):
if not isinstance(other, sasoptpy.Model):
warnings.warn('Cannot compare Model object with {}'.
format(type(other)), RuntimeWarning, stacklevel=2)
return False
return super().__eq__(other)
def get_name(self):
"""
Returns model name
"""
return self._name
def add(self, object):
self.include(object)
def add_variable(self, name, vartype=None,
lb=None, ub=None, init=None):
"""
Adds a new variable to the model
New variables can be created via this method or existing variables
can be added to the model.
Parameters
----------
name : string
Name of the variable to be created
vartype : string, optional
Type of the variable, either `sasoptpy.BIN`, `sasoptpy.INT` or
`sasoptpy.CONT`
lb : float, optional
Lower bound of the variable
ub : float, optional
Upper bound of the variable
init : float, optional
Initial value of the variable
Returns
-------
var : :class:`Variable`
Variable that is added to the model
Examples
--------
Adding a variable on the fly
>>> m = so.Model(name='demo')
>>> x = m.add_variable(name='x', vartype=so.INT, ub=10, init=2)
>>> print(repr(x))
NOTE: Initialized model demo
sasoptpy.Variable(name='x', lb=0, ub=10, init=2, vartype='INT')
Adding an existing variable to a model
>>> y = so.Variable(name='y', vartype=so.BIN)
>>> m = so.Model(name='demo')
>>> m.include(y)
Notes
-----
* `name` is a mandatory field for this method.
See also
--------
:class:`Variable`, :func:`Model.include`
"""
var = Variable(name, vartype, lb, ub, init)
self.include(var)
return var
def add_variables(self, *argv, name,
vartype=None,
lb=None, ub=None, init=None):
"""
Adds a group of variables to the model
Parameters
----------
argv : list, dict, :class:`pandas.Index`
Loop index for variable group
name : string
Name of the variables
vartype : string, optional
Type of variables, `BIN`, `INT`, or `CONT`
lb : list, dict, :class:`pandas.Series`
Lower bounds of variables
ub : list, dict, :class:`pandas.Series`
Upper bounds of variables
init : list, dict, :class:`pandas.Series`
Initial values of variables
See also
--------
:class:`VariableGroup`, :meth:`Model.include`
Examples
--------
>>> production = m.add_variables(PERIODS, vartype=so.INT,
name='production', lb=min_production)
>>> print(production)
>>> print(repr(production))
Variable Group (production) [
[Period1: production['Period1',]]
[Period2: production['Period2',]]
[Period3: production['Period3',]]
]
sasoptpy.VariableGroup(['Period1', 'Period2', 'Period3'],
name='production')
"""
vg = VariableGroup(*argv, name=name, vartype=vartype, lb=lb, ub=ub,
init=init)
self.include(vg)
return vg
@sasoptpy.containable
def add_constraint(self, c, name):
"""
Adds a single constraint to the model
Parameters
----------
c : :class:`Constraint`
Constraint to be added to the model
name : string
Name of the constraint
Returns
-------
c : :class:`Constraint`
Reference to the constraint
Examples
--------
>>> x = m.add_variable(name='x', vartype=so.INT, lb=0, ub=5)
>>> y = m.add_variables(3, name='y', vartype=so.CONT, lb=0, ub=10)
>>> c1 = m.add_constraint(x + y[0] >= 3, name='c1')
>>> print(c1)
x + y[0] >= 3
>>> c2 = m.add_constraint(x - y[2] == [4, 10], name='c2')
>>> print(c2)
- y[2] + x = [4, 10]
See also
--------
:class:`Constraint`, :meth:`Model.include`
"""
if ((c._direction == 'L' and c._linCoef['CONST']['val'] == -inf) or
(c._direction == 'G' and c._linCoef['CONST']['val'] == inf)):
raise ValueError("Invalid constant value for the constraint type")
if c._name is None:
c.set_name(name)
c.set_permanent()
self.include(c)
return c
def add_constraints(self, argv, name):
"""
Adds a set of constraints to the model
Parameters
----------
argv : Generator-type object
List of constraints as a generator-type Python object
name : string
Name for the constraint group and individual constraint prefix
Returns
-------
cg : :class:`ConstraintGroup`
Reference to the ConstraintGroup
Examples
--------
>>> x = m.add_variable(name='x', vartype=so.INT, lb=0, ub=5)
>>> y = m.add_variables(3, name='y', vartype=so.CONT, lb=0, ub=10)
>>> c = m.add_constraints((x + 2 * y[i] >= 2 for i in [0, 1, 2]),
name='c')
>>> print(c)
Constraint Group (c) [
[0: 2.0 * y[0] + x >= 2]
[1: 2.0 * y[1] + x >= 2]
[2: 2.0 * y[2] + x >= 2]
]
>>> t = m.add_variables(3, 4, name='t')
>>> ct = m.add_constraints((t[i, j] <= x for i in range(3)
for j in range(4)), name='ct')
>>> print(ct)
Constraint Group (ct) [
[(0, 0): - x + t[0, 0] <= 0]
[(0, 1): t[0, 1] - x <= 0]
[(0, 2): - x + t[0, 2] <= 0]
[(0, 3): t[0, 3] - x <= 0]
[(1, 0): t[1, 0] - x <= 0]
[(1, 1): t[1, 1] - x <= 0]
[(1, 2): - x + t[1, 2] <= 0]
[(1, 3): - x + t[1, 3] <= 0]
[(2, 0): - x + t[2, 0] <= 0]
[(2, 1): t[2, 1] - x <= 0]
[(2, 2): t[2, 2] - x <= 0]
[(2, 3): t[2, 3] - x <= 0]
]
See also
--------
:class:`ConstraintGroup`, :meth:`Model.include`
"""
if type(argv) == list or type(argv) == GeneratorType:
cg = ConstraintGroup(argv, name=name)
self.include(cg)
return cg
elif sasoptpy.core.util.is_constraint(argv):
warnings.warn(
'Use add_constraint method for adding single constraints',
UserWarning)
c = self.add_constraint(argv, name=name)
return c
def add_set(self, name, init=None, value=None, settype=None):
"""
Adds a set to the model
Parameters
----------
name : string, optional
Name of the set
init : :class:`Set`, optional
Initial value of the set
value : list, float, optional
Exact value of the set
settype : list, optional
Types of the set as a list
            The list can have one or more `num` (for float) and `str` (for string)
values. You can use `sasoptpy.NUM` and `sasoptpy.STR` for floats and
strings, respectively.
Examples
--------
>>> I = m.add_set(name='I')
>>> print(I._defn())
set I;
>>> J = m.add_set(name='J', settype=['str'])
>>> print(J._defn())
set <str> J;
>>> N = m.add_parameter(name='N', init=4)
>>> K = m.add_set(name='K', init=so.exp_range(1, N))
>>> print(K._defn())
set K = 1..N;
>>> m.add_set(name='W', settype=[so.STR, so.NUM])
>>> print(W._defn())
set <str, num> W;
"""
new_set = sasoptpy.abstract.Set(name, init=init, value=value,
settype=settype)
self.include(new_set)
return new_set
def add_parameter(self, *argv, name, init=None, value=None, p_type=None):
"""
Adds a :class:`abstract.Parameter` object to the model
Parameters
----------
argv : :class:`Set`, optional
Index or indices of the parameter
name : string
Name of the parameter
init : float or expression, optional
Initial value of the parameter
p_type : string, optional
Type of the parameter, 'num' for floats or 'str' for strings
Examples
--------
>>> I = m.add_set(name='I')
>>> a = m.add_parameter(I, name='a', init=5)
>>> print(a._defn())
num a {I} init 5 ;
>>> I = m.add_set(name='I')
>>> J = m.add_set(name='J')
>>> p = m.add_parameter(I, J, name='p')
>>> print(p._defn())
num p {{I,J}};
Returns
-------
p : :class:`abstract.Parameter` or :class:`abstract.ParameterGroup`
A single parameter or a parameter group
"""
if len(argv) == 0:
p = sasoptpy.abstract.Parameter(
name, init=init, value=value, ptype=p_type)
self.include(p)
return p
else:
keylist = list(argv)
p = sasoptpy.abstract.ParameterGroup(keylist, name=name, init=init,
value=value, ptype=p_type)
self.include(p)
return p
def add_implicit_variable(self, argv=None, name=None):
"""
Adds an implicit variable to the model
Parameters
----------
argv : Generator-type object
Generator object where each item is an entry
name : string
Name of the implicit variable
Examples
--------
>>> x = m.add_variables(range(5), name='x')
>>> y = m.add_implicit_variable((
>>> x[i] + 2 * x[i+1] for i in range(4)), name='y')
>>> print(y[2])
x[2] + 2 * x[3]
>>> I = m.add_set(name='I')
>>> z = m.add_implicit_variable((x[i] * 2 + 2 for i in I), name='z')
>>> print(z._defn())
impvar z {i_1 in I} = 2 * x[i_1] + 2;
Notes
-----
        - Depending on whether the implicit variables are generated by a regular
          or an abstract expression, they might appear in the generated OPTMODEL code.
"""
if name is None:
name = sasoptpy.util.get_next_name()
iv = sasoptpy.abstract.ImplicitVar(argv=argv, name=name)
self.include(iv)
return iv
def add_statement(self, statement, after_solve=None):
"""
Adds a PROC OPTMODEL statement to the model
Parameters
----------
statement : :class:`Expression` or string
Statement object
after_solve : boolean
Switch for appending the statement after the problem solution
Examples
--------
>>> I = m.add_set(name='I')
>>> x = m.add_variables(I, name='x', vartype=so.INT)
>>> a = m.add_parameter(I, name='a')
>>> c = m.add_constraints((x[i] <= 2 * a[i] for i in I), name='c')
>>> m.add_statement('print x;', after_solve=True)
>>> print(m.to_optmodel())
proc optmodel;
min m_obj = 0;
set I;
var x {I} integer >= 0;
num a {I};
con c {i_1 in I} : x[i_1] - 2.0 * a[i_1] <= 0;
solve;
print _var_.name _var_.lb _var_.ub _var_ _var_.rc;
print _con_.name _con_.body _con_.dual;
print x;
quit;
Notes
-----
- If the statement string includes 'print', then the statement is
automatically placed after the solve even if `after_solve` is `False`.
"""
if isinstance(statement, sasoptpy.abstract.Statement):
self._save_statement(statement, after_solve)
elif isinstance(statement, str):
s = sasoptpy.abstract.LiteralStatement(statement)
self._save_statement(s, after_solve)
def add_postsolve_statement(self, statement):
if isinstance(statement, sasoptpy.abstract.Statement):
self._save_statement(statement, after_solve=True)
elif isinstance(statement, str):
s = sasoptpy.abstract.LiteralStatement(statement)
self._save_statement(s, after_solve=True)
def _save_statement(self, st, after_solve=None):
if after_solve is None or after_solve is False:
self._statementDict[id(st)] = st
else:
self._postSolveDict[id(st)] = st
def drop_variable(self, variable):
"""
Drops a variable from the model
Parameters
----------
variable : :class:`Variable`
The variable to be dropped from the model
Examples
--------
>>> x = m.add_variable(name='x')
>>> y = m.add_variable(name='y')
>>> print(m.get_variable('x'))
x
>>> m.drop_variable(x)
>>> print(m.get_variable('x'))
None
See also
--------
:func:`Model.drop_variables`
:func:`Model.drop_constraint`
:func:`Model.drop_constraints`
"""
vname = variable.get_name()
if self._variableDict.pop(vname, None) is None:
self._droppedVars[vname] = True
def restore_variable(self, variable):
vname = variable.get_name()
if variable.get_parent_reference()[0] is not None:
self._droppedVars.pop(vname, None)
else:
self.include(variable)
@sasoptpy.containable
def drop_constraint(self, constraint):
"""
Drops a constraint from the model
Parameters
----------
constraint : :class:`Constraint`
The constraint to be dropped from the model
Examples
--------
>>> c1 = m.add_constraint(2 * x + y <= 15, name='c1')
>>> print(m.get_constraint('c1'))
2 * x + y <= 15
>>> m.drop_constraint(c1)
>>> print(m.get_constraint('c1'))
None
See also
--------
:func:`Model.drop_constraints`
:func:`Model.drop_variable`
:func:`Model.drop_variables`
"""
cname = constraint.get_name()
if self._constraintDict.pop(cname, None) is None:
self._droppedCons[constraint._get_optmodel_name()] = True
def restore_constraint(self, constraint):
cname = constraint.get_name()
if constraint.get_parent_reference()[0] is not None:
self._droppedCons.pop(constraint._get_optmodel_name(), None)
else:
self.include(constraint)
def drop_variables(self, *variables):
"""
Drops a variable group from the model
Parameters
----------
variables : :class:`VariableGroup`
The variable group to be dropped from the model
Examples
--------
>>> x = m.add_variables(3, name='x')
>>> print(m.get_variables())
[sasoptpy.Variable(name='x_0', vartype='CONT'),
sasoptpy.Variable(name='x_1', vartype='CONT')]
>>> m.drop_variables(x)
>>> print(m.get_variables())
[]
See also
--------
:func:`Model.drop_variable`
:func:`Model.drop_constraint`
:func:`Model.drop_constraints`
"""
for v in variables:
self.drop_variable(v)
def drop_constraints(self, *constraints):
"""
Drops a constraint group from the model
Parameters
----------
constraints : :class:`Constraint` or :class:`ConstraintGroup`
Arbitrary number of constraints to be dropped
Examples
--------
>>> c1 = m.add_constraints((x[i] + y <= 15 for i in [0, 1]), name='c1')
>>> print(m.get_constraints())
[sasoptpy.Constraint( x[0] + y <= 15, name='c1_0'),
sasoptpy.Constraint( x[1] + y <= 15, name='c1_1')]
>>> m.drop_constraints(c1)
>>> print(m.get_constraints())
[]
See also
--------
:func:`Model.drop_constraints`
:func:`Model.drop_variable`
:func:`Model.drop_variables`
"""
for c in constraints:
self.drop_constraint(c)
def include(self, *argv):
"""
Adds existing variables and constraints to a model
Parameters
----------
argv :
Objects to be included in the model
Notes
-----
* Valid object types for `argv` parameter:
- :class:`Model`
Including a model causes all variables and constraints inside the
original model to be included.
- :class:`Variable`
- :class:`Constraint`
- :class:`VariableGroup`
- :class:`ConstraintGroup`
- :class:`Objective`
- :class:`Set`
- :class:`Parameter`
- :class:`ParameterGroup`
- :class:`Statement` and all subclasses
- :class:`ImplicitVar`
Examples
--------
Adding an existing variable
>>> x = so.Variable(name='x', vartype=so.CONT)
>>> m.include(x)
Adding an existing constraint
>>> c1 = so.Constraint(x + y <= 5, name='c1')
>>> m.include(c1)
Adding an existing set of variables
>>> z = so.VariableGroup(3, 5, name='z', ub=10)
>>> m.include(z)
Adding an existing set of constraints
>>> c2 = so.ConstraintGroup((x + 2 * z[i, j] >= 2 for i in range(3)
for j in range(5)), name='c2')
>>> m.include(c2)
Adding an existing model (including all of its elements)
>>> new_model = so.Model(name='new_model')
>>> new_model.include(m)
"""
include_methods = {
Variable: self._include_variable,
VariableGroup: self._include_vargroup,
Constraint: self._include_constraint,
ConstraintGroup: self._include_congroup,
Objective: self._set_objective,
sasoptpy.Set: self._include_set,
sasoptpy.Parameter: self._include_parameter,
sasoptpy.ParameterGroup: self._include_parameter_group,
sasoptpy.abstract.LiteralStatement: self._include_statement,
sasoptpy.ImplicitVar: self._include_expdict,
sasoptpy.abstract.ReadDataStatement: self._include_statement,
sasoptpy.abstract.DropStatement: self._include_statement,
list: self.include,
Model: self._include_model
}
for c in argv:
meth = include_methods.get(type(c))
if any(isinstance(c, i) for i in [Variable, VariableGroup, Constraint, ConstraintGroup, Objective]):
if sasoptpy.container is not None:
if c._objorder > self._objorder:
raise ReferenceError('Object {} should be defined before Model {} inside a Workspace'.format(
c._expr(), self.get_name()
))
if meth is not None:
meth(c)
def _include_variable(self, var):
vname = var.get_name()
if vname in self._variableDict:
warnings.warn(f"Variable name {vname} exists in the model."
"New declaration will override the existing value.",
UserWarning)
self._variableDict[vname] = var
def _include_vargroup(self, vg):
self._variableDict[vg.get_name()] = vg
def _include_constraint(self, con):
if sasoptpy.core.util.has_parent(con):
return
name = con.get_name()
if con.get_name() in self._constraintDict:
warnings.warn(f"Constraint name {name} exists in the model."
"New declaration will override the existing value.",
UserWarning)
self._constraintDict[con.get_name()] = con
def _include_congroup(self, cg):
self._constraintDict[cg.get_name()] = cg
def _set_objective(self, ob):
self._objective = ob
def _include_set(self, st):
self._setDict[id(st)] = st
def _include_parameter(self, p):
self._parameterDict[p.get_name()] = p
def _include_parameter_group(self, pg):
self._parameterDict[pg.get_name()] = pg
def _include_statement(self, os):
self._save_statement(os)
def _include_expdict(self, ed):
self._impvarDict[ed.get_name()] = ed
def _include_model(self, model):
self._setDict.update(model._setDict)
self._parameterDict.update(model._parameterDict)
self._statementDict.update(model._statementDict)
self._postSolveDict.update(model._postSolveDict)
self._impvarDict.update(model._impvarDict)
for s in model.get_grouped_variables().values():
self._include_variable(s)
for s in model.get_grouped_constraints().values():
self._include_constraint(s)
self._objective = model._objective
def drop(self, obj):
if isinstance(obj, sasoptpy.VariableGroup):
self.drop_variables(obj)
elif isinstance(obj, sasoptpy.Variable):
self.drop_variable(obj)
elif isinstance(obj, sasoptpy.ConstraintGroup):
self.drop_constraints(obj)
elif isinstance(obj, sasoptpy.Constraint):
self.drop_constraint(obj)
elif isinstance(obj, sasoptpy.Set):
self._setDict.pop(id(obj), None)
elif isinstance(obj, sasoptpy.Parameter) or\
isinstance(obj, sasoptpy.ParameterGroup):
self._parameterDict.pop(obj.get_name(), None)
elif isinstance(obj, sasoptpy.abstract.Statement):
self._statementDict.pop(id(obj), None)
def set_objective(self, expression, name, sense=None):
"""
Specifies the objective function for the model
Parameters
----------
expression : :class:`Expression`
The objective function as an Expression
name : string
Name of the objective value
sense : string, optional
Objective value direction, `sasoptpy.MIN` or `sasoptpy.MAX`
Returns
-------
objective : :class:`Expression`
Objective function as an :class:`Expression` object
Examples
--------
>>> profit = so.Expression(5 * sales - 2 * material, name='profit')
>>> m.set_objective(profit, so.MAX)
>>> print(m.get_objective())
- 2.0 * material + 5.0 * sales
>>> m.set_objective(4 * x - 5 * y, name='obj')
>>> print(repr(m.get_objective()))
sasoptpy.Expression(exp = 4.0 * x - 5.0 * y , name='obj')
>>> f1 = m.set_objective(2 * x + y, sense=so.MIN, name='f1')
>>> f2 = m.append_objective( (x - y) ** 2, sense=so.MIN, name='f2')
>>> print(m.to_optmodel(options={'with': 'blackbox', 'obj': (f1, f2)}))
proc optmodel;
var x;
var y;
min f1 = 2 * x + y;
min f2 = (x - y) ^ (2);
solve with blackbox obj (f1 f2);
print _var_.name _var_.lb _var_.ub _var_ _var_.rc;
print _con_.name _con_.body _con_.dual;
quit;
Notes
-----
- Default objective sense is minimization `MIN`.
- This method replaces the existing objective of the model.
When working with multiple objectives, use the
:meth:`Model.append_objective` method.
See also
--------
:meth:`Model.append_objective`
"""
obj = Objective(expression, sense=sense, name=name)
self._objective = obj
return self._objective
def append_objective(self, expression, name, sense=None):
"""
Appends a new objective to the model
Parameters
----------
expression : :class:`Expression`
The objective function as an Expression
name : string
Name of the objective value
sense : string, optional
Objective value direction, `sasoptpy.MIN` or `sasoptpy.MAX`
Returns
-------
objective : :class:`Expression`
Objective function as an :class:`Expression` object
Examples
--------
>>> f1 = m.set_objective(2 * x + y, sense=so.MIN, name='f1')
>>> f2 = m.append_objective( (x - y) ** 2, sense=so.MIN, name='f2')
>>> print(m.to_optmodel(options={'with': 'blackbox', 'obj': (f1, f2)}))
proc optmodel;
var x;
var y;
min f1 = 2 * x + y;
min f2 = (x - y) ^ (2);
solve with blackbox obj (f1 f2);
print _var_.name _var_.lb _var_.ub _var_ _var_.rc;
print _con_.name _con_.body _con_.dual;
quit;
Notes
-----
- Default objective sense is minimization `MIN`.
See also
--------
:meth:`Model.set_objective`
"""
obj = Objective(expression, name=name, sense=sense)
self._objectiveDict[id(obj)] = obj
return obj
def get_objective(self):
"""
Returns the objective function as an :class:`Expression` object
Returns
-------
objective : :class:`Expression`
Objective function
Examples
--------
>>> m.set_objective(4 * x - 5 * y, name='obj')
>>> print(repr(m.get_objective()))
sasoptpy.Expression(exp = 4.0 * x - 5.0 * y , name='obj')
"""
return self._objective
def get_all_objectives(self):
"""
Returns a list of objectives in the model
Returns
-------
all_objectives : list
A list of :class:`Objective` objects
Examples
--------
>>> m = so.Model(name='test_set_get_objective')
>>> x = m.add_variable(name='x')
>>> obj1 = m.set_objective(2 * x, sense=so.MIN, name='obj1')
>>> obj2 = m.set_objective(5 * x, sense=so.MIN, name='obj2') # Overrides obj1
>>> obj3 = m.append_objective(10 * x, sense=so.MIN, name='obj3')
>>> assertEqual(m.get_all_objectives(), [obj2, obj3])
True
"""
all_objs = list(self._objectiveDict.values())
all_objs.append(self._objective)
return sorted(all_objs, key=lambda i: i._objorder)
def get_objective_value(self):
"""
Returns the optimal objective value
Returns
-------
objective_value : float
Optimal objective value at current solution
Examples
--------
>>> m.solve()
>>> print(m.get_objective_value())
42.0
Notes
-----
- This method should be used for getting the objective value after
solve.
- In order to get the current value of the objective after changing
variable values, you can use :code:`m.get_objective().get_value()`.
"""
if self._objval is not None:
return sasoptpy.util.get_in_digit_format(self._objval)
else:
return self.get_objective().get_value()
def set_objective_value(self, value):
self._objval = value
def get_constraint(self, name):
"""
Returns the reference to a constraint in the model
Parameters
----------
name : string
Name of the constraint requested
Returns
-------
constraint : :class:`Constraint`
Requested object
Examples
--------
>>> m.add_constraint(2 * x + y <= 15, name='c1')
>>> print(m.get_constraint('c1'))
2.0 * x + y <= 15
"""
# return self._constraintDict.get(name, None)
constraints = self.get_constraints_dict()
safe_name = name.replace('\'', '')
if name in constraints:
return constraints[name]
elif safe_name in constraints:
return constraints[safe_name]
elif '[' in name:
first_part = name.split('[')[0]
if constraints.get(first_part, None) is not None:
vg = constraints.get(first_part)
return vg.get_member_by_name(name)
else:
return None
def loop_constraints(self):
for i in self._constraintDict.values():
if isinstance(i, sasoptpy.Constraint):
yield i
elif isinstance(i, sasoptpy.ConstraintGroup):
for j in i.get_members().values():
yield j
def _get_all_constraints(self):
all_cons = OrderedDict()
for c in self._constraintDict.values():
if isinstance(c, sasoptpy.Constraint):
all_cons[c.get_name()] = c
elif isinstance(c, sasoptpy.ConstraintGroup):
for sc in c.get_members().values():
all_cons[sc.get_name()] = sc
return all_cons
def get_constraints(self):
"""
Returns a list of constraints in the model
Returns
-------
constraints : list
A list of Constraint objects
Examples
--------
>>> m.add_constraint(x[0] + y <= 15, name='c1')
>>> m.add_constraints((2 * x[i] - y >= 1 for i in [0, 1]), name='c2')
>>> print(m.get_constraints())
[sasoptpy.Constraint( x[0] + y <= 15, name='c1'),
sasoptpy.Constraint( 2.0 * x[0] - y >= 1, name='c2_0'),
sasoptpy.Constraint( 2.0 * x[1] - y >= 1, name='c2_1')]
"""
return list(self.loop_constraints())
def get_constraints_dict(self):
return self._constraintDict
def get_grouped_constraints(self):
"""
Returns an ordered dictionary of constraints
Returns
-------
grouped_cons : OrderedDict
Dictionary of constraints and constraint groups in the model
Examples
--------
>>> m1 = so.Model(name='test_copy_model_1')
>>> x = m1.add_variable(name='x')
>>> y = m1.add_variables(2, name='y')
>>> c1 = m1.add_constraint(x + y[0] >= 2, name='c1')
>>> c2 = m1.add_constraints((x - y[i] <= 10 for i in range(2)), name='c2')
>>> cons = OrderedDict([('c1', c1), ('c2', c2)])
>>> self.assertEqual(m1.get_grouped_constraints(), cons)
True
See also
--------
:meth:`Model.get_constraints`, :meth:`Model.get_grouped_variables`
"""
return self.get_constraints_dict()
def get_variable(self, name):
"""
Returns the reference to a variable in the model
Parameters
----------
name : string
Name or key of the variable requested
Returns
-------
variable : :class:`Variable`
Reference to the variable
Examples
--------
>>> m.add_variable(name='x', vartype=so.INT, lb=3, ub=5)
>>> var1 = m.get_variable('x')
>>> print(repr(var1))
sasoptpy.Variable(name='x', lb=3, ub=5, vartype='INT')
"""
variables = self.get_variable_dict()
safe_name = name.replace('\'', '')
if name in variables:
return variables[name]
elif safe_name in variables:
return variables[safe_name]
elif '[' in name:
first_part = name.split('[')[0]
if variables.get(first_part, None) is not None:
vg = variables.get(first_part)
return vg.get_member_by_name(name)
else:
return None
def loop_variables(self):
for i in self._variableDict.values():
if isinstance(i, sasoptpy.Variable):
yield i
elif isinstance(i, sasoptpy.VariableGroup):
for j in i.get_members().values():
yield j
def _get_all_variables(self):
all_vars = OrderedDict()
for v in self._variableDict.values():
if isinstance(v, sasoptpy.Variable):
all_vars[v.get_name()] = v
elif isinstance(v, sasoptpy.VariableGroup):
for sc in v.get_members().values():
all_vars[sc.get_name()] = sc
return all_vars
def get_variables(self):
"""
Returns a list of variables
Returns
-------
variables : list
List of variables in the model
Examples
--------
>>> x = m.add_variables(2, name='x')
>>> y = m.add_variable(name='y')
>>> print(m.get_variables())
[sasoptpy.Variable(name='x_0', vartype='CONT'),
sasoptpy.Variable(name='x_1', vartype='CONT'),
sasoptpy.Variable(name='y', vartype='CONT')]
"""
return list(self.loop_variables())
def get_variable_dict(self):
return self._variableDict
def get_grouped_variables(self):
"""
Returns an ordered dictionary of variables
Returns
-------
grouped_vars : OrderedDict
Dictionary of variables and variable groups in the model
Examples
--------
>>> m1 = so.Model(name='test_copy_model_1')
>>> x = m1.add_variable(name='x')
>>> y = m1.add_variables(2, name='y')
>>> vars = OrderedDict([('x', x), ('y', y)])
>>> self.assertEqual(m1.get_grouped_variables(), vars)
True
See also
--------
:meth:`Model.get_variables`, :meth:`Model.get_grouped_constraints`
"""
return self.get_variable_dict()
def get_variable_coef(self, var):
"""
Returns the objective value coefficient of a variable
Parameters
----------
var : :class:`Variable` or string
Variable whose objective value is requested. It can be either the
variable object itself, or the name of the variable.
Returns
-------
coef : float
Objective value coefficient of the given variable
Examples
--------
>>> x = m.add_variable(name='x')
>>> y = m.add_variable(name='y')
>>> m.set_objective(4 * x - 5 * y, name='obj', sense=so.MAX)
>>> print(m.get_variable_coef(x))
4.0
>>> print(m.get_variable_coef('y'))
-5.0
"""
if isinstance(var, sasoptpy.core.Variable):
varname = var.get_name()
else:
varname = var
if varname in self._objective._linCoef:
return self._objective._linCoef[varname]['val']
else:
if self.get_objective()._is_linear():
if varname in self._variableDict:
return 0
else:
raise RuntimeError('Variable is not a member of the model')
else:
warnings.warn('Objective is not linear', RuntimeWarning)
def set_variable_coef(self, var, coef):
varname = var.get_name()
if varname in self._objective._linCoef:
self._objective._linCoef[varname]['val'] = coef
else:
self._objective += coef*var
def set_variable_value(self, name, value):
variable = self.get_variable(name)
if variable is not None:
variable.set_value(value)
else:
self._set_abstract_values(name, value)
def set_dual_value(self, name, value):
variable = self.get_variable(name)
if variable is not None:
variable.set_dual(value)
def get_variable_value(self, var):
"""
Returns the value of a variable
Parameters
----------
var : :class:`Variable` or string
Variable reference
Notes
-----
- It is possible to get a variable's value by using the
:func:`Variable.get_value` method, as long as the variable is not
abstract.
        - This method is a wrapper around :func:`Variable.get_value`; for
          abstract variables it falls back to the model's primal solution.
"""
if sasoptpy.core.util.is_variable(var):
varname = var.get_name()
else:
varname = var
if varname in self._variableDict:
return self._variableDict[varname].get_value()
else:
return self._get_variable_solution(varname)
def _get_variable_solution(self, name):
if self._primalSolution is not None:
for row in self._primalSolution.itertuples():
if row.var == name:
return row.value
else:
raise RuntimeError('No primal solution is available')
warnings.warn('Variable could not be found')
return None
def get_problem_summary(self):
"""
Returns the problem summary table to the user
Returns
-------
ps : :class:`swat.dataframe.SASDataFrame`
            Problem summary table that is obtained after :meth:`Model.solve`
Examples
--------
>>> m.solve()
>>> ps = m.get_problem_summary()
>>> print(type(ps))
<class 'swat.dataframe.SASDataFrame'>
>>> print(ps)
Problem Summary
Value
Label
Problem Name model1
Objective Sense Maximization
Objective Function obj
RHS RHS
Number of Variables 2
Bounded Above 0
Bounded Below 2
Bounded Above and Below 0
Free 0
Fixed 0
Number of Constraints 2
LE (<=) 1
EQ (=) 0
GE (>=) 1
Range 0
Constraint Coefficients 4
>>> print(ps.index)
Index(['Problem Name', 'Objective Sense', 'Objective Function', 'RHS',
'', 'Number of Variables', 'Bounded Above', 'Bounded Below',
'Bounded Above and Below', 'Free', 'Fixed', '',
'Number of Constraints', 'LE (<=)', 'EQ (=)', 'GE (>=)', 'Range', '',
'Constraint Coefficients'],
dtype='object', name='Label')
>>> print(ps.loc['Number of Variables'])
Value 2
Name: Number of Variables, dtype: object
>>> print(ps.loc['Constraint Coefficients', 'Value'])
4
"""
return self._problemSummary
def get_solution_summary(self):
"""
Returns the solution summary table to the user
Returns
-------
ss : :class:`swat.dataframe.SASDataFrame`
            Solution summary table that is obtained after :meth:`Model.solve`
Examples
--------
>>> m.solve()
>>> soln = m.get_solution_summary()
>>> print(type(soln))
<class 'swat.dataframe.SASDataFrame'>
>>> print(soln)
Solution Summary
Value
Label
Solver LP
Algorithm Dual Simplex
Objective Function obj
Solution Status Optimal
Objective Value 10
Primal Infeasibility 0
Dual Infeasibility 0
Bound Infeasibility 0
Iterations 2
Presolve Time 0.00
Solution Time 0.01
>>> print(soln.index)
Index(['Solver', 'Algorithm', 'Objective Function', 'Solution Status',
'Objective Value', '', 'Primal Infeasibility',
'Dual Infeasibility', 'Bound Infeasibility', '', 'Iterations',
'Presolve Time', 'Solution Time'],
dtype='object', name='Label')
>>> print(soln.loc['Solution Status', 'Value'])
Optimal
"""
return self._solutionSummary
def get_solution(self, vtype='Primal', solution=None, pivot=False):
"""
Returns the primal and dual problem solutions
Parameters
----------
vtype : string, optional
`Primal` or `Dual`
solution : integer, optional
Solution number to be returned (for the MILP solver)
pivot : boolean, optional
When set to `True`, returns multiple solutions in columns as a pivot
table
Returns
-------
solution : :class:`pandas.DataFrame`
Primal or dual solution table returned from the CAS action
Examples
--------
>>> m.solve()
>>> print(m.get_solution('Primal'))
var lb ub value solution
0 x[clock] 0.0 1.797693e+308 0.0 1.0
1 x[pc] 0.0 1.797693e+308 5.0 1.0
2 x[headphone] 0.0 1.797693e+308 2.0 1.0
3 x[mug] 0.0 1.797693e+308 0.0 1.0
4 x[book] 0.0 1.797693e+308 0.0 1.0
5 x[pen] 0.0 1.797693e+308 1.0 1.0
6 x[clock] 0.0 1.797693e+308 0.0 2.0
7 x[pc] 0.0 1.797693e+308 5.0 2.0
8 x[headphone] 0.0 1.797693e+308 2.0 2.0
9 x[mug] 0.0 1.797693e+308 0.0 2.0
10 x[book] 0.0 1.797693e+308 0.0 2.0
11 x[pen] 0.0 1.797693e+308 0.0 2.0
12 x[clock] 0.0 1.797693e+308 1.0 3.0
13 x[pc] 0.0 1.797693e+308 4.0 3.0
...
>>> print(m.get_solution('Primal', solution=2))
var lb ub value solution
6 x[clock] 0.0 1.797693e+308 0.0 2.0
7 x[pc] 0.0 1.797693e+308 5.0 2.0
8 x[headphone] 0.0 1.797693e+308 2.0 2.0
9 x[mug] 0.0 1.797693e+308 0.0 2.0
10 x[book] 0.0 1.797693e+308 0.0 2.0
11 x[pen] 0.0 1.797693e+308 0.0 2.0
>>> print(m.get_solution(pivot=True))
solution 1.0 2.0 3.0 4.0 5.0
var
x[book] 0.0 0.0 0.0 1.0 0.0
x[clock] 0.0 0.0 1.0 1.0 0.0
x[headphone] 2.0 2.0 1.0 1.0 0.0
x[mug] 0.0 0.0 0.0 1.0 0.0
x[pc] 5.0 5.0 4.0 1.0 0.0
x[pen] 1.0 0.0 0.0 1.0 0.0
>>> print(m.get_solution('Dual'))
con value solution
0 weight_con 20.0 1.0
1 limit_con[clock] 0.0 1.0
2 limit_con[pc] 5.0 1.0
3 limit_con[headphone] 2.0 1.0
4 limit_con[mug] 0.0 1.0
5 limit_con[book] 0.0 1.0
6 limit_con[pen] 1.0 1.0
7 weight_con 19.0 2.0
8 limit_con[clock] 0.0 2.0
9 limit_con[pc] 5.0 2.0
10 limit_con[headphone] 2.0 2.0
11 limit_con[mug] 0.0 2.0
12 limit_con[book] 0.0 2.0
13 limit_con[pen] 0.0 2.0
...
>>> print(m.get_solution('dual', pivot=True))
solution 1.0 2.0 3.0 4.0 5.0
con
limit_con[book] 0.0 0.0 0.0 1.0 0.0
limit_con[clock] 0.0 0.0 1.0 1.0 0.0
limit_con[headphone] 2.0 2.0 1.0 1.0 0.0
limit_con[mug] 0.0 0.0 0.0 1.0 0.0
limit_con[pc] 5.0 5.0 4.0 1.0 0.0
limit_con[pen] 1.0 0.0 0.0 1.0 0.0
weight_con 20.0 19.0 20.0 19.0 0.0
Notes
-----
        - If the :meth:`Model.solve` method is used with the :code:`frame=True`
          parameter, the MILP solver returns multiple solutions.
          You can retrieve different results by using the :code:`solution`
          parameter.
"""
if vtype == 'Primal' or vtype == 'primal':
if pivot:
return self._primalSolution.pivot_table(
index=['var'], columns=['solution'], values='value')
elif solution and 'solution' in self._primalSolution:
return self._primalSolution.loc[
self._primalSolution['solution'] == solution]
else:
return self._primalSolution
elif vtype == 'Dual' or vtype == 'dual':
if pivot:
return self._dualSolution.pivot_table(
index=['con'], columns=['solution'], values='value')
elif solution and 'solution' in self._dualSolution:
return self._dualSolution.loc[
self._dualSolution['solution'] == solution]
else:
return self._dualSolution
else:
raise ValueError('Solution type should be \'primal\' or \'dual\'')
def get_tuner_results(self):
"""
Returns the tuning results
Examples
--------
>>> m.tune_parameters(tunerParameters={'maxConfigs': 10})
        >>> results = m.get_tuner_results()
Returns
-------
tunerResults : dict
Returns tuner results as a dictionary.
Its members are
- Performance Information
- Tuner Information
- Tuner Summary
- Tuner Results
See also
--------
:meth:`Model.tune_parameters`
"""
return self._tunerResults
def set_session(self, session):
"""
Sets the session of model
Parameters
----------
session : :class:`swat.cas.connection.CAS` or \
:class:`saspy.SASsession`
CAS or SAS Session object
Notes
-----
        * You can use CAS sessions (via the SWAT package) or SAS sessions (via the SASPy package)
* Session of a model can be set at initialization.
See :class:`Model`.
"""
self._session = session
def get_session(self):
"""
Returns the session of the model
Returns
-------
session : :class:`swat.cas.connection.CAS` or \
:class:`saspy.SASsession`
Session of the model, or None
"""
return self._session
def get_sets(self):
"""
Returns a list of :class:`Set` objects in the model
Returns
-------
set_list : list
List of sets in the model
Examples
--------
>>> m.get_sets()
[sasoptpy.abstract.Set(name=W, settype=['str', 'num']), sasoptpy.abstract.Set(name=I, settype=['num']), sasoptpy.abstract.Set(name=J, settype=['num'])]
"""
return list(self._setDict.values())
def get_parameters(self):
"""
Returns a list of :class:`abstract.Parameter` and
:class:`abstract.ParameterGroup` objects in the model
Returns
-------
param_list : list
List of parameters in the model
Examples
--------
>>> for i in m.get_parameters():
... print(i.get_name(), type(i))
p <class 'sasoptpy.abstract.parameter_group.ParameterGroup'>
r <class 'sasoptpy.abstract.parameter.Parameter'>
"""
return list(self._parameterDict.values())
def get_statements(self):
"""
Returns a list of all statements inside the model
Returns
-------
st_list : list
List of all statement objects
Examples
--------
>>> m.add_statement(so.abstract.LiteralStatement("expand;"))
>>> m.get_statements()
[<sasoptpy.abstract.statement.literal.LiteralStatement object at 0x7fe0202fc358>]
>>> print(m.to_optmodel())
proc optmodel;
var x;
min obj1 = x * x;
expand;
solve;
quit;
"""
return list(self._statementDict.values())
def get_implicit_variables(self):
"""
Returns a list of implicit variables
Returns
-------
implicit_variables : list
List of implicit variables in the model
Examples
--------
>>> m = so.Model(name='test_add_impvar')
>>> x = m.add_variables(5, name='x')
>>> y = m.add_implicit_variable((i * x[i] + x[i] ** 2 for i in range(5)),
name='y')
>>> assertEqual([y], m.get_implicit_variables())
True
"""
return list(self._impvarDict.values())
def _get_dropped_cons(self):
return self._droppedCons
def _get_dropped_vars(self):
return self._droppedVars
def print_solution(self):
"""
Prints the current values of the variables
Examples
--------
>>> m.solve()
>>> m.print_solution()
x: 2.0
y: 0.0
See also
--------
:func:`Model.get_solution`
Notes
-----
- This function might not work for abstract variables and nonlinear
models.
"""
for v in self.loop_variables():
print('{}: {}'.format(v.get_name(), v._value))
def to_frame(self, **kwargs):
warnings.warn('Use to_mps for obtaining problem in MPS format',
DeprecationWarning)
        return self.to_mps(**kwargs)
def to_mps(self, **kwargs):
"""
Returns the problem in MPS format
Examples
--------
>>> print(n.to_mps())
Field1 Field2 Field3 Field4 Field5 Field6 _id_
0 NAME n 0.0 0.0 1
1 ROWS NaN NaN 2
2 MIN myobj NaN NaN 3
3 COLUMNS NaN NaN 4
4 y myobj 2.0 NaN 5
5 RHS NaN NaN 6
6 RANGES NaN NaN 7
7 BOUNDS NaN NaN 8
8 FR BND y NaN NaN 9
9 ENDATA 0.0 0.0 10
"""
return sasoptpy.interface.to_mps(self, **kwargs)
def export_mps(self, filename=None, fetch=False, **kwargs):
"""
Exports model in MPS format
Examples
--------
- Writing a linear optimization or mixed integer linear optimization model into MPS file
>>> m.export_mps('my_problem.mps')
- Returning the MPS string as a Python variable
>>> m.export_mps(fetch=True)
"""
if self._is_linear():
mps = sasoptpy.util.export_to_mps(self, filename=filename, **kwargs)
if fetch:
return mps
else:
raise ValueError("Model is linear or has abstract components")
def to_optmodel(self, **kwargs):
"""
Returns the model in OPTMODEL format
Examples
--------
>>> print(n.to_optmodel())
proc optmodel;
var y init 2;
min myobj = 2 * y;
solve;
quit;
"""
return sasoptpy.interface.to_optmodel(self, **kwargs)
def __str__(self):
"""
Returns a string representation of the Model object.
Examples
--------
>>> print(m)
Model: [
Name: knapsack
Session: cashost:casport
Objective: MAX [8 * get[clock] + 10 * get[mug] + 15 * get[headphone]\
+ 20 * get[book] + get[pen]]
Variables (5): [
get[clock]
get[mug]
get[headphone]
get[book]
get[pen]
]
Constraints (6): [
get[clock] <= 3
get[mug] <= 5
get[headphone] <= 2
get[book] <= 10
get[pen] <= 15
4 * get[clock] + 6 * get[mug] + 7 * get[headphone] + \
12 * get[book] + get[pen] <= 55
]
]
"""
s = 'Model: [\n'
s += ' Name: {}\n'.format(self.get_name())
if self._session is not None:
s += ' Session: {}:{}\n'.format(self._session._hostname,
self._session._port)
s += ' Objective: {} [{}]\n'.format(self.get_objective().get_sense(),
self.get_objective())
s += ' Variables ({}): [\n'.format(len(self.get_variables()))
for i in self.loop_variables():
s += ' {}\n'.format(i)
s += ' ]\n'
s += ' Constraints ({}): [\n'.format(len(self.get_constraints()))
for i in self.loop_constraints():
s += ' {}\n'.format(i)
s += ' ]\n'
s += ']'
return s
def __repr__(self):
"""
Returns a representation of the Model object.
Examples
--------
>>> print(repr(m))
sasoptpy.Model(name='model1', session=CAS('cashost', 12345,
'username', protocol='cas', name='py-session-1',
session='594ad8d5-6a7b-3443-a155-be59177e8d23'))
"""
if self._session is not None:
stype = self.get_session_type()
if stype == 'SAS':
s = "sasoptpy.Model(name='{}', session=saspy.SASsession(cfgname='{}'))".format(
self.get_name(), self._session.sascfg.name)
elif stype == 'CAS':
s = 'sasoptpy.Model(name=\'{}\', session={})'.format(
self.get_name(), self._session)
else:
raise TypeError('Invalid session type: {}'.format(type(self.get_session())))
else:
s = 'sasoptpy.Model(name=\'{}\')'.format(self.get_name())
return s
def _defn(self):
s = 'problem {}'.format(self.get_name())
vars = [s.get_name() for s in self.get_grouped_variables().values()]
cons = [s.get_name() for s in self.get_grouped_constraints().values()]
obj = self.get_objective()
objs = []
if not obj.is_default():
objs.append(obj.get_name())
elements = ' '.join(vars + cons + objs)
if elements != '':
s += ' include ' + elements
s += ';'
return s
def _expr(self):
return self.to_optmodel()
def _is_linear(self):
"""
Checks whether the model can be written as a linear model (in MPS format)
Returns
-------
is_linear : boolean
True if model does not have any nonlinear components or abstract\
operations, False otherwise
"""
for c in self.loop_constraints():
if not c._is_linear():
return False
if not self._objective._is_linear():
return False
return True
def _has_integer_vars(self):
for v in self._variableDict.values():
if v._type != sasoptpy.CONT:
return True
return False
def get_session_type(self):
"""
Tests whether the model session is defined and still active
Returns
-------
session : string
'CAS' for CAS sessions, 'SAS' for SAS sessions
"""
# Check if session is defined
return sasoptpy.util.get_session_type(self._session)
@sasoptpy.containable
def solve(self, **kwargs):
"""
Solves the model by calling CAS or SAS optimization solvers
Parameters
----------
options : dict, optional
Solver options as a dictionary object
submit : boolean, optional
When set to `True`, calls the solver
name : string, optional
Name of the table
frame : boolean, optional
When set to `True`, uploads the problem as a DataFrame in MPS format
drop : boolean, optional
When set to `True`, drops the MPS table after solve (only CAS)
replace : boolean, optional
When set to `True`, replaces an existing MPS table (only CAS and MPS)
primalin : boolean, optional
When set to `True`, uses initial values (only MILP)
verbose : boolean, optional (experimental)
When set to `True`, prints the generated OPTMODEL code
Returns
-------
solution : :class:`pandas.DataFrame`
Solution of the optimization model
Examples
--------
>>> m.solve()
NOTE: Initialized model food_manufacture_1
NOTE: Converting model food_manufacture_1 to DataFrame
NOTE: Added action set 'optimization'.
...
NOTE: Optimal.
NOTE: Objective = 107842.59259.
NOTE: The Dual Simplex solve time is 0.01 seconds.
>>> m.solve(options={'maxtime': 600})
>>> m.solve(options={'algorithm': 'ipm'})
Notes
-----
* Some of the options listed under the ``options`` argument might not be
passed, depending on which CAS action is being used.
        * The ``options`` argument should be a dictionary, where keys are
option names. For example, ``m.solve(options={'maxtime': 600})``
limits the solution time to 600 seconds.
* See :ref:`solver-options` for a list of solver options.
"""
return sasoptpy.util.submit_for_solve(self, **kwargs)
def tune_parameters(self, **kwargs):
"""
Tunes the model to find ideal solver parameters
Parameters
----------
kwargs :
Keyword arguments as defined in the optimization.tuner action.
Acceptable values are:
- `milpParameters <https://go.documentation.sas.com/?docsetId=casactmopt&docsetTarget=cas-optimization-tuner.htm&docsetVersion=8.5&locale=en#PYTHON.cas-optimization-tuner-milpparameters>`_:
Parameters for the solveMilp action, such as
`maxTime`, `heuristics`, `feasTol`
- `tunerParameters <https://go.documentation.sas.com/?docsetId=casactmopt&docsetTarget=cas-optimization-tuner.htm&docsetVersion=8.5&locale=en#PYTHON.cas-optimization-tuner-tunerparameters>`_:
Parameters for the tuner itself, such as
`maxConfigs`, `printLevel`, `logFreq`
- `tuningParameters <https://go.documentation.sas.com/?docsetId=casactmopt&docsetTarget=cas-optimization-tuner.htm&docsetVersion=8.5&locale=en#PYTHON.cas-optimization-tuner-tuningparameters>`_:
List of parameters to be tuned, such as
`cutStrategy`, `presolver`, `restarts`
Returns
-------
tunerResults : :class:`swat.dataframe.SASDataFrame`
Tuning results as a table
Examples
--------
>>> m = so.Model(name='model1')
>>> ...
>>> results = m.tune_parameters(tunerParameters={'maxConfigs': 10})
NOTE: Initialized model knapsack_with_tuner.
NOTE: Added action set 'optimization'.
NOTE: Uploading the problem DataFrame to the server.
NOTE: Cloud Analytic Services made the uploaded file available as table KNAPSACK_WITH_TUNER in caslib CASUSER(casuser).
NOTE: The table KNAPSACK_WITH_TUNER has been created in caslib CASUSER(casuser) from binary data uploaded to Cloud Analytic Services.
NOTE: Start to tune the MILP
SolveCalls Configurations BestTime Time
1 1 0.21 0.26
2 2 0.19 0.50
3 3 0.19 0.72
4 4 0.19 0.95
5 5 0.19 1.17
6 6 0.19 1.56
7 7 0.18 1.76
8 8 0.17 1.96
9 9 0.17 2.16
10 10 0.17 2.35
NOTE: Configuration limit reached.
NOTE: The tuning time is 2.35 seconds.
>>> print(results)
Configuration conflictSearch ... Sum of Run Times Percentage Successful
0 0.0 automatic ... 0.20 100.0
1 1.0 none ... 0.17 100.0
2 2.0 none ... 0.17 100.0
3 3.0 moderate ... 0.17 100.0
4 4.0 none ... 0.18 100.0
5 5.0 none ... 0.18 100.0
6 6.0 aggressive ... 0.18 100.0
7 7.0 moderate ... 0.18 100.0
8 8.0 aggressive ... 0.19 100.0
9 9.0 automatic ... 0.36 100.0
>>> results = m.tune_parameters(
milpParameters={'maxtime': 10},
tunerParameters={'maxConfigs': 20, 'logfreq': 5},
tuningParameters=[
{'option': 'presolver', 'initial': 'none', 'values': ['basic', 'aggressive', 'none']},
{'option': 'cutStrategy'},
{'option': 'strongIter', 'initial': -1, 'values': [-1, 100, 1000]}
])
NOTE: Added action set 'optimization'.
NOTE: Uploading the problem DataFrame to the server.
NOTE: Cloud Analytic Services made the uploaded file available as table KNAPSACK_WITH_TUNER in caslib CASUSER(casuser).
NOTE: The table KNAPSACK_WITH_TUNER has been created in caslib CASUSER(casuser) from binary data uploaded to Cloud Analytic Services.
NOTE: Start to tune the MILP
SolveCalls Configurations BestTime Time
5 5 0.17 1.01
10 10 0.17 2.00
15 15 0.17 2.98
20 20 0.17 3.95
NOTE: Configuration limit reached.
NOTE: The tuning time is 3.95 seconds.
>>> print(results)
Configuration conflictSearch ... Sum of Run Times Percentage Successful
0 0.0 automatic ... 0.17 100.0
1 1.0 none ... 0.16 100.0
2 2.0 none ... 0.16 100.0
3 3.0 none ... 0.16 100.0
4 4.0 none ... 0.16 100.0
5 5.0 none ... 0.17 100.0
6 6.0 none ... 0.17 100.0
7 7.0 none ... 0.17 100.0
8 8.0 none ... 0.17 100.0
9 9.0 none ... 0.17 100.0
10 10.0 none ... 0.17 100.0
11 11.0 aggressive ... 0.17 100.0
12 12.0 none ... 0.17 100.0
13 13.0 aggressive ... 0.17 100.0
14 14.0 automatic ... 0.17 100.0
15 15.0 none ... 0.17 100.0
16 16.0 none ... 0.17 100.0
17 17.0 moderate ... 0.17 100.0
18 18.0 moderate ... 0.17 100.0
19 19.0 none ... 0.17 100.0
Notes
-----
- See `SAS Optimization documentation
<https://go.documentation.sas.com/?docsetId=casactmopt&docsetTarget=cas-optimization-tuner.htm&docsetVersion=8.5&locale=en#PYTHON.cas-optimization-tuner-tunerparameters>`_
for a full list of tunable parameters.
- See `Optimization Action Set documentation
<https://go.documentation.sas.com/?docsetId=casactmopt&docsetTarget=casactmopt_optimization_details35.htm&docsetVersion=8.5&locale=en>`_.
See also
--------
:meth:`Model.get_tuner_results`
"""
return sasoptpy.util.submit_for_tune(self, **kwargs)
def _set_abstract_values(self, name, value):
"""
        Searches for the missing/abstract variable names and sets their values
"""
original_name = sasoptpy.util.get_group_name(name)
group = self.get_variable(original_name)
if group is not None:
v = group.get_member_by_name(name)
v.set_value(value)
def clear_solution(self):
"""
Clears the cached solution of the model
Notes
-----
        - This method clears the cached optimal objective value, solver
          response, and solution time of the model.
"""
self._objval = None
self.response = None
self._soltime = 0
```
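A short, hedged usage sketch pieced together from the docstrings above; `cas_session` and the model body are placeholders and are not part of the original source, and an actual CAS connection is required for the calls to run.
```python
# Hedged sketch only: 'cas_session' is a placeholder and the model contents
# are elided; a live CAS connection is needed for solve/tune to execute.
import sasoptpy as so
m = so.Model(name='food_manufacture_1', session=cas_session)
# ... add variables, constraints, and an objective here ...
m.solve(options={'maxtime': 600})       # cap the solve time at 600 seconds
m.solve(options={'algorithm': 'ipm'})   # request the interior point algorithm
results = m.tune_parameters(tunerParameters={'maxConfigs': 10})
m.clear_solution()                      # reset cached objective/solution time
```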
#### File: sasoptpy/core/variable_group.py
```python
from collections import OrderedDict
from itertools import product
from math import inf
import warnings
from multiprocessing import Pool
import sasoptpy
from sasoptpy.libs import (pd, np)
from sasoptpy.core import (Expression, Variable, Group)
class VariableGroup(Group):
"""
Creates a group of :class:`Variable` objects
Parameters
----------
argv : list, dict, int, :class:`pandas.Index`
Loop index for variable group
name : string, optional
Name (prefix) of the variables
vartype : string, optional
Type of variables, `BIN`, `INT`, or `CONT`
lb : list, dict, :class:`pandas.Series`, optional
Lower bounds of variables
ub : list, dict, :class:`pandas.Series`, optional
Upper bounds of variables
init : float, optional
Initial values of variables
Examples
--------
>>> PERIODS = ['Period1', 'Period2', 'Period3']
>>> production = so.VariableGroup(PERIODS, vartype=so.INT,
name='production', lb=10)
>>> print(production)
Variable Group (production) [
[Period1: production['Period1']]
[Period2: production['Period2']]
[Period3: production['Period3']]
]
>>> x = so.VariableGroup(4, vartype=so.BIN, name='x')
>>> print(x)
Variable Group (x) [
[0: x[0]]
[1: x[1]]
[2: x[2]]
[3: x[3]]
]
>>> z = so.VariableGroup(2, ['a', 'b', 'c'], name='z')
>>> print(z)
Variable Group (z) [
[(0, 'a'): z[0, 'a']]
[(0, 'b'): z[0, 'b']]
[(0, 'c'): z[0, 'c']]
[(1, 'a'): z[1, 'a']]
[(1, 'b'): z[1, 'b']]
[(1, 'c'): z[1, 'c']]
]
>>> print(repr(z))
sasoptpy.VariableGroup([0, 1], ['a', 'b', 'c'], name='z')
Notes
-----
* When working with a single model, use the
:func:`sasoptpy.Model.add_variables` method.
* If a variable group object is created, it can be added to a model using
the :func:`sasoptpy.Model.include` method.
* An individual variable inside the group can be accessed using indices.
>>> z = so.VariableGroup(2, ['a', 'b', 'c'], name='z', lb=0, ub=10)
>>> print(repr(z[0, 'a']))
sasoptpy.Variable(name='z_0_a', lb=0, ub=10, vartype='CONT')
See also
--------
:func:`sasoptpy.Model.add_variables`
:func:`sasoptpy.Model.include`
"""
@sasoptpy.class_containable
def __init__(self, *argv, name, vartype=None, lb=None,
ub=None, init=None):
if len(argv) == 0:
raise ValueError('An iterable object or None should be given as '
'the first parameter')
super().__init__(name=name)
self._vardict = OrderedDict()
self._shadows = OrderedDict()
self._keyset = []
self._abstract = False
self._lb = None
self._ub = None
if vartype is None:
vartype = sasoptpy.CONT
self._init = init
self._type = vartype
if name is None:
name = sasoptpy.util.get_next_name()
self._recursive_add_vars(*argv, name=name,
vartype=vartype, lb=lb, ub=ub, init=init,
vardict=self._vardict)
self.filter_unique_keys()
lb, ub = sasoptpy.core.util.get_default_bounds_if_none(vartype, lb, ub)
self.set_bounds(lb=lb, ub=ub, members=False)
self._objorder = sasoptpy.util.get_creation_id()
for arg in argv:
if isinstance(arg, int):
self._keyset.append(
sasoptpy.util.package_utils._extract_argument_as_list(arg))
else:
self._keyset.append(
sasoptpy.util.package_utils._extract_argument_as_list(arg))
if not self._abstract and sasoptpy.util.is_set_abstract(arg):
self._abstract = True
for _, v in self._vardict.items():
v._abstract = True
self._set_var_info()
def _process_single_var(self, varkey):
# for varkey in allcombs:
current_keys = tuple(k for k in varkey)
is_shadow = any(sasoptpy.abstract.util.is_abstract(i) for i in varkey)
varname = sasoptpy.core.util.get_name_from_keys(self.get_name(),
current_keys)
self._register_keys(current_keys)
varlb = sasoptpy.util.extract_list_value(current_keys, self._lb)
varub = sasoptpy.util.extract_list_value(current_keys, self._ub)
varin = sasoptpy.util.extract_list_value(current_keys, self._init)
self.add_member(key=current_keys, name=varname, vartype=self._type,
lb=varlb, ub=varub, init=varin, shadow=is_shadow)
def get_name(self):
"""
Returns the name of the variable group
Returns
-------
name : string
Name of the variable group
Examples
--------
>>> m = so.Model(name='m')
>>> var1 = m.add_variables(4, name='x')
>>> print(var1.get_name())
x
"""
return self._name
def add_member(self, key, name=None, vartype=None, lb=None,
ub=None, init=None, shadow=False):
"""
(Experimental) Adds a new member to Variable Group
Notes
-----
- This method is mainly intended for internal use.
"""
key = sasoptpy.util.pack_to_tuple(key)
if lb is None:
lb = sasoptpy.util.extract_list_value(key, self._lb)
if ub is None:
ub = sasoptpy.util.extract_list_value(key, self._ub)
if init is None:
init = sasoptpy.util.extract_list_value(key, self._init)
if vartype is None:
vartype = self._type
if shadow is True:
variable_class = sasoptpy.abstract.ShadowVariable
dict_to_add = self._shadows
else:
variable_class = Variable
dict_to_add = self._vardict
if name is None:
name = sasoptpy.core.util.get_name_from_keys(self.get_name(), key)
new_var = variable_class(name=name, lb=lb, ub=ub, init=init,
vartype=vartype)
dict_to_add[key] = new_var
new_var.set_parent(self)
return new_var
def include_member(self, key, var):
if sasoptpy.core.util.is_variable(var):
key = sasoptpy.util.pack_to_tuple(key)
self._vardict[key] = var
def set_abstract(self, abstract=True):
self._abstract = abstract
def _recursive_add_vars(self, *argv, name, vartype, lb, ub, init,
vardict, vkeys=(), shadow=False):
from sasoptpy.util.package_utils import _extract_argument_as_list
next_arg = _extract_argument_as_list(argv[0])
for _, i in enumerate(next_arg):
if isinstance(i, tuple):
current_keys = vkeys + i
else:
current_keys = vkeys + (i,)
if sasoptpy.abstract.util.is_abstract(i):
shadow = True
if len(argv) == 1:
varname = sasoptpy.core.util.get_name_from_keys(
name, current_keys)
self._register_keys(current_keys)
varlb = sasoptpy.util.extract_list_value(current_keys, lb)
varub = sasoptpy.util.extract_list_value(current_keys, ub)
varin = sasoptpy.util.extract_list_value(current_keys, init)
self.add_member(key=current_keys, name=varname, vartype=vartype,
lb=varlb, ub=varub, init=varin, shadow=shadow)
else:
self._recursive_add_vars(*argv[1:], vardict=vardict,
vkeys=current_keys,
name=name, vartype=vartype,
lb=lb, ub=ub, init=init,
shadow=shadow)
def _set_var_info(self):
for i in self._vardict:
self._vardict[i]._set_info(parent=self, key=i)
def __getitem__(self, key):
"""
Overloaded method to access individual variables
Parameters
----------
key : tuple, string or int
Key of the variable
Returns
-------
ref : :class:`Variable` or list
Reference to a single Variable or a list of Variable objects
"""
if self._abstract or sasoptpy.util.is_key_abstract(key) or sasoptpy.core.util.is_expression(key):
tuple_key = sasoptpy.util.pack_to_tuple(key)
tuple_key = tuple(i for i in sasoptpy.util.flatten_tuple(tuple_key))
if tuple_key in self._vardict:
return self._vardict[tuple_key]
elif tuple_key in self._shadows:
return self._shadows[tuple_key]
else:
shadow = sasoptpy.abstract.ShadowVariable(self.get_name())
shadow.set_group_key(self, tuple_key)
self._shadows[tuple_key] = shadow
return shadow
k = sasoptpy.util.pack_to_tuple(key)
if k in self._vardict:
return self._vardict[k]
else:
indices_to_filter = []
filter_values = {}
list_of_variables = []
for i, _ in enumerate(k):
if k[i] != '*':
indices_to_filter.append(i)
filter_values[i] = sasoptpy.util.pack_to_list(k[i])
for v in self._vardict:
eligible = True
for f in indices_to_filter:
if v[f] not in filter_values[f]:
eligible = False
if eligible:
list_of_variables.append(self._vardict[v])
if not list_of_variables:
warnings.warn('Requested variable group is empty:' +
' {}[{}] ({})'.
format(self.get_name(), key, type(key)),
RuntimeWarning, stacklevel=2)
if len(list_of_variables) == 0:
return None
return list_of_variables
def __setitem__(self, key, value):
v = self[key]
v.set_value(value)
sasoptpy.abstract.Assignment(v, value)
def __iter__(self):
"""
        Yields the variables contained in the variable group
        Yields
        ------
        v : :class:`Variable`
            Members of the variable group, one at a time
"""
for v in self._vardict.values():
yield v
def _defn(self):
"""
Returns string to be used in OPTMODEL definition
"""
name = self.get_name()
s = 'var {}'.format(name)
s += ' {'
for i in self._keyset:
ind_list = []
for j in i:
ind_list.append(
sasoptpy.util.package_utils._to_optmodel_quoted_string(j))
s += '{{{}}}, '.format(','.join(ind_list))
s = s[:-2]
s += '} '
# Grab features
CONT = sasoptpy.CONT
BIN = sasoptpy.BIN
INT = sasoptpy.INT
if self._type != CONT:
if self._type == BIN:
s += 'binary '
if self._type == INT:
s += 'integer '
if sasoptpy.core.util.is_valid_lb(self._lb, self._type):
s += '>= {} '.format(self._lb)
if sasoptpy.core.util.is_valid_ub(self._ub, self._type):
s += '<= {} '.format(self._ub)
if sasoptpy.core.util.is_valid_init(self._init, self._type):
s += 'init {} '.format(self._init)
s = s.rstrip()
s += ';'
return(s)
def _expr(self):
return self.get_name()
def _member_defn(self):
dependents = []
for v in self.get_members().values():
dependents.extend(self.get_different_attributes(v))
defn = sasoptpy.util.get_attribute_definitions(dependents)
return defn
def get_members(self):
"""
Returns a dictionary of members
"""
return self._vardict
def get_shadow_members(self):
return self._shadows
def get_attributes(self):
"""
Returns an ordered dictionary of main attributes
Returns
--------
attributes : OrderedDict
The dictionary consists of `init`, `lb`, and `ub` attributes
"""
attributes = OrderedDict()
attributes['init'] = self._init
attributes['lb'] = self._lb
attributes['ub'] = self._ub
return attributes
def get_type(self):
"""
Returns the type of variable
Possible values are:
* sasoptpy.CONT
* sasoptpy.INT
* sasoptpy.BIN
Examples
--------
>>> z = so.VariableGroup(3, name='z', vartype=so.INT)
>>> z.get_type()
'INT'
"""
return self._type
def get_different_attributes(self, var):
var_attr = var.get_attributes()
group_attr = self.get_attributes()
different_attrs = []
for key, var_value in var_attr.items():
if var_value is not None:
group_value = group_attr.get(key, None)
def is_equal_to_default(v, k):
return v == sasoptpy.core.util.get_default_value(
self.get_type(), k)
if group_value is None and is_equal_to_default(var_value, key):
continue
if sasoptpy.util.is_comparable(group_value) and var_value != group_value:
different_attrs.append(
{'ref': var, 'key': key, 'value': var_value})
elif not sasoptpy.util.is_comparable(group_value):
different_attrs.append(
{'ref': var, 'key': key, 'value': var_value})
return different_attrs
def sum(self, *argv):
"""
Quick sum method for the variable groups
Parameters
----------
argv : Arguments
List of indices for the sum
Returns
-------
r : :class:`Expression`
Expression that represents the sum of all variables in the group
Examples
--------
>>> z = so.VariableGroup(2, ['a', 'b', 'c'], name='z', lb=0, ub=10)
>>> e1 = z.sum('*', '*')
>>> print(e1)
z[1, 'c'] + z[1, 'a'] + z[1, 'b'] + z[0, 'a'] + z[0, 'b'] +
z[0, 'c']
>>> e2 = z.sum('*', 'a')
>>> print(e2)
z[1, 'a'] + z[0, 'a']
>>> e3 = z.sum('*', ['a', 'b'])
>>> print(e3)
z[1, 'a'] + z[0, 'b'] + z[1, 'b'] + z[0, 'a']
"""
if self._abstract:
r = Expression()
symbolic_sum = False
ind_set = list()
iter_key = list()
for i, a in enumerate(argv):
if isinstance(a, str) and a == '*':
sub_list = list()
for j in self._keyset[i]:
if sasoptpy.abstract.util.is_abstract_set(j):
si = sasoptpy.abstract.SetIterator(j)
iter_key.append(si)
ind_set.append(si)
symbolic_sum = True
else:
#ind_set.append(j)
sub_list.append(j)
if sub_list:
ind_set.append(sub_list)
else:
if hasattr(a, '__iter__'):
ind_set.append(a)
else:
ind_set.append([a])
combs = product(*ind_set)
for i in combs:
var_key = sasoptpy.util.pack_to_tuple(i)
r = r.add(self[var_key], 1)
if symbolic_sum:
#r = r.add(self[tuple(ind_set)])
r._operator = 'sum'
r._iterkey = iter_key
return r
else:
r = Expression()
r.set_temporary()
feas_set = []
for i, a in enumerate(argv):
if a == '*':
feas_set.append(self._groups[i])
elif hasattr(a, "__iter__") and not isinstance(a, str):
feas_set.append(a)
else:
feas_set.append([a])
combs = product(*feas_set)
for i in combs:
var_key = sasoptpy.util.pack_to_tuple(i)
if var_key in self._vardict:
r.add(self._vardict[var_key], 1)
r.set_permanent()
return r
def mult(self, vector):
"""
Quick multiplication method for the variable groups
Parameters
----------
vector : list, dictionary, :class:`pandas.Series`,\
or :class:`pandas.DataFrame`
Vector to be multiplied with the variable group
Returns
-------
r : :class:`Expression`
An expression that is the product of the variable group with the
specified vector
Examples
--------
Multiplying with a list
>>> x = so.VariableGroup(4, vartype=so.BIN, name='x')
>>> e1 = x.mult([1, 5, 6, 10])
>>> print(e1)
10.0 * x[3] + 6.0 * x[2] + x[0] + 5.0 * x[1]
Multiplying with a dictionary
>>> y = so.VariableGroup([0, 1], ['a', 'b'], name='y', lb=0, ub=10)
>>> dvals = {(0, 'a'): 1, (0, 'b'): 2, (1, 'a'): -1, (1, 'b'): 5}
>>> e2 = y.mult(dvals)
>>> print(e2)
2.0 * y[0, 'b'] - y[1, 'a'] + y[0, 'a'] + 5.0 * y[1, 'b']
Multiplying with a pandas.Series object
>>> u = so.VariableGroup(['a', 'b', 'c', 'd'], name='u')
>>> ps = pd.Series([0.1, 1.5, -0.2, 0.3], index=['a', 'b', 'c', 'd'])
>>> e3 = u.mult(ps)
>>> print(e3)
1.5 * u['b'] + 0.1 * u['a'] - 0.2 * u['c'] + 0.3 * u['d']
Multiplying with a pandas.DataFrame object
>>> data = np.random.rand(3, 3)
>>> df = pd.DataFrame(data, columns=['a', 'b', 'c'])
>>> print(df)
NOTE: Initialized model model1
a b c
0 0.966524 0.237081 0.944630
1 0.821356 0.074753 0.345596
2 0.065229 0.037212 0.136644
>>> y = m.add_variables(3, ['a', 'b', 'c'], name='y')
>>> e = y.mult(df)
>>> print(e)
0.9665237354418064 * y[0, 'a'] + 0.23708064143289442 * y[0, 'b'] +
0.944629500537536 * y[0, 'c'] + 0.8213562592159828 * y[1, 'a'] +
0.07475256894157478 * y[1, 'b'] + 0.3455957019116668 * y[1, 'c'] +
0.06522945752546017 * y[2, 'a'] + 0.03721153533250843 * y[2, 'b'] +
0.13664422498043194 * y[2, 'c']
"""
r = Expression()
if isinstance(vector, list) or isinstance(vector, np.ndarray):
for i, key in enumerate(vector):
var = self._vardict[i, ]
r._linCoef[var.get_name()] = {'ref': var, 'val': vector[i]}
elif isinstance(vector, pd.Series):
for key in vector.index:
k = sasoptpy.util.pack_to_tuple(key)
var = self._vardict[k]
r._linCoef[var.get_name()] = {'ref': var, 'val': vector[key]}
elif isinstance(vector, pd.DataFrame):
vectorflat = sasoptpy.util.flatten_frame(vector)
for key in vectorflat.index:
k = sasoptpy.util.pack_to_tuple(key)
var = self._vardict[k]
r._linCoef[var.get_name()] = {'ref': var, 'val': vectorflat[key]}
else:
for i, key in enumerate(vector):
if isinstance(key, tuple):
k = key
else:
k = (key,)
var = self._vardict[k]
try:
r._linCoef[var.get_name()] = {'ref': var, 'val': vector[i]}
except KeyError:
r._linCoef[var.get_name()] = {'ref': var, 'val': vector[key]}
return r
def set_init(self, init):
"""
Specifies or updates the initial values
Parameters
----------
init : float, list, dict, :class:`pandas.Series`
Initial value of the variables
Examples
--------
>>> m = so.Model(name='m')
>>> y = m.add_variables(3, name='y')
>>> print(y._defn())
var y {{0,1,2}};
>>> y.set_init(5)
>>> print(y._defn())
var y {{0,1,2}} init 5;
"""
self._init = init
for v in self._vardict:
inval = sasoptpy.util.extract_list_value(v, init)
self._vardict[v].set_init(inval)
for v in self._shadows:
self._shadows[v].set_init(init)
def set_bounds(self, lb=None, ub=None, members=True):
"""
Specifies or updates bounds for the variable group
Parameters
----------
lb : float, :class:`pandas.Series`, optional
Lower bound
ub : float, :class:`pandas.Series`, optional
Upper bound
Examples
--------
>>> z = so.VariableGroup(2, ['a', 'b', 'c'], name='z', lb=0, ub=10)
>>> print(repr(z[0, 'a']))
sasoptpy.Variable(name='z_0_a', lb=0, ub=10, vartype='CONT')
>>> z.set_bounds(lb=3, ub=5)
>>> print(repr(z[0, 'a']))
sasoptpy.Variable(name='z_0_a', lb=3, ub=5, vartype='CONT')
>>> u = so.VariableGroup(['a', 'b', 'c', 'd'], name='u')
>>> lb_vals = pd.Series([1, 4, 0, -1], index=['a', 'b', 'c', 'd'])
>>> u.set_bounds(lb=lb_vals)
>>> print(repr(u['b']))
sasoptpy.Variable(name='u_b', lb=4, ub=inf, vartype='CONT')
"""
if lb is not None:
self._lb = sasoptpy.core.util.get_group_bound(lb)
if ub is not None:
self._ub = sasoptpy.core.util.get_group_bound(ub)
if members:
for v in self._vardict:
varlb = sasoptpy.util.extract_list_value(v, lb)
if lb is not None:
self[v].set_bounds(lb=varlb)
varub = sasoptpy.util.extract_list_value(v, ub)
if ub is not None:
self[v].set_bounds(ub=varub)
def set_member_value(self, key, value):
pass
def get_member_by_name(self, name):
keys = sasoptpy.abstract.util.get_key_from_name(name)
return self[keys]
def __str__(self):
"""
Generates a representation string
"""
s = 'Variable Group ({}) [\n'.format(self.get_name())
for k in self._vardict:
v = self._vardict[k]
s += ' [{}: {}]\n'.format(sasoptpy.util.get_first_member(k), v)
s += ']'
return s
def __repr__(self):
"""
Returns a string representation of the object.
"""
s = 'sasoptpy.VariableGroup('
keylen = max(map(len, self._vardict))
for i in range(keylen):
ls = []
for k in self._vardict:
if k[i] not in ls:
ls.append(k[i])
s += '{}, '.format(ls)
s += 'name=\'{}\')'.format(self.get_name())
return s
```
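Minimal illustration of the `VariableGroup` API, assembled directly from the docstring examples above (assumes `sasoptpy` is importable as `so`).
```python
# Usage lifted from the docstrings above.
import sasoptpy as so
PERIODS = ['Period1', 'Period2', 'Period3']
production = so.VariableGroup(PERIODS, vartype=so.INT, name='production', lb=10)
print(production['Period1'])            # access a member by its key
z = so.VariableGroup(2, ['a', 'b', 'c'], name='z', lb=0, ub=10)
e1 = z.sum('*', 'a')                    # z[1, 'a'] + z[0, 'a']
x = so.VariableGroup(4, vartype=so.BIN, name='x')
e2 = x.mult([1, 5, 6, 10])              # weighted sum over the group members
z.set_bounds(lb=3, ub=5)                # update bounds on every member
```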
#### File: sasoptpy/libs/pandas.py
```python
import pandas as pd
Index = pd.Index
MultiIndex = pd.MultiIndex
Series = pd.Series
DataFrame = pd.DataFrame
def convert_dict_to_frame(dictobj, cols=None):
df = pd.DataFrame.from_dict(dictobj, orient='index')
if isinstance(cols, list):
df.columns = cols
if isinstance(df.index[0], tuple):
df.index = pd.MultiIndex.from_tuples(df.index)
return df
def display_dense():
pd.set_option('display.multi_sparse', False)
def display_all():
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
concat = pd.concat
```
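A quick, hedged check of `convert_dict_to_frame`; the import path is an assumption based on the file location above and requires a source checkout.
```python
# Assumes the module above is importable; the exact import path is a guess
# based on the file location sasoptpy/libs/pandas.py.
from sasoptpy.libs.pandas import convert_dict_to_frame
data = {('coal', 'period1'): 1, ('coal', 'period2'): 5,
        ('steel', 'period1'): 8, ('steel', 'period2'): 4}
df = convert_dict_to_frame(data, cols=['value'])
print(df.index)   # tuple keys are promoted to a pandas MultiIndex
```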
#### File: sasoptpy/sasoptpy/structure.py
```python
from contextlib import contextmanager
from functools import wraps, partial
import warnings
import sasoptpy
def containable(func=None, standalone=True):
if func is None:
return partial(containable, standalone=standalone)
@wraps(func)
def wrapper(*args, **kwargs):
if not standalone and not sasoptpy.container:
warnings.warn('This function is not intended to be used without any'
' container', UserWarning)
if sasoptpy.container:
try:
statement_func = sasoptpy.statement_dictionary[wrapper]
except KeyError:
raise NotImplementedError(
'Container support for {} is not implemented'.format(
func.__name__))
statements = statement_func(*args, **kwargs)
if isinstance(statements, list):
for st in statements:
append_to_container(st)
else:
append_to_container(statements)
return statements
else:
return func(*args, **kwargs)
return wrapper
def class_containable(func):
def class_append(*args, **kwargs):
is_internal = kwargs.pop('internal', None)
func(*args, **kwargs)
if sasoptpy.container and is_internal is None:
sasoptpy.container.append(args[0])
return class_append
def append_to_container(statement):
if hasattr(statement, 'is_internal') and statement.is_internal():
pass
else:
sasoptpy.container.append(statement)
@contextmanager
def under_condition(c):
if type(c) == bool:
yield c
return True
original = sasoptpy.conditions
if original is None:
sasoptpy.conditions = []
sasoptpy.conditions = sasoptpy.conditions + [c]
yield
sasoptpy.conditions = original
def inline_condition(c):
if sasoptpy.container_conditions:
sasoptpy.container.sym.add_condition(c)
return True
return False
@contextmanager
def set_container(s, conditions=False):
original = sasoptpy.container
sasoptpy.container = s
cond_original = None
if conditions:
cond_original = sasoptpy.container_conditions
sasoptpy.container_conditions = True
yield
sasoptpy.container = original
if conditions:
sasoptpy.container_conditions = cond_original
```
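The decorators above route calls into an active container instead of executing them immediately. The following standalone sketch illustrates the same pattern generically; it is not the sasoptpy implementation, and all names in it are made up for illustration.
```python
# Generic sketch of the "containable" pattern (not sasoptpy code): while a
# container is active, decorated calls are recorded instead of executed.
from contextlib import contextmanager
from functools import wraps
_container = None
def containable(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        if _container is not None:
            _container.append((func.__name__, args, kwargs))
            return None
        return func(*args, **kwargs)
    return wrapper
@contextmanager
def set_container(statements):
    global _container
    original, _container = _container, statements
    try:
        yield statements
    finally:
        _container = original
@containable
def fix(variable, value):
    return 'fix {}={};'.format(variable, value)
recorded = []
with set_container(recorded):
    fix('x[0]', 0)          # recorded instead of executed
print(recorded)             # [('fix', ('x[0]', 0), {})]
print(fix('x[1]', 3))       # executed immediately: fix x[1]=3;
```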
#### File: abstract/statement/test_assignment.py
```python
import os
import sys
import unittest
import warnings
import sasoptpy as so
from inspect import cleandoc
from sasoptpy.actions import read_data
from tests.swat_config import create_cas_connection
class TestAssignment(unittest.TestCase):
"""
Unit tests for assignment statements
"""
@classmethod
def setUpClass(cls):
so.reset()
cls.conn = None
from swat import CAS, SWATError
try:
cls.conn = create_cas_connection()
except SWATError:
warnings.warn('CAS connection is not available', RuntimeWarning)
except TypeError:
warnings.warn('CAS variables are not available', RuntimeWarning)
@classmethod
def tearDownClass(cls):
if cls.conn is not None:
cls.conn.close()
def test_bound_assignment(self):
with so.Workspace('test_regular_assignment') as w:
p = so.Parameter(name='p', init=2)
x = so.VariableGroup(5, lb=1, name='x')
x[0].set_bounds(lb=3)
x[1].set_bounds(lb=p)
p.set_value(4)
x[2].set_bounds(lb=p, ub=p)
x[3].set_bounds(ub=5)
x[4].set_bounds(lb=1, ub=4)
self.assertEqual(so.to_optmodel(w), cleandoc("""
proc optmodel;
num p init 2;
var x {{0,1,2,3,4}} >= 1;
x[0].lb = 3;
x[1].lb = p;
p = 4;
fix x[2]=p;
x[3].ub = 5;
x[4].lb = 1;
x[4].ub = 4;
quit;
"""))
def test_assignment_append(self):
x = so.Variable(name='x')
r = so.abstract.Assignment(x, 5)
self.assertEqual(so.to_definition(r), 'x = 5;')
y = so.Variable(name='y')
r.append(identifier=y)
self.assertEqual(so.to_definition(r), 'y = 5;')
r.append(expression=10)
self.assertEqual(so.to_definition(r), 'y = 10;')
r.append(keyword='fix')
self.assertEqual(so.to_definition(r), 'fix y = 10;')
def test_fix_value(self):
from sasoptpy.actions import fix
with so.Workspace('w') as w:
e = so.Parameter(name='e', value=4)
x = so.VariableGroup(5, name='x')
fix(x[0], 0)
fix(x[1], e*2)
self.assertEqual(so.to_optmodel(w), cleandoc('''
proc optmodel;
num e = 4;
var x {{0,1,2,3,4}};
fix x[0]=0;
fix x[1]=2 * e;
quit;'''))
```
#### File: abstract/statement/test_cofor_loop.py
```python
import os
import sys
import unittest
import sasoptpy as so
from inspect import cleandoc
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(current_dir, '../..')))
from util import assert_equal_wo_temps
from sasoptpy.actions import for_loop, cofor_loop, fix, solve, put_item
class TestCoforLoop(unittest.TestCase):
"""
Unit tests for concurrent for (COFOR) statements
"""
def setUp(self):
so.reset()
def test_simple_example(self):
with so.Workspace('w') as w:
x = so.VariableGroup(6, name='x', lb=0)
so.Objective(
so.expr_sum(x[i] for i in range(6)), name='z', sense=so.MIN)
a1 = so.Constraint(x[1] + x[2] + x[3] <= 4, name='a1')
for i in cofor_loop(so.exp_range(3, 6)):
fix(x[1], i)
solve()
put_item(i, x[1], so.Symbol('_solution_status_'), names=True)
assert_equal_wo_temps(self, so.to_optmodel(w), cleandoc('''
proc optmodel;
var x {{0,1,2,3,4,5}} >= 0;
min z = x[0] + x[1] + x[2] + x[3] + x[4] + x[5];
con a1 : x[1] + x[2] + x[3] <= 4;
cofor {TEMP1 in 3..5} do;
fix x[1]=TEMP1;
solve;
put TEMP1= x[1]= _solution_status_=;
end;
quit;'''))
```
#### File: tests/abstract/test_set.py
```python
import unittest
import sasoptpy as so
from inspect import cleandoc
import os
import sys
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(current_dir, '..')))
from util import assert_equal_wo_temps
class TestSet(unittest.TestCase):
"""
Unit tests for :class:`sasoptpy.abstract.Set` objects
"""
def setUp(self):
so.reset()
def test_initialization(self):
with so.Workspace('w') as w:
S = so.Set(name='S')
T = so.Set(name='T', init=range(11))
V = so.Set(name='V', value=[1, 2, 4])
W = so.Set(name='W', settype=[so.STR, so.NUM])
self.assertEqual(so.to_optmodel(w), cleandoc('''
proc optmodel;
set S;
set T init 0..10;
set V = {1,2,4};
set <str, num> W;
quit;'''))
self.assertEqual(repr(S),
'sasoptpy.abstract.Set(name=S, settype=[\'num\'])')
self.assertEqual(
repr(T),
'sasoptpy.abstract.Set(name=T, settype=[\'num\'], init=range(0, 11))')
self.assertEqual(
repr(V),
'sasoptpy.abstract.Set(name=V, settype=[\'num\'], value=[1, 2, 4])'
)
self.assertEqual(
repr(W),
'sasoptpy.abstract.Set(name=W, settype=[\'str\', \'num\'])'
)
def test_in_set(self):
e = so.Parameter(name='e', value=1)
S = so.Set(name='S')
if e.sym in S:
self.assertEqual(e.sym.get_conditions_str(), 'e IN S')
x = so.Variable(name='x')
def incorrect_type():
if x in S:
print('x is in S')
self.assertRaises(RuntimeError, incorrect_type)
def test_inline_set(self):
from sasoptpy.actions import inline_condition, for_loop
from sasoptpy.abstract.math import mod
with so.Workspace('w') as w:
p = so.Parameter(name='p')
S = so.Set(name='S', value=so.exp_range(1, 11))
iset = so.InlineSet(lambda: (x for x in S
if inline_condition(mod(x, 3) == 0)))
for i in for_loop(iset):
p.set_value(i)
assert_equal_wo_temps(self, so.to_optmodel(w), cleandoc('''
proc optmodel;
num p;
set S = 1..10;
for {TEMP2 in {TEMP1 in S: mod(TEMP1 , 3) = 0}} do;
p = TEMP2;
end;
quit;'''))
assert_equal_wo_temps(self, repr(iset),
'sasoptpy.InlineSet({o4 in S: mod(o4 , 3) = 0}}')
```
#### File: tests/core/test_constraint.py
```python
import unittest
import sasoptpy as so
class TestConstraint(unittest.TestCase):
"""
Unit tests for :class:`sasoptpy.Constraint` objects
"""
def setUp(self):
pass
def test_constructor(self):
x = so.Variable(name='x')
c2 = 3 * x + x ** 2 >= 10
c3 = so.Constraint(exp=c2, name='c3')
self.assertEqual(str(c3), "3 * x + (x) ** (2) >= 10")
def no_direction():
c4 = so.Constraint(2 * x, name='c4')
self.assertRaises(AttributeError, no_direction)
c5 = so.Constraint(c3, name='c5')
self.assertEqual(str(c5), "3 * x + (x) ** (2) >= 10")
def test_update_var_coef(self):
x = so.Variable(name='x')
y = so.Variable(name='y')
c1 = so.Constraint(2 * x + 3 * y <= 20, name='c1')
c1.update_var_coef(x, 5)
self.assertEqual(str(c1), '5 * x + 3 * y <= 20')
z = so.Variable(name='z')
c1.update_var_coef(z, 10)
self.assertEqual(str(c1), '5 * x + 3 * y + 10 * z <= 20')
def test_change_direction(self):
x = so.Variable(name='x')
y = so.Variable(name='y')
c1 = so.Constraint(2 * x + 3 * y <= 20, name='c1')
c1.set_direction('G')
self.assertEqual(str(c1), '2 * x + 3 * y >= 20')
def unknown_direction():
c1.set_direction('X')
self.assertRaises(ValueError, unknown_direction)
def test_get_value(self):
x = so.Variable(name='x')
y = so.Variable(name='y')
c1 = so.Constraint(2 * x + 3 * y <= 20, name='c1')
x.set_value(4)
y.set_value(3)
self.assertEqual(c1.get_value(), 17)
self.assertEqual(c1.get_value(rhs=True), -3)
def test_definition(self):
x = so.Variable(name='x')
c1 = so.Constraint(3 * x == [5, 10], name='c1')
self.assertEqual(c1._defn(), 'con c1 : 5 <= 3 * x <= 10;')
c2 = so.Constraint(5 * x == 15, name='c2')
self.assertEqual(c2._defn(), 'con c2 : 5 * x = 15;')
c3 = so.Constraint(10 * x >= 5, name='c3')
self.assertEqual(c3._defn(), 'con c3 : 10 * x >= 5;')
def test_str(self):
x = so.Variable(name='x')
c1 = 2 * x <= 5
self.assertEqual(str(c1), "2 * x <= 5")
c2 = 2 * x >= 5
self.assertEqual(str(c2), "2 * x >= 5")
c3 = 2 * x == 5
self.assertEqual(str(c3), "2 * x == 5")
c4 = 2 * x == [5, 10]
self.assertEqual(str(c4), "2 * x == [5, 10]")
def unknown_direction():
c4._direction = 'X'
c4s = str(c4)
self.assertRaises(ValueError, unknown_direction)
def test_repr(self):
x = so.Variable(name='x')
c1 = so.Constraint(2 * x <= 5, name='c1')
self.assertEqual(repr(c1), "sasoptpy.Constraint(2 * x <= 5, name='c1')")
c2 = 2 * x <= 5
self.assertEqual(repr(c2), "sasoptpy.Constraint(2 * x <= 5, name=None)")
def tearDown(self):
so.reset()
```
#### File: tests/core/test_expression.py
```python
import unittest
import sasoptpy as so
class TestExpression(unittest.TestCase):
"""
Unit tests for :class:`sasoptpy.Expression` objects
"""
def setUp(self):
pass
def test_constructor(self):
def unknown_expression_type():
e = so.Expression(exp='abc')
self.assertRaises(TypeError, unknown_expression_type)
def test_get_value(self):
import sasoptpy.abstract.math as sm
x = so.Variable(name='x')
x.set_value(7)
e = sm.sin(2)**sm.sqrt(9) + sm.min(x, 5, 10)
v = e.get_value()
self.assertAlmostEqual(v, 5.751826, places=5)
def test_dual(self):
x = so.Variable(name='x')
x._dual = 0
self.assertEqual(x.get_dual(), 0)
def test_rename(self):
x = so.Variable(name='x')
y = so.Variable(name='y')
e = so.Expression(x + 2*x*y, name='nonlinear_exp')
new_name = e.set_name('new_name')
self.assertEqual(new_name, 'new_name')
f = so.Expression(2*x + y)
new_name = f.set_name('f')
self.assertIsNotNone(new_name)
new_name = e.set_name('e')
self.assertEqual(new_name, 'e')
get_name = e.get_name()
self.assertEqual(get_name, 'e')
def test_expr_string(self):
import sasoptpy.abstract.math as sm
x = so.Variable(name='x')
y = so.Variable(name='y')
        e = sm.abs(x) + 2 * y + sm.min(x, y)
e_exp = e._expr()
self.assertEqual(e_exp, 'abs(x) + 2 * y + min(x , y)')
setI = so.abstract.Set(name='I')
z = so.VariableGroup(setI, name='z')
g = so.expr_sum(z[i] for i in setI) + 5
g_exp = g._expr()
self.assertEqual(g_exp, 'sum {i in I} (z[i]) + 5')
def test_repr(self):
x = so.Variable(name='x')
e = x ** 2 + 3 * x - 5
exp_repr = repr(e)
self.assertEqual(exp_repr, 'sasoptpy.Expression(exp = (x) ** (2) + 3 * x - 5, name=None)')
e.set_permanent()
e.set_name('e')
exp_repr = repr(e)
self.assertEqual(exp_repr, 'sasoptpy.Expression(exp = (x) ** (2) + 3 * x - 5, name=\'e\')')
def test_string(self):
x = so.Variable(name='x')
e = 2 * x + 3 ** x + 10
self.assertEqual(str(e), '2 * x + (3) ** (x) + 10')
import sasoptpy.abstract.math as sm
e = sm.abs(x) + sm.min(x, 2, sm.sqrt(x))
self.assertEqual(str(e), 'abs(x) + min(x , 2, sqrt(x))')
from sasoptpy.abstract import Set
setI = Set(name='setI')
y = so.VariableGroup(setI, name='y')
e = - so.expr_sum(y[i] * i for i in setI)
self.assertEqual(str(e), '- (sum(y[i] * i for i in setI))')
e = 2 * x ** y[0]
self.assertEqual(str(e), '2 * ((x) ** (y[0]))')
def test_addition(self):
x = so.Variable(name='x')
y = so.Variable(name='y')
exp1 = so.Expression(x+y, name='exp1')
exp2 = 2 * x - y
exp3 = exp1 + exp2
self.assertEqual(str(exp3), '3 * x')
import sasoptpy.abstract.math as sm
exp4 = sm.min(x, y, sm.sqrt(x * y)) + 4
self.assertEqual(str(exp4), 'min(x , y, sqrt(x * y)) + 4')
def unknown_addition_type():
exp5 = x + y + [3, 4]
self.assertRaises(TypeError, unknown_addition_type)
def test_multiplication(self):
K = [1, 2, 'a']
L = so.abstract.Set(name='L')
x = so.Variable(name='x')
y = so.Variable(name='y')
z = so.VariableGroup(K, name='z')
u = so.VariableGroup(L, name='u')
exp1 = (x + y + 5) * (2 * x - 4 * y - 10)
self.assertEqual(str(exp1), '2 * x * x - 2 * x * y - 4 * y * y - 30 * y - 50')
exp2 = (x + y + 5) * 0 + x
self.assertEqual(str(exp2), 'x')
def unknown_multiplication_type():
exp3 = x * ['a']
self.assertRaises(TypeError, unknown_multiplication_type)
def test_division(self):
x = so.Variable(name='x')
y = so.Variable(name='y')
exp1 = x/2
self.assertEqual(str(exp1), '0.5 * x')
exp2 = x/y
self.assertEqual(str(exp2), '(x) / (y)')
import sasoptpy.abstract.math as sm
exp3 = x**2 / (3*y) + 0 / sm.abs(x)
self.assertEqual(str(exp3), '((x) ** (2)) / (3 * y) + (0) / (abs(x))')
def division_by_zero():
exp4 = x / 0
self.assertWarns(RuntimeWarning, division_by_zero)
def unknown_division_type():
exp5 = x / 'a'
self.assertRaises(TypeError, unknown_division_type)
def test_power(self):
x = so.Variable(name='x')
e1 = x ** 2 + x ** 3
self.assertEqual(so.to_expression(e1),
'(x) ^ (2) + (x) ^ (3)')
def test_truediv(self):
x = so.Variable(name='x')
e1 = x / (2 + x) + x / (3 + x)
self.assertEqual(so.to_expression(e1),
'(x) / (x + 2) + (x) / (x + 3)')
def test_is_linear(self):
x = so.Variable(name='x')
y = so.Variable(name='y')
exp1 = x
self.assertTrue(exp1._is_linear())
exp2 = 2 * x + 3 * y
self.assertTrue(exp2._is_linear())
exp3 = 5 * x * y
self.assertFalse(exp3._is_linear())
import sasoptpy.abstract.math as sm
exp4 = sm.max(x, y) + 5
self.assertFalse(exp4._is_linear())
exp5 = x ** 2
self.assertFalse(exp5._is_linear())
def test_relational(self):
x = so.Variable(name='x')
e = 2 * x + 5
f = e <= 2
self.assertTrue(type(f) == so.Constraint)
f = e >= 0
self.assertTrue(type(f) == so.Constraint)
f = e == 5
self.assertTrue(type(f) == so.Constraint)
f = 2 >= e
self.assertTrue(type(f) == so.Constraint)
f = 0 <= e
self.assertTrue(type(f) == so.Constraint)
def test_get_constant(self):
x = so.Variable(name='x')
e = 2 * x
self.assertEqual(e.get_constant(), 0)
e = 2 * x + 5
self.assertEqual(e.get_constant(), 5)
def tearDown(self):
so.reset()
```
#### File: tests/core/test_util.py
```python
from difflib import SequenceMatcher
import inspect
import unittest
import sasoptpy as so
from sasoptpy.core.util import *
def match_ratio(a, b):
seq = SequenceMatcher(None, a, b)
return seq.ratio()
class TestUtil(unittest.TestCase):
"""
Unit tests for core utility functions
"""
@classmethod
def setUpClass(cls):
cls.x = x = so.Variable(name='x')
cls.e = so.Expression(2 * x, name='e')
cls.c = so.Constraint(4 * x**2 <= 10, name='c')
cls.I = I = so.abstract.Set(name='I')
cls.a = so.VariableGroup(I, name='a')
cls.S1 = S1 = so.abstract.Set(name='S1')
cls.S2 = S2 = so.abstract.Set(name='S2', settype='str')
cls.S3 = S3 = so.abstract.Set(name='S3')
cls.p = so.abstract.ParameterGroup(S1, name='p')
cls.y = so.VariableGroup(S1, S2, S3, name='y')
def test_expression(self):
self.assertTrue(is_expression(TestUtil.e))
self.assertTrue(is_expression(TestUtil.x))
self.assertTrue(is_expression(TestUtil.c))
def test_variable(self):
self.assertFalse(is_variable(TestUtil.e))
self.assertTrue(is_variable(TestUtil.x))
self.assertFalse(is_variable(TestUtil.c))
def test_constraint(self):
self.assertTrue(is_constraint(TestUtil.c))
self.assertFalse(is_constraint(TestUtil.e))
def test_abstract(self):
self.assertTrue(is_abstract(TestUtil.a))
self.assertFalse(is_abstract(TestUtil.x))
    def test_iterator_expression(self):
so.reset()
S1 = TestUtil.S1
S2 = TestUtil.S2
S3 = TestUtil.S3
p = TestUtil.p
y = TestUtil.y
c = so.ConstraintGroup((y[i, 'a', 1] + p[i] <= 5 for i in S1),
name='it_exp')
self.assertEqual(so.to_definition(c),
"con it_exp {{o{i} in S1}} : "
"y[o{i}, 'a', 1] + p[o{i}] <= 5;".format(i=1))
def test_safe_iterator_expression(self):
S1 = TestUtil.S1
S2 = ['a', 'b c', 'd']
S3 = [1, 2, 3]
y = TestUtil.y
        c = so.ConstraintGroup((y[i, j, k] <= 5 for i in S1 for j in S2 for k in S3),
name='safe_it_exp')
self.assertEqual(so.to_definition(c), inspect.cleandoc(
"""
con safe_it_exp_a_1 {o1 in S1} : y[o1, 'a', 1] <= 5;
con safe_it_exp_a_2 {o1 in S1} : y[o1, 'a', 2] <= 5;
con safe_it_exp_a_3 {o1 in S1} : y[o1, 'a', 3] <= 5;
con safe_it_exp_b_c_1 {o1 in S1} : y[o1, 'b c', 1] <= 5;
con safe_it_exp_b_c_2 {o1 in S1} : y[o1, 'b c', 2] <= 5;
con safe_it_exp_b_c_3 {o1 in S1} : y[o1, 'b c', 3] <= 5;
con safe_it_exp_d_1 {o1 in S1} : y[o1, 'd', 1] <= 5;
con safe_it_exp_d_2 {o1 in S1} : y[o1, 'd', 2] <= 5;
con safe_it_exp_d_3 {o1 in S1} : y[o1, 'd', 3] <= 5;
"""
))
def test_evaluate(self):
x = TestUtil.x
import sasoptpy.abstract.math as sm
import math
e1 = x * x + x - 5
e2 = 2 * x ** 2 - 10
e3 = 2 / x + 5 + sm.sin(x)
x.set_value(3)
self.assertEqual(e1.get_value(), 7)
self.assertEqual(e2.get_value(), 8)
self.assertEqual(e3.get_value(), 17/3 + math.sin(3))
x.set_value(4)
self.assertEqual(e1.get_value(), 15)
self.assertEqual(e2.get_value(), 22)
self.assertEqual(e3.get_value(), 5.5 + math.sin(4))
def division_by_zero():
x.set_value(0)
e3.get_value()
self.assertRaises(ZeroDivisionError, division_by_zero)
x.set_value(2)
e4 = x ** (sm.sin(x)) - 1
self.assertEqual(e4.get_value(), 2 ** math.sin(2) - 1)
def test_expression_to_constraint(self):
x = TestUtil.x
e1 = 4 * x - 10
c = so.core.util.expression_to_constraint(e1, 'E', [2, 10])
self.assertTrue(so.to_definition(c), "con None : 12 <= 4 * x <= 20;")
c = so.core.util.expression_to_constraint(x, 'G', 10)
self.assertEqual(so.to_definition(c), "con None : x >= 10;")
c = so.core.util.expression_to_constraint(2 * x, 'L', 3 * x)
self.assertEqual(so.to_definition(c), "con None : - x <= 0;")
c = so.core.util.expression_to_constraint(x, 'G', 0)
self.assertEqual(so.to_definition(c), "con None : x >= 0;")
import sasoptpy.abstract.math as sm
c = so.core.util.expression_to_constraint(sm.sin(x), 'L', 1)
self.assertEqual(so.to_definition(c), "con None : sin(x) <= 1;")
y = TestUtil.y
S1 = TestUtil.S1
e1 = so.expr_sum(y[i, 'a', 1] for i in S1)
c = so.core.util.expression_to_constraint(e1, 'G', 5)
self.assertEqual(so.to_definition(c), "con None : sum {i in S1} "
"(y[i, 'a', 1]) >= 5;")
def tearDown(self):
so.reset()
@classmethod
def tearDownClass(self):
so.reset()
```
#### File: tests/interface/test_optmodel_format.py
```python
from inspect import cleandoc
import os
import unittest
import warnings
from swat import CAS, SWATError
import sasoptpy as so
from tests.swat_config import create_cas_connection
class TestOPTMODEL(unittest.TestCase):
"""
Unit tests for the CAS interface
"""
@classmethod
def setUpClass(cls):
so.reset()
cls.conn = None
try:
cls.conn = create_cas_connection()
except SWATError:
warnings.warn('CAS connection is not available', RuntimeWarning)
except TypeError:
warnings.warn('CAS variables are not available', RuntimeWarning)
@classmethod
def tearDownClass(cls):
if cls.conn is not None:
cls.conn.close()
def test_variable_group_assignments(self):
from sasoptpy.actions import read_data
if TestOPTMODEL.conn is None:
self.skipTest('CAS Session is not available')
import pandas as pd
df = pd.DataFrame([
['a', 'b', 1],
['c', 'd,e', 2],
['f,g', 'g,h,i', 3]
], columns=['k1', 'k2', 'v'])
m = so.Model(name='m', session=TestOPTMODEL.conn)
setK1 = df['k1'].tolist()
setK2 = df['k2'].tolist()
x = m.add_variables(setK1, setK2, name='x')
m.add_constraints((x[i, j] >= 1 for i in setK1 for j in setK2), name='c')
m.set_objective(so.expr_sum(x[i, j] for i in setK1 for j in setK2), name='obj', sense=so.minimize)
m.solve(verbose=True)
self.assertEqual(str(m.get_solution()), cleandoc('''
Selected Rows from Table SOLUTION
i var value lb ub rc
0 1.0 x[a,b] 1.0 -1.797693e+308 1.797693e+308 0.0
1 2.0 x[a,'d,e'] 1.0 -1.797693e+308 1.797693e+308 0.0
2 3.0 x[a,'g,h,i'] 1.0 -1.797693e+308 1.797693e+308 0.0
3 4.0 x[c,b] 1.0 -1.797693e+308 1.797693e+308 0.0
4 5.0 x[c,'d,e'] 1.0 -1.797693e+308 1.797693e+308 0.0
5 6.0 x[c,'g,h,i'] 1.0 -1.797693e+308 1.797693e+308 0.0
6 7.0 x['f,g',b] 1.0 -1.797693e+308 1.797693e+308 0.0
7 8.0 x['f,g','d,e'] 1.0 -1.797693e+308 1.797693e+308 0.0
8 9.0 x['f,g','g,h,i'] 1.0 -1.797693e+308 1.797693e+308 0.0'''))
so.config['generic_naming'] = True
self.assertEqual(m.to_optmodel(), cleandoc('''
proc optmodel;
var x {{'a','c','f,g'}, {'b','d,e','g,h,i'}};
x['a', 'b'] = 1.0;
x['a', 'd,e'] = 1.0;
x['a', 'g,h,i'] = 1.0;
x['c', 'b'] = 1.0;
x['c', 'd,e'] = 1.0;
x['c', 'g,h,i'] = 1.0;
x['f,g', 'b'] = 1.0;
x['f,g', 'd,e'] = 1.0;
x['f,g', 'g,h,i'] = 1.0;
con c_0 : x['a', 'b'] >= 1;
con c_1 : x['a', 'd,e'] >= 1;
con c_2 : x['a', 'g,h,i'] >= 1;
con c_3 : x['c', 'b'] >= 1;
con c_4 : x['c', 'd,e'] >= 1;
con c_5 : x['c', 'g,h,i'] >= 1;
con c_6 : x['f,g', 'b'] >= 1;
con c_7 : x['f,g', 'd,e'] >= 1;
con c_8 : x['f,g', 'g,h,i'] >= 1;
min obj = x['a', 'b'] + x['a', 'd,e'] + x['a', 'g,h,i'] + x['c', 'b'] + x['c', 'd,e'] + x['c', 'g,h,i'] + x['f,g', 'b'] + x['f,g', 'd,e'] + x['f,g', 'g,h,i'];
solve;
quit;'''))
```
#### File: tests/utils/test_package_utils.py
```python
import unittest
from inspect import cleandoc
import sasoptpy as so
class TestPackageUtils(unittest.TestCase):
def setUp(self):
so.reset()
def test_extract_arguments(self):
x = so.VariableGroup(['a', 'b', 'c'], [1, 2, 3], name='x')
def1 = so.to_definition(x)
y = so.VariableGroup(('a', 'b', 'c'), (1, 2, 3), name='x')
def2 = so.to_definition(y)
self.assertEqual(def1, def2)
def test_extract_list_value(self):
m = so.Model(name='test_extract_list_vals')
S = ['a', 'b', 'c']
lb_values = {'a': 1, 'b': 0, 'c': 2}
ub_values = {'a': 5, 'b': 10}
init_values = {'b': 2, 'c': 3}
x = m.add_variables(S, name='x', ub=ub_values, lb=lb_values,
init=init_values)
self.assertEqual(so.to_optmodel(m), cleandoc('''
proc optmodel;
min test_extract_list_vals_obj = 0;
var x {{'a','b','c'}};
x['a'].lb = 1;
x['a'].ub = 5;
x['b'] = 2;
x['b'].lb = 0;
x['b'].ub = 10;
x['c'] = 3;
x['c'].lb = 2;
solve;
quit;
'''))
def produce_error():
from collections import OrderedDict
ind = ['a', 'b', 'c']
y_lb = set([0, 1, 2])
y = m.add_variables(ind, name='y', lb=y_lb)
self.assertRaises(ValueError, produce_error)
def test_deprecation(self):
def call_tuple_unpack():
so.util.tuple_unpack((1,2,))
self.assertWarns(DeprecationWarning, call_tuple_unpack)
def call_tuple_pack():
so.util.tuple_pack(1)
self.assertWarns(DeprecationWarning, call_tuple_pack)
def call_list_pack():
so.util.list_pack((1,2,3))
self.assertWarns(DeprecationWarning, call_list_pack)
def call_wrap():
so.util.wrap(5)
self.assertWarns(DeprecationWarning, call_wrap)
def test_sum_wrap(self):
x = so.Variable(name='x')
e = so.expr_sum(x for _ in range(3))
self.assertEqual(so.to_expression(e), '3 * x')
def test_sum_wrap_abstract(self):
I = so.Set(name='I')
x = so.Variable(name='x')
e = so.expr_sum(x for i in I)
self.assertEqual(so.to_expression(e), 'sum {i in I} (x)')
def test_comparable(self):
self.assertTrue(so.util.is_comparable(4))
self.assertFalse(so.util.is_comparable(dict()))
self.assertTrue(so.util.is_comparable('abc'))
def test_flatten_tuple(self):
tp = (3, 4, (5, (1, 0), 2))
self.assertEqual(list(so.util.flatten_tuple(tp)),
[3, 4, 5, 1, 0, 2])
def test_sas_string(self):
S = so.exp_range(1, 11, 2)
self.assertEqual(so.util.package_utils._to_sas_string(S), '1..10 by 2')
def invalid_type():
from collections import OrderedDict
so.util.package_utils._to_sas_string(OrderedDict(a=4))
self.assertRaises(TypeError, invalid_type)
```
#### File: tests/utils/test_user_utils.py
```python
from inspect import cleandoc
import unittest
import pandas as pd
import sasoptpy as so
from sasoptpy import to_expression as exp
import sys
import os
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(current_dir, '..')))
from util import assert_equal_wo_temps
class TestUserUtils(unittest.TestCase):
def setUp(self):
so.reset()
def test_concat(self):
x = so.Variable(name='x')
e1 = 'col'
S = so.Set(name='S')
e2 = so.SetIterator(S, name='i')
r = so.util.concat(e1, e2)
self.assertEqual(exp(r), '\'col\' || i')
def test_exp_range(self):
self.assertEqual(so.exp_range(1,5), range(1,5))
t = so.exp_range(1, so.N)
self.assertEqual(exp(t), '1.._N_')
def test_get_value_table(self):
self.assertEqual(so.get_value_table(), None)
S = ['a', 'b', 'c']
x = so.VariableGroup(S, name='x')
x['a'].set_value(1)
x['b'].set_value(3)
x['c'].set_value(10)
self.assertEqual(so.get_value_table(x).to_string(),
cleandoc('''
x
a 1
b 3
c 10
'''))
c = so.ConstraintGroup((2 * x[i] >= 1 for i in S), name='c')
self.assertEqual(so.get_value_table(c).to_string(), cleandoc(
'''
c
a 2
b 6
c 20
'''))
df = pd.DataFrame([['a', 1, 2], ['b', 3, 4]],
columns=['var', 'lb', 'ub']).set_index(['var'])
self.assertEqual(so.get_value_table(df).to_string(), cleandoc(
'''
lb ub
var
a 1 2
b 3 4
'''))
self.assertEqual(so.get_value_table(x, c).to_string(), cleandoc(
'''
x c
a 1 2
b 3 6
c 10 20
'''
))
self.assertEqual(so.get_value_table(df, df).to_string(), cleandoc(
'''
lb ub lb ub
var
a 1 2 1 2
b 3 4 3 4
'''
))
T = so.Set(name='T')
y = so.VariableGroup(T, name='y')
y[0].set_value(10)
y[1].set_value(5)
self.assertEqual(so.get_value_table(y).to_string(), cleandoc(
'''
y
0 10
1 5
'''))
z = so.ImplicitVar((2 * x[i] - 5 for i in S), name='z')
self.assertEqual(so.get_value_table(z).to_string(), cleandoc(
'''
z
a -3
b 1
c 15
'''
))
e = so.Expression(x['a'] + 5 - y[0], name='e')
self.assertEqual(so.get_value_table(e).to_string(), cleandoc(
'''
e
- -4
'''
))
def test_submit(self):
w = so.Workspace('w')
def get_error_submit():
w.submit()
self.assertRaises(RuntimeError, get_error_submit)
m = so.Model(name='m')
def get_error_solve():
m.solve()
self.assertRaises(RuntimeError, get_error_solve)
def test_expr_sum(self):
S = ['a', 'b', 'c']
x = so.VariableGroup(S, name='x')
e = so.expr_sum(x[i] for i in S)
T = so.Set(name='T')
U = so.Set(name='U')
y = so.VariableGroup(T, U, name='y')
c = so.ConstraintGroup(
(so.expr_sum(y[i, j] for i in T) <= 5 for j in U),
name='c')
assert_equal_wo_temps(
self, so.to_definition(c),
'con c {o10 in U} : sum {i in T} (y[i, o10]) <= 5;')
e = so.expr_sum(y[i, 5] for i in T)
assert_equal_wo_temps(self, exp(e), 'sum {i in T} (y[i, 5])')
def test_reset_globals(self):
def warn_reset_globals():
so.reset_globals()
self.assertWarns(DeprecationWarning, warn_reset_globals)
def test_reset(self):
x = so.Variable(name='x')
self.assertGreater(so.itemid, 0)
so.reset()
self.assertEqual(so.itemid, 0)
def test_dict_to_frame_and_flatten(self):
d = {'coal': {'period1': 1, 'period2': 5, 'period3': 7},
'steel': {'period1': 8, 'period2': 4, 'period3': 3},
'copper': {'period1': 5, 'period2': 7, 'period3': 9}}
df = so.dict_to_frame(d)
self.assertEqual(df.to_string(), cleandoc(
'''
period1 period2 period3
coal 1 5 7
steel 8 4 3
copper 5 7 9
'''))
ff = so.flatten_frame(df)
with pd.option_context('display.multi_sparse', False):
self.assertEqual(ff.to_string(), cleandoc(
'''
coal period1 1
coal period2 5
coal period3 7
steel period1 8
steel period2 4
steel period3 3
copper period1 5
copper period2 7
copper period3 9
'''))
ffs = so.flatten_frame(df, swap=True)
self.assertEqual(ffs.to_string(), cleandoc(
'''
period1 coal 1
period2 coal 5
period3 coal 7
period1 steel 8
period2 steel 4
period3 steel 3
period1 copper 5
period2 copper 7
period3 copper 9
'''))
``` |
{
"source": "jld23/saspy",
"score": 2
} |
#### File: saspy/saspy/__init__.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from saspy.version import __version__
from saspy.sasbase import SASsession, SASconfig, list_configs
from saspy.sasdata import SASdata
from saspy.sasexceptions import SASIONotSupportedError, SASConfigNotFoundError, SASConfigNotValidError
from saspy.sasproccommons import SASProcCommons
from saspy.sastabulate import Tabulate
from saspy.sasresults import SASresults
import os, sys
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
logger.propagate=False
def isnotebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
if isnotebook():
from saspy.sas_magic import SASMagic
get_ipython().register_magics(SASMagic)
def _find_cfg():
sp = []
sp[:] = sys.path
sp[0] = os.path.abspath(sp[0])
sp.insert(1, os.path.expanduser('~/.config/saspy'))
sp.insert(0, __file__.rsplit(os.sep+'__init__.py')[0])
cfg = 'Not found'
for dir in sp:
f1 = dir+os.sep+'sascfg_personal.py'
if os.path.isfile(f1):
cfg = f1
break
if cfg == 'Not found':
f1 =__file__.rsplit('__init__.py')[0]+'sascfg.py'
if os.path.isfile(f1):
cfg = f1
return cfg
SAScfg = _find_cfg()
``` |
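Hedged follow-up showing how the objects exported here are typically used; a valid `sascfg_personal.py` on the search path walked by `_find_cfg()` is assumed, and the configuration name below is a placeholder.
```python
# Assumes a working saspy configuration; 'oda' is a placeholder cfg name.
import saspy
print(saspy.SAScfg)           # which sascfg file was picked up at import time
print(saspy.list_configs())   # configuration files saspy can locate
sas = saspy.SASsession()      # or, e.g., saspy.SASsession(cfgname='oda')
```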
{
"source": "JLDaniel77/DS",
"score": 3
} |
#### File: DS/code/app.py
```python
import joblib
import numpy as np
from flask import Flask, request, jsonify
from flask_cors import CORS
def create_app():
app = Flask(__name__)
CORS(app)
model = joblib.load('code/nba_model')
@app.route('/api', methods=['POST'])
def predict():
# get the data
data = request.get_json(force=True)
predict_request = [data['all_nba'], data['all_star'], data['draft_yr'], data['pk'],
data['fg_percentage'], data['tp_percentage'], data['ft_percentage'],
data['minutes_per_game'], data['points_per_game'], data['trb_per_game'],
data['assists_per_game'], data['ws_per_game'], data['bpm'], data['vorp'],
data['attend_college']]
array_pred_req = np.array(predict_request).reshape(1, -1)
# predictions
y_pred = model.predict(array_pred_req)
# give it back
output = {"y_pred": int(y_pred[0])}
return jsonify(results=output)
return app
if __name__ == '__main__':
    # create_app() returns the Flask app; build it before running the server
    app = create_app()
    app.run()
``` |
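Hedged client-side example for the `/api` route above; the host, port, and all numeric feature values are illustrative only, while the key names match those read in `predict()`.
```python
# Illustrative client call; start the server first (python code/app.py).
import requests
payload = {
    'all_nba': 0, 'all_star': 0, 'draft_yr': 2017, 'pk': 10,
    'fg_percentage': 0.45, 'tp_percentage': 0.35, 'ft_percentage': 0.80,
    'minutes_per_game': 22.0, 'points_per_game': 9.5, 'trb_per_game': 4.1,
    'assists_per_game': 2.3, 'ws_per_game': 0.05, 'bpm': -1.2, 'vorp': 0.4,
    'attend_college': 1,
}
resp = requests.post('http://127.0.0.1:5000/api', json=payload)
print(resp.json())   # e.g. {'results': {'y_pred': 0}}
```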
{
"source": "JLDaniel77/DS-Unit-3-Sprint-1-Software-Engineering",
"score": 4
} |
#### File: DS-Unit-3-Sprint-1-Software-Engineering/Sprint-Challenge-Files/acme.py
```python
import random
class Product:
"""Class to create a product instance."""
def __init__(self, name=None, price=10, weight=20, flammability=0.5):
self.name = name
self.price = price
self.weight = weight
self.flammability = flammability
self.identifier = random.randint(1000000, 9999999)
def stealability(self):
steal_proba = self.price / self.weight
if steal_proba < 0.5:
return 'Not so stealable...'
elif (steal_proba >= 0.5) & (steal_proba < 1.0):
return 'Kinda stealable.'
else:
return 'Very stealable!'
def explode(self):
explodability = self.flammability * self.weight
if explodability < 10:
return '...fizzle.'
elif (explodability >= 10) & (explodability < 50):
return '...boom!'
else:
return '...BABOOM!!'
class BoxingGlove(Product):
"""Class to create an instance of boxing gloves"""
def __init__(self, name=None, price=10, weight=10, flammability=0.5):
        # pass all arguments through so they take effect on the parent class
        super().__init__(name=name, price=price, weight=weight,
                         flammability=flammability)
def explode(self):
return "...it's a glove."
def punch(self):
if self.weight < 5:
return 'That tickles.'
elif (self.weight >= 5) & (self.weight < 15):
return 'Hey that hurt!'
else:
return 'OUCH!'
``` |
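Quick usage sketch for the classes above, run alongside them; the printed outcomes follow directly from the thresholds in `stealability`, `explode`, and `punch`.
```python
# Quick check of the thresholds defined above.
prod = Product(name='A Cool Toy', price=20, weight=10, flammability=0.5)
print(prod.stealability())   # price/weight = 2.0  -> 'Very stealable!'
print(prod.explode())        # 0.5 * 10 = 5        -> '...fizzle.'
glove = BoxingGlove(name='Punchy the Third')
print(glove.explode())       # always "...it's a glove."
print(glove.punch())         # default weight 10   -> 'Hey that hurt!'
```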
{
"source": "JLDaniel77/Simple-Flask-API",
"score": 3
} |
#### File: Simple-Flask-API/resources/item.py
```python
import traceback
from flask_restful import Resource, reqparse
from flask_jwt import jwt_required
from models.item import ItemModel
# Create item class to create item objects
class Item(Resource):
# Create parser
parser = reqparse.RequestParser()
parser.add_argument('price',
type=float,
required=True,
help="This field cannot be left blank!")
parser.add_argument('store_id',
type=int,
required=True,
help="Every item needs a store id.")
# Use jwt_required decorator to require authentication on get request
@jwt_required()
def get(self, name):
item = ItemModel.find_by_name(name)
if item:
return item.json()
return {'message': 'Item not found'}, 404
# Create post request to make a new item
def post(self, name):
# Check to see if the item already exists
if ItemModel.find_by_name(name):
return {'message': "An item with name '{}' already exists.".format(name)}, 400
# Create data object
data = Item.parser.parse_args()
# Define new item and insert into items table
item = ItemModel(name, **data)
try:
item.save_to_db()
except:
traceback.print_exc()
return {"message": "An error occurred inserting the item"}, 500
return item.json(), 201
# Delete an item
def delete(self, name):
item = ItemModel.find_by_name(name)
if item:
item.delete_from_db()
return {'message': 'Item deleted'}
# Update an item
def put(self, name):
data = Item.parser.parse_args()
item = ItemModel.find_by_name(name)
if item is None:
item = ItemModel(name, **data)
else:
item.price = data['price']
item.store_id = data['store_id']
item.save_to_db()
return item.json()
# Create items list
class ItemList(Resource):
def get(self):
return {'items': [item.json() for item in ItemModel.query.all()]}
``` |
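Hedged wiring sketch showing how these resources are commonly registered with Flask-RESTful; the route strings and app setup are assumptions rather than code from this repository, and the JWT and database configuration the project needs are omitted here.
```python
# Hypothetical app wiring (routes are assumptions; JWT/db setup omitted).
from flask import Flask
from flask_restful import Api
from resources.item import Item, ItemList
app = Flask(__name__)
api = Api(app)
api.add_resource(Item, '/item/<string:name>')
api.add_resource(ItemList, '/items')
if __name__ == '__main__':
    app.run(port=5000, debug=True)
```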
{
"source": "jldantas/leet",
"score": 3
} |
#### File: leet/backends/cb.py
```python
import logging
import datetime
from cbapi.response import CbResponseAPI, Sensor
import cbapi.errors
from ..base import LeetBackend, LeetMachine, LeetSOType, LeetSession, LeetFileAttributes
from ..errors import LeetSessionError, LeetCommandError
_MOD_LOGGER = logging.getLogger(__name__)
class CBMachine(LeetMachine):
"""A LeetMachine implementation for the CB Backend.
Attributes:
sensor (cbapi.response.Sensor): A sensor as seen by the CB API.
can_connect (bool): If the machine is available to be connected.
"""
def __init__(self, hostname, backend_name, sensor):
"""Creates a new CBMachine object.
Args:
hostname (str): The hostname of the machine
backend_name (str): The unique name for the backend
sensor (cbapi.response.Sensor): The sensor object that represents
a machine in CB
"""
super().__init__(hostname, backend_name)
self.sensor = sensor
if self.sensor.os_type == 1:
self.so_type = LeetSOType.WINDOWS
@property
def can_connect(self):
"""If the machine is available to be connected."""
return True if self.sensor.status == "Online" else False
def refresh(self):
"""See base class documentation"""
self.sensor.refresh()
def connect(self):
"""See base class documentation"""
try:
return CBSession(self.sensor.lr_session(), self)
except cbapi.errors.TimeoutError as e:
raise LeetSessionError("Timed out when requesting a session to cbapi") from e
except cbapi.errors.ObjectNotFoundError as e:
raise LeetSessionError("Max limit of sessions opened") from e
#return CBSession(self.sensor.lr_session(), self)
class CBSession(LeetSession):
"""Represents a new session using the CB backend.
    This wraps a live response session into a Leet session, decoupling the
    plugins from the backend. It provides everything required by the base
    class and makes sure that any errors raised are converted to the
    respective Leet errors.
"""
#TODO test what error is raised if session is interrupted in the middle
def __init__(self, lr_session, machine_info):
"""Returns a CBSession object.
Args:
lr_session (cbapi.live_response_api.LiveResponse): A live response
session
machine_info (CBMachine): A machine info object
"""
super().__init__(lr_session, machine_info)
self._mapping_table = {
"list_processes" : self.raw_session.list_processes,
"get_file" : self.raw_session.get_file,
"put_file" : self.raw_session.put_file,
"delete_file" : self.raw_session.delete_file,
"start_process" : self.raw_session.create_process,
"make_dir" : self.raw_session.create_directory,
"dir_list" : self.raw_session.list_directory
}
def start_process(self, cmd_string, cwd=None, background=False):
"""See base class documentation"""
return self._execute("start_process", cmd_string, not background, None, cwd, 600, not background)
def delete_file(self, remote_file_path):
"""See base class documentation"""
self._execute("delete_file", remote_file_path)
def put_file(self, fp, remote_file_path, overwrite=False):
"""See base class documentation"""
if self.exists(remote_file_path) and overwrite:
self._execute("delete_file", remote_file_path)
remote_path = self.path_separator.join(remote_file_path.split(self.path_separator)[:-1])
if not self.exists(remote_path):
self.make_dir(remote_path)
self._execute("put_file", fp, remote_file_path)
def make_dir(self, remote_path, recursive=True):
"""See base class documentation"""
path_parts = remote_path.split(self.path_separator)
#if the last split is empty, it was probably passed with a trailing
#separator
if not path_parts[-1]:
path_parts = path_parts[:-1]
#This skips the root of the path
check = []
necessary_create = False
check.append(path_parts.pop(0))
if recursive:
for i, part in enumerate(path_parts):
check.append(part)
if not self.exists(self.path_separator.join(check)):
#the moment we can't find a path, we need to create everything
#from there forward
necessary_create = True
break
if necessary_create:
check.pop(-1)
for missing_path in path_parts[i:]:
check.append(missing_path)
path = self.path_separator.join(check)
_MOD_LOGGER.debug("Trying to create path '%s' on the remote host", path)
self._execute("make_dir", path)
else:
_MOD_LOGGER.debug("No path need to be created.")
else:
self._execute("make_dir", remote_path)
def exists(self, remote_file_path):
"""See base class documentation"""
if remote_file_path[-1] == self.path_separator:
idx = -2
else:
idx = -1
split_path = remote_file_path.split(self.path_separator)
#passing a root path (c:, d:, /, etc) is a logic error and raises an
#exception
if len(split_path) == 1:
raise LeetCommandError("Can't verify existence of root paths.")
file_name = split_path[idx]
path = self.path_separator.join(split_path[:idx]) + self.path_separator
try:
list_dir = self._execute("dir_list", path)
#list_dir = self.raw_session.list_directory(path)
except LeetCommandError as e:
# except cbapi.live_response_api.LiveResponseError as e:
return False
return bool([a for a in list_dir if a["filename"] == file_name])
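#Example of the splitting logic above with a hypothetical path: for
#remote_file_path = r"C:\Windows\notepad.exe" the method lists "C:\Windows\"
#and returns True only if an entry named "notepad.exe" is found; a bare root
#such as "C:" raises LeetCommandError.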
def get_file(self, remote_file_path):
"""See base class documentation"""
#TODO check if the file exists first?
return self._execute("get_file", remote_file_path)
def _execute(self, *args):
"""See base class documentation"""
#TODO should live response errors be mapped to plugin errors?
_MOD_LOGGER.debug("Executing on session: %s", args)
try:
if len(args) == 1:
return self._mapping_table[args[0]]()
else:
return self._mapping_table[args[0]](*args[1:])
#TODO it can also raise ApiError on 404 to server?
except cbapi.errors.TimeoutError as e:
raise LeetSessionError("Timed out when requesting a session to cbapi") from e
except cbapi.live_response_api.LiveResponseError as e:
raise LeetCommandError(str(e)) from e
#raise LeetPluginError(str(e)) from e
# except KeyError as e:
# raise LeetSessionError("Unknown function.", True) from e
def _parse_file_attributes(self, attributes):
attr = []
attr_list = set(attributes)
if "HIDDEN" in attr_list:
attr.append(LeetFileAttributes.HIDDEN)
if "DIRECTORY" in attr_list:
attr.append(LeetFileAttributes.DIRECTORY)
if "SYSTEM" in attr_list:
attr.append(LeetFileAttributes.SYSTEM)
return attr
def list_dir(self, remote_path):
"""See base class documentation"""
# Sample return of a CB dirlist
# {'last_access_time': 1458169329, 'last_write_time': 1458169329, 'filename': '$Recycle.Bin', 'create_time': 1247541536, 'attributes': ['HIDDEN', 'SYSTEM', 'DIRECTORY'], 'size': 0},
# {'last_access_time': 1515105722, 'last_write_time': 1515105722, 'filename': 'Boot', 'create_time': 1449789900, 'attributes': ['HIDDEN', 'SYSTEM', 'DIRECTORY'], 'size': 0},
# {'last_access_time': 1515105722, 'last_write_time': 1290309831, 'filename': 'bootmgr', 'create_time': 1449789900, 'attributes': ['READONLY', 'HIDDEN', 'SYSTEM', 'ARCHIVE'], 'size': 383786},
# {'last_access_time': 1247548136, 'last_write_time': 1247548136, 'filename': 'Documents and Settings', 'create_time': 1247548136, 'alt_name': 'DOCUME~1', 'attributes': ['HIDDEN', 'SYSTEM', 'DIRECTORY', 'REPARSE_POINT', 'NOT_CONTENT_INDEXED'], 'size': 0}
list_dir = []
cb_list_dir = self._execute("dir_list", remote_path)
if len(cb_list_dir) == 1 and "DIRECTORY" in cb_list_dir[0]["attributes"]:
cb_list_dir = self._execute("dir_list", remote_path + self.path_separator)
for entry in cb_list_dir:
data = {"name": entry["filename"],
"size": entry["size"],
"attributes": self._parse_file_attributes(entry["attributes"]),
"create_time": datetime.datetime.utcfromtimestamp(entry["create_time"]),
"modification_time": datetime.datetime.utcfromtimestamp(entry["last_write_time"]),
}
list_dir.append(data)
return list_dir
def list_processes(self):
"""See base class documentation"""
processes = []
process_list = self._execute("list_processes")
for process in process_list:
processes.append({"username": process["username"],
"pid": process["pid"],
"ppid": process["parent"],
"start_time": datetime.datetime.utcfromtimestamp(process["create_time"]),
"command_line": process["command_line"].split(self.path_separator)[-1],
"path": process["path"],
})
return processes
def __enter__(self):
"""Enter context"""
return self
def __exit__(self, exeception_type, exception_value, traceback):
"""Exit context"""
self.raw_session.close()
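#Minimal sketch of how a session might be consumed (assumes a connected
#CBMachine named ``machine``); list_processes() and list_dir() return plain
#dicts with the keys built above:
#   with machine.connect() as session:
#       for proc in session.list_processes():
#           print(proc["pid"], proc["path"])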
class Backend(LeetBackend):
"""Implements the CB backend communication.
This class starts the connection to the backend server and enables direct
interaction with it.
"""
def __init__(self, profile_name):
"""Returns a Backend object.
Args:
profile_name (str): The profile name that this class will connect,
as seen in the 'credentials.response' file.
"""
super().__init__("CB-" + profile_name, 7) #TODO move max_session to a configuration/variable
self._profile_name = profile_name
self._cb = None
@property
def url(self):
"""The Carbon Black server URL"""
return self._cb.url
def start(self):
"""Starts the internal thread (see base class documentation) and
start the connection to the CB server.
"""
super().start()
self._cb = CbResponseAPI(profile=self._profile_name)
return self
def _get_sensor(self, hostname):
"""Return the sensor related to the hostname. If more than one sensor
is found, it will return the one that did the most recent check-in.
Args:
hostname (str): The machine name
Returns:
Sensor: The sensor with the most recent check-in, or None if no sensor matches
"""
recent_sensor = None
query = "hostname:" + hostname
sensors = self._cb.select(Sensor).where(query)
for sensor in sensors:
if recent_sensor is None:
recent_sensor = sensor
else:
if sensor.last_checkin_time > recent_sensor.last_checkin_time:
recent_sensor = sensor
return recent_sensor
def _search_machines(self, search_request):
"""See base class documentation"""
machine_list = []
for hostname in search_request.hostnames:
sensor = self._get_sensor(hostname)
if sensor is not None:
machine_list.append(CBMachine(hostname, self.backend_name, sensor))
return machine_list
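#Rough usage sketch (hedged): the profile name maps to an entry in the
#'credentials.response' file used by cbapi.
#   backend = Backend("default").start()
#   print(backend.url)  #Carbon Black server URL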
``` |
{
"source": "jldantas/power-mft",
"score": 2
} |
#### File: power-mft/libmft/attribute.py
```python
import struct
import logging
from operator import getitem as _getitem
from uuid import UUID
from abc import ABCMeta, abstractmethod
from math import ceil as _ceil
import sys as _sys
from libmft.util.functions import convert_filetime, get_file_reference
from libmft.flagsandtypes import AttrTypes, AttrFlags, NameType, FileInfoFlags, \
IndexEntryFlags, VolumeFlags, ReparseType, ReparseFlags, CollationRule, \
SecurityDescriptorFlags, ACEType, ACEControlFlags, ACEAccessFlags, \
SymbolicLinkFlags, EAFlags
from libmft.exceptions import HeaderError, ContentError
#******************************************************************************
# MODULE LEVEL VARIABLES
#******************************************************************************
_MOD_LOGGER = logging.getLogger(__name__)
'''logging.Logger: Module level logger for all the logging needs of the module'''
_ATTR_BASIC = struct.Struct("<2IB")
'''struct.Struct: Struct to get basic information from the attribute header'''
#******************************************************************************
# MODULE LEVEL FUNCTIONS
#******************************************************************************
def get_attr_info(binary_view):
'''Gets basic information from a binary stream to allow correct processing of
the attribute header.
This function allows the interpretation of the Attribute type, attribute length
and if the attribute is non resident.
Args:
binary_view (memoryview of bytearray) - A binary stream with the
information of the attribute
Returns:
A tuple with the attribute type, the attribute length, in bytes, and
if the attribute is resident or not.
'''
global _ATTR_BASIC
attr_type, attr_len, non_resident = _ATTR_BASIC.unpack(binary_view[:9])
return (AttrTypes(attr_type), attr_len, bool(non_resident))
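#Example (assuming AttrTypes follows the standard NTFS type codes, e.g.
#0x10 == STANDARD_INFORMATION): the first 9 bytes of a resident
#$STANDARD_INFORMATION attribute, b"\x10\x00\x00\x00\x60\x00\x00\x00\x00",
#would yield (AttrTypes(0x10), 0x60, False).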
def _create_attrcontent_class(name, fields, inheritance=(object,), data_structure=None, extra_functions=None, docstring=""):
'''Helper function that creates a class for attribute contents.
This function is a boilerplate that creates all the expected methods of
an attribute content class. The basic methods work in the same way for all classes.
Once it executes it defines a dynamic class with the methods "__init__",
"__repr__" and "__eq__" based on the fields passed in the ``fields`` parameter.
If the ``data_structure`` parameter is present, the classmethod ``get_representation_size``
and the class variable ``_REPR`` will also be present.
It is also possible to define the inheritance using this method by passing
a list of classes in the ``inheritance`` parameter.
If the ``extra_functions`` argument is present, they will be added to the
class.
Note:
If ``extra_functions`` defines any of the dynamically created methods,
they will *replace* the generated ones.
Args:
name (str): Name of the class that will be created.
fields (tuple(str)): The attributes that will be added to the class.
inheritance (tuple(object)): Classes that the new class will inherit from
data_structure (str): ``struct`` format string used to build the class
variable ``_REPR``
extra_functions (dict(str : function)): A dictionary where the key
will be the name of the function in the class and the content
of the key is a function that will be bound to the class
docstring (str): Class' docstring
Returns:
A new class with ``name`` as its name.
'''
def create_func_from_str(f_name, args, content, docstring=""):
'''Helper function to create functions from strings.
To improve performance, the standard functions are created at runtime
based on a string derived from the content. This way the function, from
the interpreter's point of view, looks like it was statically defined.
Note:
This function should be used only for methods that will receive
``self`` (instance methods). The ``self`` argument is added automatically.
Args:
f_name (str): Function name
args (list(str)): List of extra arguments that the function will receive
content (str): Content of the function
docstring (str): Function's docstring
Returns:
A new function object that can be inserted in the class.
'''
exec_namespace = {"__name__" : f"{f_name}"}
new_args = ", ".join(["self"] + args)
func_str = f"def {f_name}({new_args}): {content}"
exec(func_str, exec_namespace)
func = exec_namespace[f_name]
func.__doc__ = docstring
return func
#creates the functions necessary for the new class
slots = fields
init_content = ", ".join([f"self.{field}" for field in fields]) + " = content"
__init__ = create_func_from_str("__init__", [f"content=(None,)*{len(fields)}"], init_content)
temp = ", ".join([f"{field}={{self.{field}}}" for field in fields])
repr = "return " + f"f\'{{self.__class__.__name__}}({temp})\'"
__repr__ = create_func_from_str("__repr__", [], repr)
temp = " and ".join([f"self.{field} == other.{field}" for field in fields])
eq = f"return {temp} if isinstance(other, {name}) else False"
__eq__ = create_func_from_str("__eq__", ["other"], eq)
@classmethod
def get_representation_size(cls):
return cls._REPR.size
#adapted from namedtuple code
# Modify function metadata to help with introspection and debugging
for method in (__init__, get_representation_size.__func__, __eq__,
__repr__):
method.__qualname__ = f'{name}.{method.__name__}'
#map class namespace for the class creation
namespace = {"__slots__" : slots,
"__init__" : __init__,
"__repr__" : __repr__,
"__eq__" : __eq__
}
if data_structure is not None:
namespace["_REPR"] = struct.Struct(data_structure)
namespace["get_representation_size"] = get_representation_size
if docstring:
namespace["__doc__"] = docstring
#some new mappings can be set or overload the ones defined
if extra_functions is not None:
for method in extra_functions.values():
try:
method.__qualname__ = f'{name}.{method.__name__}'
except AttributeError:
try:
method.__func__.__qualname__ = f'{name}.{method.__func__.__name__}'
except AttributeError:
#if we got here, it is not a method or classmethod, must be an attribute
#TODO feels like a hack, change it
#TODO design a test for this
pass
namespace = {**namespace, **extra_functions}
#TODO check if docstring was provided, issue a warning
new_class = type(name, inheritance, namespace)
# adapted from namedtuple code
# For pickling to work, the __module__ variable needs to be set to the frame
# where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython), or where the user has
# specified a particular module.
try:
new_class.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return new_class
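#The factory above is how the concrete content classes later in this module
#(e.g. Timestamps, StandardInformation, FileName) are built: the tuple of field
#names becomes __slots__/__init__/__repr__/__eq__, the struct string becomes
#_REPR and extra_functions injects create_from_binary/__len__.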
#******************************************************************************
# CLASSES
#******************************************************************************
#******************************************************************************
# DATA_RUN
#******************************************************************************
# Data runs are part of the non resident header.
#TODO replace the datarun tuple by a class?
# class DataRun():
# def __init__(self, dr_len, offset):
# self._dr_len = dr_len
# self.offset = offset
#
# def __getitem__(self, index):
# if index == 0:
# return self._dr_len
# elif index == 1:
# return self.offset
# else:
# raise IndexError("Invalid index for datarun object")
#
# def __eq__(self, other):
# if isinstance(other, self.__class__):
# return self._dr_len == other._dr_len and self.offset == other.offset
# else:
# return False
#
# def __len__(self):
# return self._dr_len
#
# def __repr__(self):
# return f"{self.__class__.__name__}(dr_len={self._dr_len},offset={self.offset})"
class DataRuns():
'''Represents the data runs of a non-resident attribute.
When we have non resident attributes, it is necessary to map where in the
disk the contents are. For that the NTFS uses data runs.
Great resource for explanation and tests:
https://flatcap.org/linux-ntfs/ntfs/concepts/data_runs.html
Important:
Calling ``len`` in this class returns the number of data runs, not the
size in bytes.
Args:
data_runs (list of tuples) - A list of tuples representing the data run.
The tuple has to have 2 elements, where the first element is the
length of the data run and the second is the absolute offset
Attributes:
data_runs (list of tuples) - A list of tuples representing the data run.
The tuple has to have 2 elements, where the first element is the
length of the data run and the second is the absolute offset
'''
_INFO = struct.Struct("<B")
def __init__(self, data_runs=None):
'''See class docstring.'''
#avoid a mutable default argument shared between all instances
self.data_runs = data_runs if data_runs is not None else [] #list of tuples
@classmethod
def create_from_binary(cls, binary_view):
'''Creates a new object DataRuns from a binary stream. The binary
stream can be represented by a byte string, bytearray or a memoryview of the
bytearray.
Args:
binary_view (memoryview of bytearray) - A binary stream with the
information of the attribute
Returns:
DataRuns: New object using the binary stream as source
'''
nw_obj = cls()
offset = 0
previous_dr_offset = 0
header_size = cls._INFO.size #"header" of a data run is always a byte
while binary_view[offset] != 0: #the runlist ends with a 0 as the "header"
header = cls._INFO.unpack(binary_view[offset:offset+header_size])[0]
length_len = header & 0x0F
length_offset = (header & 0xF0) >> 4
temp_len = offset+header_size+length_len #helper variable just to make things simpler
dr_length = int.from_bytes(binary_view[offset+header_size:temp_len], "little", signed=False)
if length_offset: #the offset is relative to the previous data run
dr_offset = int.from_bytes(binary_view[temp_len:temp_len+length_offset], "little", signed=True) + previous_dr_offset
previous_dr_offset = dr_offset
else: #if it is sparse, it requires a different approach
dr_offset = None
offset += header_size + length_len + length_offset
nw_obj.data_runs.append((dr_length, dr_offset))
#nw_obj.data_runs.append(DataRun(dr_length, dr_offset))
_MOD_LOGGER.debug("DataRuns object created successfully")
return nw_obj
def __len__(self):
'''Returns the number of data runs'''
return len(self.data_runs)
def __iter__(self):
'''Return the iterator for the representation of the list.'''
return iter(self.data_runs)
def __getitem__(self, index):
'''Return a specific data run'''
return _getitem(self.data_runs, index)
def __repr__(self):
'Return a nicely formatted representation string'
return f'{self.__class__.__name__}(data_runs={self.data_runs})'
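#Worked example of the decoding above, using a classic run-list sample: the
#header byte 0x21 means 1 length byte and 2 offset bytes, so
#b"\x21\x18\x34\x56\x00" decodes to a single run of 0x18 (24) clusters
#starting at cluster 0x5634 (22068):
#   >>> DataRuns.create_from_binary(memoryview(b"\x21\x18\x34\x56\x00")).data_runs
#   [(24, 22068)]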
class BaseAttributeHeader():
'''Represents the common contents of the Attribute Header.
Independently if the attribute is resident on non-resident, all of them
have a common set a data. This class represents this common set of attributes
and is not meant to be used directly, but to be inherited by the resident
header and non resident header classes.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (:obj:`AttrTypes`): Type of the attribute
content[1] (int): Attribute's length, in bytes
content[2] (bool): True if non resident attribute, False otherwise
content[3] (:obj:`AttrFlags`): Attribute flags
content[4] (int): Attribute ID
content[5] (str): Attribute name
Attributes:
attr_type_id (:obj:`AttrTypes`): Type of the attribute
attr_len (int): Attribute's length, in bytes
non_resident (bool): True if non resident attribute, False otherwise
flags (:obj:`AttrFlags`): Attribute flags
attr_id (int): Attribute ID
attr_name (str): Attribute name
'''
_REPR_STRING = "2I2B3H"
_REPR = struct.Struct("<2I2B3H")
''' Attribute type id - 4 (AttrTypes)
Length of the attribute - 4 (in bytes)
Non-resident flag - 1 (0 - resident, 1 - non-resident)
Length of the name - 1 (in number of characters)
Offset to name - 2
Flags - 2 (AttrFlags)
Attribute id - 2
'''
__slots__ = ("attr_type_id", "attr_len", "non_resident", "flags", "attr_id",
"attr_name")
def __init__(self, content=(None,)*6):
'''See class docstring.'''
self.attr_type_id, self.attr_len, self.non_resident, self.flags, self.attr_id, \
self.attr_name = content
@classmethod
def create_from_binary(cls, binary_view):
'''Creates a new object BaseAttributeHeader from a binary stream. The binary
stream can be represented by a byte string, bytearray or a memoryview of the
bytearray.
Args:
binary_view (memoryview of bytearray) - A binary stream with the
information of the attribute
Returns:
BaseAttributeHeader: New object using the binary stream as source
'''
attr_type, attr_len, non_resident, name_len, name_offset, flags, attr_id = cls._REPR.unpack(binary_view[:cls._REPR.size])
if name_len:
name = binary_view[name_offset:name_offset+(2*name_len)].tobytes().decode("utf_16_le")
else:
name = None
nw_obj = cls((AttrTypes(attr_type), attr_len, bool(non_resident), AttrFlags(flags), attr_id, name ))
return nw_obj
@classmethod
def get_representation_size(cls):
'''Return the header size WITHOUT accounting for a possible named attribute.'''
return cls._REPR.size
def __len__(self):
'''Returns the logical size of the attribute'''
return self.attr_len
def __repr__(self):
'Return a nicely formatted representation string'
return (f'{self.__class__.__name__}(attr_type_id={str(self.attr_type_id)},'
f'attr_len={self.attr_len}, nonresident_flag={self.non_resident},'
f'flags={str(self.flags)}, attr_id={self.attr_id}, attr_name={self.attr_name})'
)
class ResidentAttrHeader(BaseAttributeHeader):
'''Represents the the attribute header when the attribute is resident.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content_basic (Iterable): See BaseAttributeHeader documentation
content_specific[0] (int): Content's length, in bytes
content_specific[1] (int): Content offset
content_specific[2] (int): Indexed flag
Attributes:
content_len (int): Content's length, in bytes
content_offset (int): Content offset
indexed_flag (int): Indexed flag
'''
_REPR = struct.Struct("".join(["<", BaseAttributeHeader._REPR_STRING, "IHBx"]))
'''
BASIC HEADER
Attribute type id - 4 (AttrTypes)
Length of the attribute - 4 (in bytes)
Non-resident flag - 1 (0 - resident, 1 - non-resident)
Length of the name - 1 (in number of characters)
Offset to name - 2
Flags - 2 (AttrFlags)
Attribute id - 2
RESIDENT HEADER COMPLEMENT
Content length - 4
Content offset - 2
Indexed flag - 1
Padding - 1
'''
__slots__ = ("content_len", "content_offset", "indexed_flag")
def __init__(self, content_basic=(None,)*6, content_specific=(None,)*3):
super().__init__(content_basic)
self.content_len, self.content_offset, self.indexed_flag = content_specific
@classmethod
def get_representation_size(cls):
'''Return the header size WITHOUT accounting for a possible named attribute.'''
return cls._REPR.size
@classmethod
def create_from_binary(cls, binary_view):
'''Creates a new object AttributeHeader from a binary stream. The binary
stream can be represented by a byte string, bytearray or a memoryview of the
bytearray.
Args:
binary_view (memoryview of bytearray) - A binary stream with the
information of the attribute
Returns:
AttributeHeader: New object using the binary stream as source
'''
attr_type, attr_len, non_resident, name_len, name_offset, flags, attr_id, \
content_len, content_offset, indexed_flag = cls._REPR.unpack(binary_view[:cls._REPR.size])
if name_len:
name = binary_view[name_offset:name_offset+(2*name_len)].tobytes().decode("utf_16_le")
else:
name = None
nw_obj = cls((AttrTypes(attr_type), attr_len, bool(non_resident), AttrFlags(flags), attr_id, name),
(content_len, content_offset, indexed_flag))
return nw_obj
def __repr__(self):
'Return a nicely formatted representation string'
return (f'{self.__class__.__name__}(attr_type_id={str(self.attr_type_id)},'
f'attr_len={self.attr_len}, nonresident_flag={self.non_resident},'
f'flags={str(self.flags)}, attr_id={self.attr_id}, attr_name={self.attr_name},'
f'content_len={self.content_len}, content_offset={self.content_offset}, indexed_flag={self.indexed_flag})'
)
class NonResidentAttrHeader(BaseAttributeHeader):
'''Represents the non-resident header of an attribute.'''
_REPR = struct.Struct("".join(["<", BaseAttributeHeader._REPR_STRING, "2Q2H4x3Q"]))
'''
BASIC HEADER
Attribute type id - 4 (AttrTypes)
Length of the attribute - 4 (in bytes)
Non-resident flag - 1 (0 - resident, 1 - non-resident)
Length of the name - 1 (in number of characters)
Offset to name - 2
Flags - 2 (AttrFlags)
Attribute id - 2
NON-RESIDENT HEADER COMPLEMENT
Start virtual cluster number - 8
End virtual cluster number - 8
Runlist offset - 2
Compression unit size - 2
Padding - 4
Allocated size of the stream - 8
Current size of the stream - 8
Initialized size of the stream - 8
Data runs - dynamic
'''
__slots__ = ("start_vcn", "end_vcn", "rl_offset", "compress_usize", "alloc_sstream", "curr_sstream", "init_sstream", "data_runs")
def __init__(self, content_basic=(None,)*6, content_specific=(None,)*7, data_runs=None):
'''Creates a NonResidentAttrHeader object. The specific content has to be
an iterable with precisely 7 elements, in order.
If the content is not provided, a 7 element tuple, where all elements are
None, is the default argument
Args:
content (iterable), where:
[0] (int) - start vcn
[1] (int) - end vcn
[2] (int) - datarun list offset
[3] (int) - compression unit size
[4] (int) - allocated data size
[5] (int) - current data size
[6] (int) - initialized data size
data_runs (list of DataRuns) - A list with all dataruns relative
to this particular header. If nothing is provided, the default
argument is 'None'.
'''
super().__init__(content_basic)
self.start_vcn, self.end_vcn, self.rl_offset, self.compress_usize, \
self.alloc_sstream, self.curr_sstream, self.init_sstream = content_specific
self.data_runs = data_runs
@classmethod
def get_representation_size(cls):
'''Return the header size, does not account for the number of data runs'''
return cls._REPR.size
@classmethod
def create_from_binary(cls, load_dataruns, binary_view):
'''Creates a new object NonResidentAttrHeader from a binary stream. The binary
stream can be represented by a byte string, bytearray or a memoryview of the
bytearray.
Args:
load_dataruns (bool) - Indicates if the dataruns are to be loaded
binary_view (memoryview of bytearray) - A binary stream with the
information of the attribute
non_resident_offset (int) - The offset where the non resident header
begins
Returns:
NonResidentAttrHeader: New object using the binary stream as source
'''
attr_type, attr_len, non_resident, name_len, name_offset, flags, attr_id, \
start_vcn, end_vcn, rl_offset, compress_usize, alloc_sstream, curr_sstream, \
init_sstream = cls._REPR.unpack(binary_view[:cls._REPR.size])
if name_len:
name = binary_view[name_offset:name_offset+(2*name_len)].tobytes().decode("utf_16_le")
else:
name = None
#content = cls._REPR.unpack(binary_view[non_resident_offset:non_resident_offset+cls._REPR.size])
nw_obj = cls((AttrTypes(attr_type), attr_len, bool(non_resident), AttrFlags(flags), attr_id, name),
(start_vcn, end_vcn, rl_offset, compress_usize, alloc_sstream, curr_sstream, init_sstream))
if load_dataruns:
nw_obj.data_runs = DataRuns.create_from_binary(binary_view[nw_obj.rl_offset:])
_MOD_LOGGER.debug("NonResidentAttrHeader object created successfully")
return nw_obj
def __repr__(self):
'Return a nicely formatted representation string'
return (f'{self.__class__.__name__}(attr_type_id={str(self.attr_type_id)},'
f'attr_len={self.attr_len}, nonresident_flag={self.non_resident},'
f'flags={str(self.flags)}, attr_id={self.attr_id}, attr_name={self.attr_name},'
f'start_vcn={self.start_vcn}, end_vcn={self.end_vcn}, rl_offset={self.rl_offset},'
f'compress_usize={self.compress_usize}, alloc_sstream={self.alloc_sstream},'
f'curr_sstream={self.curr_sstream}, init_sstream={self.init_sstream}, data_runs={self.data_runs})'
)
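#Hedged sketch of how a caller is expected to combine get_attr_info with the
#two header classes above (``attr_view`` is a memoryview positioned at the
#start of an attribute record):
#   attr_type, attr_len, non_resident = get_attr_info(attr_view)
#   if non_resident:
#       header = NonResidentAttrHeader.create_from_binary(True, attr_view)
#   else:
#       header = ResidentAttrHeader.create_from_binary(attr_view)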
#------------------------------------------------------------------------------
#******************************************************************************
#******************************************************************************
# ATTRIBUTE CONTENT CLASSES
#******************************************************************************
#******************************************************************************
#------------------------------------------------------------------------------
#******************************************************************************
# ABSTRACT CLASS FOR ATTRIBUTE CONTENT
#******************************************************************************
class AttributeContentBase(metaclass=ABCMeta):
'''Base class for attribute's content.
This class is an interface to all the attribute's contents. It can't be
instantiated and serves only as a general interface.
'''
@classmethod
@abstractmethod
def create_from_binary(cls, binary_stream):
'''Creates an object from from a binary stream.
Args:
binary_stream (memoryview): A buffer access to the underlying binary
stream
Returns:
A new object of whatever type has overloaded the method.
'''
pass
@abstractmethod
def __len__(self):
'''Get the actual size of the content, in bytes, as some attributes have variable sizes.'''
pass
@abstractmethod
def __eq__(self, other):
pass
class AttributeContentNoRepr(AttributeContentBase):
'''Base class for attribute contents that don't have a fixed representation.
This class is an interface to the attribute's contents. It can't be
instantiated and serves only as a general interface.
'''
pass
class AttributeContentRepr(AttributeContentBase):
'''Base class for attribute contents that have a fixed representation.
This class is an interface to the attribute's contents. It can't be
instantiated and serves only as a general interface.
'''
@classmethod
@abstractmethod
def get_representation_size(cls):
'''Get the representation size, in bytes, based on defined struct
Returns:
An ``int`` with the size of the structure
'''
pass
#******************************************************************************
# TIMESTAMPS class
#******************************************************************************
def _len_ts(self):
return Timestamps._REPR.size
def _from_binary_ts(cls, binary_stream):
"""See base class."""
repr = cls._REPR
if len(binary_stream) != repr.size:
raise ContentError("Invalid binary stream size")
content = repr.unpack(binary_stream)
nw_obj = cls()
nw_obj.created, nw_obj.changed, nw_obj.mft_changed, nw_obj.accessed = \
convert_filetime(content[0]), convert_filetime(content[1]), \
convert_filetime(content[2]), convert_filetime(content[3])
_MOD_LOGGER.debug("Attempted to unpack Timestamp from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _astimezone_ts(self, timezone):
"""Changes the time zones of all timestamps.
Receives a new timezone and applies to all timestamps, if necessary.
Args:
timezone (:obj:`tzinfo`): Time zone to be applied
Returns:
A new ``Timestamps`` object if the time zone changes, otherwise returns ``self``.
"""
if self.created.tzinfo is timezone:
return self
else:
nw_obj = Timestamps((None,)*4)
nw_obj.created = self.created.astimezone(timezone)
nw_obj.changed = self.changed.astimezone(timezone)
nw_obj.mft_changed = self.mft_changed.astimezone(timezone)
nw_obj.accessed = self.accessed.astimezone(timezone)
return nw_obj
_docstring_ts = '''Represents a group of timestamps as recorded by the MFT.
Aggregates the entries for timestamps when dealing with standard NTFS timestamps,
e.g., created, changed, mft change and accessed. All attributes are time zone
aware.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (:obj:`datetime`): Created timestamp
content[1] (datetime): Changed timestamp
content[2] (datetime): MFT change timestamp
content[3] (datetime): Accessed timestamp
Attributes:
created (datetime): A datetime with the created timestamp
changed (datetime): A datetime with the changed timestamp
mft_changed (datetime): A datetime with the mft_changed timestamp
accessed (datetime): A datetime with the accessed timestamp
'''
_ts_namespace = {"__len__" : _len_ts,
"create_from_binary" : classmethod(_from_binary_ts),
"astimezone" : _astimezone_ts
}
Timestamps = _create_attrcontent_class("Timestamps", ("created", "changed", "mft_changed", "accessed"),
inheritance=(AttributeContentRepr,), data_structure="<4Q",
extra_functions=_ts_namespace, docstring=_docstring_ts)
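#Usage note (sketch): Timestamps.create_from_binary expects exactly 32 bytes
#(four little-endian FILETIME values) and raises ContentError otherwise;
#astimezone() returns a new Timestamps object unless the target tzinfo is
#already in use, in which case it returns self.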
#******************************************************************************
# STANDARD_INFORMATION ATTRIBUTE
#******************************************************************************
def _len_stdinfo(self):
return StandardInformation._TIMESTAMP_SIZE + StandardInformation._REPR.size
def _from_binary_stdinfo(cls, binary_stream):
"""See base class."""
'''
TIMESTAMPS(32)
Creation time - 8
File altered time - 8
MFT/Metadata altered time - 8
Accessed time - 8
Flags - 4 (FileInfoFlags)
Maximum number of versions - 4
Version number - 4
Class id - 4
Owner id - 4 (NTFS 3+)
Security id - 4 (NTFS 3+)
Quota charged - 8 (NTFS 3+)
Update Sequence Number (USN) - 8 (NTFS 3+)
'''
if len(binary_stream) == cls._REPR.size: #check if it is v3 by the size of the stream
t_created, t_changed, t_mft_changed, t_accessed, flags, m_ver, ver, \
c_id, o_id, s_id, quota_charged, usn = cls._REPR.unpack(binary_stream)
nw_obj = cls(
( Timestamps((convert_filetime(t_created), convert_filetime(t_changed),
convert_filetime(t_mft_changed), convert_filetime(t_accessed))
), FileInfoFlags(flags), m_ver, ver, c_id, o_id, s_id, quota_charged, usn))
else:
#if the content is not using the v3 extension, add the missing fields for consistency
t_created, t_changed, t_mft_changed, t_accessed, flags, m_ver, ver, \
c_id = cls._REPR_NO_NFTS_3_EXTENSION.unpack(binary_stream)
nw_obj = cls(
( Timestamps((convert_filetime(t_created), convert_filetime(t_changed),
convert_filetime(t_mft_changed), convert_filetime(t_accessed))
), FileInfoFlags(flags), m_ver, ver, c_id, None, None, None, None))
_MOD_LOGGER.debug("Attempted to unpack STANDARD_INFORMATION from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
_docstring_stdinfo = '''Represents the STANDARD_INFORMATION content.
Has all the data structures to represent a STANDARD_INFORMATION attribute,
allowing everything to be accessed with python objects/types.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (:obj:`Timestamps`): Timestamp object
content[1] (:obj:`FileInfoFlags`): A FIleInfoFlags object with the flags
for this object
content[2] (int): Maximum number of allowed versions
content[3] (int): Current version number
content[4] (int): Class id
content[5] (int): Owner id
content[6] (int): Security id
content[7] (int): Quota charged
content[8] (int): Update Sequence Number (USN)
Attributes:
timestamps (:obj:`Timestamps`): All attribute's timestamps
flags (:obj:`FileInfoFlags`): STANDARD_INFORMATION flags for the file
max_n_versions (int): Maximum number of allowed versions
version_number (int): Current version number
class_id (int): Class id
owner_id (int): Owner id
security_id (int): Security id
quota_charged (int): Quota charged
usn (int): Update Sequence Number (USN)
'''
_stdinfo_namespace = {"__len__" : _len_stdinfo,
"create_from_binary" : classmethod(_from_binary_stdinfo),
"_REPR_NO_NFTS_3_EXTENSION" : struct.Struct("<4Q4I")
}
StandardInformation = _create_attrcontent_class("StandardInformation",
("timestamps", "flags", "max_n_versions", "version_number", "class_id",
"owner_id", "security_id", "quota_charged", "usn"),
inheritance=(AttributeContentRepr,), data_structure="<4Q4I2I2Q",
extra_functions=_stdinfo_namespace, docstring=_docstring_stdinfo)
#******************************************************************************
# ATTRIBUTE_LIST ATTRIBUTE
#******************************************************************************
def _from_binary_attrlist_e(cls, binary_stream):
"""See base class."""
'''
Attribute type - 4
Length of a particular entry - 2
Length of the name - 1 (in characters)
Offset to name - 1
Starting VCN - 8
File reference - 8
Attribute ID - 1
Name (unicode) - variable
'''
attr_type, entry_len, name_len, name_off, s_vcn, f_tag, attr_id = cls._REPR.unpack(binary_stream[:cls._REPR.size])
if name_len:
name = binary_stream[name_off:name_off+(2*name_len)].tobytes().decode("utf_16_le")
else:
name = None
file_ref, file_seq = get_file_reference(f_tag)
nw_obj = cls((AttrTypes(attr_type), entry_len, name_off, s_vcn, file_ref, file_seq, attr_id, name))
_MOD_LOGGER.debug("Attempted to unpack ATTRIBUTE_LIST Entry from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _len_attrlist_e(self):
'''Returns the size of the entry, in bytes'''
return self._entry_len
_docstring_attrlist_e = '''Represents an entry for ATTRIBUTE_LIST.
Has all the data structures to represent one entry of the ATTRIBUTE_LIST
content allowing everything to be accessed with python objects/types.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (:obj:`AttrTypes`): Type of the attribute in the entry
content[1] (int): Length of the entry, in bytes
content[2] (int): Offset to the name, in bytes
content[3] (int): Start VCN
content[4] (int): File reference number
content[5] (int): File sequence number
content[6] (int): Attribute ID
content[7] (str): Name
Attributes:
attr_type (:obj:`AttrTypes`): Type of the attribute in the entry
name_offset (int): Offset to the name, in bytes
start_vcn (int): Start VCN
file_ref (int): File reference number
file_seq (int): File sequence number
attr_id (int): Attribute ID
name (str): Name
'''
_attrlist_e_namespace = {"__len__" : _len_attrlist_e,
"create_from_binary" : classmethod(_from_binary_attrlist_e)
}
AttributeListEntry = _create_attrcontent_class("AttributeListEntry",
("attr_type", "_entry_len", "name_offset", "start_vcn",
"file_ref", "file_seq", "attr_id", "name"),
inheritance=(AttributeContentRepr,), data_structure="<IH2B2QH",
extra_functions=_attrlist_e_namespace, docstring=_docstring_attrlist_e)
#-----------------------------------------------------------------------------
def _from_binary_attrlist(cls, binary_stream):
"""See base class."""
_attr_list = []
offset = 0
while True:
entry = AttributeListEntry.create_from_binary(binary_stream[offset:])
offset += len(entry)
_attr_list.append(entry)
if offset >= len(binary_stream):
break
_MOD_LOGGER.debug("Next AttributeListEntry offset = %d", offset)
_MOD_LOGGER.debug("Attempted to unpack ATTRIBUTE_LIST Entry from \"%s\"\nResult: %s", binary_stream.tobytes(), _attr_list)
return cls(_attr_list)
def _len_attrlist(self):
'''Return the number of entries in the attribute list'''
return len(self._attr_list)
def _iter_attrlist(self):
return iter(self._attr_list)
def _gitem_attrlist(self, index):
return _getitem(self._attr_list, index)
_docstring_attrlist = '''Represents the contents for the ATTRIBUTE_LIST attribute.
It is a list of AttributeListEntry objects and behaves like a Python list: you
can iterate over it, access entries by index, etc.
Important:
Using the ``len()`` method on the objects of this class returns the number
of elements in the list.
Args:
content (list(:obj:`AttributeListEntry`)): List of AttributeListEntry
'''
_attrlist_namespace = {"__len__" : _len_attrlist,
"__iter__" : _iter_attrlist,
"__getitem__" : _gitem_attrlist,
"create_from_binary" : classmethod(_from_binary_attrlist)
}
AttributeList = _create_attrcontent_class("AttributeList",
("_attr_list",),
inheritance=(AttributeContentNoRepr,), data_structure=None,
extra_functions=_attrlist_namespace, docstring=_docstring_attrlist)
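#Sketch of consuming an AttributeList (``content_view`` is a hypothetical
#memoryview over the resident content of an $ATTRIBUTE_LIST attribute):
#   attr_list = AttributeList.create_from_binary(content_view)
#   for entry in attr_list:  #yields AttributeListEntry objects
#       print(entry.attr_type, entry.name)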
#******************************************************************************
# OBJECT_ID ATTRIBUTE
#******************************************************************************
def _from_binary_objid(cls, binary_stream):
"""See base class."""
uid_size = ObjectID._UUID_SIZE
#some entries might not have all four ids, this line forces
#to always create 4 elements, so construction is easier
uids = [UUID(bytes_le=binary_stream[i*uid_size:(i+1)*uid_size].tobytes()) if i * uid_size < len(binary_stream) else None for i in range(0,4)]
_MOD_LOGGER.debug("Attempted to unpack OBJECT_ID Entry from \"%s\"\nResult: %s", binary_stream.tobytes(), uids)
return cls(uids)
def _len_objid(self):
'''Get the actual size of the content, as some attributes have variable sizes'''
try:
return self._size
except AttributeError:
temp = (self.object_id, self.birth_vol_id, self.birth_object_id, self.birth_domain_id)
self._size = sum([ObjectID._UUID_SIZE for data in temp if data is not None])
return self._size
_docstring_objid = '''Represents the content of the OBJECT_ID attribute.
Important:
When reading from binary, some entries may not have all the members,
in this case the code creates None entries.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (:obj:`UUID`): Object id
content[1] (:obj:`UUID`): Birth volume id
content[2] (:obj:`UUID`): Birth object id
content[3] (:obj:`UUID`): Birth domain id
Attributes:
object_id (UUID): Unique ID assigned to file
birth_vol_id (UUID): ID of the volume where the file was created
birth_object_id (UUID): Original Object ID of the file
birth_domain_id (UUID): Domain where the object was created
'''
_objid_namespace = {"__len__" : _len_objid,
"_UUID_SIZE" : 16,
"create_from_binary" : classmethod(_from_binary_objid)
}
ObjectID = _create_attrcontent_class("ObjectID",
("object_id", "birth_vol_id", "birth_object_id", "birth_domain_id"),
inheritance=(AttributeContentNoRepr,), data_structure=None,
extra_functions=_objid_namespace, docstring=_docstring_objid)
#******************************************************************************
# VOLUME_NAME ATTRIBUTE
#******************************************************************************
def _from_binary_volname(cls, binary_stream):
"""See base class."""
name = binary_stream.tobytes().decode("utf_16_le")
_MOD_LOGGER.debug("Attempted to unpack VOLUME_NAME Entry from \"%s\"\nResult: %s", binary_stream.tobytes(), name)
return cls(name)
def _len_volname(self):
"""Returns the size of the attribute, in bytes, encoded in utf_16_le"""
return len(self.name.encode("utf_16_le"))
_docstring_volname = """Represents the content of the VOLUME_NAME attribute.
Args:
name (str): Volume's name
Attributes:
name (str): Volume's name
"""
_volname_namespace = {"__len__" : _len_volname,
"create_from_binary" : classmethod(_from_binary_volname)
}
VolumeName = _create_attrcontent_class("VolumeName",
("name", ),
inheritance=(AttributeContentNoRepr,), data_structure=None,
extra_functions=_volname_namespace, docstring=_docstring_volname)
#******************************************************************************
# VOLUME_INFORMATION ATTRIBUTE
#******************************************************************************
def _from_binary_volinfo(cls, binary_stream):
"""See base class."""
content = cls._REPR.unpack(binary_stream)
nw_obj = cls(content)
nw_obj.vol_flags = VolumeFlags(content[2])
_MOD_LOGGER.debug("Attempted to unpack VOLUME_INFORMATION Entry from \"%s\"\nResult: %s", binary_stream.tobytes(), content)
return nw_obj
def _len_volinfo(self):
'''Returns the length of the attribute'''
return VolumeInformation._REPR.size
_docstring_volinfo = '''Represents the content of the VOLUME_INFORMATION attribute
Interprets the volume information as per viewed by MFT. Contains information
like version and the state of the volume.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (int): Major version
content[1] (int): Minor version
content[2] (:obj:`VolumeFlags`): Volume flags
Attributes:
major_ver (int): Major version
minor_ver (int): Minor version
vol_flags (:obj:`VolumeFlags`): Volume flags
'''
_volinfo_namespace = {"__len__" : _len_volinfo,
"create_from_binary" : classmethod(_from_binary_volinfo)
}
VolumeInformation = _create_attrcontent_class("VolumeInformation",
("major_ver", "minor_ver", "vol_flags"),
inheritance=(AttributeContentRepr,), data_structure="<8x2BH",
extra_functions=_volinfo_namespace, docstring=_docstring_volinfo)
#******************************************************************************
# FILENAME ATTRIBUTE
#******************************************************************************
def _from_binary_filename(cls, binary_stream):
"""See base class."""
''' File reference to parent directory - 8
TIMESTAMPS(32)
Creation time - 8
File altered time - 8
MFT/Metadata altered time - 8
Accessed time - 8
Allocated size of file - 8 (multiple of the cluster size)
Real size of file - 8 (actual file size, might also be stored by the directory)
Flags - 4
Reparse value - 4
Name length - 1 (in characters)
Name type - 1
Name - variable
'''
f_tag, t_created, t_changed, t_mft_changed, t_accessed, alloc_fsize, \
real_fsize, flags, reparse_value, name_len, name_type = cls._REPR.unpack(binary_stream[:cls._REPR.size])
name = binary_stream[cls._REPR.size:].tobytes().decode("utf_16_le")
file_ref, file_seq = get_file_reference(f_tag)
nw_obj = cls((file_ref, file_seq,
Timestamps((convert_filetime(t_created), convert_filetime(t_changed),
convert_filetime(t_mft_changed), convert_filetime(t_accessed))
), alloc_fsize, real_fsize, FileInfoFlags(flags), reparse_value, NameType(name_type), name))
_MOD_LOGGER.debug("Attempted to unpack FILENAME from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _len_filename(self):
return FileName._REPR.size + len(self.name.encode("utf_16_le"))
_docstring_filename = '''Represents the content of a FILENAME attribute.
The FILENAME attribute is one of the most important for MFT. It is not a mandatory
field, but if present, holds multiple timestamps, flags of the file and the name
of the file. It may be present multiple times.
Warning:
The information related to "allocated file size" and "real file size"
in this attribute is NOT reliable. Blame Microsoft. If you want a more
reliable information, use the ``Datastream`` objects in the api module.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (int): Parent reference
content[1] (int): Parent sequence
content[2] (:obj:`Timestamps`): Filename timestamps
content[3] (int): Allocated size of the file
content[4] (int): Logical/Real file size
content[5] (:obj:`FileInfoFlags`): File flags
content[6] (int): Reparse value
content[7] (int): Name length
content[8] (:obj:`NameType`): Name type
content[9] (str): Name
Attributes:
parent_ref (int): Parent reference
parent_seq (int): Parent sequence
timestamps (:obj:`Timestamps`): Filename timestamps
alloc_file_size (int): Allocated size of the file
real_file_size (int): Logical/Real file size
flags (:obj:`FileInfoFlags`): File flags
reparse_value (int): Reparse value
name_type (:obj:`NameType`): Name type
name (str): Name
'''
_filename_namespace = {"__len__" : _len_filename,
"create_from_binary" : classmethod(_from_binary_filename)
}
FileName = _create_attrcontent_class("FileName",
("parent_ref", "parent_seq", "timestamps", "alloc_file_size",
"real_file_size", "flags", "reparse_value", "name_type", "name"),
inheritance=(AttributeContentRepr,), data_structure="<7Q2I2B",
extra_functions=_filename_namespace, docstring=_docstring_filename)
#******************************************************************************
# DATA ATTRIBUTE
#******************************************************************************
def _from_binary_data(cls, binary_stream):
"""See base class."""
return cls(binary_stream.tobytes())
def _len_data(self):
return len(self.content)
_docstring_data = """Represents the content of a DATA attribute.
This is a placeholder class for the DATA attribute. By itself, it does
very little and holds almost no information. If the data is resident, holds the
content and the size.
Args:
binary_data (:obj:`bytes`): Data content
Attributes:
content (:obj:`bytes`): Data content
"""
_data_namespace = {"__len__" : _len_data,
"create_from_binary" : classmethod(_from_binary_data)
}
Data = _create_attrcontent_class("Data",
("content", ),
inheritance=(AttributeContentNoRepr,), data_structure=None,
extra_functions=_data_namespace, docstring=_docstring_data)
#******************************************************************************
# INDEX_ROOT ATTRIBUTE
#******************************************************************************
def _from_binary_idx_nh(cls, binary_stream):
"""See base class."""
''' Offset to start of index entry - 4
Offset to end of used portion of index entry - 4
Offset to end of the allocated index entry - 4
Flags - 4
'''
nw_obj = cls(cls._REPR.unpack(binary_stream[:cls._REPR.size]))
_MOD_LOGGER.debug("Attempted to unpack Index Node Header Entry from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _len_idx_nh(self):
return IndexNodeHeader._REPR.size
_docstring_idx_nh = '''Represents the Index Node Header, that is always present in the INDEX_ROOT
and INDEX_ALLOCATION attribute.
The composition of an INDEX_ROOT and INDEX_ALLOCATION always start with
a header. This class represents this header.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (int): Start offset
content[1] (int): End offset
content[2] (int): Allocated size of the node
content[3] (int): Non-leaf node Flag (has subnodes)
Attributes:
start_offset (int): Start offset
end_offset (int): End offset
end_alloc_offset (int): Allocated size of the node
flags (int): Non-leaf node Flag (has subnodes)
'''
_idx_nh_namespace = {"__len__" : _len_idx_nh,
"create_from_binary" : classmethod(_from_binary_idx_nh)
}
IndexNodeHeader = _create_attrcontent_class("IndexNodeHeader",
("start_offset", "end_offset", "end_alloc_offset", "flags"),
inheritance=(AttributeContentRepr,), data_structure="<4I",
extra_functions=_idx_nh_namespace, docstring=_docstring_idx_nh)
#------------------------------------------------------------------------------
def _from_binary_idx_e(cls, binary_stream, content_type=None):
"""See base class."""
#TODO don't save this here and overload later?
#TODO confirm if this is really generic or is always a file reference
''' Undefined - 8
Length of entry - 2
Length of content - 2
Flags - 4
Content - variable
VCN of child node - 8 (exists only if flag is set, aligned to a 8 byte boundary)
'''
repr_size = cls._REPR.size
generic, entry_len, cont_len, flags = cls._REPR.unpack(binary_stream[:repr_size])
vcn_child_node = (None,)
#if content is known (filename), create a new object to represent the content
if content_type is AttrTypes.FILE_NAME and cont_len:
binary_content = FileName.create_from_binary(binary_stream[repr_size:repr_size+cont_len])
else:
binary_content = binary_stream[repr_size:repr_size+cont_len].tobytes()
#if there is a next entry, we need to pad it to a 8 byte boundary
if flags & IndexEntryFlags.CHILD_NODE_EXISTS:
temp_size = repr_size + cont_len
boundary_fix = (entry_len - temp_size) % 8
vcn_child_node = cls._REPR_VCN.unpack(binary_stream[temp_size+boundary_fix:temp_size+boundary_fix+8])
nw_obj = cls((generic, entry_len, cont_len, IndexEntryFlags(flags), binary_content, vcn_child_node))
_MOD_LOGGER.debug("Attempted to unpack Index Entry from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _len_idx_e(self):
return self._entry_len
_docstring_idx_e = '''Represents an entry in the index.
An Index, from the MFT perspective is composed of multiple entries. This class
represents these entries. Normally entries contain a FILENAME attribute.
Note that the entry can have other types of content; in those cases the class
saves the raw bytes.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (int): File reference?
content[1] (int): Length of the entry
content[2] (int): Length of the content
content[3] (:obj:`IndexEntryFlags`): Flags
content[4] (:obj:`FileName` or bytes): Content of the entry
content[5] (int): VCN child node
Attributes:
generic (int): File reference?
content_len (int): Length of the content
flags (:obj:`IndexEntryFlags`): Flags
content (:obj:`FileName` or bytes): Content of the entry
vcn_child_node (int): VCN child node
'''
_idx_e_namespace = {"__len__" : _len_idx_e,
"_REPR_VCN" : struct.Struct("<Q"),
"create_from_binary" : classmethod(_from_binary_idx_e)
}
IndexEntry = _create_attrcontent_class("IndexEntry",
("generic", "_entry_len", "content_len", "flags", "content", "vcn_child_node"),
inheritance=(AttributeContentRepr,), data_structure="<Q2HI",
extra_functions=_idx_e_namespace, docstring=_docstring_idx_e)
#------------------------------------------------------------------------------
def _from_binary_idx_root(cls, binary_stream):
"""See base class."""
''' Attribute type - 4
Collation rule - 4
Bytes per index record - 4
Clusters per index record - 1
Padding - 3
'''
attr_type, collation_rule, b_per_idx_r, c_per_idx_r = cls._REPR.unpack(binary_stream[:cls._REPR.size])
node_header = IndexNodeHeader.create_from_binary(binary_stream[cls._REPR.size:])
attr_type = AttrTypes(attr_type) if attr_type else None
index_entry_list = []
offset = cls._REPR.size + node_header.start_offset
#loads all index entries related to the root node
while True:
entry = IndexEntry.create_from_binary(binary_stream[offset:], attr_type)
index_entry_list.append(entry)
if entry.flags & IndexEntryFlags.LAST_ENTRY:
break
else:
offset += len(entry)
nw_obj = cls((attr_type, CollationRule(collation_rule), b_per_idx_r,
c_per_idx_r, node_header, index_entry_list ))
_MOD_LOGGER.debug("Attempted to unpack INDEX_ROOT Entry from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _len_idx_root(self):
return IndexRoot._REPR.size
_docstring_idx_root = '''Represents the content of a INDEX_ROOT attribute.
The structure of an index is a B+ tree, as such a root is always present.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (:obj:`AttrTypes`): Attribute type
content[1] (:obj:`CollationRule`): Collation rule
content[2] (int): Index record size in bytes
content[3] (int): Index record size in clusters
node_header (IndexNodeHeader) - Node header related to this index root
idx_entry_list (list(IndexEntry))- List of index entries that belong to
this index root
Attributes:
attr_type (:obj:`AttrTypes`): Attribute type
collation_rule (:obj:`CollationRule`): Collation rule
index_len_in_bytes (int): Index record size in bytes
index_len_in_cluster (int): Index record size in clusters
node_header (IndexNodeHeader): Node header related to this index root
index_entry_list (list(IndexEntry)): List of index entries that belong to
'''
_idx_root_namespace = {"__len__" : _len_idx_root,
"create_from_binary" : classmethod(_from_binary_idx_root)
}
IndexRoot = _create_attrcontent_class("IndexRoot",
("attr_type", "collation_rule", "index_len_in_bytes", "index_len_in_cluster",
"node_header", "index_entry_list"),
inheritance=(AttributeContentRepr,), data_structure="<3IB3x",
extra_functions=_idx_root_namespace, docstring=_docstring_idx_root)
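#Parsing note (sketch): IndexRoot.create_from_binary reads the fixed header,
#then the IndexNodeHeader, and walks IndexEntry records starting at
#node_header.start_offset until one carries IndexEntryFlags.LAST_ENTRY; for
#directory indexes the entry content is typically decoded as a FileName.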
#******************************************************************************
# BITMAP ATTRIBUTE
#******************************************************************************
def _allocated_entries_bitmap(self):
'''Creates a generator that returns all allocated entries in the
bitmap.
Yields:
int: The bit index of the allocated entries.
'''
for entry_number in range(len(self._bitmap) * 8):
if self.entry_allocated(entry_number):
yield entry_number
def _entry_allocated_bitmap(self, entry_number):
"""Checks if a particular index is allocated.
Args:
entry_number (int): Index to verify
Returns:
bool: True if it is allocated, False otherwise.
"""
index, offset = divmod(entry_number, 8)
return bool(self._bitmap[index] & (1 << offset))
def _get_next_empty_bitmap(self):
"""Returns the next empty entry.
Returns:
int: The value of the empty entry
"""
#TODO probably not the best way, redo
for i, byte in enumerate(self._bitmap):
if byte != 255:
for offset in range(8):
if not byte & (1 << offset):
return (i * 8) + offset
def _from_binary_bitmap(cls, binary_stream):
"""See base class."""
return cls(binary_stream.tobytes())
def _len_bitmap(self):
'''Returns the size of the bitmap in bytes'''
return len(self._bitmap)
_docstring_bitmap = """Represents the content of a BITMAP attribute.
Correctly represents a bitmap as seen by the MFT. That basically means that
the underlying data structure is interpreted bit by bit, where if the bit
is 1, the entry is "occupied"/allocated.
Args:
binary_data (:obj:`bytes`): The bytes where the data is maintained
"""
_bitmap_namespace = {"__len__" : _len_bitmap,
"get_next_empty" : _get_next_empty_bitmap,
"entry_allocated" : _entry_allocated_bitmap,
"allocated_entries" : _allocated_entries_bitmap,
"create_from_binary" : classmethod(_from_binary_bitmap)
}
Bitmap = _create_attrcontent_class("Bitmap",
("_bitmap", ),
inheritance=(AttributeContentNoRepr,), data_structure=None,
extra_functions=_bitmap_namespace, docstring=_docstring_bitmap)
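#Worked example for the bitmap helpers above: a single byte 0x05 (0b00000101)
#marks entries 0 and 2 as allocated, so:
#   >>> bm = Bitmap(b"\x05")
#   >>> list(bm.allocated_entries())
#   [0, 2]
#   >>> bm.get_next_empty()
#   1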
#******************************************************************************
# REPARSE_POINT ATTRIBUTE
#******************************************************************************
def _from_binary_junc_mnt(cls, binary_stream):
"""See base class."""
''' Offset to target name - 2 (relative to 16th byte)
Length of target name - 2
Offset to print name - 2 (relative to 16th byte)
Length of print name - 2
'''
offset_target_name, len_target_name, offset_print_name, len_print_name = \
cls._REPR.unpack(binary_stream[:cls._REPR.size])
offset = cls._REPR.size + offset_target_name
target_name = binary_stream[offset:offset+len_target_name].tobytes().decode("utf_16_le")
offset = cls._REPR.size + offset_print_name
print_name = binary_stream[offset:offset+len_print_name].tobytes().decode("utf_16_le")
nw_obj = cls((target_name, print_name))
_MOD_LOGGER.debug("Attempted to unpack Junction or MNT point from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _len_junc_mnt(self):
'''Returns the size of the content in bytes'''
return len(self.target_name.encode("utf_16_le")) + len(self.print_name.encode("utf_16_le")) + 4 #size of offsets
_docstring_junc_mnt = """Represents the content of a REPARSE_POINT attribute when it is a junction
or mount point.
Args:
target_name (str): Target name
print_name (str): Print name
Attributes:
target_name (str): Target name
print_name (str): Print name
"""
_junc_mnt_namespace = {"__len__" : _len_junc_mnt,
"create_from_binary" : classmethod(_from_binary_junc_mnt)
}
JunctionOrMount = _create_attrcontent_class("JunctionOrMount",
("target_name", "print_name"),
inheritance=(AttributeContentRepr,), data_structure="<4H",
extra_functions=_junc_mnt_namespace, docstring=_docstring_junc_mnt)
#------------------------------------------------------------------------------
def _from_binary_syn_link(cls, binary_stream):
"""See base class."""
''' Offset to target name - 2 (relative to 16th byte)
Length of target name - 2
Offset to print name - 2 (relative to 16th byte)
Length of print name - 2
Symbolic link flags - 4
'''
offset_target_name, len_target_name, offset_print_name, \
len_print_name, syn_flags = \
cls._REPR.unpack(binary_stream[:cls._REPR.size])
offset = cls._REPR.size + offset_target_name
target_name = binary_stream[offset:offset+len_target_name].tobytes().decode("utf_16_le")
offset = cls._REPR.size + offset_print_name
print_name = binary_stream[offset:offset+len_print_name].tobytes().decode("utf_16_le")
nw_obj = cls((target_name, print_name, SymbolicLinkFlags(syn_flags)))
_MOD_LOGGER.debug("Attempted to unpack Symbolic Link from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _len_syn_link(self):
'''Returns the size of the content in bytes'''
return len(self.target_name.encode("utf_16_le")) + len(self.print_name.encode("utf_16_le")) + 8 #size of offsets + flags
_docstring_syn_link = """Represents the content of a REPARSE_POINT attribute when it is a
symbolic link.
Args:
target_name (str): Target name
print_name (str): Print name
sym_flags (:obj:`SymbolicLinkFlags`): Symbolic link flags
Attributes:
target_name (str): Target name
print_name (str): Print name
sym_flags (:obj:`SymbolicLinkFlags`): Symbolic link flags
"""
_syn_link_namespace = {"__len__" : _len_syn_link,
"create_from_binary" : classmethod(_from_binary_syn_link)
}
SymbolicLink = _create_attrcontent_class("SymbolicLink",
("target_name", "print_name", "symbolic_flags"),
inheritance=(AttributeContentRepr,), data_structure="<4HI",
extra_functions=_syn_link_namespace, docstring=_docstring_syn_link)
#------------------------------------------------------------------------------
def _from_binary_reparse(cls, binary_stream):
"""See base class."""
''' Reparse tag - 4 (low 16 bits = reparse type, high nibble = flags)
Reparse data length - 2
Padding - 2
'''
#content = cls._REPR.unpack(binary_view[:cls._REPR.size])
reparse_tag, data_len = cls._REPR.unpack(binary_stream[:cls._REPR.size])
#reparse_tag (type, flags) data_len, guid, data
reparse_type = ReparseType(reparse_tag & 0x0000FFFF)
reparse_flags = ReparseFlags((reparse_tag & 0xF0000000) >> 28)
guid = None #guid exists only in third party reparse points
if reparse_flags & ReparseFlags.IS_MICROSOFT:#a microsoft tag
if reparse_type is ReparseType.SYMLINK:
data = SymbolicLink.create_from_binary(binary_stream[cls._REPR.size:])
elif reparse_type is ReparseType.MOUNT_POINT:
data = JunctionOrMount.create_from_binary(binary_stream[cls._REPR.size:])
else:
data = binary_stream[cls._REPR.size:].tobytes()
else:
guid = UUID(bytes_le=binary_stream[cls._REPR.size:cls._REPR.size+16].tobytes())
data = binary_stream[cls._REPR.size+16:].tobytes()
nw_obj = cls((reparse_type, reparse_flags, data_len, guid, data))
_MOD_LOGGER.debug("Attempted to unpack REPARSE_POINT from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _len_reparse(self):
'''Returns the size of the content in bytes'''
return ReparsePoint._REPR.size + self.data_len
_docstring_reparse = '''Represents the content of a REPARSE_POINT attribute.
The REPARSE_POINT attribute is a little more complicated. We can have
Microsoft predefined content and third-party content. As expected,
this completely changes how the data is interpreted.
All Microsoft types of REPARSE_POINT can be gathered from the winnt.h file.
However, as of now, only two have been implemented:
* Symbolic Links - SYMLINK
* Mount or junction point - MOUNT_POINT
As for third-party data, this is always saved in raw (bytes).
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (:obj:`ReparseType`): Reparse point type
content[1] (:obj:`ReparseFlags`): Reparse point flags
content[2] (int): Reparse data length
content[3] (:obj:`UUID`): GUID
content[4] (*variable*): Content of the reparse type
Attributes:
reparse_type (:obj:`ReparseType`): Reparse point type
reparse_flags (:obj:`ReparseFlags`): Reparse point flags
data_len (int): Reparse data length
guid (:obj:`UUID`): GUID. This exists only in the third-party
reparse points. If it is a Microsoft one, it defaults to ``None``
data (*variable*): Content of the reparse type
'''
_reparse_namespace = {"__len__" : _len_reparse,
"create_from_binary" : classmethod(_from_binary_reparse)
}
ReparsePoint = _create_attrcontent_class("ReparsePoint",
("reparse_type", "reparse_flags", "data_len", "guid", "data"),
inheritance=(AttributeContentRepr,), data_structure="<IH2x",
extra_functions=_reparse_namespace, docstring=_docstring_reparse)
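# Illustrative note (not part of the original module): _from_binary_reparse
# splits the 4 byte reparse tag into the type (low 16 bits) and the flags
# (high nibble). For example, with the well known symlink tag from winnt.h,
# and assuming the enums map those values:
#
#   reparse_tag = 0xA000000C
#   reparse_tag & 0x0000FFFF          # 0x000C -> ReparseType.SYMLINK (assumed mapping)
#   (reparse_tag & 0xF0000000) >> 28  # 0xA    -> includes ReparseFlags.IS_MICROSOFT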
#******************************************************************************
# EA_INFORMATION ATTRIBUTE
#******************************************************************************
def _from_binary_ea_info(cls, binary_stream):
"""See base class."""
''' Size of Extended Attribute entry - 2
Number of Extended Attributes which have NEED_EA set - 2
Size of extended attribute data - 4
'''
return cls(cls._REPR.unpack(binary_stream[:cls._REPR.size]))
def _len_ea_info(self):
return EaInformation._REPR.size
_docstring_ea_info = '''Represents the content of a EA_INFORMATION attribute.
The (HPFS) extended attribute information ($EA_INFORMATION) contains
information about the extended attribute ($EA).
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (int): Size of the EA attribute entry
content[1] (int): Number of EA attributes with NEED_EA set
content[2] (int): Size of the EA data
Attributes:
entry_len (int): Size of the EA attribute entry
ea_set_number (int): Number of EA attributes with NEED_EA set
ea_size (int): Size of the EA data
'''
_ea_info_namespace = {"__len__" : _len_ea_info,
"create_from_binary" : classmethod(_from_binary_ea_info)
}
EaInformation = _create_attrcontent_class("EaInformation",
("entry_len", "ea_set_number", "ea_size"),
inheritance=(AttributeContentRepr,), data_structure="<2HI",
extra_functions=_ea_info_namespace, docstring=_docstring_ea_info)
#******************************************************************************
# EA ATTRIBUTE
#******************************************************************************
def _from_binary_ea_entry(cls, binary_stream):
"""See base class."""
''' Offset to the next EA - 4
Flags - 1
Name length - 1
Value length - 2
'''
offset_next_ea, flags, name_len, value_len = cls._REPR.unpack(binary_stream[:cls._REPR.size])
name = binary_stream[cls._REPR.size:cls._REPR.size + name_len].tobytes().decode("ascii")
#it looks like the value is 8 byte aligned, do some math to compensate
#TODO confirm if this is true
value_alignment = (_ceil((cls._REPR.size + name_len) / 8) * 8)
value = binary_stream[value_alignment:value_alignment + value_len].tobytes()
nw_obj = cls((offset_next_ea, EAFlags(flags), name, value))
_MOD_LOGGER.debug("Attempted to unpack EA entry from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _len_ea_entry(self):
'''Returns the size of the entry'''
return EaEntry._REPR.size + len(self.name.encode("ascii")) + len(self.value)
_docstring_ea_entry = '''Represents an entry for EA.
The EA attribute is composed of multiple EaEntries. Some information about
this structure is not completely understood. One open question is whether
some kind of alignment is required between the name and the value. The code
assumes an 8 byte alignment and calculates it automatically.
Warning:
The interpretation of the binary data MAY be wrong. The community does
not have all the data.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (int): Offset to the next EA
content[1] (:obj:`EAFlags`): Flags
content[2] (str): Name of the EA attribute
content[3] (bytes): Value of the attribute
Attributes:
offset_next_ea (int): Offset to the next extended attribute entry,
relative to the start of this entry.
flags (:obj:`EAFlags`): Flags
name (str): Name of the EA attribute
value (bytes): Value of the attribute
'''
_ea_entry_namespace = {"__len__" : _len_ea_entry,
"create_from_binary" : classmethod(_from_binary_ea_entry)
}
EaEntry = _create_attrcontent_class("EaEntry",
("offset_next_ea", "flags", "name", "value"),
inheritance=(AttributeContentRepr,), data_structure="<I2BH",
extra_functions=_ea_entry_namespace, docstring=_docstring_ea_entry)
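# Illustrative sketch (not part of the original module) of the 8 byte alignment
# assumption used by _from_binary_ea_entry: the value is read from the next
# 8 byte boundary after the fixed header plus the name. With the 8 byte header
# (struct "<I2BH") and a 5 character name:
#
#   from math import ceil
#   header_size, name_len = 8, 5
#   value_offset = ceil((header_size + name_len) / 8) * 8   # 16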
#------------------------------------------------------------------------------
def _from_binary_ea(cls, binary_stream):
"""See base class."""
_ea_list = []
offset = 0
#_MOD_LOGGER.debug(f"Creating Ea object from binary stream {binary_stream.tobytes()}...")
_MOD_LOGGER.debug("Creating Ea object from binary '%s'...", binary_stream.tobytes())
while True:
entry = EaEntry.create_from_binary(binary_stream[offset:])
offset += entry.offset_next_ea
_ea_list.append(entry)
if offset >= len(binary_stream):
break
nw_obj = cls(_ea_list)
_MOD_LOGGER.debug("Attempted to unpack EA from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _len_ea(self):
'''Return the number of entries in the EA attribute'''
return len(self._ea_list)
def _iter_ea(self):
return iter(self._ea_list)
def _gitem_ea(self, index):
return _getitem(self._ea_list, index)
_docstring_ea = '''Represents the content of a EA attribute.
It is a list of EaEntry. It behaves like a Python list: you can iterate
over it, access entries by index, etc.
Important:
Using the ``len()`` method on the objects of this class returns the number
of elements in the list.
Args:
content (list(:obj:`EaEntry`)): List of EaEntry
'''
_ea_namespace = {"__len__" : _len_ea,
"__iter__" : _iter_ea,
"__getitem__" : _gitem_ea,
"create_from_binary" : classmethod(_from_binary_ea)
}
Ea = _create_attrcontent_class("Ea",
("_ea_list",),
inheritance=(AttributeContentNoRepr,), data_structure=None,
extra_functions=_ea_namespace, docstring=_docstring_ea)
#******************************************************************************
# SECURITY_DESCRIPTOR ATTRIBUTE
#******************************************************************************
def _from_binary_secd_header(cls, binary_stream):
"""See base class."""
''' Revision number - 1
Padding - 1
Control flags - 2
Reference to the owner SID - 4 (offset relative to the header)
Reference to the group SID - 4 (offset relative to the header)
Reference to the DACL - 4 (offset relative to the header)
Reference to the SACL - 4 (offset relative to the header)
'''
nw_obj = cls(cls._REPR.unpack(binary_stream))
nw_obj.control_flags = SecurityDescriptorFlags(nw_obj.control_flags)
_MOD_LOGGER.debug("Attempted to unpack Security Descriptor Header from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _len_secd_header(self):
'''Returns the size of the header in bytes'''
return SecurityDescriptorHeader._REPR.size
_docstring_secd_header = '''Represents the header of the SECURITY_DESCRIPTOR attribute.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (int): Revision number
content[1] (:obj:`SecurityDescriptorFlags`): Control flags
content[2] (int): Offset to the owner SID
content[3] (int): Offset to the group SID
content[4] (int): Offset to the DACL
content[5] (int): Offset to the SACL
Attributes:
revision_number (int): Revision number
control_flags (:obj:`SecurityDescriptorFlags`): Control flags
owner_sid_offset (int): Offset to the owner SID
group_sid_offset (int): Offset to the group SID
dacl_offset (int): Offset to the DACL
sacl_offset (int): Offset to the SACL
'''
_secd_header_namespace = {"__len__" : _len_secd_header,
"create_from_binary" : classmethod(_from_binary_secd_header)
}
SecurityDescriptorHeader = _create_attrcontent_class("SecurityDescriptorHeader",
("revision_number", "control_flags", "owner_sid_offset",
"group_sid_offset", "dacl_offset", "sacl_offset"),
inheritance=(AttributeContentRepr,), data_structure="<B1xH4I",
extra_functions=_secd_header_namespace, docstring=_docstring_secd_header)
#------------------------------------------------------------------------------
def _from_binary_ace_header(cls, binary_stream):
"""See base class."""
''' ACE Type - 1
ACE Control flags - 1
Size - 2 (includes header size)
'''
type, control_flags, size = cls._REPR.unpack(binary_stream)
nw_obj = cls((ACEType(type), ACEControlFlags(control_flags), size))
_MOD_LOGGER.debug("Attempted to unpack ACE Header from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _len_ace_header(self):
'''Returns the size of the header in bytes'''
return ACEHeader._REPR.size
_docstring_ace_header = '''Represents header of an ACE object.
As part of an ACL, every ACE (Access Control Entry) has a header that
is represented by this class.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (:obj:`ACEType`): Type of ACE entry
content[1] (:obj:`ACEControlFlags`): ACE control flags
content[2] (int): size of the ACE entry, including the header
Attributes:
type (:obj:`ACEType`): Type of ACE entry
control_flags (:obj:`ACEControlFlags`): ACE control flags
ace_size (int): size of the ACE entry, including the header
'''
_ace_header_namespace = {"__len__" : _len_ace_header,
"create_from_binary" : classmethod(_from_binary_ace_header)
}
ACEHeader = _create_attrcontent_class("ACEHeader",
("type", "control_flags", "ace_size"),
inheritance=(AttributeContentRepr,), data_structure="<2BH",
extra_functions=_ace_header_namespace, docstring=_docstring_ace_header)
#-------------------------------------------------------------------------------
def _from_binary_sid(cls, binary_stream):
"""See base class."""
''' Revision number - 1
Number of sub authorities - 1
Authority - 6
Array of 32 bits with sub authorities - 4 * number of sub authorities
'''
rev_number, sub_auth_len, auth = cls._REPR.unpack(binary_stream[:cls._REPR.size])
if sub_auth_len:
sub_auth_repr = struct.Struct("<" + str(sub_auth_len) + "I")
sub_auth = sub_auth_repr.unpack(binary_stream[cls._REPR.size:cls._REPR.size + sub_auth_repr.size])
else:
sub_auth = ()
nw_obj = cls((rev_number, int.from_bytes(auth, byteorder="big"), sub_auth))
_MOD_LOGGER.debug("Attempted to unpack SID from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _len_sid(self):
'''Returns the size of the SID in bytes'''
return SID._REPR.size + (4 * len(self.sub_authorities))
def _str_sid(self):
'Return a nicely formatted representation string'
sub_auths = "-".join([str(sub) for sub in self.sub_authorities])
return f'S-{self.revision_number}-{self.authority}-{sub_auths}'
_docstring_sid = '''Represents the content of a SID object to be used by the SECURITY_DESCRIPTOR
attribute.
This represents a Microsoft SID, normally seen as::
S-1-5-21-7623811015-3361044348-030300820-1013
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (int): Revision number
content[1] (int): Authority
content[2] (list(int)): List of sub authorities
Attributes:
revision_number (int): Revision number
authority (int): Authority
sub_authorities (list(int)): List of sub authorities
'''
_sid_namespace = {"__len__" : _len_sid,
"create_from_binary" : classmethod(_from_binary_sid),
"__str__" : _str_sid
}
SID = _create_attrcontent_class("SID",
("revision_number", "authority", "sub_authorities"),
inheritance=(AttributeContentRepr,), data_structure="<2B6s",
extra_functions=_sid_namespace, docstring=_docstring_sid)
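# Illustrative sketch (not part of the original module): building the usual
# string form from the parsed pieces, assuming the generated class takes the
# same (revision, authority, sub_authorities) tuple that _from_binary_sid builds:
#
#   sid = SID((1, 5, (21, 1004336348, 1177238915, 682003330, 512)))
#   str(sid)   # 'S-1-5-21-1004336348-1177238915-682003330-512'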
#-------------------------------------------------------------------------------
def _from_binary_b_ace(cls, binary_stream):
"""See base class."""
''' Access rights flags - 4
SID - n
'''
access_flags = cls._REPR.unpack(binary_stream[:cls._REPR.size])[0]
sid = SID.create_from_binary(binary_stream[cls._REPR.size:])
nw_obj = cls((ACEAccessFlags(access_flags), sid))
return nw_obj
def _len_b_ace(self):
'''Returns the size of the fixed part of the ACE in bytes'''
return BasicACE._REPR.size
_docstring_b_ace = '''Represents one of the types of ACE entries. The Basic type.
The Basic ACE is a very simple entry that contains the access flags for a
particular SID.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (:obj:`ACEAccessFlags`): Access rights flags
content[1] (:obj:`SID`): SID
Attributes:
access_rights_flags (:obj:`ACEAccessFlags`): Access rights flags
sid (:obj:`SID`): SID
'''
_b_ace_namespace = {"__len__" : _len_b_ace,
"create_from_binary" : classmethod(_from_binary_b_ace)
}
BasicACE = _create_attrcontent_class("BasicACE",
("access_rights_flags", "SID"),
inheritance=(AttributeContentRepr,), data_structure="<I",
extra_functions=_b_ace_namespace, docstring=_docstring_b_ace)
#-------------------------------------------------------------------------------
def _from_binary_obj_ace(cls, binary_stream):
"""See base class."""
''' Access rights flags - 4
Flags - 4
Object type class identifier (GUID) - 16
Inherited object type class identifier (GUID) - 16
SID - n
'''
#content = cls._REPR.unpack(binary_stream[:cls._REPR.size])
access_flags, flags, object_guid, inher_guid = cls._REPR.unpack(binary_stream[:cls._REPR.size])
sid = SID.create_from_binary(binary_stream[cls._REPR.size:])
nw_obj = cls((ACEAccessFlags(access_flags),flags, UUID(bytes_le=object_guid), UUID(bytes_le=inher_guid), sid))
return nw_obj
def _len_obj_ace(self):
'''Returns the size of the ACE in bytes'''
return ObjectACE._REPR.size + len(self.sid)
_docstring_obj_ace = '''Represents one of the types of ACE entries. The Object type.
This is a more complex type of ACE that contains the access flags, a group
of undocumented flags, the object id, its inherited object id and the SID
to which it applies.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (:obj:`ACEAccessFlags`): Access rights flags
content[1] (int): Flags
content[2] (:obj:`UUID`): Object type class identifier (GUID)
content[3] (:obj:`UUID`): Inherited object type class identifier (GUID)
content[4] (:obj:`SID`): SID
Attributes:
access_rights_flags (:obj:`ACEAccessFlags`): Access rights flags
flags (int): Flags
object_guid (:obj:`UUID`): Object type class identifier (GUID)
inherited_guid (:obj:`UUID`): Inherited object type class identifier (GUID)
sid (:obj:`SID`): SID
'''
_obj_ace_namespace = {"__len__" : _len_b_ace,
"create_from_binary" : classmethod(_from_binary_b_ace)
}
ObjectACE = _create_attrcontent_class("ObjectACE",
("access_rights_flags", "flags", "object_guid", "inherited_guid", "sid"),
inheritance=(AttributeContentRepr,), data_structure="<2I16s16s",
extra_functions=_obj_ace_namespace, docstring=_docstring_obj_ace)
#-------------------------------------------------------------------------------
class CompoundACE():
'''Nobody knows this structure'''
pass
#-------------------------------------------------------------------------------
def _from_binary_ace(cls, binary_stream):
nw_obj = cls()
header = ACEHeader.create_from_binary(binary_stream[:cls._HEADER_SIZE])
nw_obj.header = header
#TODO create a _dispatcher and replace this slow ass comparison
if "OBJECT" in header.type.name:
nw_obj.object_ace = ObjectACE.create_from_binary(binary_stream[cls._HEADER_SIZE:])
elif "COMPOUND" in header.type.name:
pass
else:
nw_obj.basic_ace = BasicACE.create_from_binary(binary_stream[cls._HEADER_SIZE:])
return nw_obj
def _len_ace(self):
'''Returns the size of the ACE entry in bytes'''
return self.header.ace_size
_docstring_ace = '''Represents an ACE object.
This class aggregates all the information about an ACE (Access Control Entry):
its header plus either a Basic ACE or an Object ACE body.
Important:
The class should never have both basic ace and object ace attributes.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (:obj:`ACEHeader`): ACE header
content[1] (:obj:`BasicACE`): Basic ACE body, if applicable
content[2] (:obj:`ObjectACE`): Object ACE body, if applicable
Attributes:
header (:obj:`ACEHeader`): ACE header
basic_ace (:obj:`BasicACE`): Basic ACE body, if applicable
object_ace (:obj:`ObjectACE`): Object ACE body, if applicable
'''
_ace_namespace = {"__len__" : _len_ace,
"create_from_binary" : classmethod(_from_binary_ace),
"_HEADER_SIZE" : ACEHeader.get_representation_size(),
}
ACE = _create_attrcontent_class("ACE",
("header", "basic_ace", "object_ace"),
inheritance=(AttributeContentNoRepr,), data_structure=None,
extra_functions=_ace_namespace, docstring=_docstring_ace)
#-------------------------------------------------------------------------------
def _from_binary_acl(cls, binary_stream):
"""See base class."""
''' Revision number - 1
Padding - 1
Size - 2
ACE Count - 2
Padding - 2
'''
rev_number, size, ace_len = cls._REPR.unpack(binary_stream[:cls._REPR.size])
#content = cls._REPR.unpack(binary_stream[:cls._REPR.size])
aces = []
offset = cls._REPR.size
for i in range(ace_len):
ace = ACE.create_from_binary(binary_stream[offset:])
offset += len(ace)
aces.append(ace)
_MOD_LOGGER.debug("Next ACE offset = %d", offset)
nw_obj = cls((rev_number, size, aces))
_MOD_LOGGER.debug("Attempted to unpack SID from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj
def _len_acl(self):
'''Returns the size of the ACL in bytes'''
return self.size
_docstring_acl = '''Represents an ACL for the SECURITY_DESCRIPTOR.
Represents a Access Control List (ACL), which contains multiple ACE entries.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (int): Revision number
content[1] (int): Size
content[2] (list(:obj:`ACE`)): List of ACE entries
Attributes:
revision_number (int): Revision number
size (int): Size
aces (list(:obj:`ACE`)): List of ACE entries
'''
_acl_namespace = {"__len__" : _len_acl,
"create_from_binary" : classmethod(_from_binary_acl)
}
ACL = _create_attrcontent_class("ACL",
("revision_number", "size", "aces"),
inheritance=(AttributeContentRepr,), data_structure="<B1x2H2x",
extra_functions=_acl_namespace, docstring=_docstring_acl)
#-------------------------------------------------------------------------------
def _from_binary_sec_desc(cls, binary_stream):
"""See base class."""
header = SecurityDescriptorHeader.create_from_binary(binary_stream[:SecurityDescriptorHeader.get_representation_size()])
owner_sid = SID.create_from_binary(binary_stream[header.owner_sid_offset:])
group_sid = SID.create_from_binary(binary_stream[header.group_sid_offset:])
dacl = None
sacl = None
if header.sacl_offset:
sacl = ACL.create_from_binary(binary_stream[header.sacl_offset:])
if header.dacl_offset:
dacl = ACL.create_from_binary(binary_stream[header.dacl_offset:])
nw_obj = cls((header, owner_sid, group_sid, sacl, dacl))
return nw_obj
def _len_sec_desc(self):
'''Returns the size of the security descriptor in bytes'''
return len(self.header) + len(self.owner_sid) + len(self.group_sid) + (len(self.sacl) if self.sacl else 0) + (len(self.dacl) if self.dacl else 0)
_docstring_sec_desc = '''Represents the content of a SECURITY_DESCRIPTOR attribute.
The Security Descriptor in Windows has a header, an owner SID and group SID, plus a
discretionary access control list (DACL) and a system access control list (SACL).
Both DACL and SACL are ACLs with the same format.
Note:
This class receives an Iterable as argument, the "Parameters/Args" section
represents what must be inside the Iterable. The Iterable MUST preserve
order or things might go boom.
Args:
content[0] (:obj:`SecurityDescriptorHeader`): Security descriptor header
content[1] (:obj:`SID`): Owner SID
content[2] (:obj:`SID`): Group SID
content[3] (:obj:`ACL`): SACL
content[4] (:obj:`ACL`): DACL
Attributes:
header (:obj:`SecurityDescriptorHeader`): Security descriptor header
owner_sid (:obj:`SID`): Owner SID
group_sid (:obj:`SID`): Group SID
sacl (:obj:`ACL`): SACL
dacl (:obj:`ACL`): DACL
'''
_sec_desc_namespace = {"__len__" : _len_sec_desc,
"create_from_binary" : classmethod(_from_binary_sec_desc)
}
SecurityDescriptor = _create_attrcontent_class("SecurityDescriptor",
("header", "owner_sid", "group_sid", "sacl", "dacl"),
inheritance=(AttributeContentNoRepr,),
extra_functions=_sec_desc_namespace, docstring=_docstring_sec_desc)
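# Illustrative note (not part of the original module): _from_binary_sec_desc
# walks the content purely through the header offsets, so the reading order is:
#
#   header    -> fixed size struct at the start of the content
#   owner SID -> content[header.owner_sid_offset:]
#   group SID -> content[header.group_sid_offset:]
#   SACL/DACL -> parsed only when the matching offset is non zero, otherwise None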
#******************************************************************************
# LOGGED_TOOL_STREAM ATTRIBUTE
#******************************************************************************
class LoggedToolStream():
#TODO implement the know cases of this attribute
def __init__(self, bin_view):
'''Initialize the class. Expects the binary_view that represents the
content. Size information is derived from the content.
'''
self.content = bin_view.tobytes()
def __repr__(self):
'Return a nicely formatted representation string'
return self.__class__.__name__ + '(content={})'.format(
self.content)
``` |
{
"source": "jldeen/pytryfi",
"score": 2
} |
#### File: pytryfi/pytryfi/__init__.py
```python
import logging
import requests
from pytryfi.const import (API_HOST_URL_BASE, API_LOGIN, API_GRAPHQL, PYTRYFI_VERSION)
from pytryfi.fiUser import FiUser
from pytryfi.fiPet import FiPet
from pytryfi.fiBase import FiBase
from pytryfi.common import query
from pytryfi.const import SENTRY_URL
import sentry_sdk
from sentry_sdk import capture_message, capture_exception
LOGGER = logging.getLogger(__name__)
class PyTryFi(object):
"""base object for TryFi"""
def __init__(self, username=None, password=None):
try:
sentry = sentry_sdk.init(
SENTRY_URL,
release=PYTRYFI_VERSION,
)
self._api_host = API_HOST_URL_BASE
self._session = requests.Session()
self._user_agent = "pyTryFi"
self._username = username
self._password = password
self.login()
self._currentUser = FiUser(self._userId)
self._currentUser.setUserDetails(self._session)
petListJSON = query.getPetList(self._session)
self._pets = []
for pet in petListJSON:
p = FiPet(pet['id'])
p.setPetDetailsJSON(pet)
#get the current location and set it
pLocJSON = query.getCurrentPetLocation(self._session,p._petId)
p.setCurrentLocation(pLocJSON)
#get the daily, weekly and monthly stats and set
pStatsJSON = query.getCurrentPetStats(self._session,p._petId)
p.setStats(pStatsJSON['dailyStat'],pStatsJSON['weeklyStat'],pStatsJSON['monthlyStat'])
LOGGER.debug(f"Adding Pet: {p._name} with Device: {p._device._deviceId}")
self._pets.append(p)
self._bases = []
baseListJSON = query.getBaseList(self._session)
for base in baseListJSON:
b = FiBase(base['baseId'])
b.setBaseDetailsJSON(base)
LOGGER.debug(f"Adding Base: {b._name} Online: {b._online}")
self._bases.append(b)
except Exception as e:
capture_exception(e)
def __str__(self):
instString = f"Username: {self.username}"
userString = f"{self.currentUser}"
baseString = ""
petString = ""
for b in self.bases:
baseString = baseString + f"{b}"
for p in self.pets:
petString = petString + f"{p}"
return f"TryFi Instance - {instString}\n Pets in Home:\n {petString}\n Bases In Home:\n {baseString}"
#refresh pet details for all pets
def updatePets(self):
try:
petListJSON = query.getPetList(self._session)
updatedPets = []
for pet in petListJSON:
p = FiPet(pet['id'])
p.setPetDetailsJSON(pet)
#get the current location and set it
pLocJSON = query.getCurrentPetLocation(self._session,p._petId)
p.setCurrentLocation(pLocJSON)
#get the daily, weekly and monthly stats and set
pStatsJSON = query.getCurrentPetStats(self._session,p._petId)
p.setStats(pStatsJSON['dailyStat'],pStatsJSON['weeklyStat'],pStatsJSON['monthlyStat'])
LOGGER.debug(f"Adding Pet: {p._name} with Device: {p._device._deviceId}")
updatedPets.append(p)
self._pets = updatedPets
except Exception as e:
capture_exception(e)
def updatePetObject(self, petObj):
try:
petId = petObj.petId
count = 0
for p in self.pets:
if p.petId == petId:
self._pets.pop(count)
self._pets.append(petObj)
LOGGER.debug(f"Updating Existing Pet: {petId}")
break
count = count + 1
except Exception as e:
capture_exception(e)
# return the pet object based on petId
def getPet(self, petId):
try:
for p in self.pets:
if petId == p.petId:
return p
LOGGER.error(f"Cannot find Pet: {petId}")
return None
except Exception as e:
capture_exception(e)
#refresh base details
def updateBases(self):
try:
updatedBases = []
baseListJSON = query.getBaseList(self._session)
for base in baseListJSON:
b = FiBase(base['baseId'])
b.setBaseDetailsJSON(base)
updatedBases.append(b)
self._bases = updatedBases
except Exception as e:
capture_exception(e)
# return the pet object based on petId
def getBase(self, baseId):
try:
for b in self.bases:
if baseId == b.baseId:
return b
LOGGER.error(f"Cannot find Base: {baseId}")
return None
except Exception as e:
capture_exception(e)
def update(self):
self.updateBases()
self.updatePets()
@property
def currentUser(self):
return self._currentUser
@property
def pets(self):
return self._pets
@property
def bases(self):
return self._bases
@property
def username(self):
return self._username
@property
def session(self):
return self._session
@property
def cookies(self):
return self._cookies
@property
def userID(self):
return self._userId
# login to the api and get a session
def login(self):
error = None
url = API_HOST_URL_BASE + API_LOGIN
params={
'email' : self._username,
'password' : self._password,
}
LOGGER.debug(f"Logging into TryFi")
try:
response = self._session.post(url, data=params)
response.raise_for_status()
#validate if the response contains error or not
try:
error = response.json()['error']
except Exception as e:
#capture_exception(e)
error = None
#if error set or response is non-200
if error or not response.ok:
errorMsg = error['message']
LOGGER.error(f"Cannot login, response: ({response.status_code}): {errorMsg} ")
capture_exception(errorMsg)
raise Exception("TryFiLoginError")
#storing cookies but don't need them. Handled by session mgmt
self._cookies = response.cookies
#store unique userId from login for future use
self._userId = response.json()['userId']
self._sessionId = response.json()['sessionId']
LOGGER.debug(f"Successfully logged in. UserId: {self._userId}")
except requests.RequestException as e:
LOGGER.error(f"Cannot login, error: ({e})")
capture_exception(e)
raise requests.RequestException(e)
except Exception as e:
capture_exception(e)
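# Minimal usage sketch (illustrative, not part of the library; assumes valid
# TryFi credentials):
#
#   tryfi = PyTryFi(username="user@example.com", password="secret")
#   tryfi.update()                 # refreshes bases and pets
#   for pet in tryfi.pets:
#       print(pet)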
``` |
{
"source": "JLDevOps/Branch-Bomb",
"score": 3
} |
#### File: Scripts/python/branch_bomb.py
```python
import threading
import argparse
import subprocess as sp
import random
import string
import time
def git_clone(clone_link):
sp.check_output(['git', 'clone', clone_link])
print("[Branch Bombing] Git Clone Repo: [" + clone_link + "]")
def create_branch(branch_name):
sp.check_output(['git', 'checkout', '-b', branch_name])
print("[Branch Bombing] Created Branch [" + branch_name + "]")
def push_local_branch_to_remote(branch_name):
# This may require authentication via ssh key or username/password
sp.check_output(['git', 'push', '-u', 'origin', branch_name])
print("[Branch Bombing] Branch [" + branch_name + "] : Pushed to Remote")
def generate_unique_branch_name(num_chars):
return (''.join(
random.SystemRandom().choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in
range(num_chars)))
def execute_branch_bombing(clone_link, name_size, thread_index, reset_branch_name):
if reset_branch_name is not None:
unique_branch_name = generate_unique_branch_name(name_size)
git_clone(clone_link)
create_branch(unique_branch_name)
push_local_branch_to_remote(unique_branch_name)
reset_to_branch(reset_branch_name)
delete_branch(unique_branch_name)
print("[Branch Bombing] Thread " + str(
thread_index) + " : Created Branch [" + unique_branch_name + "] & Pushed to Origin")
else:
unique_branch_name = generate_unique_branch_name(name_size)
create_branch(unique_branch_name)
push_local_branch_to_remote(unique_branch_name)
reset_to_branch(reset_branch_name)
delete_branch(unique_branch_name)
print("[Branch Bombing] Thread " + str(
thread_index) + " : Created Branch [" + unique_branch_name + "] & Pushed to Origin")
def reset_to_branch(branch_name):
sp.check_output(['git', 'checkout', branch_name])
print("[Branch Bombing] Reset to Branch [" + branch_name + "]")
def delete_branch(branch_name):
sp.check_output(['git', 'branch', '-d', branch_name])
print("[Branch Bombing] Deleted Branch [" + branch_name + "]")
def main():
parser = argparse.ArgumentParser(description='Run branch bombing on a specific Git repo.')
parser.add_argument('-t', '--Threads', help='Number of threads', type=int, required=False)
parser.add_argument('-ib', '--Initial_Branch', help='Initial branch used to start creating branches from.',
required=False)
parser.add_argument('-rb', '--Reset_Branch', help='Reset branch after the bombing.', required=False)
parser.add_argument('-n', '--Size_Of_Branch_Name', help='Number of characters for each unique branch generated.',
type=int, required=False)
parser.add_argument('-c', '--Git_Clone_Repo', help='The SSH link for Git cloning the repo.', required=False)
args = vars(parser.parse_args())
num_of_threads = args['Threads']
initial_branch = args['Initial_Branch']
reset_branch_name = args['Reset_Branch']
number_chars_for_branch_name = args['Size_Of_Branch_Name']
git_clone_link = args['Git_Clone_Repo']
print("[Branch Bombing] Starting Branch Bombing")
if initial_branch is not None:
sp.Popen(["git", "checkout " + initial_branch], stdin=sp.PIPE, stdout=sp.PIPE,
shell=True)
print("[Branch Bombing] Git Checked Out to " + initial_branch)
if num_of_threads is None:
num_of_threads = 10
if reset_branch_name is None:
reset_branch_name = 'master'
if number_chars_for_branch_name is None:
number_chars_for_branch_name = 10
for x in range(num_of_threads):
print("[Branch Bombing] Thread " + str(x) + " : Executing the Branch Bombing")
thread_start = threading.Thread(target=execute_branch_bombing,
args=(git_clone_link, number_chars_for_branch_name, x, reset_branch_name))
thread_start.start()
time.sleep(.9)
if __name__ == "__main__":
main()
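# Example invocation (illustrative; the repository URL is a placeholder):
#
#   python branch_bomb.py -c git@github.com:example/repo.git -t 5 -n 12 -rb master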
``` |
{
"source": "JLDevOps/ISBN-Book-OCR",
"score": 3
} |
#### File: ISBN-Book-OCR/src/scan.py
```python
try:
from PIL import Image
except ImportError:
import Image
import re
import pytesseract
import cv2
from pytesseract import Output
from image_processing import *
from os import listdir
from os.path import isfile, join, isdir
from matplotlib import pyplot as plt
from scipy import ndimage
import numpy as np
import argparse
import os
import platform
import pathlib
import requests
import csv
import time
from isbn import *
# Global variables
custom_config = r''
online = False
isbn_found = 0
file_found = 0
# PLT variables
plt.figure(figsize=(16,12))
# Converting images to a different image format
def convert_image_format(file, folder=None, dpi=(600,600), extension='.tiff'):
base = os.path.basename(file)
split_text = os.path.splitext(base)
filename = split_text[0] + extension
im = Image.open(file)
if folder:
folder_path = str(pathlib.Path(folder).absolute())
if platform.system() == 'Windows':
filename = folder_path + '\\' + filename
else:
filename = folder_path + '/' + filename
im.save(filename, dpi=dpi)
else:
im.save(filename, dpi=dpi)
return os.path.abspath(filename)
def create_local_temp_folder(folder=None):
# Create folder to store temp files
# Store temp files in them
if not folder:
folder = "temp"
if not os.path.exists(folder):
os.makedirs(folder)
def create_csv(output_filename='output.csv', data_list=None):
# Create a csv with the ISBN and Image OCR results
header = ['Image Name', 'Rotation', 'ISBN Number', 'Raw Data', 'Found Online', 'URL']
with open(output_filename, 'w', newline='') as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow(header)
csv_file.close()
def scan_image(file=None, csv=None):
global custom_config
global online
global isbn_found
isbn_check = False
row_list = []
isbn_value = None
found_angle = None
raw_data = None
print('-----------------------------------------')
print('TESSERACT OUTPUT --> ' + file)
print('-----------------------------------------')
base = os.path.basename(file)
# Checks if the image is tiff, if not convert to tiff temp file
# .tiff files provide better results for tessseract
if os.path.splitext(base)[1] != '.tiff':
create_local_temp_folder()
file = convert_image_format(file=file, folder='temp')
image = cv2.imread(file)
image = cv2.resize(image, None, fx=1.5, fy=1.5, interpolation=cv2.INTER_CUBIC)
angle_list = [90, 180, 270, 360]
try:
url = None
# Cleaning up image before rotation
gray = get_grayscale(image)
# thresh_image = adaptiveThreshold(gray)
# noise_removed = remove_noise(thresh_image)
for index, angle in enumerate(angle_list):
print('angle is: ' + str(angle))
rotate_image = rotate(image=gray, rotate_angle=angle)
raw_data = pytesseract.image_to_string(rotate_image, config=custom_config)
isbn_value = find_isbn(raw_data)
if isbn_value:
# If you want to confirm that the isbn is found online
print(isbn_value)
if online:
isbn_check, url = check_isbn(isbn_value)
if(isbn_check):
isbn_found+=1
found_angle = angle
break
row_list = [str(file), str(found_angle if found_angle else None), str(isbn_value), str(raw_data), str(isbn_check), str(url)]
print(row_list)
return row_list
except Exception as e:
print("image: " + file + " Error: " + str(e))
def main():
global file_found
global isbn_found
global custom_config
global online
global scan_list
is_file = False
parser = argparse.ArgumentParser(description='Book ISBN image scanner')
parser.add_argument('-p', '--path', help='File or Folder Path', required=True)
parser.add_argument('-c', '--config', help='Tesseract config commands (ex. --oem 3)', required=False)
parser.add_argument('-o', '--online', help='Allow the scanner to check isbns online', action='store_true', required=False)
parser.add_argument('-x', '--csv', help='Exports a csv file from the results', required=False)
args = vars(parser.parse_args())
path = args['path']
custom_config = args['config'] if args['config'] else custom_config
csv_name = args['csv'] if args['csv'] else None
if isdir(path):
is_file = False
elif isfile(path):
is_file = True
else:
raise Exception('Unable to determine file or directory')
if args['online']:
online = True
start_time = time.perf_counter()
if csv_name:
create_csv(output_filename=csv_name)
with open(csv_name, 'a', newline='') as csv_file:
csv_writer = csv.writer(csv_file)
if is_file:
csv_writer.writerow(scan_image(path))
csv_file.flush()
file_found+=1
else:
for files in listdir(path):
csv_writer.writerow(scan_image(join(path, files)))
csv_file.flush()
file_found+=1
csv_file.close()
end_time = time.perf_counter()
print("Total files: " + str(file_found))
print("Total ISBN to Files: " + str(isbn_found) + "/" + str(file_found))
print(f"Total time: {end_time - start_time:0.4f} seconds")
if __name__ == "__main__":
main()
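# Example invocation (illustrative paths and filenames; --config passes extra
# tesseract flags, --online enables the ISBN lookup, --csv writes a report):
#
#   python scan.py --path ./book_covers/ --online --csv results.csv --config="--oem 3 --psm 6"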
``` |
{
"source": "JLDevOps/Jackbox-Online",
"score": 2
} |
#### File: management/commands/clear_models.py
```python
from django.core.management.base import BaseCommand
from jackboxonline.models import JackboxRoom
class Command(BaseCommand):
def handle(self, *args, **options):
JackboxRoom.objects.all().delete()
```
#### File: Jackbox-Online/jackboxonline/views.py
```python
import threading
from rest_framework import permissions
from rest_framework import viewsets, filters, generics
from rest_framework.decorators import action
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.renderers import JSONRenderer
from .jackbox_room_finder import set_room_data
from .models import JackboxRoom
from .serializers import JackboxRoomSerializer
class StandardResultsSetPagination(PageNumberPagination):
page_size = 100
page_size_query_param = 'page_size'
max_page_size = 100
class JackboxRoomView(generics.ListAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
pagination_class = StandardResultsSetPagination
serializer_class = JackboxRoomSerializer
filter_backends = (filters.OrderingFilter,)
ordering_fields = ('room_code', 'online', 'last_updated', 'game_type', 'join_able')
ordering = ('room_code',)
def get(self, request, *args, **kwargs):
response = super(JackboxRoomView, self).get(request, **kwargs)
# response['Content-Range'] = '100'
# response['X-Total-Count'] = '100'
# response['Access-Control-Expose-Headers'] = 'X-Total-Count, Content-Range'
return response
def get_queryset(self):
queryset = JackboxRoom.objects.all()
room = self.request.query_params.get('room_code', None)
if room is not None:
queryset = queryset.filter(room_code=room)
online_status = self.request.query_params.get('online', None)
if online_status is not None:
queryset = queryset.filter(online=online_status)
join_able_status = self.request.query_params.get('join_able', None)
if join_able_status is not None:
queryset = queryset.filter(join_able=join_able_status)
locked_status = self.request.query_params.get('locked', None)
if locked_status is not None:
queryset = queryset.filter(locked=locked_status)
return queryset
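# Illustrative filtering examples (not part of the original file; the URL prefix
# depends on the project's urls.py, which is not shown here):
#
#   GET <list-endpoint>?room_code=ABCD                     # single room lookup
#   GET <list-endpoint>?online=True&join_able=True&ordering=-last_updated
#
# The ordering parameter works because OrderingFilter is enabled with the
# fields declared above.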
class Initiate(APIView):
renderer_classes = (JSONRenderer, )
permission_classes = (permissions.IsAdminUser,)
# Multithreading GET for view URL
def get(self, request, format=None):
t = threading.Thread(target=set_room_data, args=(), kwargs={})
t.daemon = True
t.start()
return Response("Thread Done")
# def get(self, request, format=None):
# t = threading.Thread(target=set_room_data(), args=(), kwargs={})
# t.setDaemon(True)
# t.daemon = True
# t.start()
# t._stop()
# return Response("Thread Done")
# class JackboxGame(APIView):
#
# def get(self, request, format=None):
# room = self.request.query_params.get('room_code', None)
``` |
{
"source": "JLDevOps/Markov-Morse",
"score": 3
} |
#### File: Markov-Morse/Script/morse.py
```python
import re
import subprocess
import sys
import time
import os
import pygame
from markov_chain import *
CODE = {'A': '.-', 'B': '-...', 'C': '-.-.',
'D': '-..', 'E': '.', 'F': '..-.',
'G': '--.', 'H': '....', 'I': '..',
'J': '.---', 'K': '-.-', 'L': '.-..',
'M': '--', 'N': '-.', 'O': '---',
'P': '.--.', 'Q': '--.-', 'R': '.-.',
'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-',
'Y': '-.--', 'Z': '--..',
'0': '-----', '1': '.----', '2': '..---',
'3': '...--', '4': '....-', '5': '.....',
'6': '-....', '7': '--...', '8': '---..',
'9': '----.'
}
ONE_UNIT = 0.5
THREE_UNITS = 3 * ONE_UNIT
SEVEN_UNITS = 7 * ONE_UNIT
dir = os.path.dirname(__file__)
oog_PATH = 'morse_sound_files/'
PATH = os.path.join(dir, oog_PATH)
def verify(string):
keys = CODE.keys()
for char in string:
if char.upper() not in keys and char != ' ':
sys.exit('Error the character ' + char + ' cannot be translated to Morse Code')
def generate_morse_code(sentence_list):
pygame.mixer.init()
for i in range(len(sentence_list)):
sentence = str(sentence_list[i])
new_string = re.sub('[!@#$?={}|^*&()_:[,\].\-;<>\"/`~\\\'+%]', '', sentence)
verify(new_string)
morse_char = []
if new_string is not None:
# print new_string ## Testing purposes
for char in new_string:
if char == ' ':
print ' ' * 7, ## Testing purposes
morse_char.append(str(" " * 4))
time.sleep(SEVEN_UNITS)
else:
print CODE[char.upper()]
code = CODE[char.upper()]
pygame.mixer.music.load(PATH + char.upper() + '_morse_code.ogg')
pygame.mixer.music.play()
morse_char.append(''.join(code))
time.sleep(THREE_UNITS)
morse_code = " ".join([str(i) for i in morse_char])
# print "Morse Code is : " + morse_code ##Testing purposes
return morse_code
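# Illustrative example (not in the original script): besides playing the
# per-letter .ogg files and sleeping between units, generate_morse_code returns
# the joined code string for a sentence, e.g.:
#
#   generate_morse_code(["SOS"])   # returns "... --- ..."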
def generate_markov_morse(filename, file_format, num_of_sentences):
list = generate_markov_text(filename, file_format, num_of_sentences)
generate_morse_code(list)
def input_prompt():
subprocess.call('clear', shell=True)
print 'Welcome to Alphabet to Morse Code Translator v.01\n'
msg = raw_input('Enter Message: ')
msg = re.sub('[!@#$?={}|^*&()_:[,\].\-;<>\"/`~\\\'+%]', '', msg)
verify(msg)
pygame.init()
for char in msg:
if char == ' ':
print ' ' * 7,
time.sleep(SEVEN_UNITS)
else:
print CODE[char.upper()],
pygame.mixer.music.load(PATH + char.upper() + '_morse_code.ogg')
pygame.mixer.music.play()
time.sleep(THREE_UNITS)
## For testing purposes
if __name__ == "__main__":
input_prompt()
# generate_markov_morse("sherlock", "txt", 1)
``` |
{
"source": "jldiaz/terminus",
"score": 3
} |
#### File: terminus/ImportantScripts/restoreInfluxDump.py
```python
from subprocess import call
import os
def get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def restoreInfluxDBDatabases(dir_names, hostname):
for dirname in dir_names:
newdbname_k8s = dirname+"_k8s"
newdbname_TestK6 = dirname+"_TestK6"
print ("dirName: "+ dirname)
call(["influxd", "restore", "-portable", "-host", hostname, "-db", "k8s", "-newdb", newdbname_k8s, dirname])
call(["influxd", "restore", "-portable", "-host", hostname, "-db", "TestK6", "-newdb", newdbname_TestK6, dirname])
dir_names = get_immediate_subdirectories("./")
restoreInfluxDBDatabases(dir_names, "192.168.127.12:8088")
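# Illustrative outcome (not in the original script): for a dump directory named
# "run1" in the current folder, the two restore calls above create the databases
# "run1_k8s" and "run1_TestK6" on the given influxd host.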
```
#### File: regression/src/main.py
```python
from http.server import BaseHTTPRequestHandler, HTTPServer
import socketserver
import pickle
import urllib.request
import json
from pprint import pprint
from pandas.io.json import json_normalize
import pandas as pd
from sklearn import preprocessing
from sklearn.preprocessing import PolynomialFeatures
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import scale
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import Ridge
from math import sqrt
import os
import errno
from pymongo import MongoClient
import urllib.parse as urlparse
from influxdb import InfluxDBClient
from pymongo import MongoClient
import pandas as pd
from pandas.io.json import json_normalize
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import TheilSenRegressor
from sklearn.datasets import make_regression
import matplotlib.pyplot as plt
class Terminus(BaseHTTPRequestHandler):
def getAllNodeNames(self,client):
queryResult = client.query("SHOW TAG VALUES FROM uptime WITH KEY=nodename;")
nodeNames_temp = list(queryResult.get_points())
dfnodeNames = pd.DataFrame(nodeNames_temp)
allNodeNames = dfnodeNames[:]["value"]
return allNodeNames
def getNamespaceNames(self,client,node):
nsQuery = client.query("SHOW TAG VALUES FROM uptime WITH KEY=namespace_name WHERE nodename = '"+node+"';")
nsQuery_temp = list(nsQuery.get_points())
dfnsNames = pd.DataFrame(nsQuery_temp)
allnsNames = dfnsNames[:]["value"]
return allnsNames
def getAllPodNames(self,client,node,ns_name):
queryResult = client.query("SHOW TAG VALUES FROM uptime WITH KEY = pod_name WHERE namespace_name = '"+ns_name+"' AND nodename = '"+node+"';")
podNames_temp = list(queryResult.get_points())
dfpodNames = pd.DataFrame(podNames_temp)
if dfpodNames.empty:
return dfpodNames
else:
allpodNames = dfpodNames[:]["value"]
return allpodNames
def getCPUUtilizationNode(self,client, node):
queryResult = client.query('SELECT * FROM "cpu/node_utilization" where nodename = \''+node+'\' AND type=\'node\';')
dfcpuUtilization = pd.DataFrame(queryResult['cpu/node_utilization'])
return dfcpuUtilization
def getCPUUtilizationPod(self,client, node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "cpu/usage_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfcpuUtilization = pd.DataFrame(queryResult['cpu/usage_rate'])
return dfcpuUtilization
def getCPUUtilizationPodContainer(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "cpu/usage_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod_container\';')
dfcpuUtilization = pd.DataFrame(queryResult['cpu/usage_rate'])
return dfcpuUtilization
def prepareCpuUtilization(self,client,node,ns_name, pod_name):
cpuUtilization = self.getCPUUtilizationNode(client,node)
podCpuUtilization = self.getCPUUtilizationPod(client,node,ns_name, pod_name)
containercpuUtilization = self.getCPUUtilizationPodContainer(client,node,ns_name, pod_name)
plt.plot(cpuUtilization.index, cpuUtilization['value'] *1000, 'r', label="node") # plotting t, a separately
plt.plot(podCpuUtilization.index, podCpuUtilization['value'], 'b', label="pod") # plotting t, b separately
plt.plot(containercpuUtilization.index, containercpuUtilization['value'], 'g', label="container") # plotting t, c separately
plt.legend(loc='upper left')
plt.show()
def getMemoryUtilizationNode(self,client,node):
queryResult = client.query('SELECT * FROM "memory/node_utilization" where nodename = \''+node+'\' AND type=\'node\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/node_utilization'])
return dfmemUtilization
def getMemoryUtilizationPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "memory/usage" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/usage'])
return dfmemUtilization
def getMemoryUtilizationPodContainer(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "memory/usage" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod_container\';')
dfmemUtilization = pd.DataFrame(queryResult['memory/usage'])
return dfmemUtilization
def prepareMemoryUtilization(self,client,node,ns_name, pod_name):
memoryUtilization = self.getMemoryUtilizationNode(client,node)
podMemoryUtilization = self.getMemoryUtilizationPod(client,node,ns_name, pod_name)
containerMemoryUtilization = self.getMemoryUtilizationPodContainer(client,node,ns_name, pod_name)
plt.plot(memoryUtilization.index, memoryUtilization['value'], 'r', label="node") # plotting t, a separately
plt.plot(podMemoryUtilization.index, podMemoryUtilization['value'], 'b', label="pod") # plotting t, b separately
plt.plot(containerMemoryUtilization.index, containerMemoryUtilization['value'], 'g', label="container") # plotting t, c separately
plt.legend(loc='upper left')
plt.show()
def getNetworkTxRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_rate'])
return dfmemUtilization
def getNetworkTxPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx'])
return dfmemUtilization
def getNetworkTxErrorsPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_errors" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_errors'])
return dfmemUtilization
def getNetworkTxErrorsRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/tx_errors_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/tx_errors_rate'])
return dfmemUtilization
def prepareNetworkTxRateUtilization(self,client,node,ns_name, pod_name):
podNetworTxRate = self.getNetworkTxRatePod(client,node,ns_name, pod_name)
podNetworTx = self.getNetworkTxPod(client,node,ns_name, pod_name)
podNetworkError = self.getNetworkTxErrorsPod(client,node,ns_name, pod_name)
podNetworkErrorRate = self.getNetworkTxErrorsRatePod(client,node,ns_name, pod_name)
plt.plot(podNetworTxRate.index, podNetworTxRate['value'], 'b') # plotting t, b separately
#plt.plot(podNetworTx.index, podNetworTx['value'], 'g') # plotting t, b separately
#plt.plot(podNetworkError.index, podNetworkError['value'], 'y') # plotting t, b separately
plt.plot(podNetworkErrorRate.index, podNetworkErrorRate['value'], 'r') # plotting t, b separately
plt.show()
def getNetworkRxRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_rate'])
return dfmemUtilization
def getNetworkRxPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx'])
return dfmemUtilization
def getNetworkRxErrorsPod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_errors" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_errors'])
return dfmemUtilization
def getNetworkRxErrorsRatePod(self,client,node,ns_name, pod_name):
queryResult = client.query('SELECT * FROM "network/rx_errors_rate" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\';')
dfmemUtilization = pd.DataFrame(queryResult['network/rx_errors_rate'])
return dfmemUtilization
def prepareNetworkRxRateUtilization(self,client,node,ns_name, pod_name):
podNetworRxRate = self.getNetworkRxRatePod(client,node,ns_name, pod_name)
podNetworRx = self.getNetworkRxPod(client,node,ns_name, pod_name)
podNetworkError = self.getNetworkRxErrorsPod(client,node,ns_name, pod_name)
podNetworkErrorRate = self.getNetworkRxErrorsRatePod(client,node,ns_name, pod_name)
plt.plot(podNetworRxRate.index, podNetworRxRate['value'], 'b') # rx rate (blue)
#plt.plot(podNetworRx.index, podNetworRx['value'], 'g') # cumulative rx (green), disabled
#plt.plot(podNetworkError.index, podNetworkError['value'], 'y') # rx errors (yellow), disabled
plt.plot(podNetworkErrorRate.index, podNetworkErrorRate['value'], 'r') # rx error rate (red)
plt.show()
def getRelevantNodeName(self,client,ns_name):
allNodeNames = self.getAllNodeNames(client)
#nsNames = getNamespaceNames(allNodeNames[0])
relevantNodes = []
for node in allNodeNames:
allPodNamesNode = self.getAllPodNames(client,node,ns_name)
if(not allPodNamesNode.empty):
relevantNodes.append(node)
return relevantNodes
def getNodeResourceUtilizationDf(self,client, nodeName):
Result_node_CPU = client.query("SELECT value from \"cpu/node_utilization\" where nodename = '"+nodeName+"' AND type = 'node' ")
Result_node_MEM = client.query("SELECT value from \"memory/node_utilization\" where nodename = '"+nodeName+"' AND type = 'node' ")
Result_node_CPU_Cores = client.query("SELECT mean(\"value\") FROM \"cpu/node_capacity\" where nodename = '"+nodeName+
"' AND type = 'node' GROUP BY time(1m)")
Result_node_mem_node = client.query("SELECT mean(\"value\")FROM \"memory/node_capacity\" where nodename = '"+
nodeName+"' AND type = 'node' GROUP BY time(1m)")
cpu_points = pd.DataFrame(Result_node_CPU.get_points())
cpu_points['time'] = pd.to_datetime(cpu_points['time'])
cpu_points = cpu_points.set_index('time')
cpu_points.columns = ['node_cpu_util']
mem_points = pd.DataFrame(Result_node_MEM.get_points())
mem_points['time'] = pd.to_datetime(mem_points['time'])
mem_points = mem_points.set_index('time')
mem_points.columns = ['node_mem_util']
cores_points = pd.DataFrame(Result_node_CPU_Cores.get_points())
cores_points['time'] = pd.to_datetime(cores_points['time'])
cores_points = cores_points.set_index('time')
cores_points.columns = ['node_cores']
mem_node_points = pd.DataFrame(Result_node_mem_node.get_points())
mem_node_points['time'] = pd.to_datetime(mem_node_points['time'])
mem_node_points = mem_node_points.set_index('time')
mem_node_points.columns = ['node_mem']
df_node =pd.concat([cpu_points, mem_points,cores_points,mem_node_points], axis=1)
return df_node
def getPodResourceUtilizationDf(self,client, node, ns_name, pod_name):
Result_Pod_CPU_usage = client.query('SELECT value FROM "cpu/usage_rate" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\';')
Result_Pod_MEM_usage = client.query('SELECT value from \"memory/usage\" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\';')
Result_Pod_CPU_limit = client.query('SELECT mean(\"value\") FROM "cpu/limit" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\' group by time(1m);')
Result_Pod_MEM_limit = client.query('SELECT mean(\"value\") from \"memory/limit\" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\' group by time(1m);')
Result_Pod_CPU_requests = client.query('SELECT mean(\"value\") FROM "cpu/request" where nodename = \''+node+'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+'\' AND type=\'pod\' group by time(1m);')
Result_Pod_MEM_requests = client.query('SELECT mean(\"value\") from \"memory/request\" where nodename = \''+node+
'\' AND pod_name = \''+pod_name+'\' AND namespace_name = \''+ns_name+
'\' AND type=\'pod\' group by time(1m);')
cpu_points_usage = pd.DataFrame(Result_Pod_CPU_usage.get_points())
cpu_points_usage['time'] = pd.to_datetime(cpu_points_usage['time'])
cpu_points_usage = cpu_points_usage.set_index('time')
cpu_points_usage.columns = ['pod_cpu_usage']
mem_points_usage = pd.DataFrame(Result_Pod_MEM_usage.get_points())
mem_points_usage['time'] = pd.to_datetime(mem_points_usage['time'])
mem_points_usage = mem_points_usage.set_index('time')
mem_points_usage.columns = ['pod_mem_usage']
cpu_points_limits = pd.DataFrame(Result_Pod_CPU_limit.get_points())
cpu_points_limits['time'] = pd.to_datetime(cpu_points_limits['time'])
cpu_points_limits = cpu_points_limits.set_index('time')
cpu_points_limits.columns = ['pod_cpu_limit']
mem_points_limits = pd.DataFrame(Result_Pod_MEM_limit.get_points())
mem_points_limits['time'] = pd.to_datetime(mem_points_limits['time'])
mem_points_limits = mem_points_limits.set_index('time')
mem_points_limits.columns = ['pod_mem_limit']
cpu_points_request = pd.DataFrame(Result_Pod_CPU_requests.get_points())
cpu_points_request['time'] = pd.to_datetime(cpu_points_request['time'])
cpu_points_request = cpu_points_request.set_index('time')
cpu_points_request.columns = ['pod_cpu_request']
mem_points_request = pd.DataFrame(Result_Pod_MEM_requests.get_points())
mem_points_request['time'] = pd.to_datetime(mem_points_request['time'])
mem_points_request = mem_points_request.set_index('time')
mem_points_request.columns = ['pod_mem_request']
df_pod =pd.concat([cpu_points_usage, mem_points_usage,cpu_points_limits,mem_points_limits,cpu_points_request,mem_points_request ], axis=1)
return df_pod
def getRequestsDf(self,clientK6):
queryResult = clientK6.query('SELECT sum("value") FROM "vus" group by time(1m);')
vus = pd.DataFrame(queryResult['vus'])
vus.columns = ['vus','time']
vus = vus.set_index('time')
queryResultReqs = clientK6.query('SELECT sum("value") FROM "http_reqs" group by time(1m);')
reqs = pd.DataFrame(queryResultReqs['http_reqs'])
reqs.columns = ['requests','time']
reqs = reqs.set_index('time')
queryResultReqsDuration95 = clientK6.query('SELECT percentile("value", 95) FROM "http_req_duration" group by time(1m) ;')
reqs_duration95 = pd.DataFrame(queryResultReqsDuration95['http_req_duration'])
reqs_duration95.columns = [ 'requests_duration_percentile_95','time']
reqs_duration95 = reqs_duration95.set_index('time')
queryResultReqsDuration90 = clientK6.query('SELECT percentile("value", 90) FROM "http_req_duration" group by time(1m) ;')
reqs_duration90 = pd.DataFrame(queryResultReqsDuration90['http_req_duration'])
reqs_duration90.columns = ['requests_duration_percentile_90','time']
reqs_duration90 = reqs_duration90.set_index('time')
queryResultMaxDuration = clientK6.query('SELECT max("value") FROM "http_req_duration" group by time(1m);')
reqs_duration_max = pd.DataFrame(queryResultMaxDuration['http_req_duration'])
reqs_duration_max.columns = ['requests_duration_max','time']
reqs_duration_max = reqs_duration_max.set_index('time')
queryResultMinDuration = clientK6.query('SELECT min("value") FROM "http_req_duration" group by time(1m);')
reqs_duration_min = pd.DataFrame(queryResultMinDuration['http_req_duration'])
reqs_duration_min.columns = ['requests_duration_min','time']
reqs_duration_min = reqs_duration_min.set_index('time')
queryResultMeanDuration = clientK6.query('SELECT mean("value") FROM "http_req_duration" group by time(1m);')
reqs_duration_mean = pd.DataFrame(queryResultMeanDuration['http_req_duration'])
reqs_duration_mean.columns = ['requests_duration_mean','time']
reqs_duration_mean = reqs_duration_mean.set_index('time')
queryResultMedianDuration = clientK6.query('SELECT median("value") FROM "http_req_duration" group by time(1m);')
reqs_duration_median = pd.DataFrame(queryResultMedianDuration['http_req_duration'])
reqs_duration_median.columns = ['requests_duration_median','time']
reqs_duration_median = reqs_duration_median.set_index('time')
finalDF = pd.merge(vus, reqs, left_index=True, right_index=True)
finalDF = pd.merge(finalDF, reqs_duration95, left_index=True, right_index=True)
finalDF = pd.merge(finalDF, reqs_duration90, left_index=True, right_index=True)
finalDF = pd.merge(finalDF,reqs_duration_max, left_index=True, right_index=True)
finalDF = pd.merge(finalDF,reqs_duration_min, left_index=True, right_index=True)
finalDF = pd.merge(finalDF,reqs_duration_mean, left_index=True, right_index=True)
finalDF = pd.merge(finalDF,reqs_duration_median, left_index=True, right_index=True)
finalDF.index = pd.to_datetime(finalDF.index)
return finalDF
def getPodsNodesRequestsDf(self,appNames, client, clientK6):
default_ns_name = "default"
df_pods_node = []
relevantNodeNames = self.getRelevantNodeName(client,default_ns_name)
for relevantNodeName in relevantNodeNames:
if relevantNodeName is not None:
podNames = self.getAllPodNames(client,relevantNodeName, default_ns_name)
df_node = self.getNodeResourceUtilizationDf(client,relevantNodeName)
for podName in podNames:
if appNames[0] in podName:
df_pod = self.getPodResourceUtilizationDf(client,relevantNodeName, default_ns_name, podName)
finalDF = pd.merge(df_node,df_pod, left_index=True, right_index=True)
requestsDF = self.getRequestsDf(clientK6)
finalDF = pd.merge(finalDF,requestsDF, left_index=True, right_index=True)
if(finalDF['pod_cpu_limit'].values[0]==0):
finalDF['pod_cpu_usage'] = finalDF['pod_cpu_usage']/(finalDF['node_cores'])
finalDF['pod_cpu_limit'] = finalDF['node_cores']/1000
finalDF['pod_cpu_request'] = finalDF['node_cores']/1000
else:
finalDF['pod_cpu_usage'] = finalDF['pod_cpu_usage']/(finalDF['pod_cpu_limit'])
finalDF['pod_cpu_limit'] = finalDF['pod_cpu_limit']/1000
finalDF['pod_cpu_request'] = finalDF['pod_cpu_request']/1000
if(finalDF['pod_mem_limit'].values[0]==0):
finalDF['pod_mem_usage'] = finalDF['pod_mem_usage']/(finalDF['node_mem'])
finalDF['pod_mem_limit'] = finalDF['node_mem']/(1073741824)
finalDF['pod_mem_request'] = finalDF['node_mem']/(1073741824)
else:
finalDF['pod_mem_usage'] = finalDF['pod_mem_usage']/(finalDF['pod_mem_limit'])
finalDF['pod_mem_limit'] = finalDF['pod_mem_limit']/(1073741824)
finalDF['pod_mem_request'] = finalDF['pod_mem_request']/(1073741824)
finalDF['node_cores'] = finalDF['node_cores']/1000
finalDF['node_mem'] = finalDF['node_mem']/(1073741824)
finalDF = finalDF.fillna(0)
finalDF = finalDF[(finalDF.T != 0).any()]
df_pods_node.append(finalDF)
return df_pods_node
def getAndCombineAllDbs(self, host, port, username, password,appNames, folderNames):
allFinalDFs = []
print("FolderNames len = ", len(folderNames))
for folderName in folderNames:
client = InfluxDBClient(host, port,username , password, folderName+'_k8s')
clientK6 = InfluxDBClient(host, port, username, password, folderName+'_TestK6')
df_pods_node = self.getPodsNodesRequestsDf(appNames, client, clientK6)
print(folderName)
if(len(df_pods_node)>0):
finalDF = pd.DataFrame()
finalDF['pod_util_cpu_sum'] = 0
finalDF['pod_util_mem_sum'] = 0
first = 1
for i in range(len(df_pods_node)):
df_pods_node[i] = df_pods_node[i].reset_index(drop=True)
if(first==1):
finalDF['pod_util_cpu_sum'] = df_pods_node[i]['pod_cpu_usage']
finalDF['pod_util_mem_sum'] = df_pods_node[i]['pod_mem_usage']
first=0
else:
finalDF['pod_util_cpu_sum'] = finalDF['pod_util_cpu_sum'] + df_pods_node[i]['pod_cpu_usage']
finalDF['pod_util_mem_sum'] = finalDF['pod_util_mem_sum'] + df_pods_node[i]['pod_mem_usage']
finalDF['num_pods'] = int(len(df_pods_node))
finalDF['pod_util_cpu_avg'] = finalDF['pod_util_cpu_sum']/finalDF['num_pods']
finalDF['pod_util_mem_avg'] = finalDF['pod_util_mem_sum']/finalDF['num_pods']
finalDF = pd.concat([finalDF, df_pods_node[0][['node_cores', 'node_mem','node_cpu_util','node_mem_util', 'pod_cpu_limit', 'pod_cpu_request','pod_mem_limit',
'pod_mem_request','vus','requests','requests_duration_percentile_95',
'requests_duration_percentile_90','requests_duration_max', 'requests_duration_min',
'requests_duration_mean', 'requests_duration_median'
]]], axis=1)
allFinalDFs.append(finalDF)
df = pd.DataFrame()
print("All Dfs len = ", len(allFinalDFs))
for idx in range(len(allFinalDFs)):
df = df.append(allFinalDFs[idx])
final_df = df[['requests','requests_duration_mean','num_pods','pod_cpu_limit','node_cores', 'node_mem','pod_mem_limit','pod_util_cpu_avg','pod_util_mem_avg',
]]
final_df['pod_util_cpu_avg'] = final_df['pod_util_cpu_avg']*final_df['pod_cpu_limit']
final_df['pod_util_mem_avg'] = final_df['pod_util_mem_avg']*final_df['pod_mem_limit']
final_df = final_df.sort_values(['requests'])
final_df = final_df[(final_df[['pod_util_cpu_avg','pod_util_mem_avg','requests_duration_mean']] != 0).all(axis=1)]
final_df = final_df[np.isfinite(final_df['requests'])]
final_df = final_df[np.isfinite(final_df['requests_duration_mean'])]
final_df = final_df[np.isfinite(final_df['pod_util_cpu_avg'])]
final_df = final_df[np.isfinite(final_df['pod_util_mem_avg'])]
final_df = final_df[final_df.requests_duration_mean < 2500]
final_df = final_df.reset_index(drop=True)
return final_df
def train_and_return_model(self,host, port, username, password,appType, appNames, folderNames ):
df = self.getAndCombineAllDbs(host, port, username, password,appNames, folderNames)
df['total_cpu_util'] = df['pod_util_cpu_avg']*df['num_pods']
df['total_mem_util'] = df['pod_util_mem_avg']*df['num_pods']
df_X = df[['total_cpu_util']].values
df_Y = df[['requests']].values
X_train, X_test, y_train, y_test = train_test_split(df_X, df_Y, test_size=0.33, random_state=42)
X, y = make_regression(n_samples=df_X.shape[0], n_features=1, noise=4.0, random_state=0)
regr = TheilSenRegressor(random_state=0).fit(X_train, y_train)
regr.score(X, y)
y_pred = regr.predict(X_test)
rms = sqrt(mean_squared_error(y_test, y_pred))
print('RMS error: %.2f' % rms)
return regr, rms
def train_and_return_model_replicas(self,host, port, username, password,appType, appNames, folderNames ):
df = self.getAndCombineAllDbs(host, port, username, password,appNames, folderNames)
df['total_cpu_util'] = df['pod_util_cpu_avg']*df['num_pods']
df['total_mem_util'] = df['pod_util_mem_avg']*df['num_pods']
df_X = df[['requests']].values
df_Y = df[['total_cpu_util']].values
X_train, X_test, y_train, y_test = train_test_split(df_X, df_Y, test_size=0.33, random_state=42)
X, y = make_regression(n_samples=df_X.shape[0], n_features=1, noise=4.0, random_state=0)
regr = TheilSenRegressor(random_state=0).fit(X_train, y_train)
regr.score(X, y)
y_pred = regr.predict(X_test)
rms = sqrt(mean_squared_error(y_test, y_pred))
print('RMS error: %.2f' % rms)
return regr, rms
def train_and_return_model_smart(self,host, port, username, password,appType, appNames, folderNames ):
df = self.getAndCombineAllDbs(host, port, username, password,appNames, folderNames)
df['total_cpu_util'] = df['pod_util_cpu_avg']*df['num_pods']
df['total_mem_util'] = df['pod_util_mem_avg']*df['num_pods']
df = df.head(46)
df_X = df[['total_cpu_util']].values
df_Y = df[['requests']].values
if(df.shape[0] < 15):
testSize = 0
X_train, X_test, y_train, y_test = train_test_split(df_X, df_Y, test_size=testSize, random_state=42)
X_test = X_train
y_test = y_train
else:
testSize = 0.33
X_train, X_test, y_train, y_test = train_test_split(df_X, df_Y, test_size=testSize, random_state=42)
lin_reg = linear_model.LinearRegression()
regr = Pipeline([
("lin_reg", lin_reg),
])
regr.fit(X_train, y_train)
# Make predictions using the testing set
y_pred = regr.predict(X_test)
# The coefficients
# The mean squared error
print("Mean squared error: %.2f"
% mean_squared_error(y_test, y_pred))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(y_test, y_pred))
#print ('Test score %.2f', regr.score(X_test, y_test) )
print("Train Mean squared error: %.2f"
% mean_squared_error(y_train, regr.predict(X_train)))
rms = sqrt(mean_squared_error(y_test, y_pred))
print('RMS error: %.2f' % rms)
return regr, rms
def do_training(self, host, port, username, password, instanceFamily, appNames, appType, folderNames, filename):
model, rms = self.train_and_return_model(host, port, username, password,appType, appNames,folderNames)
print(filename)
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
pickle.dump(model, open(filename, 'wb'))
return rms
def do_training_replicas(self, host, port, username, password, instanceFamily, appNames, appType, folderNames, filename):
model, rms = self.train_and_return_model_replicas(host, port, username, password,appType, appNames,folderNames)
print(filename)
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
pickle.dump(model, open(filename, 'wb'))
return rms
def do_training_smart(self, host, port, username, password, instanceFamily, appNames, appType, folderNames, filename):
model, rms = self.train_and_return_model_smart(host, port, username, password,appType, appNames,folderNames)
print(filename)
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
pickle.dump(model, open(filename, 'wb'))
return rms
def get_folder_names(self, appName, colName, dbName):
mongoclient = MongoClient(mongo_host, 27017, username=mongo_username, password=mongo_password)
db = mongoclient[dbName]
col = db[colName]
datapoints = list(col.find({"servicename": appName}))
dfMongo = json_normalize(datapoints)
return dfMongo.foldername
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
self._set_headers()
output=''
#http://localhost:9002/pretrain?appname=primeapp&apptype=compute&instancefamily=t2&colname=ALL_BRUTE_FORCE_CONDUCTED_TEST_NAMES&dbname=TERMINUS
if '/pretrain' in self.path:
parsed = urlparse.urlparse(self.path)
appName = urlparse.parse_qs(parsed.query)['appname'][0]
appType = urlparse.parse_qs(parsed.query)['apptype'][0]
instanceFamily = urlparse.parse_qs(parsed.query)['instancefamily'][0]
colName = urlparse.parse_qs(parsed.query)['colname'][0]
dbName = urlparse.parse_qs(parsed.query)['dbname'][0]
mainServiceName = urlparse.parse_qs(parsed.query)['mainServiceName'][0]
folderNames = self.get_folder_names(appName, colName, dbName)
filename = "/app/training/preTrained/"+appType+"/"+appName+"/"+mainServiceName+"/"+instanceFamily+"/"+"trained.sav"
appNames = [mainServiceName]
rms = self.do_training(host, port, username, password, instanceFamily, appNames, appType, folderNames, filename)
output=""+str(rms)
self.wfile.write(output.encode())
#http://localhost:9002/trainedreplicas?appname=primeapp&apptype=compute&instancefamily=t2&colname=ALL_BRUTE_FORCE_CONDUCTED_TEST_NAMES&dbname=TERMINUS
elif '/trainedreplicas' in self.path:
parsed = urlparse.urlparse(self.path)
appName = urlparse.parse_qs(parsed.query)['appname'][0]
appType = urlparse.parse_qs(parsed.query)['apptype'][0]
instanceFamily = urlparse.parse_qs(parsed.query)['instancefamily'][0]
colName = urlparse.parse_qs(parsed.query)['colname'][0]
dbName = urlparse.parse_qs(parsed.query)['dbname'][0]
mainServiceName = urlparse.parse_qs(parsed.query)['mainServiceName'][0]
folderNames = self.get_folder_names(appName, colName, dbName)
filename = "/app/training/preTrainedReplicas/"+appType+"/"+appName+"/"+mainServiceName+"/"+instanceFamily+"/"+"trained.sav"
appNames = [mainServiceName]
rms = self.do_training_replicas(host, port, username, password, instanceFamily, appNames, appType, folderNames, filename)
output=""+str(rms)
self.wfile.write(output.encode())
# http://localhost:9002/getPredictionPreTrained?appname=primeapp&apptype=compute&replicas=2&numcoresutil=0.1
# &numcoreslimit=0.1&nummemlimit=0.1&instancefamily=t2&requestduration=1000
elif '/getPredictionPreTrained' in self.path:
parsed = urlparse.urlparse(self.path)
appName = urlparse.parse_qs(parsed.query)['appname'][0]
appType = urlparse.parse_qs(parsed.query)['apptype'][0]
replicas = urlparse.parse_qs(parsed.query)['replicas'][0]
numcoresUtil = urlparse.parse_qs(parsed.query)['numcoresutil'][0]
numcoresLimit = urlparse.parse_qs(parsed.query)['numcoreslimit'][0]
nummemLimit = urlparse.parse_qs(parsed.query)['nummemlimit'][0]
requestDuration = urlparse.parse_qs(parsed.query)['requestduration'][0]
instanceFamily = urlparse.parse_qs(parsed.query)['instancefamily'][0]
mainServiceName = urlparse.parse_qs(parsed.query)['mainServiceName'][0]
filename = "/app/training/preTrained/"+appType+"/"+appName+"/"+mainServiceName+"/"+instanceFamily+"/"+"trained.sav"
print(filename)
loaded_model = pickle.load(open(filename, 'rb'))
val = [[float(numcoresUtil)*float(replicas)]]
predict = loaded_model.predict(val)
output=""+str(predict[0])
print (predict)
print (output)
self.wfile.write(output.encode())
# http://localhost:9002/getPredictionPreTrained?appname=primeapp&apptype=compute&msc=1000&numcoresutil=0.1
# &numcoreslimit=0.1&nummemlimit=0.1&instancefamily=t2&requestduration=1000
elif '/getPredictionReplicas' in self.path:
parsed = urlparse.urlparse(self.path)
appName = urlparse.parse_qs(parsed.query)['appname'][0]
appType = urlparse.parse_qs(parsed.query)['apptype'][0]
msc = urlparse.parse_qs(parsed.query)['msc'][0]
numcoresUtil = urlparse.parse_qs(parsed.query)['numcoresutil'][0]
numcoresLimit = urlparse.parse_qs(parsed.query)['numcoreslimit'][0]
nummemLimit = urlparse.parse_qs(parsed.query)['nummemlimit'][0]
requestDuration = urlparse.parse_qs(parsed.query)['requestduration'][0]
instanceFamily = urlparse.parse_qs(parsed.query)['instancefamily'][0]
mainServiceName = urlparse.parse_qs(parsed.query)['mainServiceName'][0]
filename = "/app/training/preTrainedReplicas/"+appType+"/"+appName+"/"+mainServiceName+"/"+instanceFamily+"/"+"trained.sav"
print(filename)
loaded_model = pickle.load(open(filename, 'rb'))
val = [[float(msc)]]
predict = loaded_model.predict(val)
output=""+str(predict[0]/float(numcoresLimit))
print (predict)
print (output)
self.wfile.write(output.encode())
#http://localhost:9002/smartTestTrain?appname=primeapp&apptype=compute&instancefamily=t2&containerName=s1t1rc1nc1t2xlargecomputeprimeappt2nanob8j
elif '/smartTestTrain' in self.path:
parsed = urlparse.urlparse(self.path)
appName = urlparse.parse_qs(parsed.query)['appname'][0]
appType = urlparse.parse_qs(parsed.query)['apptype'][0]
folderName = urlparse.parse_qs(parsed.query)['containerName'][0]
instanceFamily = urlparse.parse_qs(parsed.query)['instancefamily'][0]
mainServiceName = urlparse.parse_qs(parsed.query)['mainServiceName'][0]
appNames = [mainServiceName]
folderNames = [folderName]
filename = "/app/training/smartTest/"+appType+"/"+appName+"/"+mainServiceName+"/"+instanceFamily+"/"+folderName+".sav"
rms = self.do_training_smart(host, port, username, password, instanceFamily, appNames, appType, folderNames, filename)
output=""+str(rms)
self.wfile.write(output.encode())
#http://localhost:9002/smartTestGetResult?appname=primeapp&apptype=compute&numcoresutil=0.1
# &nummemutil=0.1&instancefamily=t2&requestduration=1000&containerName=s1t1rc1nc1t2xlargecomputeprimeappt2nanob8j
elif '/smartTestGetResult' in self.path:
parsed = urlparse.urlparse(self.path)
folderName = urlparse.parse_qs(parsed.query)['containerName'][0]
appName = urlparse.parse_qs(parsed.query)['appname'][0]
appType = urlparse.parse_qs(parsed.query)['apptype'][0]
mainServiceName = urlparse.parse_qs(parsed.query)['mainServiceName'][0]
numcoresUtil = urlparse.parse_qs(parsed.query)['numcoresutil'][0]
requestDuration = urlparse.parse_qs(parsed.query)['requestduration'][0]
instanceFamily = urlparse.parse_qs(parsed.query)['instancefamily'][0]
nummemUtil = urlparse.parse_qs(parsed.query)['nummemutil'][0]
filename = "/app/training/smartTest/"+appType+"/"+appName+"/"+mainServiceName+"/"+instanceFamily+"/"+folderName+".sav"
loaded_model = pickle.load(open(filename, 'rb'))
# Both app types currently feed the CPU utilization to the model
# (nummemUtil is parsed above but not used here).
val = [[float(numcoresUtil)]]
predict = loaded_model.predict(val)
output=""+str(predict[0][0])
print (predict)
print (output)
self.wfile.write(output.encode())
# /getActualTRN?appname=primeapp&containerName=s1t1rc1nc1t2xlargecomputeprimeappt2nanob8j&requestduration=1000
elif '/getActualTRN' in self.path:
parsed = urlparse.urlparse(self.path)
appName = urlparse.parse_qs(parsed.query)['appname'][0]
folderName = urlparse.parse_qs(parsed.query)['containerName'][0]
requestDuration = urlparse.parse_qs(parsed.query)['requestduration'][0]
mainServiceName = urlparse.parse_qs(parsed.query)['mainServiceName'][0]
appNames = [mainServiceName]
folderNames = [folderName]
df = self.getAndCombineAllDbs(host, port, username, password,appNames, folderNames)
hit=0
finaldf = df
idxt = 0
for idxt, valt in enumerate(df['requests_duration_mean']):
if(valt > float(requestDuration)):
hit+=1
print(valt)
if(hit >=10):
break
if(hit==0):
for idxt, valt in enumerate(df['pod_util_cpu_avg']):
threshold = df['pod_cpu_limit'][idxt] - 0.3*df['pod_cpu_limit'][idxt]
if(valt > threshold):
hit+=1
print (valt)
if(hit >=10):
break
finaldf = df.head(idxt)
finaldf= finaldf.sort_values(['requests'])
val = float(finaldf.tail(1).requests)
output=""+str(val)
print (val)
print (output)
self.wfile.write(output.encode())
else:
self.wfile.write("Path not found".encode())
def do_HEAD(self):
self._set_headers()
host = os.environ['INFLUXDB_HOST']
port = os.environ['INFLUXDB_PORT']
username= os.environ['INFLUXDB_USER']
password = os.environ['INFLUXDB_PASS']
mongo_host = os.environ['MONGODB_HOST']
mongo_port = os.environ['MONGODB_PORT']
mongo_username= os.environ['MONGODB_USER']
mongo_password = os.environ['MONGODB_PASS']
def run(server_class=HTTPServer, handler_class=Terminus, port=9002):
server_address = ('', port)
httpd = server_class(server_address, handler_class)
print ('Starting httpd...')
httpd.serve_forever()
if __name__ == "__main__":
from sys import argv
if len(argv) == 2:
run(port=int(argv[1]))
else:
run()
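# --- Hypothetical client-side sketch (not part of the original service) ---
# The endpoints handled in do_GET above can be exercised with the standard
# `requests` library; the parameter names mirror the ones parsed from the
# query string, and the values below are placeholders only:
#
#   import requests
#   resp = requests.get("http://localhost:9002/pretrain", params={
#       "appname": "primeapp", "apptype": "compute", "instancefamily": "t2",
#       "colname": "ALL_BRUTE_FORCE_CONDUCTED_TEST_NAMES", "dbname": "TERMINUS",
#       "mainServiceName": "primeapp"})
#   print(resp.text)  # RMS error of the freshly trained model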
``` |
{
"source": "jldiaz-uniovi/molotov",
"score": 2
} |
#### File: molotov/molotov/stats.py
```python
from urllib.parse import urlparse
from aiodogstatsd import Client
def get_statsd_client(address="udp://127.0.0.1:8125", **kw):
res = urlparse(address)
return Client(host=res.hostname, port=res.port, **kw)
```
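The helper above only builds the client from a `udp://host:port` address; metric emission is left to `aiodogstatsd`. A minimal usage sketch, assuming `aiodogstatsd.Client`'s `connect`/`increment`/`close` API and an illustrative `namespace` keyword:
```python
import asyncio

from molotov.stats import get_statsd_client


async def main():
    # extra keyword arguments are forwarded to aiodogstatsd.Client
    client = get_statsd_client("udp://127.0.0.1:8125", namespace="loadtest")
    await client.connect()
    client.increment("scenario.success")
    await client.close()


asyncio.run(main())
```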
#### File: molotov/tests/example8.py
```python
import json
import molotov
import time
_T = {}
def _now():
return time.time() * 1000
@molotov.events()
async def record_time(event, **info):
if event == "scenario_start":
scenario = info["scenario"]
index = (info["wid"], scenario["name"])
_T[index] = _now()
if event == "scenario_success":
scenario = info["scenario"]
index = (info["wid"], scenario["name"])
start_time = _T.pop(index, None)
if start_time is not None:
duration = int(_now() - start_time)
print(
json.dumps(
{
"ts": time.time(),
"type": "scenario_success",
"name": scenario["name"],
"duration": duration,
}
)
)
elif event == "scenario_failure":
scenario = info["scenario"]
exception = info["exception"]
index = (info["wid"], scenario["name"])
start_time = _T.pop(index, None)
if start_time is not None:
duration = int(_now() - start_time)
print(
json.dumps(
{
"ts": time.time(),
"type": "scenario_failure",
"name": scenario["name"],
"exception": exception.__class__.__name__,
"errorMessage": str(exception),
"duration": duration,
}
)
)
```
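The recorder above only reacts to events; on its own it has no scenarios to time. A minimal companion scenario could look like the sketch below, assuming molotov's `scenario` decorator and an illustrative local endpoint:
```python
import molotov


@molotov.scenario(weight=100)
async def hit_homepage(session):
    # the URL is a placeholder; molotov passes an aiohttp-style session
    async with session.get("http://localhost:8080/") as resp:
        assert resp.status == 200
```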
#### File: molotov/tests/test_sharedconsole.py
```python
import unittest
import asyncio
import sys
import os
import re
import io
from molotov.util import multiprocessing
from molotov.sharedconsole import SharedConsole
from molotov.tests.support import dedicatedloop, catch_output
OUTPUT = """\
one
two
3
TypeError\\("unsupported operand type(.*)?
TypeError\\("unsupported operand type.*"""
# pre-forked variable
_CONSOLE = SharedConsole(interval=0.0)
_PROC = []
def run_worker(input):
if os.getpid() not in _PROC:
_PROC.append(os.getpid())
_CONSOLE.print("hello")
try:
3 + ""
except Exception:
_CONSOLE.print_error("meh")
with catch_output() as (stdout, stderr):
loop = asyncio.new_event_loop()
fut = asyncio.ensure_future(_CONSOLE.display(), loop=loop)
loop.run_until_complete(fut)
loop.close()
stdout = stdout.read()
assert stdout == "", stdout
class TestSharedConsole(unittest.TestCase):
@dedicatedloop
def test_simple_usage(self):
test_loop = asyncio.get_event_loop()
stream = io.StringIO()
console = SharedConsole(interval=0.0, stream=stream)
async def add_lines():
console.print("one")
console.print("two")
console.print("3")
try:
1 + "e"
except Exception as e:
console.print_error(e)
console.print_error(e, sys.exc_info()[2])
await asyncio.sleep(0.2)
await console.stop()
with catch_output() as (stdout, stderr):
adder = asyncio.ensure_future(add_lines())
displayer = asyncio.ensure_future(console.display())
test_loop.run_until_complete(asyncio.gather(adder, displayer))
stream.seek(0)
output = stream.read()
test_loop.close()
self.assertTrue(re.match(OUTPUT, output, re.S | re.M) is not None, output)
@unittest.skipIf(os.name == "nt", "win32")
@dedicatedloop
def test_multiprocess(self):
test_loop = asyncio.get_event_loop()
# now let's try with several processes
pool = multiprocessing.Pool(3)
try:
inputs = [1] * 3
pool.map(run_worker, inputs)
finally:
pool.close()
async def stop():
await asyncio.sleep(1)
await _CONSOLE.stop()
with catch_output() as (stdout, stderr):
stop = asyncio.ensure_future(stop())
display = asyncio.ensure_future(_CONSOLE.display())
test_loop.run_until_complete(asyncio.gather(stop, display))
output = stdout.read()
for pid in _PROC:
self.assertTrue("[%d]" % pid in output)
test_loop.close()
```
#### File: molotov/tests/test_slave.py
```python
import os
import pytest
from unittest import mock
import tempfile
import subprocess
from shutil import copytree, copyfile
from molotov import __version__
from molotov.slave import main
from molotov.tests.support import TestLoop, dedicatedloop, set_args
_REPO = "https://github.com/loads/molotov"
NO_INTERNET = os.environ.get("NO_INTERNET") is not None
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
CALLS = [0]
def check_call(cmd, *args, **kw):
if CALLS[0] == 3:
return
if not cmd.startswith("git clone"):
subprocess.check_call(cmd, *args, **kw)
CALLS[0] += 1
@pytest.mark.skipif(NO_INTERNET, reason="This test requires internet access")
class TestSlave(TestLoop):
@classmethod
def setUpClass(cls):
cls.dir = tempfile.mkdtemp()
copytree(os.path.join(ROOT, "molotov"), os.path.join(cls.dir, "molotov"))
for f in ("setup.py", "molotov.json", "requirements.txt"):
copyfile(os.path.join(ROOT, f), os.path.join(cls.dir, f))
@dedicatedloop
@mock.patch("molotov.slave.check_call", new=check_call)
def test_main(self):
with set_args("moloslave", _REPO, "test", "--directory", self.dir) as out:
main()
if os.environ.get("TRAVIS") is not None:
return
output = out[0].read()
self.assertTrue("Preparing 1 worker..." in output, output)
self.assertTrue("OK" in output, output)
@dedicatedloop
@mock.patch("molotov.slave.check_call", new=check_call)
def test_fail(self):
with set_args("moloslave", _REPO, "fail", "--directory", self.dir):
self.assertRaises(Exception, main)
@dedicatedloop
@mock.patch("molotov.slave.check_call", new=check_call)
def test_version(self):
with set_args("moloslave", "--version", "--directory", self.dir) as out:
try:
main()
except SystemExit:
pass
version = out[0].read().strip()
self.assertTrue(version, __version__)
```
#### File: molotov/molotov/worker.py
```python
import asyncio
import time
from inspect import isgenerator
from molotov.listeners import EventSender
from molotov.session import get_session, get_context
from molotov.api import get_fixture, pick_scenario, get_scenario, next_scenario
from molotov.util import cancellable_sleep, is_stopped, set_timer, get_timer, stop
class FixtureError(Exception):
pass
def _now():
return int(time.time())
class Worker(object):
""""The Worker class creates a Session and runs scenario.
"""
def __init__(self, wid, results, console, args, statsd=None, delay=0, loop=None):
self.wid = wid
self.results = results
self.console = console
self.loop = loop or asyncio.get_event_loop()
self.args = args
self.statsd = statsd
self.delay = delay
self.count = 0
self.worker_start = 0
self.eventer = EventSender(console)
self._exhausted = False
# fixtures
self._session_setup = get_fixture("setup_session")
self._session_teardown = get_fixture("teardown_session")
self._setup = get_fixture("setup")
self._teardown = get_fixture("teardown")
async def send_event(self, event, **options):
await self.eventer.send_event(event, wid=self.wid, **options)
async def run(self):
if self.delay > 0.0:
await cancellable_sleep(self.delay)
if is_stopped():
return
self.results["WORKER"] += 1
self.results["MAX_WORKERS"] += 1
try:
res = await self._run()
finally:
self.teardown()
self.results["WORKER"] -= 1
return res
def _may_run(self):
if is_stopped():
return False
if _now() - self.worker_start > self.args.duration:
return False
if self._exhausted:
return False
if self.results["REACHED"] == 1:
return False
if self.args.max_runs and self.count > self.args.max_runs:
return False
return True
async def setup(self):
if self._setup is None:
return {}
try:
options = await self._setup(self.wid, self.args)
except Exception as e:
self.console.print_error(e)
raise FixtureError(str(e))
if options is None:
options = {}
elif not isinstance(options, dict):
msg = "The setup function needs to return a dict"
self.console.print(msg)
raise FixtureError(msg)
return options
async def session_setup(self, session):
if self._session_setup is None:
return
try:
await self._session_setup(self.wid, session)
except Exception as e:
self.console.print_error(e)
raise FixtureError(str(e))
async def session_teardown(self, session):
if self._session_teardown is None:
return
try:
await self._session_teardown(self.wid, session)
except Exception as e:
# we can't stop the teardown process
self.console.print_error(e)
async def _run(self):
verbose = self.args.verbose
exception = self.args.exception
if self.args.single_mode:
single = get_scenario(self.args.single_mode)
elif self.args.single_run:
single = next_scenario()
else:
single = None
self.count = 1
self.worker_start = _now()
try:
options = await self.setup()
except FixtureError as e:
self.results["SETUP_FAILED"] += 1
stop(why=e)
return
async with get_session(
self.loop, self.console, verbose, self.statsd, **options
) as session:
get_context(session).args = self.args
get_context(session).worker_id = self.wid
try:
await self.session_setup(session)
except FixtureError as e:
self.results["SESSION_SETUP_FAILED"] += 1
stop(why=e)
return
while self._may_run():
step_start = _now()
get_context(session).step = self.count
result = await self.step(self.count, session, scenario=single)
if result == 1:
self.results["OK"] += 1
self.results["MINUTE_OK"] += 1
elif result != 0:
self.results["FAILED"] += 1
self.results["MINUTE_FAILED"] += 1
if exception:
stop(why=result)
if not is_stopped() and self._reached_tolerance(step_start):
stop()
cancellable_sleep.cancel_all()
break
self.count += 1
if self.args.delay > 0.0:
await cancellable_sleep(self.args.delay)
else:
# forces a context switch
await asyncio.sleep(0)
await self.session_teardown(session)
def teardown(self):
if self._teardown is None:
return
try:
self._teardown(self.wid)
except Exception as e:
# we can't stop the teardown process
self.console.print_error(e)
def _reached_tolerance(self, current_time):
if not self.args.sizing:
return False
if current_time - get_timer() > 60:
# we need to reset the tolerance counters
set_timer(current_time)
self.results["MINUTE_OK"].value = 0
self.results["MINUTE_FAILED"].value = 0
return False
OK = self.results["MINUTE_OK"].value
FAILED = self.results["MINUTE_FAILED"].value
if OK + FAILED < 100:
# we don't have enough samples
return False
current_ratio = float(FAILED) / float(OK) * 100.0
reached = current_ratio > self.args.sizing_tolerance
if reached:
self.results["REACHED"].value = 1
self.results["RATIO"].value = int(current_ratio * 100)
return reached
async def step(self, step_id, session, scenario=None):
""" single scenario call.
When it returns 1, it works. -1 the script failed,
0 the test is stopping or needs to stop.
"""
if scenario is None:
scenario = pick_scenario(self.wid, step_id)
elif isgenerator(scenario):
try:
scenario = next(scenario)
except StopIteration:
self._exhausted = True
return 0
try:
await self.send_event("scenario_start", scenario=scenario)
await scenario["func"](session, *scenario["args"], **scenario["kw"])
await self.send_event("scenario_success", scenario=scenario)
if scenario["delay"] > 0.0:
await cancellable_sleep(scenario["delay"])
return 1
except Exception as exc:
await self.send_event("scenario_failure", scenario=scenario, exception=exc)
if self.args.verbose > 0:
self.console.print_error(exc)
await self.console.flush()
return exc
return -1
``` |
{
"source": "jldinh/multicell",
"score": 2
} |
#### File: src/multicell/simulation_ptm.py
```python
import multicell.simulation
import copy
import time
from multicell.simulation import print_flush
from openalea.container import property_topomesh
from vplants.meshing.property_topomesh_analysis import compute_topomesh_property
from openalea.container.utils import IdDict
import numpy as np
class SimulationPTM(multicell.simulation.Simulation):
def __init__(self):
multicell.simulation.Simulation.__init__(self)
self.computed = set()
def import_propertytopomesh(self, mesh):
time_start = time.time()
print_flush("Topomesh importation: started")
self.set_mesh(mesh)
pos = self.get_pos().values()
self.set_pos(pos - np.mean(pos, axis=0))
self.initialize_mesh_properties()
print_flush("Topomesh importation: finished (%.2f s)" % (time.time() - time_start))
def set_mesh(self, mesh):
if type(mesh) is property_topomesh.PropertyTopomesh:
self.mesh = mesh
else:
degree = 3
# Manually build a PropertyTopomesh from a regular Topomesh
# Dirty but necessary in the current version of openalea
ptm = copy.deepcopy(mesh)
ptm.__class__ = property_topomesh.PropertyTopomesh
ptm._wisp_properties = [{} for d in xrange(degree+1)]
ptm._interface_properties = [{} for d in xrange(degree+1)]
ptm._topomesh_properties= {}
ptm._interface = [None] + [IdDict(idgenerator="set") for i in xrange(degree)]
self.mesh = ptm #property_topomesh.PropertyTopomesh(3, mesh)
def get_pos(self):
return self.mesh.wisp_property("barycenter", 0)
def set_pos(self, pos):
self.mesh.update_wisp_property("barycenter", 0, pos)
for degree in xrange(1, 4):
self.mesh._wisp_properties[degree].clear()
self.computed = set()
def compute_property(self, name, degree):
compute_topomesh_property(self.mesh, name, degree)
self.computed.add((name, degree))
def get_property(self, name, degree, wid):
return self.get_properties(name, degree)[wid]
def get_properties(self, name, degree):
if not (name, degree) in self.computed:
self.compute_property(name, degree)
return self.mesh.wisp_property(name, degree)
def compute_surfaces(self):
self.compute_property("area", 2)
def get_surface(self, wid):
return self.get_property("area", 2, wid)
def get_surfaces(self):
return self.get_properties("area", 2)
def compute_barycenters(self):
self.compute_property("barycenter", 3)
def get_barycenter(self, wid):
return self.get_property("barycenter", 3, wid)
def get_barycenters(self):
return self.get_properties("barycenter", 3)
``` |
{
"source": "jldj1/Blackjack",
"score": 3
} |
#### File: src/components/login_form.py
```python
import pygame
from buttons.button import Button
from buttons.input_box import InputBox
from network.users.users import UserModel
padding = 10
# Login Form component holds two elements,
# 2 Input Boxes
# 1 Button
class LoginForm:
def __init__(self, screen, x, y, w, h):
self.screen = screen
self.user_box = InputBox(screen, x, y, w, h)
self.pass_box = InputBox(screen, x, y + h + padding, w, h)
self.button = Button(screen, x, y + h * 2 + padding + 10, w, h, "Login")
self.logout_button = Button(screen, x, y, w, h, "Logout")
self.status = {}
def handle_event(self, event):
self.user_box.handle_event(event)
self.pass_box.handle_event(event)
pos = pygame.mouse.get_pos()
if event.type == pygame.MOUSEBUTTONDOWN:
if not self.status:
if self.button.collides(pos):
self.status = self.submit()
else:
if self.logout_button.collides(pos):
self.status = {}
print("CLICKEDKKDSK")
return self.status
def draw(self):
# draw three elements at once
if not self.status:
self.button.draw()
self.user_box.draw()
self.pass_box.draw()
else:
self.logout_button.draw()
def submit(self):
username = self.user_box.getText()
password = self.pass_box.getText()
response = UserModel.authenticate(username, password)
self.user_box.setText("")
self.pass_box.setText("")
if "error" in response:
return {}
else:
return response
```
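A possible wiring of this component into a pygame loop, assuming the repository's `src/` directory is on `sys.path` and that `UserModel.authenticate` can reach its backend (a sketch, not the project's actual entry point):
```python
import pygame

from components.login_form import LoginForm

pygame.init()
screen = pygame.display.set_mode((800, 600))
form = LoginForm(screen, 300, 200, 200, 40)
clock = pygame.time.Clock()

status, running = {}, True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        status = form.handle_event(event) or status  # {} until a successful login
    screen.fill((30, 30, 30))
    form.draw()
    if status:
        pygame.display.set_caption("Logged in as " + status.get("username", "?"))
    pygame.display.update()
    clock.tick(60)
pygame.quit()
```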
#### File: src/buttons/text.py
```python
import pygame
class Text:
def __init__(self, screen, x, y, text="", font_size=25, color=(255, 255, 255)):
self.font = pygame.font.SysFont('leelawadee', font_size)
self.screen = screen
self.color = color
self.text = text
self.pos = (x, y)
def setText(self, text):
self.text = text
def draw(self):
textsurface = self.font.render(self.text, True, self.color)
self.screen.blit(textsurface, self.pos)
```
#### File: src/screens/blackjackGame_screen.py
```python
import pygame, sys
from buttons.button import Button
from buttons.image_button import ImageButton
from buttons.text import Text
from components.card import CardComponent
from components.deckComponent import DeckComponent
from components.hand import HandComponent
from components.testblackjack import TestBlackJack
BG_COLOR = (30, 30, 30)
BLACK_COLOR = (0, 0, 0)
def dealCards(deck, hand, n):
for i in range(n):
temp_card = deck.deal()
hand.addCard(temp_card.suit, temp_card.name)
class BlackJackGame:
def __init__(self, user):
self.user = user
self.width = 800
self.height = 600
self.setup_screen()
self.hand1 = HandComponent(self.screen, 200, 500)
self.dealer_hand = HandComponent(self.screen, 200, 200)
self.usernamelabel = Text(self.screen, 5, 15, f"{user['username']}")
self.balancelabel = Text(self.screen, 5, 35, f"Balance:{user['balance']}")
self.game = TestBlackJack(self.screen, self.hand1, self.dealer_hand, user["username"], user["balance"])
self.click = False
self.running = True
self.players = [self.hand1, self.dealer_hand]
#blackjack table image
self.bj_table = pygame.image.load('blackjackGame/assets/blackJackTable.png')
self.bj_table = pygame.transform.scale(self.bj_table, (800, 600))
# self, screen, x, y, width, height, text="", color=(DARK_GREY)
#chip buttons
self.chip500 = ImageButton(self.screen, 670, 415, 'blackjackGame/assets/Chips/chip500.png', 0.45)
self.chip100 = ImageButton(self.screen, 740, 415, 'blackjackGame/assets/Chips/chip100.png', 0.45)
self.chip50 = ImageButton(self.screen, 670, 465, 'blackjackGame/assets/Chips/chip50.png', 0.45)
self.chip25 = ImageButton(self.screen, 740, 465, 'blackjackGame/assets/Chips/chip25.png', 0.45)
self.chip5 = ImageButton(self.screen, 670, 510, 'blackjackGame/assets/Chips/chip5.png', 0.45)
self.chip1 = ImageButton(self.screen, 740, 510, 'blackjackGame/assets/Chips/chip1.png', 0.45)
self.new_hand = Button(self.screen, 300, 100, 200, 50, "New Hand")
#bank and bet area
self.current_bet1 = Text(self.screen, self.width - 150, self.height/2 + 100, f"Bank: $0")
self.clock = pygame.time.Clock()
self.status_text = Text(self.screen, 100, 100, "")
self.button1 = Button(self.screen, 300, 100, 200, 50, "Deal")
self.done_betting = False
self.hit = ImageButton(self.screen, 20, 400, 'assets/imgs/hit-hand-signal.gif', 0.45)
self.stand = ImageButton(self.screen, 20, 500, 'assets/imgs/stand-sign.png', 0.19)
def draw(self):
self.screen.fill(BG_COLOR)
# screen.fill always in beginning of draw func
self.screen.blit(self.bj_table, (0, 0))
self.usernamelabel.draw()
self.balancelabel.draw()
#bank/bet area background
pygame.draw.rect(self.screen, (65, 86, 127), pygame.Rect(640, 390, 300, 200))
#pygame.draw.rect(self.screen, (65, 86, 127), pygame.Rect(self.width/2 + 190, self.height/2 + 50, 210, 50))
self.chip500.draw()
self.chip100.draw()
self.chip50.draw()
self.chip25.draw()
self.chip5.draw()
self.chip1.draw()
self.current_bet1.draw()
self.game.draw()
#self.hand1.draw()
if not self.game.done_betting:
self.button1.draw()
#self.dealer_hand.draw()
for hand in self.players:
hand.draw()
if self.game.stand_check:
self.status_text.draw()
self.new_hand.draw()
self.hit.draw()
self.stand.draw()
# display.update() always in end of draw func
pygame.display.update()
def setup_screen(self):
self.screen = pygame.display.set_mode((self.width, self.height))
pygame.display.set_caption("BLACKJACK TABLE")
def run(self):
while self.running:
pos = pygame.mouse.get_pos()
#print(pos)
self.draw()
#set bets
if self.chip500.collides(pos):
if self.click:
print("BUTTON CLICKED")
#self.bet1 += 500
text = self.game.addBet(500)
# self.game.addBet(500)
self.current_bet1.setText(text)
elif self.chip100.collides(pos):
if self.click:
print("BUTTON CLICKED")
text = self.game.addBet(100)
self.current_bet1.setText(text)
elif self.chip50.collides(pos):
if self.click:
text = self.game.addBet(50)
self.current_bet1.setText(text)
elif self.chip25.collides(pos):
if self.click:
text = self.game.addBet(25)
self.current_bet1.setText(text)
elif self.chip1.collides(pos):
if self.click:
text = self.game.addBet(1)
self.current_bet1.setText(text)
if self.chip5.collides(pos):
if self.click:
text = self.game.addBet(5)
self.current_bet1.setText(text)
self.balancelabel.setText(f"Balance: {self.game.getBalance()}")
if self.button1.collides(pos) and self.click and self.game.ready_to_start_round:
self.game.startRoundDeal()
value1 = self.hand1.evaluateHand()
if value1 == 21:
text = self.game.stand()
self.status_text.setText("BLACKJACK!")
if self.hit.collides(pos) and self.click:
self.game.hit()
if not self.game.stand_check and self.stand.collides(pos) and self.click:
text = self.game.stand()
print(text)
self.status_text.setText(text)
if self.game.stand_check and self.new_hand.collides(pos) and self.click:
self.game.reset()
self.current_bet1.setText(f"Current bet: {self.game.bet}")
self.status_text.setText("")
self.click = False
for event in pygame.event.get():
self.handle_event(event)
self.clock.tick(60)
#game play logic (bet, card deal, hit/stay, win/lose)
def handle_event(self, event):
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
self.click = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.running = False
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
``` |
{
"source": "jldj1/Practice-2",
"score": 3
} |
#### File: Practice_2/game/main.py
```python
from languages.english import English
from languages.spanish import Spanish
from languages.french import French
class BlackjackGame:
def __init__(self):
self.storage = {}
def addPlayer(self, player_name, lang):
if player_name not in self.storage:
self.storage[player_name] = Player(player_name, 100, lang)
return True
return False
def getPlayerCash(self, name):
return self.storage[name].getCash()
def getAllPlayers(self):
print(self.storage)
def startGame(self):
for name, player_object in self.storage.items():
print(name, player_object.getLang().start())
class Player:
def __init__(self, name, cash, lang):
self.name = name
self.cash = cash
self.lang = lang
def getCash(self):
return self.cash
def getLang(self):
return self.lang
game1 = BlackjackGame()
lang = English
option = input("select language:")
if option == "es":
lang = Spanish
elif option == "fr":
lang = French
game1.addPlayer("Chris", lang)
game1.addPlayer("Jessie", English)
game1.addPlayer("Robert", Spanish)
game1.getAllPlayers()
game1.startGame()
```
#### File: Practice_2/languages/spanish.py
```python
class Spanish:
@staticmethod
def start():
return "Bienvenido!"
```
#### File: Practice-2/Practice_2/main2.py
```python
import sys
from typing import Type
import pygame
from pygame.transform import scale
from buttons.button import Button
from buttons.input_box import InputBox
from image_button import ImageButton
login: Type[InputBox] = InputBox
window = (1150, 1050)
screen = pygame.display.set_mode(window)
pygame.font.init()
clock = pygame.time.Clock()
BG_COLOR = (30, 30, 30)
BLACK_COLOR = (0, 0, 0)
class Blank:
def __init__(self):
self.width = 600
self.height = 600
self.setup_screen()
self.click = False
self.running = True
self.button = Button(self.screen, self.width // 2 - 100, self.height // 2 - 25, 200, 50, "esc to go back", (BLACK_COLOR))
# use a numeric scale factor and avoid shadowing the start() method below
self.start_btn = ImageButton(self.screen, self.width, self.height, "assets/start_btn.png", 1)
self.clock = pygame.time.Clock()
def start(self):
cats = ImageButton(screen, 200, 250, "assets/cats.png", 1)
esc = Button(screen, 50, 40, 800, 600, "esc to go back", (BLACK_COLOR))
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
pygame.quit()
if event.type == pygame.MOUSEBUTTONDOWN:
if esc.collides(pygame.mouse.get_pos()):
pygame.quit()
elif cats.collides(pygame.mouse.get_pos()):
login.draw()
pygame.display.update()
pygame.quit()
def draw(self):
self.screen.fill(BG_COLOR)
# screen.fill always in beginning of draw func
#self.button.draw()
#self.start.draw()
# display.update() always in end of draw func
pygame.display.update()
#####
def setup_screen(self):
self.screen = pygame.display.set_mode((self.width, self.height))
pygame.display.set_caption("Blank Template Screen")
def run(self):
while self.running:
pos = pygame.mouse.get_pos()
print(pos)
self.draw()
if self.start_btn.collides(pos):
if self.click:
print("BUTTON CLICKED")
self.click = False
for event in pygame.event.get():
self.handle_event(event)
self.clock.tick(60)
def handle_event(self, event):
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
self.click = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.running = False
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
cats = ImageButton(screen, 200, 250, "assets/cats.png", 1)
pygame.display.flip()
done = False
while not done:
cats.draw()
go = Blank()
go.start()
pygame.display.update()
clock.tick(60)
``` |
{
"source": "jldohmann/quote-generator-python",
"score": 3
} |
#### File: jldohmann/quote-generator-python/__init__.py
```python
from flask import Flask, render_template
import json
import random
def create_app():
"""
create_app is the application factory
"""
# Create the app
app = Flask(__name__)
# Add a route
@app.route('/')
def home():
# Load JSON file with quotes
with app.open_resource('quotes.json') as infile:
imported_data = json.load(infile)
# Format into Python list
lst = []
q = imported_data['quotes']
for quote in q:
lst.append(q[quote])
# Select a random quote
selected_quote = random.choice(lst)
# Pass the selected quote to the template
return render_template('quotes.html', quote=selected_quote)
# Return the app
return app
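# A possible way to run the factory locally (the module name below is illustrative):
#   app = create_app()
#   app.run(debug=True)
# or, with the Flask CLI: FLASK_APP="quote_generator:create_app" flask run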
``` |
{
"source": "jldohmann/WolframClientForPython",
"score": 3
} |
#### File: examples/python/asynchronous3.py
```python
import asyncio
import time
from wolframclient.evaluation import WolframLanguageAsyncSession
from wolframclient.language import wl
async def delayed_evaluation(delay, async_session, expr):
await asyncio.sleep(delay)
return await async_session.evaluate(expr)
async def main():
async with WolframLanguageAsyncSession() as async_session:
start = time.perf_counter()
print('Running two tasks concurrently.')
task1 = asyncio.ensure_future(delayed_evaluation(1, async_session, '"hello"'))
task2 = asyncio.ensure_future(delayed_evaluation(1, async_session, '"world!"'))
# wait for the two tasks to finish
result1 = await task1
result2 = await task2
print('After %.02fs, both evaluations finished returning: %s, %s'
% (time.perf_counter()-start, result1, result2))
# python 3.5+
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
# python 3.7+
# asyncio.run(main())
```
#### File: deserializers/wxf/__init__.py
```python
from __future__ import absolute_import, print_function, unicode_literals
from wolframclient.deserializers.wxf.wxfconsumer import WXFConsumerNumpy
from wolframclient.deserializers.wxf.wxfparser import WXFParser
from wolframclient.exception import WolframParserException
__all__ = ["binary_deserialize"]
def binary_deserialize(wxf_input, consumer=None, **kwargs):
"""Deserialize binary data and return a Python object.
Serialize a Python object to WXF::
>>> wxf = export({'key' : [1,2,3]}, target_format='wxf')
Retrieve the input object::
>>> binary_deserialize(wxf)
{'key': [1, 2, 3]}
A stream of :class:`~wolframclient.deserializers.wxf.wxfparser.WXFToken` is generated from the WXF input by an instance
of :class:`~wolframclient.deserializers.wxf.wxfparser.WXFParser`.
The consumer must be an instance of :class:`~wolframclient.deserializers.wxf.wxfconsumer.WXFConsumer`. If none is
provided, :class:`~wolframclient.deserializers.wxf.wxfconsumer.WXFConsumerNumpy` is used. To disable NumPy array support,
use :class:`~wolframclient.deserializers.wxf.wxfconsumer.WXFConsumer`.
Named parameters are passed to the consumer. They can be any valid parameter of
:meth:`~wolframclient.deserializers.wxf.wxfconsumer.WXFConsumer.next_expression`, namely:
* `dict_class`: map WXF `Association` to `dict_class` in place of a regular :class:`dict`
"""
parser = WXFParser(wxf_input)
if consumer is None:
consumer = WXFConsumerNumpy()
try:
o = consumer.next_expression(parser.tokens(), **kwargs)
except StopIteration:
raise WolframParserException(
"Input data does not represent a valid expression in WXF format. Expecting more input data."
)
if not parser.context.is_valid_final_state():
raise WolframParserException(
"Input data does not represent a valid expression in WXF format. Some expressions are incomplete."
)
return o
```
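A short round-trip sketch of the function above, using the library's own `export` serializer; the `dict_class` keyword is the one documented in the docstring:
```python
from collections import OrderedDict

from wolframclient.deserializers.wxf import binary_deserialize
from wolframclient.serializers import export

wxf = export({"key": [1, 2, 3]}, target_format="wxf")
# with the default consumer the association comes back as a plain dict;
# dict_class swaps in another mapping type
print(binary_deserialize(wxf))                          # {'key': [1, 2, 3]}
print(binary_deserialize(wxf, dict_class=OrderedDict))  # OrderedDict([('key', [1, 2, 3])])
```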
#### File: evaluation/cloud/asynccloudsession.py
```python
from __future__ import absolute_import, print_function, unicode_literals
import json
import logging
from wolframclient.evaluation.base import WolframAsyncEvaluator
from wolframclient.evaluation.cloud.asyncoauth import (
OAuth1AIOHttpAsyncSession as OAuthAsyncSession,
)
from wolframclient.evaluation.cloud.asyncoauth import (
XAuthAIOHttpAsyncSession as XAuthAsyncSession,
)
from wolframclient.evaluation.cloud.base import WolframAPICallBase
from wolframclient.evaluation.cloud.server import WOLFRAM_PUBLIC_CLOUD_SERVER
from wolframclient.evaluation.result import (
WolframAPIResponseBuilder,
WolframEvaluationWXFResponseAsync,
)
from wolframclient.exception import AuthenticationException
from wolframclient.serializers import export
from wolframclient.utils import six
from wolframclient.utils.api import aiohttp, ssl
from wolframclient.utils.url import evaluation_api_url, user_api_url
logger = logging.getLogger(__name__)
__all__ = ["WolframCloudAsyncSession", "WolframAPICallAsync"]
class WolframCloudAsyncSession(WolframAsyncEvaluator):
""" Interact with a Wolfram Cloud asynchronously using coroutines.
Asynchronous cloud operations are provided through coroutines using modules :mod:`asyncio` and
`aiohttp <https://pypi.org/project/aiohttp/>`_.
Instances of this class can be managed with an asynchronous context manager::
async with WolframCloudAsyncSession() as session:
await session.call(...)
An event loop can be explicitly passed using the named parameter `loop`; otherwise, the one
returned by :func:`~asyncio.get_event_loop` is used.
The initialization options of the class :class:`~wolframclient.evaluation.WolframCloudSession` are also supported by
this class.
"""
def __init__(
self,
credentials=None,
server=None,
loop=None,
inputform_string_evaluation=True,
oauth_session_class=None,
xauth_session_class=None,
http_sessionclass=None,
ssl_context_class=None,
):
super().__init__(loop, inputform_string_evaluation=inputform_string_evaluation)
self.server = server or WOLFRAM_PUBLIC_CLOUD_SERVER
self.http_session = None
self.http_sessionclass = http_sessionclass or aiohttp.ClientSession
self.credentials = credentials
self.evaluation_api_url = evaluation_api_url(self.server)
self.xauth_session_class = xauth_session_class or XAuthAsyncSession
self.oauth_session_class = oauth_session_class or OAuthAsyncSession
self.ssl_context_class = ssl_context_class or ssl.SSLContext
self.oauth_session = None
if self.server.certificate is not None:
self._ssl_context = self.ssl_context_class()
self._ssl_context.load_verify_locations(self.server.certificate)
# self._ssl_context = ssl.create_default_context(cafile=self.server.certificate)
else:
self._ssl_context = None
def duplicate(self):
return self.__class__(
credentials=self.credentials,
server=self.server,
loop=self._loop,
inputform_string_evaluation=self.inputform_string_evaluation,
oauth_session_class=self.oauth_session_class,
xauth_session_class=self.xauth_session_class,
http_sessionclass=self.http_sessionclass,
ssl_context_class=self.ssl_context_class,
)
async def start(self):
self.stopped = False
try:
if not self.started:
if self.http_session is None or self.http_session.closed:
self.http_session = self.http_sessionclass(
headers={"User-Agent": "WolframClientForPython/1.0"}, loop=self._loop
)
if not self.anonymous():
await self._authenticate()
except Exception as e:
try:
await self.terminate()
finally:
raise e
@property
def started(self):
return self.http_session is not None and (self.anonymous() or self.authorized())
""" Terminate gracefully stops. """
async def stop(self):
await self.terminate()
async def terminate(self):
self.stopped = True
if self.http_session:
await self.http_session.close()
self.http_session = None
self.oauth_session = None
def anonymous(self):
return self.credentials is None
def authorized(self):
return self.oauth_session is not None and self.oauth_session.authorized()
async def _authenticate(self):
"""Authenticate with the server using the credentials.
This method supports both oauth and xauth methods. It is not necessary
to call it, since the session will try to authenticate when the first
request is issued. """
logger.info("Authenticating to the server.")
if self.credentials is None:
raise AuthenticationException("Missing credentials.")
if self.credentials.is_xauth:
self.oauth_session = self.xauth_session_class(
self.credentials, self.http_session, self.server
)
else:
self.oauth_session = self.oauth_session_class(
self.http_session,
self.server,
self.credentials.consumer_key,
self.credentials.consumer_secret,
)
await self.oauth_session.authenticate()
async def call(
self,
api,
input_parameters={},
files={},
target_format="wl",
permissions_key=None,
**kwargv
):
"""Call a given API using the provided input parameters.
`api` can be a string url or a :class:`tuple` (`username`, `api name`). The username is generally the Wolfram
Language symbol ``$UserName``. The API name can be a UUID or a relative path, e.g. *myapi/foo/bar*.
The input parameters are provided as a dictionary with string keys being the name
of the parameters associated to their value.
Files are passed in a dictionary. Values can have multiple forms::
{'parameter name': file_pointer}
It is possible to explicitly specify a filename and a content type::
{'parameter name': ('filename', file_pointer, 'content-type')}
Bytes can also be passed as files::
{'parameter name': ('filename', b'...binary...data...', 'content-type')}
It is possible to pass a ``PermissionsKey`` to the server alongside the query and get access to a given
resource.
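        An illustrative call (the username, API path and variables below are placeholders, not
        part of the original API)::
            result = await session.call(
                ('myusername', 'myapi/foo'),
                input_parameters={'x': 4},
                files={'image': ('picture.png', image_bytes, 'image/png')},
            )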
"""
url = user_api_url(self.server, api)
params = {}
if permissions_key is not None:
params["_key"] = permissions_key
        encoded_inputs = encode_api_inputs(
            input_parameters, files=files, target_format=target_format, **kwargv
        )
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug("Encoded input %s", encoded_inputs)
        # Multipart requests (with files) and regular requests are posted the same way:
        # encode_api_inputs already produces a body in the appropriate format for both.
        response = await self._post(url, data=encoded_inputs, params=params)
return WolframAPIResponseBuilder.build(response)
async def _post(self, url, headers={}, data=None, params={}):
"""Do a POST request, signing the content only if authentication has been successful."""
if not self.started:
await self.start()
if self.stopped:
await self.restart()
headers["User-Agent"] = "WolframClientForPython/1.0"
if self.authorized():
logger.info("Authenticated call to api %s", url)
return await self.oauth_session.signed_request(url, headers=headers, data=data)
else:
logger.info("Anonymous call to api %s", url)
return await self.http_session.post(
url, params=params, headers=headers, data=data, ssl=self._ssl_context
)
async def _call_evaluation_api(self, expr, **kwargs):
data = aiohttp.BytesPayload(export(expr, target_format="wl", **kwargs))
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Sending expression to cloud server for evaluation: %s", data)
response = await self._post(self.evaluation_api_url, data=data)
return WolframEvaluationWXFResponseAsync(response)
async def evaluate(self, expr, **kwargs):
"""Send `expr` to the cloud for evaluation and return the result.
`expr` can be a Python object serializable by :func:`~wolframclient.serializers.export` or the string
:wl:`InputForm` of an expression to evaluate.
"""
response = await self._call_evaluation_api(self.normalize_input(expr), **kwargs)
return await response.get()
async def evaluate_wrap(self, expr, **kwargs):
""" Similar to :func:`~wolframclient.evaluation.cloud.asynccloudsession.WolframCloudAsyncSession.evaluate` but
return the result as a :class:`~wolframclient.evaluation.result.WolframEvaluationJSONResponseAsync`.
"""
return await self._call_evaluation_api(self.normalize_input(expr), **kwargs)
    def wolfram_api_call(self, api, **kwargs):
        """ Build a helper class instance to call a given API. """
        return WolframAPICallAsync(self, api, **kwargs)
    def __repr__(self):
        return "<{}:base={}, anonymous={}, authorized={}>".format(
            self.__class__.__name__, self.server.cloudbase, self.anonymous(), self.authorized()
        )
class WolframAPICallAsync(WolframAPICallBase):
"""Perform an API call using an asynchronous cloud session. """
async def perform(self, **kwargs):
"""Make the API call and return the result."""
return await self.target.call(
self.api,
input_parameters=self.parameters,
files=self.files,
permissions_key=self.permission_key,
**kwargs
)
### Some internal utilities focused on cloud data manipulation and
# formatting for http requests, based on aiohttp objects.
def _encode_inputs_as_wxf(form_data, inputs, **kwargs):
for name, value in inputs.items():
form_data.add_field(name + "__wxf", export(value, target_format="wxf", **kwargs))
def _encode_inputs_as_json(form_data, inputs, **kwargs):
for name, value in inputs.items():
form_data.add_field(name + "__json", json.dumps(value, **kwargs))
def _encode_inputs_as_wl(form_data, inputs, **kwargs):
for name, value in inputs.items():
# avoid double encoding of strings '\"string\"'.
if isinstance(value, six.string_types):
form_data.add_field(name, value)
else:
form_data.add_field(name, export(value, target_format="wl", **kwargs))
SUPPORTED_ENCODING_FORMATS = {
"json": _encode_inputs_as_json,
"wxf": _encode_inputs_as_wxf,
"wl": _encode_inputs_as_wl,
}
def encode_api_inputs(inputs, files={}, target_format="wl", **kwargs):
if inputs == {} and files == {}:
return None
encoder = SUPPORTED_ENCODING_FORMATS.get(target_format, None)
if encoder is None:
raise ValueError(
"Invalid encoding format %s. Choices are: %s"
% (target_format, ", ".join(SUPPORTED_ENCODING_FORMATS.keys()))
)
form_data = aiohttp.FormData()
# files are specified by file pointer or bytes, or a tuple.
for name, file_info in files.items():
# tuple must contain: the filename, the data as bytes, the content type.
if isinstance(file_info, tuple) and len(file_info) == 3:
form_data.add_field(
name, file_info[1], filename=file_info[0], content_type=file_info[2]
)
# otherwise it must be the filename. Delegate input validation to FormData:
else:
form_data.add_field(name, file_info)
encoder(form_data, inputs, **kwargs)
return form_data
```
#### File: evaluation/cloud/request_adapter.py
```python
from __future__ import absolute_import, print_function, unicode_literals
from wolframclient.utils.api import aiohttp, requests
from wolframclient.utils.encoding import force_text
__all__ = ["wrap_response"]
class HTTPResponseAdapterBase(object):
    """ Unify the various HTTP response classes behind a single API. """
    asynchronous = False
def __init__(self, httpresponse):
self.response = httpresponse
def response_object(self):
return self.response
def status(self):
""" HTTP status code """
return self.response.status_code
    def json(self):
        """ Response body as a JSON object. """
        return self.response.json()
    def text(self):
        """ Response body as decoded text. """
        return self.response.text
    def content(self):
        """ Response body as raw bytes. """
        return self.response.content
def url(self):
""" String URL. """
return force_text(self.response.url)
def headers(self):
""" Headers as a dict. """
return self.response.headers
class RequestsHTTPRequestAdapter(HTTPResponseAdapterBase):
pass
class AIOHttpHTTPRequestAdapter(HTTPResponseAdapterBase):
asynchronous = True
def status(self):
return self.response.status
async def json(self):
return await self.response.json()
async def text(self):
return await self.response.text()
async def content(self):
return await self.response.read()
def wrap_response(response):
if isinstance(response, requests.Response):
return RequestsHTTPRequestAdapter(response)
elif isinstance(response, aiohttp.ClientResponse):
return AIOHttpHTTPRequestAdapter(response)
else:
raise ValueError("No adapter found for HTTP response class %s" % response.__class__)
```
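A brief, hedged usage sketch for the adapter above (it assumes the `requests` package is installed and the Wolfram Cloud endpoint is reachable):
```python
import requests
from wolframclient.evaluation.cloud.request_adapter import wrap_response
response = requests.get("https://www.wolframcloud.com")
adapter = wrap_response(response)
print(adapter.status(), adapter.url(), adapter.headers().get("Content-Type"))
```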
#### File: WolframClientForPython/wolframclient/exception.py
```python
from __future__ import absolute_import, print_function, unicode_literals
from wolframclient.language.exceptions import WolframLanguageException
from wolframclient.utils.logger import str_trim
class RequestException(WolframLanguageException):
"""Error in an HTTP request."""
def __init__(self, response, msg=None):
self.response = response
if msg:
self.msg = msg
else:
try:
self.msg = response.text()
except UnicodeDecodeError:
self.msg = "Failed to decode request body."
def __str__(self):
if hasattr(self.response, "status"):
if callable(self.response.status):
status = self.response.status()
else:
status = self.response.status
elif hasattr(self.response, "status_code"):
status = self.response.status_code
else:
status = "N/A"
return "<status: %s> %s" % (status, self.msg or "")
class AuthenticationException(RequestException):
"""Error in an authentication request."""
class WolframKernelException(WolframLanguageException):
"""Error while interacting with a Wolfram kernel."""
class WolframEvaluationException(WolframLanguageException):
"""Error after an evaluation raising messages."""
def __init__(self, error, result=None, messages=[]):
self.error = error
self.result = result
if isinstance(messages, list):
self.messages = messages
else:
self.messages = [messages]
def __str__(self):
return self.error
def __repr__(self):
return "<%s error=%s, expr=%s, messages=%i>:" % (
self.__class__.__name__,
self.error,
str_trim(self.result),
len(self.messages),
)
class SocketException(WolframLanguageException):
"""Error while operating on socket."""
class WolframParserException(WolframLanguageException):
"""Error while deserializing WXF bytes."""
__all__ = [
"WolframLanguageException",
"RequestException",
"AuthenticationException",
"WolframKernelException",
"SocketException",
"WolframParserException",
"WolframEvaluationException",
]
```
#### File: serializers/wxfencoder/serializer.py
```python
from __future__ import absolute_import, print_function, unicode_literals
from wolframclient.serializers.wxfencoder.constants import (
WXF_HEADER_COMPRESS,
WXF_HEADER_SEPARATOR,
WXF_VERSION,
)
from wolframclient.serializers.wxfencoder.streaming import ZipCompressedWriter
__all__ = ["WXFExprSerializer", "SerializationContext", "WXFSerializerException"]
class WXFSerializerException(Exception):
pass
class _Context(object):
def __init__(self):
pass
def add_part(self):
        raise NotImplementedError(
            "class %s must implement an add_part method" % self.__class__.__name__
        )
def step_into_new_function(self, length):
raise NotImplementedError(
"class %s must implement a step_into_new_function method" % self.__class__.__name__
)
def step_into_new_assoc(self, length):
raise NotImplementedError(
"class %s must implement a step_into_new_assoc method" % self.__class__.__name__
)
def step_into_new_rule(self):
raise NotImplementedError(
"class %s must implement a step_into_new_rule method" % self.__class__.__name__
)
def is_valid_final_state(self):
        raise NotImplementedError(
            "class %s must implement an is_valid_final_state method" % self.__class__.__name__
        )
def is_rule_valid(self):
        raise NotImplementedError(
            "class %s must implement an is_rule_valid method" % self.__class__.__name__
        )
class NoEnforcingContext(_Context):
""" This context doesn't prevent inconsistent state. """
def add_part(self):
pass
def step_into_new_function(self, length):
pass
def step_into_new_assoc(self, length):
pass
def step_into_new_rule(self):
pass
def is_valid_final_state(self):
return True
def is_rule_valid(self):
return True
class SerializationContext(_Context):
    """ Keeps track of various parameters associated with an expression being serialized.
    The finalized expression has a tree structure; it is serialized depth first. During
    the serialization process of an expression involving many non-atomic elements (e.g. List),
    we end up having incomplete parts at various levels.
    For each level of the expression tree we have to remember the expected length, the number
    of elements already inserted, and whether or not the node is an association. The first two
    parameters prevent inconsistencies between the number of elements and the declared length;
    the last one avoids incorrect use of `WXFExprRule(Delayed)` tokens.
    """
def __init__(self):
        # The first level has index 0 and length 1. It is the root of the expr
        # but does not represent anything in the final WL expr.
self._depth = 0
self._expected_length_stack = [1]
# index starting at 0.
self._current_index_stack = [0]
# root is not an assoc.
self._in_assoc_stack = [False]
def _check_insert(self):
if (
self._depth >= 0
and self._current_index_stack[self._depth]
>= self._expected_length_stack[self._depth]
):
            raise IndexError(
                "Out of bounds: the number of parts is greater than the declared length %d."
                % self._expected_length_stack[self._depth]
            )
def _step_out_finalized_expr(self):
while (
self._depth >= 0
and self._current_index_stack[self._depth]
== self._expected_length_stack[self._depth]
):
self._depth -= 1
def add_part(self):
self._check_insert()
self._current_index_stack[self._depth] += 1
# This is the best place to output the context. Otherwise index values can be confusing.
# print(self)
self._step_out_finalized_expr()
@staticmethod
def _set_at_index_or_append(array, index, value):
"""Set the element of an `array` at a given index if it exists,
append it to the array otherwise. The `index` must be at most the
length of the array.
"""
if len(array) == index:
array.append(value)
elif len(array) > index:
array[index] = value
else:
raise IndexError(
"Index {} is greater than array length: {}".format(index, len(array))
)
def step_into_new_function(self, length):
self.step_into_new_expr(length + 1)
def step_into_new_assoc(self, length):
self.step_into_new_expr(length, is_assoc=True)
def step_into_new_rule(self):
self.step_into_new_expr(2)
def step_into_new_expr(self, length, is_assoc=False):
""" Indicate the beginning of a new expr of a given length.
Note that the length is the number of WXF elements which includes the head for functions.
Association and rules don't have head in WXF so their length value matches the one of the
expression in the Wolfram Language.
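        For example, a function ``f[a, b]`` goes through ``step_into_new_function(2)`` and thus
        an expr of WXF length 3 (head plus two arguments), whereas an association with two rules
        goes through ``step_into_new_assoc(2)`` with a length of exactly 2.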
"""
# increment the index
self.add_part()
# go down one level in the expr tree, into the new expr.
self._depth += 1
# set or append element at index self._depth
SerializationContext._set_at_index_or_append(
self._expected_length_stack, self._depth, length
)
SerializationContext._set_at_index_or_append(self._current_index_stack, self._depth, 0)
SerializationContext._set_at_index_or_append(
self._in_assoc_stack, self._depth, is_assoc
)
self._step_out_finalized_expr()
def is_valid_final_state(self):
return self._depth == -1
def is_rule_valid(self):
return self._in_assoc_stack[self._depth]
def __repr__(self):
return "{}(depth={}, element={}/{})".format(
self.__class__.__name__,
self._depth,
self._current_index_stack[self._depth],
self._expected_length_stack[self._depth],
)
class WXFExprSerializer(object):
"""Main serialization class that convert internal object into bytes.
Pulls instances of :class:`~wolframclient.serializers.wxfencoder.wxfexpr.WXFExpr` from an
:class:`~wolframclient.serializers.wxfencoder.wxfexprprovider.WXFExprProvider`, serializes
them into wxf bytes and write the data to a stream.
This class also ensures the output data is a valid WXF encoded expression, and raises an exception otherwise.
For an in-depth description of the format see `tutorial/WXFFormatDescription` from Wolfram product documentation
or visit http://reference.wolfram.com/language/tutorial/WXFFormatDescription.html.
"""
def __init__(self, stream, expr_provider=None, compress=False, enforce=True):
self._compress = compress
self._writer = stream
self._expr_provider = expr_provider
self._enforce = enforce
if enforce:
self._context = SerializationContext()
else:
self._context = NoEnforcingContext()
@property
def context(self):
return self._context
def provide_wxfexpr(self, pyExpr):
if self._expr_provider:
return self._expr_provider.provide_wxfexpr(pyExpr)
return pyExpr
def serialize(self, pyExpr):
"""
Serialize the python expression given as parameter.
"""
# the header is never compressed.
self._writer.write(WXF_VERSION)
if self._compress:
self._writer.write(WXF_HEADER_COMPRESS)
self._writer.write(WXF_HEADER_SEPARATOR)
# end of uncompressed header. Eventually using compressed data.
if self._compress:
with ZipCompressedWriter(self._writer) as writer:
for wxfexpr in self.provide_wxfexpr(pyExpr):
wxfexpr._serialize_to_wxf(writer, self._context)
else:
for wxfexpr in self.provide_wxfexpr(pyExpr):
wxfexpr._serialize_to_wxf(self._writer, self._context)
if not self._context.is_valid_final_state():
raise WXFSerializerException("Inconsistent state: truncated expr.")
```
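A minimal serialization sketch (illustrative only; it assumes the companion `WXFExprProvider` class from the same `wxfencoder` package, which turns native Python values into `WXFExpr` tokens):
```python
import io
from wolframclient.serializers.wxfencoder.serializer import WXFExprSerializer
from wolframclient.serializers.wxfencoder.wxfexprprovider import WXFExprProvider
stream = io.BytesIO()
serializer = WXFExprSerializer(stream, expr_provider=WXFExprProvider())
serializer.serialize({'a': 1, 'b': [2, 3]})
wxf_bytes = stream.getvalue()  # a complete WXF-encoded expression
```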
#### File: tests/externalevaluate/ev_ast.py
```python
from __future__ import absolute_import, print_function, unicode_literals
from wolframclient.utils.externalevaluate import execute_from_string
from wolframclient.utils.tests import TestCase as BaseTestCase
class TestCase(BaseTestCase):
def test_execute_from_string(self):
context = {}
result = execute_from_string("a = 2+2", session_data=context)
self.assertEqual(result, None)
self.assertEqual(context.get("a", None), 4)
result = execute_from_string("a", session_data=context)
self.assertEqual(result, 4)
result = execute_from_string("z = a + 4\nz", session_data=context)
self.assertEqual(result, 8)
self.assertEqual(context.get("a", None), 4)
self.assertEqual(context.get("z", None), 8)
def test_context(self):
session_data = {}
local_context = {"a": 3}
result = execute_from_string("a", session_data=session_data, constants=local_context)
self.assertEqual(result, 3)
result = execute_from_string(
"a = 12; a", session_data=session_data, constants=local_context
)
self.assertEqual(result, 12)
self.assertEqual(session_data.get("a", None), 12)
def test_context_deletion(self):
session_data = {"a": 3}
execute_from_string("del a", session_data=session_data)
with self.assertRaises(KeyError):
session_data["a"]
def test_globals(self):
execute_from_string(
"import numpy\ndef arange(n): return numpy.arange(n).reshape(n)\n\narange(10)"
)
```
#### File: wolframclient/utils/logger.py
```python
from __future__ import absolute_import, print_function, unicode_literals
import logging
from wolframclient.utils import six
from wolframclient.utils.encoding import force_text
if six.PY2:
def setup_logging_to_file(path, level=None):
logging.basicConfig(
filename=path,
filemode="a",
format="%(asctime)s, %(name)s %(levelname)s %(message)s",
            level=level if level is not None else logging.INFO,
)
else:
def setup_logging_to_file(path, level=None):
"""Setup a basic Python logging configuration to a given file."""
logging.basicConfig(
format="%(asctime)s, %(name)s %(levelname)s %(message)s",
handlers=[logging.FileHandler(path, mode="a", encoding="utf-8")],
            level=level if level is not None else logging.INFO,
)
def str_trim(o, max_char=80):
"""Return the string representation of an object, trimmed to keep up to `max_char` characters.
"""
as_str = force_text(o)
if len(as_str) > max_char:
return "%s...(%i more)" % (as_str[:max_char], len(as_str) - max_char)
else:
return as_str
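# Illustrative example: str_trim('a' * 100, max_char=10) returns 'aaaaaaaaaa...(90 more)'.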
``` |
{
"source": "jlduan/fba",
"score": 2
} |
#### File: fba/fba/qc.py
```python
import dnaio
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pathlib import Path
from itertools import cycle
from itertools import islice
from fba.levenshtein import (
create_index,
query_index,
select_query
)
from fba.utils import open_by_suffix, get_logger
logger = get_logger(logger_name=__name__)
params = {'pdf.fonttype': 42,
'mathtext.default': 'regular',
'axes.axisbelow': True}
plt.rcParams.update(params)
def plot_sequence_content(read_composition, title,
nucleotide_dict, ax, nucleotides='ACGT'):
"""Plots per base composition.
Parameters
----------
read_composition : DataFrame
A DataFrame of per base content. Index is read coordinate. Columns are
nucleotides in the order of ACGTN.
title : str
The title for the generated plot.
nucleotide_dict : dict
Color for each base.
ax : Axes
Axes for plotting.
nucleotides : str, optional
Selected nucleotides to visualize.
Returns
-------
Axes
Distribution of sequence content per base.
"""
p_handles = list()
for i in list(nucleotides):
p = ax.plot(read_composition.index.values,
read_composition[i],
c=nucleotide_dict[i],
linewidth=1)
p_handles.append(p[0])
ax.legend(handles=p_handles,
labels=list(nucleotides),
loc='upper left',
fontsize=6,
frameon=True,
shadow=False,
framealpha=0)
ax.set_title(label=title, fontsize=7)
ax.tick_params(labelsize=6, labelcolor='black', direction='out')
ax.xaxis.set_ticks(range(0, read_composition.shape[0], 2))
ax.set_yticks(ax.get_yticks())
limits_y = list(ax.get_ylim())
if limits_y[0] < 0:
limits_y[0] = 0
if limits_y[1] > 1:
limits_y[1] = 1
ax.set_ylim(limits_y[0], limits_y[1])
ax.set_yticklabels(labels=[f'{i:3,.1%}' for i in ax.get_yticks()])
for i in ['top', 'bottom', 'left', 'right']:
ax.spines[i].set_linewidth(w=0.5)
ax.spines[i].set_color(c='#333333')
ax.set_xbound(lower=-1, upper=read_composition.shape[0] + 1)
a = (limits_y[1] - limits_y[0]) * 0.025
ax.set_ybound(
lower=limits_y[0] - a,
upper=limits_y[1] + a)
return ax
def plot_barcode_startend(s, e, bases, title, ax):
"""Plots barcode starting and ending positions.
Parameters
----------
    s : Series
        The percentage of starting positions on each base. Its length equals
        those of e and bases.
    e : Series
        The percentage of ending positions on each base. Its length equals
        those of s and bases.
    bases : Array
        The bases of reads.
    title : str
        The title for the generated plot.
    ax : Axes
        Axes for plotting.
Returns
-------
Axes
Distribution of barcode starting and ending positions on reads.
"""
assert len(s) == len(bases)
ax.bar(x=bases,
height=s,
bottom=0)
assert len(e) == len(bases)
ax.bar(x=bases,
height=e,
bottom=s)
ax.set_title(label=title, fontsize=7)
ax.tick_params(labelsize=6, labelcolor='black', direction='out')
ax.xaxis.set_ticks(range(0, len(bases), 2))
ax.set_xbound(lower=-1, upper=len(bases) + 1)
ax.set_ylim(bottom=0, top=1)
ax.set_yticks(ax.get_yticks().tolist())
ax.set_yticklabels(labels=['{:,.1%}'.format(i) for i in ax.get_yticks()])
for i in ['top', 'bottom', 'left', 'right']:
ax.spines[i].set_linewidth(w=0.5)
ax.spines[i].set_color(c='#333333')
return ax
def summarize_sequence_content(read1_file,
read2_file,
num_reads=None,
output_directory='qc'):
"""Summarizes per base content for reads 1 and reads 2.
Parameters
----------
read1_file : str
The path and name of read 1 file.
read2_file : str
The path and name of read 2 file.
num_reads : int, optional
Number of reads to analyze.
output_directory : str, optional
The path and name for the output directory.
Returns
-------
str
The path and name for the output directory.
"""
logger.info('Summarizing per base read content ...')
if num_reads:
logger.info(f'Number of read pairs to analyze: {num_reads:,}')
else:
        logger.info('Number of read pairs to analyze: all')
logger.info(f'Output directory: {output_directory}')
# read1
Path(output_directory).mkdir(exist_ok=True)
R1_ACGT_PLOT = \
Path(output_directory) / 'Pyplot_read1_per_base_seq_content.pdf'
R1_ACGT_PLOT_GREY = \
Path(output_directory) / 'Pyplot_read1_per_base_seq_content_grey.pdf'
R1_N_PLOT = \
Path(output_directory) / 'Pyplot_read1_per_base_seq_content_n.pdf'
# read2
R2_ACGT_PLOT = \
Path(output_directory) / 'Pyplot_read2_per_base_seq_content.pdf'
R2_ACGT_PLOT_GREY = \
Path(output_directory) / 'Pyplot_read2_per_base_seq_content_grey.pdf'
R2_N_PLOT = \
Path(output_directory) / 'Pyplot_read2_per_base_seq_content_n.pdf'
read1_matrix = []
read2_matrix = []
def _get_sequence(read1_file, read2_file):
"""Gets sequences."""
with dnaio.open(file1=read1_file,
file2=read2_file,
fileformat='fastq',
mode='r') as f:
for rec in f:
read1, read2 = rec
yield read1.sequence, read2.sequence
_reads = islice(
_get_sequence(read1_file, read2_file), 0, num_reads
)
read_counter = int()
for read1_seq, read2_seq in _reads:
read_counter += 1
read1_matrix.append(read1_seq)
read2_matrix.append(read2_seq)
logger.info(f'Number of reads processed: {read_counter:,}')
read1_matrix = np.array([list(i) for i in read1_matrix])
read2_matrix = np.array([list(i) for i in read2_matrix])
read1_composition = pd.DataFrame(
{i: (read1_matrix == i).mean(axis=0) for i in list('ACGTN')})
read2_composition = pd.DataFrame(
{i: (read2_matrix == i).mean(axis=0) for i in list('ACGTN')})
color_palettes = [
# ['#a0cbe8', '#8cd17d', '#e15759', '#f1ce63', 'black'],
['#E16A86', '#909800', '#00AD9A', '#9183E6', 'black'],
['#000000', '#404040', '#7f7f7f', '#bfbfbf', 'black']
]
read1_length = read1_composition.shape[0]
read2_length = read2_composition.shape[0]
# read1
for p, c, n in zip([R1_ACGT_PLOT, R1_ACGT_PLOT_GREY, R1_N_PLOT],
cycle(color_palettes),
['ACGT', 'ACGT', 'N']):
fig, ax = plt.subplots(nrows=1,
ncols=1,
figsize=(max(2.8, read1_length / 15), 2.5))
plot_sequence_content(
read_composition=read1_composition,
title='Read 1 per base sequence content',
nucleotide_dict={i: j for i, j in zip(list('ACGTN'), c)},
ax=ax,
nucleotides=n
)
plt.tight_layout()
fig.savefig(fname=p,
transparent=None,
bbox_inches='tight')
# read2
for p, c, n in zip([R2_ACGT_PLOT, R2_ACGT_PLOT_GREY, R2_N_PLOT],
cycle(color_palettes),
['ACGT', 'ACGT', 'N']):
fig, ax = plt.subplots(nrows=1,
ncols=1,
figsize=(max(2.8, read2_length / 15), 2.5))
plot_sequence_content(
read_composition=read2_composition,
title='Read 2 per base sequence content',
nucleotide_dict={i: j for i, j in zip(list('ACGTN'), c)},
ax=ax,
nucleotides=n
)
plt.tight_layout()
fig.savefig(fname=p,
transparent=None,
bbox_inches='tight')
return output_directory
def summarize_barcode_positions(matching_file, output_directory='qc'):
"""Summarizes barcode positions for reads 1 and reads 2.
Parameters
----------
matching_file : str
The path and name of matching result.
output_directory : str, optional
The path and name for the output directory.
Returns
-------
str
The path and name for the output directory.
"""
logger.info('Summarizing barcode coordinates ...')
logger.info(f'Output directory: {output_directory}')
# read1
Path(output_directory).mkdir(exist_ok=True)
R1_BC_STARTING_FILE = \
Path(output_directory) / 'Read1_barcodes_starting.csv'
R1_BC_ENDING_FILE = \
Path(output_directory) / 'Read1_barcodes_ending.csv'
R1_BC_STARTING_ENDING_PLOT = \
Path(output_directory) / 'Pyplot_read1_barcodes_starting_ending.pdf'
# read2
R2_BC_STARTING_FILE = \
Path(output_directory) / 'Read2_barcodes_starting.csv'
R2_BC_ENDING_FILE = \
Path(output_directory) / 'Read2_barcodes_ending.csv'
R2_BC_STARTING_ENDING_PLOT = \
Path(output_directory) / 'Pyplot_read2_barcodes_starting_ending.pdf'
# summary
CB_MISMATCHES_FILE = \
Path(output_directory) / 'Read1_barcodes_mismatches.csv'
FB_MISMATCHES_FILE = \
Path(output_directory) / 'Read2_barcodes_mismatches.csv'
MATCHED_BC_RATIO_FILE = Path(
output_directory) / 'matched_barcode_ratio.csv'
#
with open_by_suffix(file_name=matching_file) as f:
next(f)
first_line = next(f)
read1_length = len(first_line.split('\t')[0])
read2_length = len(first_line.split('\t')[4])
# barcode starts and ends
barcode_counter = [int(), int()]
cb_matching_pos = list()
cb_matching_description = list()
cb_mismatches = list()
fb_matching_pos = list()
fb_matching_description = list()
fb_mismatches = list()
with open_by_suffix(file_name=matching_file) as f:
next(f)
for line in f:
i = line.rstrip().split('\t')
barcode_counter[1] += 1
if (i[2] not in {'no_match', 'n_skipping'}
and i[5] not in {'no_match', 'NA'}):
barcode_counter[0] += 1
cb_matching_pos.append(i[2])
cb_matching_description.append(i[3])
_ = [int(ii) for ii in i[2].split(':')]
cb_mismatches.append(
len(i[1]) - (_[1] - _[0]) + sum(
[int(ii)
for ii in i[3].split(':')])
)
fb_matching_pos.append(i[6])
fb_matching_description.append(i[7])
_ = [int(ii) for ii in i[6].split(':')]
fb_mismatches.append(
len(i[5]) - (_[1] - _[0]) + sum(
[int(ii)
for ii in i[7].split(':')])
)
barcode_counter.append(barcode_counter[0] / barcode_counter[1])
with open_by_suffix(file_name=MATCHED_BC_RATIO_FILE, mode='w') as f:
f.write(
','.join(['valid', 'total', 'ratio'])
+ '\n'
+ ','.join([str(i) for i in barcode_counter])
+ '\n'
)
cb_mismatches = pd.Series(
cb_mismatches).value_counts().to_frame(name='count')
cb_mismatches['ratio'] = cb_mismatches['count'] / \
sum(cb_mismatches['count'])
cb_mismatches.sort_index().to_csv(CB_MISMATCHES_FILE)
fb_mismatches = pd.Series(
fb_mismatches).value_counts().to_frame(name='count')
fb_mismatches['ratio'] = fb_mismatches['count'] / \
sum(fb_mismatches['count'])
fb_mismatches.sort_index().to_csv(FB_MISMATCHES_FILE)
# cell barcode
cb_s = [int(i.split(':')[0]) for i in cb_matching_pos]
cb_e = [int(i.split(':')[1]) - 1 for i in cb_matching_pos]
cb_start_dist = pd.Series(
cb_s).value_counts().to_frame(
name='count').reindex(
list(range(read1_length))).fillna(0).astype(np.int64)
cb_start_dist.to_csv(R1_BC_STARTING_FILE)
cb_end_dist = pd.Series(
cb_e).value_counts().to_frame(name='count').reindex(
list(range(read1_length))).fillna(0).astype(np.int64)
cb_end_dist.to_csv(R1_BC_ENDING_FILE)
fig, ax = plt.subplots(nrows=1,
ncols=1,
figsize=(max(2.8, read1_length / 15), 2.5))
plot_barcode_startend(
s=cb_start_dist['count'] / sum(cb_start_dist['count']),
e=cb_end_dist['count'] / sum(cb_end_dist['count']),
bases=cb_start_dist.index.values,
title='Distribution of cell barcode positions',
ax=ax
)
plt.tight_layout()
fig.savefig(fname=R1_BC_STARTING_ENDING_PLOT,
transparent=None,
bbox_inches='tight')
# feature barcode
fb_s = [int(i.split(':')[0]) for i in fb_matching_pos]
fb_e = [int(i.split(':')[1]) - 1 for i in fb_matching_pos]
fb_start_dist = pd.Series(
fb_s).value_counts().to_frame(
name='count').reindex(
list(range(read2_length))).fillna(0).astype(np.int64)
fb_start_dist.to_csv(R2_BC_STARTING_FILE)
fb_end_dist = pd.Series(
fb_e).value_counts().to_frame(name='count').reindex(
list(range(read2_length))).fillna(0).astype(np.int64)
fb_end_dist.to_csv(R2_BC_ENDING_FILE)
fig, ax = plt.subplots(nrows=1,
ncols=1,
figsize=(max(2.8, read2_length / 15), 2.5))
plot_barcode_startend(
s=fb_start_dist['count'] / sum(fb_start_dist['count']),
e=fb_end_dist['count'] / sum(fb_end_dist['count']),
bases=fb_start_dist.index.values,
title='Distribution of feature barcode positions',
ax=ax
)
plt.tight_layout()
fig.savefig(fname=R2_BC_STARTING_ENDING_PLOT,
transparent=None,
bbox_inches='tight')
return output_directory
def analyze_bulk(read_file,
read_coords,
fb_file,
num_mismatches=1,
num_n_threshold=3,
num_reads=None):
"""Searches feature barcodes on reads 2 and generates matrix.
Parameters
----------
read_file : str
The path and name of read 2 file.
    read_coords : tuple or list
        The positions on read 2 to search.
    fb_file : str
        The path and name of feature barcoding file.
    num_mismatches : int, optional
        Maximum Levenshtein distance allowed.
    num_n_threshold : int, optional
        Maximum Ns allowed for reads.
    num_reads : int, optional
        Number of reads to analyze.
Returns
-------
dict
Count and frequency of each feature barcode in the provided fastq file.
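    Notes
    -----
    Each line of `fb_file` is expected to be tab-separated with the barcode
    sequence in the last column. An illustrative line ``CD3<tab>CTCATTGTAACTCCT``
    yields the key ``CD3_CTCATTGTAACTCCT`` in the returned dictionary.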
"""
with open_by_suffix(file_name=fb_file) as f:
feature_barcodes = {
i.rstrip().split('\t')[-1]: i.rstrip().replace('\t', '_')
for i in f
}
fb_index = create_index(barcodes=feature_barcodes.keys(),
num_mismatches=num_mismatches)
feature_barcode_count = {i: int() for i in feature_barcodes}
logger.info('Number of reference feature barcodes: '
f'{len(feature_barcode_count):,}')
logger.info('Read 2 coordinates to search: [' +
', '.join([str(i) for i in read_coords]) + ')')
logger.info(
f'Feature barcode maximum number of mismatches: {num_mismatches}')
logger.info(
f'Read 2 maximum number of N allowed: {num_n_threshold}')
if num_reads:
logger.info(f'Number of read pairs to analyze: {num_reads:,}')
else:
logger.info('Number of read pairs to analyze: all')
def _get_sequence(read_file):
"""Gets sequences."""
with dnaio.open(file1=read_file,
file2=None,
fileformat='fastq',
mode='r') as f:
for read in f:
yield read.sequence, read.qualities
_reads = islice(_get_sequence(read_file), 0, num_reads)
logger.info('Matching ...')
read_counter = int()
for read_seq, read_qual in _reads:
read_counter += 1
if read_counter % 10_000_000 == 0:
logger.info(f'Reads processed: {read_counter:,}')
if read_seq.count('N') <= num_n_threshold:
x2, y2 = read_coords
fb_queries = query_index(read_seq[x2: y2],
barcode_index=fb_index,
num_mismatches=num_mismatches)
fb_matched = select_query(fb_queries,
read_seq[x2: y2],
read_qual[x2: y2])
if fb_matched:
feature_barcode_count[fb_matched[0]] += 1
feature_barcode_count = {
feature_barcodes[i]: feature_barcode_count[i]
for i in feature_barcode_count
}
logger.info(f'Number of reads processed: {read_counter:,}')
logger.info(
'Number of reads w/ valid feature barcodes: '
f'{sum(feature_barcode_count.values()):,}'
)
return feature_barcode_count
```
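A minimal sketch of driving `analyze_bulk` directly (the file names, coordinates and read counts below are placeholders, not part of the original module):
```python
from fba.qc import analyze_bulk
counts = analyze_bulk(
    read_file='read2.fq.gz',          # placeholder path
    read_coords=(0, 15),              # search the first 15 bases of read 2
    fb_file='feature_barcodes.tsv',   # placeholder path
    num_mismatches=1,
    num_reads=10_000,
)
for feature, n in counts.items():
    print(feature, n)
```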
#### File: fba/fba/utils.py
```python
import logging
import gzip
import bz2
import subprocess
from pathlib import Path
def open_by_suffix(file_name, mode='r'):
"""Opens file based on suffix."""
# noqa modified from https://stackoverflow.com/questions/18367511/how-do-i-automatically-handle-decompression-when-reading-a-file-in-python
file_name = str(file_name)
if file_name.endswith('gz'):
handle = gzip.open(filename=file_name, mode=mode + 't')
elif file_name.endswith('bz2'):
handle = bz2.open(filename=file_name, mode=mode + 't')
else:
handle = open(file=file_name, mode=mode)
return handle
def open_by_magic(file_name):
"""Opens file based on magic."""
    magic_dict = {b'\x1f\x8b\x08': (gzip.open, 'rb'),
                  b'\x42\x5a\x68': (bz2.BZ2File, 'r')}
    max_len = max(len(x) for x in magic_dict)
    with open(file=file_name, mode='rb') as f:
        file_start = f.read(max_len)
for magic, (fn, flag) in magic_dict.items():
if file_start.startswith(magic):
return fn(file_name, flag)
return open(file_name, mode='r')
def get_binary_path(binary_name):
"""Gets executable path.
Parameters
----------
binary_name : str
The name of the executable.
Returns
-------
str
The path and name of the executable.
Raises
------
FileNotFoundError
If the executable is not found in the PATH.
"""
binary_path = subprocess.run(
['which', binary_name], stdout=subprocess.PIPE, universal_newlines=True
).stdout.rstrip()
if Path(binary_path).is_file():
return binary_path
else:
raise FileNotFoundError(binary_name, 'not found in PATH\n')
def run_executable(cmd_line, use_shell=False):
"""Runs executable."""
proc = subprocess.Popen(
cmd_line,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=use_shell,
universal_newlines=True
)
try:
outs, errs = proc.communicate(timeout=100)
except subprocess.TimeoutExpired:
# logger.critical(e, exc_info=True)
proc.kill()
outs, errs = proc.communicate()
return outs, errs
def get_logger(logger_name, log_file=False):
"""Creates a custom logger."""
FORMATTER = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logger = logging.getLogger(logger_name)
logger.setLevel(level=logging.DEBUG)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(logging.Formatter(FORMATTER))
logger.addHandler(console_handler)
if log_file:
file_handler = logging.FileHandler(filename='fba.log')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(FORMATTER))
logger.addHandler(file_handler)
return logger
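# Illustrative usage: get_logger(__name__, log_file=True) logs INFO and above to the
# console and additionally writes everything at DEBUG level to ./fba.log.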
def parse_bowtie2_version():
"""Parses bowtie2 version."""
cmd = [get_binary_path(binary_name='bowtie2'), '--version']
outs, _ = run_executable(cmd_line=cmd)
return outs.split(' version ')[1].split()[0]
def parse_bwa_version():
"""Parses bwa version."""
cmd = [get_binary_path(binary_name='bwa')]
_, errs = run_executable(cmd_line=cmd)
bwa_version = [i for i in errs.split(
'\n') if i.startswith('Ver')][0].split(' ')[1].split('-')[0]
return bwa_version
def parse_samtools_version():
"""Parses samtools version."""
cmd = [get_binary_path(binary_name='samtools'), '--version']
outs, _ = run_executable(cmd_line=cmd)
return outs.split(' ')[1].split()[0]
def parse_kallisto_version():
"""Parses kallisto version."""
cmd = [get_binary_path(binary_name='kallisto'), 'version']
outs, _ = run_executable(cmd_line=cmd)
return outs.rstrip().split(' ')[-1]
def parse_bustools_version():
"""Parses bustools version."""
cmd = [get_binary_path(binary_name='bustools'), 'version']
outs, _ = run_executable(cmd_line=cmd)
return outs.rstrip().split(' ')[-1]
``` |
{
"source": "jlduhaime/combatTest",
"score": 3
} |
#### File: jlduhaime/combatTest/game.py
```python
import sys
import pygame as pg
from pygame.locals import *
from player import Player
from mob import Mob
import constants as c
class Game:
def __init__(self):
pg.init()
self.is_running = False
self.clock = pg.time.Clock()
self.display = pg.display.set_mode((c.SCREEN_WIDTH, c.SCREEN_HEIGHT))
self.player = Player("Hector")
        self.mobs = [Mob() for _ in range(5)]
pg.display.set_caption('Combat Sample')
def on_event(self, event):
""" handles incoming events """
if event.type == QUIT:
self.is_running = False
if event.type == KEYDOWN:
if event.key == pg.K_ESCAPE:
self.is_running = False
return
def on_loop(self):
""" handles game loop """
return
def on_render(self):
""" handles changes to screen object """
self.display.fill(c.BLACK)
pg.display.flip()
return
def on_cleanup(self):
""" handles resource cleanup """
return
    def run(self):
        """ handles the game loop """
self.is_running = True
while self.is_running:
for event in pg.event.get():
self.on_event(event)
self.on_loop()
self.on_render()
self.clock.tick(c.FPS)
self.on_cleanup()
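# Hedged entry-point sketch (the original repository may launch the game elsewhere,
# e.g. from a separate main module):
#   if __name__ == '__main__':
#       Game().run()
#       pg.quit()
#       sys.exit()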
``` |
{
"source": "jldunne/py-witness",
"score": 3
} |
#### File: py-witness/src/witness.py
```python
import argparse
import os
from dotenv import load_dotenv
from web3 import Web3
load_dotenv()
NODE_HTTP = os.getenv('NODE_HTTP')
def construct_access_list(transactions):
block_addresses = {}
transaction_access_list = {}
# EIP-3584 block access list is structured as follows -
# block_access_list = [access_list_entry, ...]
#
# Each access_list_entry consists of the following:
# - address
# - storage keys associated with this address over whole block
# - list of 2-tuples (transaction index, list of storage keys for this tx)
for tx in transactions:
tx_index = tx.transactionIndex
if not hasattr(tx, 'accessList'):
continue
for access_list in tx.accessList:
address = access_list['address']
if address not in block_addresses:
block_addresses[address] = access_list['storageKeys']
            else:
                # Merge in any storage keys not already recorded for this address.
                for slot in access_list['storageKeys']:
                    if slot not in block_addresses[address]:
                        block_addresses[address].append(slot)
if address not in transaction_access_list:
transaction_access_list[address] = [(tx_index, sorted(access_list['storageKeys']))]
else:
transaction_access_list[address].append((tx_index, sorted(access_list['storageKeys'])))
# at this point we have a dictionary of block addresses and storage slots
# and another one of addresses -> (tx indices, per-transaction storage slots)
# let's merge them together
block_access_list = []
for address in block_addresses.keys():
access_list_entry = [address, sorted(block_addresses[address]), sorted(transaction_access_list[address], key=lambda x: x[0])]
block_access_list.append(access_list_entry)
# The block access list needs to be sorted according to the following rules:
# 1. block access list is sorted by address
# 2. storage_keys list is sorted
# 3. transaction tuples are sorted by tx_index
# 2 & 3 are taken care of in construction above
# Sort the whole block_access_list by address
block_access_list = sorted(block_access_list, key=lambda x: x[0])
return block_access_list
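# Illustrative shape of the returned block access list (addresses and slots are made up):
#   [['0xaaa...', ['0x01', '0x02'], [(0, ['0x01']), (3, ['0x01', '0x02'])]],
#    ['0xbbb...', ['0x00'], [(1, ['0x00'])]]]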
def construct_partial_witness(transactions, w3, block_number):
from_addresses = []
to_addresses = []
for tx in transactions:
from_addresses.append(tx['from'])
        if tx.to:  # skip contract-creation transactions, which have no 'to' address
            to_addresses.append(tx.to)
addresses = set(from_addresses).union(set(to_addresses))
addresses = list(addresses)
proofs = []
    slots = list(range(256))
for adx in addresses:
print("Generating proof for account: "+adx)
proof = w3.eth.get_proof(adx, slots, block_number)
print("Appending proof")
proofs.append(proof)
return proofs
def print_block(args, w3):
# get the block number
block_number = args.block[0]
block = w3.eth.get_block(block_number, full_transactions=True)
    if not block.transactions or not getattr(block.transactions[0], 'accessList', None):
        print("This block does not contain tx-level access lists - aborting")
        return
block_access_list = construct_access_list(block.transactions)
print(block)
print("Now printing block access list...\n")
print(block_access_list)
print("Constructing partial witness")
    partial_witness = construct_partial_witness(block.transactions, w3, block_number)
    print("Generated partial witness with {} account proofs".format(len(partial_witness)))
def main():
# init connection from env
node_url = NODE_HTTP
w3 = Web3(Web3.HTTPProvider(node_url))
    if not w3.isConnected():
        print('Connection to node failed! Please check your env')
        return
parser = argparse.ArgumentParser(
description="A command line tool to generate access lists and witnesses from blocks")
parser.add_argument("-b", "--block", type=int, nargs=1,
metavar="block-number", default=None,
help="Returns the specified block")
args = parser.parse_args()
if args.block is not None:
print_block(args, w3)
if __name__ == "__main__":
# calling the main function
main()
``` |
{
"source": "jleaniz/dftimewolf",
"score": 2
} |
#### File: lib/processors/turbinia_artifact.py
```python
import os
import tempfile
from typing import Optional, TYPE_CHECKING, Type
from turbinia import TurbiniaException, evidence
from turbinia import config as turbinia_config
from dftimewolf.lib import module
from dftimewolf.lib.containers import containers, interface
from dftimewolf.lib.modules import manager as modules_manager
from dftimewolf.lib.processors.turbinia_base import TurbiniaProcessorBase
if TYPE_CHECKING:
from dftimewolf.lib import state
# pylint: disable=no-member
class TurbiniaArtifactProcessor(TurbiniaProcessorBase,
module.ThreadAwareModule):
"""Processes Exported GRR Artifacts with Turbinia.
Attributes:
directory_path (str): Name of the directory to process.
"""
def __init__(self,
state: "state.DFTimewolfState",
name: Optional[str]=None,
critical: bool=False) -> None:
"""Initializes a Turbinia Artifacts disks processor.
Args:
state (DFTimewolfState): recipe state.
name (Optional[str]): The module's runtime name.
critical (Optional[bool]): True if the module is critical, which causes
the entire recipe to fail if the module encounters an error.
"""
module.ThreadAwareModule.__init__(self, state, name=name, critical=critical)
TurbiniaProcessorBase.__init__(self, self.logger)
self.output_directory = ''
# pylint: disable=arguments-differ
def SetUp(self,
turbinia_config_file: str,
project: str,
turbinia_zone: str,
output_directory: str,
sketch_id: int,
run_all_jobs: bool) -> None:
"""Sets up the object attributes.
Args:
turbinia_config_file (str): Full path to the Turbinia config file to use.
project (str): name of the GCP project containing the disk to process.
turbinia_zone (str): GCP zone in which the Turbinia server is running.
output_directory (str): Name of the directory to process.
sketch_id (int): The Timesketch sketch ID.
run_all_jobs (bool): Whether to run all jobs instead of a faster subset.
"""
self.turbinia_config_file = turbinia_config_file
self.output_directory = output_directory
if not self.output_directory:
self.output_directory = tempfile.mkdtemp(prefix='turbinia-results')
self.logger.success('Turbinia results will be dumped to {0:s}'.format(
self.output_directory))
try:
self.TurbiniaSetUp(project, turbinia_zone, sketch_id, run_all_jobs)
except TurbiniaException as exception:
self.ModuleError(str(exception), critical=True)
return
def Process(self, container: containers.RemoteFSPath) -> None:
"""Process files with Turbinia."""
log_file_path = os.path.join(self._output_path,
'{0:s}_{1:s}-turbinia.log'.format(
container.hostname, container.path.replace('/', '_')))
self.logger.info('Turbinia log file: {0:s}'.format(log_file_path))
self.logger.info(
'Processing remote FS path {0:s} from previous collector'.format(
container.path))
evidence_ = evidence.CompressedDirectory(
compressed_directory=container.path, source_path=container.path)
try:
task_data, _ = self.TurbiniaProcess(evidence_)
except TurbiniaException as exception:
self.ModuleError(str(exception), critical=True)
self.logger.info('Files generated by Turbinia:')
for task in task_data:
for path in task.get('saved_paths') or []:
# Ignore temporary files generated by turbinia
if path.startswith(turbinia_config.TMP_DIR):
continue
# We're only interested in plaso files for the time being.
if path.endswith('.plaso'):
self.logger.success(' {0:s}: {1:s}'.format(task['name'], path))
container = containers.RemoteFSPath(
path=path, hostname=container.hostname)
self.state.StoreContainer(container)
@staticmethod
def GetThreadOnContainerType() -> Type[interface.AttributeContainer]:
return containers.RemoteFSPath
def GetThreadPoolSize(self) -> int:
return self.parallel_count
@staticmethod
def KeepThreadedContainersInState() -> bool:
return False
def PreProcess(self) -> None:
pass
def PostProcess(self) -> None:
pass
modules_manager.ModulesManager.RegisterModule(TurbiniaArtifactProcessor)
```
#### File: lib/collectors/gce_disk_copy.py
```python
import unittest
from googleapiclient.errors import HttpError
import httplib2
import mock
from libcloudforensics.providers.gcp.internal import project as gcp_project
from libcloudforensics.providers.gcp.internal import compute
from libcloudforensics import errors as lcf_errors
from dftimewolf import config
from dftimewolf.lib import errors, state
from dftimewolf.lib.containers import containers
from dftimewolf.lib.collectors import gce_disk_copy
FAKE_PROJECT = gcp_project.GoogleCloudProject(
'test-target-project-name',
'fake_zone')
FAKE_INSTANCE = compute.GoogleComputeInstance(
FAKE_PROJECT.project_id,
'fake_zone',
'fake-instance')
FAKE_DISK = compute.GoogleComputeDisk(
FAKE_PROJECT.project_id,
'fake_zone',
'disk1')
FAKE_DISK_MULTIPLE = [
compute.GoogleComputeDisk(
FAKE_PROJECT.project_id,
'fake_zone',
'disk1'),
compute.GoogleComputeDisk(
FAKE_PROJECT.project_id,
'fake_zone',
'disk2')
]
FAKE_BOOT_DISK = compute.GoogleComputeDisk(
FAKE_PROJECT.project_id,
'fake_zone',
'bootdisk')
FAKE_DISK_COPY = [
compute.GoogleComputeDisk(
FAKE_PROJECT.project_id,
'fake_zone',
'disk1-copy'),
compute.GoogleComputeDisk(
FAKE_PROJECT.project_id,
'fake_zone',
'disk2-copy')
]
class GCEDiskCopyTest(unittest.TestCase):
"""Tests for the GCEDiskCopy collector."""
def testInitialization(self):
"""Tests that the collector can be initialized."""
test_state = state.DFTimewolfState(config.Config)
collector = gce_disk_copy.GCEDiskCopy(test_state)
self.assertIsNotNone(collector)
def testSetUp(self):
"""Tests the SetUp method of the collector."""
test_state = state.DFTimewolfState(config.Config)
# Test setup with single disk and instance
collector = gce_disk_copy.GCEDiskCopy(test_state)
collector.SetUp(
'test-destination-project-name',
'test-source-project-name',
'fake_zone',
remote_instance_names='my-owned-instance',
disk_names='fake-disk',
all_disks=True,
stop_instances=True
)
self.assertEqual(test_state.errors, [])
self.assertEqual(collector.destination_project.project_id,
'test-destination-project-name')
self.assertEqual(collector.source_project.project_id,
'test-source-project-name')
self.assertEqual(collector.remote_instance_names, ['my-owned-instance'])
self.assertEqual(collector.disk_names, ['fake-disk'])
self.assertEqual(collector.all_disks, True)
self.assertEqual(collector.stop_instances, True)
# Test setup with multiple disks and instances
collector = gce_disk_copy.GCEDiskCopy(test_state)
collector.SetUp(
'test-destination-project-name',
'test-source-project-name',
'fake_zone',
'my-owned-instance1,my-owned-instance2',
'fake-disk-1,fake-disk-2',
False,
False
)
self.assertEqual(test_state.errors, [])
self.assertEqual(collector.destination_project.project_id,
'test-destination-project-name')
self.assertEqual(collector.source_project.project_id,
'test-source-project-name')
self.assertEqual(sorted(collector.remote_instance_names), sorted([
'my-owned-instance1', 'my-owned-instance2']))
self.assertEqual(sorted(collector.disk_names), sorted([
'fake-disk-1', 'fake-disk-2']))
self.assertEqual(collector.all_disks, False)
self.assertEqual(collector.stop_instances, False)
# Test setup with no destination project
collector = gce_disk_copy.GCEDiskCopy(test_state)
collector.SetUp(
None,
'test-source-project-name',
'fake_zone',
remote_instance_names='my-owned-instance',
disk_names='fake-disk',
all_disks=True,
stop_instances=True
)
self.assertEqual(test_state.errors, [])
self.assertEqual(collector.destination_project.project_id,
'test-source-project-name')
self.assertEqual(collector.source_project.project_id,
'test-source-project-name')
self.assertEqual(collector.remote_instance_names, ['my-owned-instance'])
self.assertEqual(collector.disk_names, ['fake-disk'])
self.assertEqual(collector.all_disks, True)
self.assertEqual(collector.stop_instances, True)
def testSetUpNothingProvided(self):
"""Tests that SetUp fails if no disks or instances are provided."""
test_state = state.DFTimewolfState(config.Config)
collector = gce_disk_copy.GCEDiskCopy(test_state)
with self.assertRaises(errors.DFTimewolfError) as error:
collector.SetUp(
'test-destination-project-name',
'test-source-project-name',
'fake_zone',
None,
None,
False,
False
)
self.assertEqual(error.exception.message,
'You need to specify at least an instance name or disks to copy')
def testStopWithNoInstance(self):
"""Tests that SetUp fails if stop instance is requested, but no instance
provided.
"""
test_state = state.DFTimewolfState(config.Config)
collector = gce_disk_copy.GCEDiskCopy(test_state)
with self.assertRaises(errors.DFTimewolfError) as error:
collector.SetUp(
'test-destination-project-name',
'test-source-project-name',
'fake_zone',
None,
'disk1',
False,
True
)
self.assertEqual(error.exception.message,
'You need to specify an instance name to stop the instance')
# pylint: disable=line-too-long,invalid-name
@mock.patch('libcloudforensics.providers.gcp.internal.compute.GoogleComputeInstance.GetBootDisk')
@mock.patch('libcloudforensics.providers.gcp.internal.compute.GoogleCloudCompute.GetDisk')
@mock.patch('libcloudforensics.providers.gcp.internal.compute.GoogleComputeInstance.ListDisks')
@mock.patch('libcloudforensics.providers.gcp.internal.compute.GoogleCloudCompute.GetInstance')
# We're manually calling protected functions
# pylint: disable=protected-access
def testPreProcess(self,
mock_get_instance,
mock_list_disks,
mock_get_disk,
mock_GetBootDisk):
"""Tests the _FindDisksToCopy function with different SetUp() calls."""
test_state = state.DFTimewolfState(config.Config)
mock_list_disks.return_value = {
'bootdisk': FAKE_BOOT_DISK,
'disk1': FAKE_DISK
}
mock_get_disk.return_value = FAKE_DISK
mock_get_instance.return_value = FAKE_INSTANCE
mock_GetBootDisk.return_value = FAKE_BOOT_DISK
# Nothing is specified, GoogleCloudCollector should collect the instance's
# boot disk
collector = gce_disk_copy.GCEDiskCopy(test_state)
collector.SetUp(
'test-analysis-project-name',
'test-target-project-name',
'fake_zone',
'my-owned-instance',
None,
False,
False
)
collector.PreProcess()
disks = test_state.GetContainers(containers.GCEDisk)
self.assertEqual(len(disks), 1)
self.assertEqual(disks[0].name, 'bootdisk')
mock_GetBootDisk.assert_called_once()
# Specifying all_disks should return all disks for the instance
# (see mock_list_disks return value)
test_state.GetContainers(containers.GCEDisk, True) # Clear containers first
collector.SetUp(
'test-analysis-project-name',
'test-target-project-name',
'fake_zone',
'my-owned-instance',
None,
True,
False
)
collector.PreProcess()
disks = test_state.GetContainers(containers.GCEDisk)
self.assertEqual(len(disks), 2)
self.assertEqual(disks[0].name, 'bootdisk')
self.assertEqual(disks[1].name, 'disk1')
# Specifying a csv list of disks should have those included also
collector = gce_disk_copy.GCEDiskCopy(test_state)
test_state.GetContainers(containers.GCEDisk, True) # Clear containers first
collector.SetUp(
'test-analysis-project-name',
'test-target-project-name',
'fake_zone',
'my-owned-instance',
'another_disk_1,another_disk_2',
True,
False
)
collector.PreProcess()
disks = test_state.GetContainers(containers.GCEDisk)
self.assertEqual(len(disks), 4)
self.assertEqual(disks[0].name, 'another_disk_1')
self.assertEqual(disks[1].name, 'another_disk_2')
self.assertEqual(disks[2].name, 'bootdisk')
self.assertEqual(disks[3].name, 'disk1')
@mock.patch('libcloudforensics.providers.gcp.internal.compute.GoogleCloudCompute.GetInstance')
def testInstanceNotFound(self, mock_GetInstance):
"""Test that an error is thrown when the instance isn't found."""
mock_GetInstance.side_effect = lcf_errors.ResourceNotFoundError('message',
'name')
test_state = state.DFTimewolfState(config.Config)
collector = gce_disk_copy.GCEDiskCopy(test_state)
collector.SetUp(
'test-analysis-project-name',
'test-target-project-name',
'fake_zone',
'nonexistent',
None,
False,
False
)
with self.assertRaises(errors.DFTimewolfError) as error:
collector.PreProcess()
self.assertEqual(error.exception.message,
'Instance "nonexistent" in test-target-project-name not found or '
'insufficient permissions')
@mock.patch('libcloudforensics.providers.gcp.internal.compute.GoogleCloudCompute.GetInstance')
def testHTTPErrors(self, mock_GetInstance):
"""Tests the 403 checked for in PreProcess."""
test_state = state.DFTimewolfState(config.Config)
# 403
mock_GetInstance.side_effect = HttpError(httplib2.Response({
'status': 403,
'reason': 'The caller does not have permission'
}), b'')
collector = gce_disk_copy.GCEDiskCopy(test_state)
collector.SetUp(
'test-analysis-project-name',
'test-target-project-name',
'fake_zone',
'nonexistent',
None,
False,
False
)
with self.assertRaises(errors.DFTimewolfError) as error:
collector.PreProcess()
self.assertEqual(error.exception.message,
'403 response. Do you have appropriate permissions on the project?')
# Other (500)
mock_GetInstance.side_effect = HttpError(httplib2.Response({
'status': 500,
'reason': 'Internal Server Error'
}), b'')
collector = gce_disk_copy.GCEDiskCopy(test_state)
collector.SetUp(
'test-analysis-project-name',
'test-target-project-name',
'fake_zone',
'nonexistent',
None,
False,
False
)
with self.assertRaises(errors.DFTimewolfError) as error:
collector.PreProcess()
self.assertEqual(error.exception.message,
'<HttpError 500 "Ok">')
# pylint: disable=line-too-long
@mock.patch('libcloudforensics.providers.gcp.internal.compute.GoogleCloudCompute.GetInstance')
@mock.patch('libcloudforensics.providers.gcp.forensics.CreateDiskCopy')
@mock.patch('dftimewolf.lib.collectors.gce_disk_copy.GCEDiskCopy._GetDisksFromInstance')
@mock.patch('libcloudforensics.providers.gcp.internal.compute.GoogleComputeInstance.ListDisks')
def testProcess(self,
mock_list_disks,
mock_getDisksFromInstance,
mock_CreateDiskCopy,
mock_GetInstance):
"""Tests the collector's Process() function."""
mock_getDisksFromInstance.return_value = FAKE_DISK_MULTIPLE
mock_CreateDiskCopy.side_effect = FAKE_DISK_COPY
mock_GetInstance.return_value = FAKE_INSTANCE
mock_list_disks.return_value = {
'bootdisk': FAKE_BOOT_DISK,
'disk1': FAKE_DISK
}
test_state = state.DFTimewolfState(config.Config)
collector = gce_disk_copy.GCEDiskCopy(test_state)
collector.SetUp(
'test-analysis-project-name',
'test-target-project-name',
'fake_zone',
'my-owned-instance',
None,
True,
True
)
FAKE_INSTANCE.Stop = mock.MagicMock()
collector.PreProcess()
conts = test_state.GetContainers(collector.GetThreadOnContainerType())
for d in conts:
collector.Process(d)
mock_CreateDiskCopy.assert_called_with(
'test-target-project-name',
'test-analysis-project-name',
FAKE_INSTANCE.zone,
disk_name=d.name)
collector.PostProcess()
FAKE_INSTANCE.Stop.assert_called_once()
out_disks = test_state.GetContainers(containers.GCEDiskEvidence)
out_disk_names = sorted([d.name for d in out_disks])
expected_disk_names = ['disk1-copy', 'disk2-copy']
self.assertEqual(out_disk_names, expected_disk_names)
for d in out_disks:
self.assertEqual(d.project, 'test-analysis-project-name')
# Do it again, but we don't want to stop the instance this time.
# First, clear the containers
test_state.GetContainers(containers.GCEDisk, True)
test_state.GetContainers(containers.GCEDiskEvidence, True)
mock_CreateDiskCopy.side_effect = FAKE_DISK_COPY
collector = gce_disk_copy.GCEDiskCopy(test_state)
collector.SetUp(
'test-analysis-project-name',
'test-target-project-name',
'fake_zone',
'my-owned-instance',
None,
True,
False,
)
FAKE_INSTANCE.Stop = mock.MagicMock()
collector.PreProcess()
conts = test_state.GetContainers(collector.GetThreadOnContainerType())
for d in conts:
collector.Process(d)
mock_CreateDiskCopy.assert_called_with(
'test-target-project-name',
'test-analysis-project-name',
FAKE_INSTANCE.zone,
disk_name=d.name)
collector.PostProcess()
FAKE_INSTANCE.Stop.assert_not_called()
out_disks = test_state.GetContainers(containers.GCEDiskEvidence)
out_disk_names = sorted([d.name for d in out_disks])
expected_disk_names = ['disk1-copy', 'disk2-copy']
self.assertEqual(out_disk_names, expected_disk_names)
for d in out_disks:
self.assertEqual(d.project, 'test-analysis-project-name')
@mock.patch('libcloudforensics.providers.gcp.forensics.CreateDiskCopy')
def testProcessDiskCopyErrors(self, mock_CreateDiskCopy):
"""Tests that Process errors correctly in some scenarios."""
test_state = state.DFTimewolfState(config.Config)
# Fail if the disk cannot be found.
mock_CreateDiskCopy.side_effect = lcf_errors.ResourceNotFoundError(
'Could not find disk "nonexistent": Disk nonexistent was not found in '
'project test-source-project-name',
'name')
collector = gce_disk_copy.GCEDiskCopy(test_state)
collector.SetUp(
'test-destination-project-name',
'test-source-project-name',
'fake_zone',
None,
'nonexistent',
False,
False
)
collector.PreProcess()
conts = test_state.GetContainers(collector.GetThreadOnContainerType())
for d in conts:
collector.Process(d)
with self.assertRaises(errors.DFTimewolfError) as error:
collector.PostProcess()
self.assertEqual(error.exception.message,
'No successful disk copy operations completed.')
# Fail if the disk cannot be created
mock_CreateDiskCopy.side_effect = lcf_errors.ResourceCreationError(
'Could not create disk. Permission denied.',
'name')
collector = gce_disk_copy.GCEDiskCopy(test_state)
collector.SetUp(
'test-destination-project-name',
'test-source-project-name',
'fake_zone',
None,
'nonexistent',
False,
False
)
collector.PreProcess()
conts = test_state.GetContainers(collector.GetThreadOnContainerType())
with self.assertRaises(errors.DFTimewolfError) as error:
for d in conts:
collector.Process(d)
self.assertEqual(error.exception.message,
'Could not create disk. Permission denied.')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jleaniz/dtformats",
"score": 3
} |
#### File: dtformats/dtformats/cpio.py
```python
import os
from dtformats import data_format
from dtformats import data_range
from dtformats import errors
class CPIOArchiveFileEntry(data_range.DataRange):
"""CPIO archive file entry.
Attributes:
data_offset (int): offset of the data.
data_size (int): size of the data.
group_identifier (int): group identifier (GID).
inode_number (int): inode number.
mode (int): file access mode.
modification_time (int): modification time, in number of seconds since
January 1, 1970 00:00:00.
path (str): path.
size (int): size of the file entry data.
user_identifier (int): user identifier (UID).
"""
def __init__(self, file_object, data_offset=0, data_size=0):
"""Initializes a CPIO archive file entry.
Args:
file_object (file): file-like object of the CPIO archive file.
data_offset (Optional[int]): offset of the data.
data_size (Optional[int]): size of the data.
"""
super(CPIOArchiveFileEntry, self).__init__(
file_object, data_offset=data_offset, data_size=data_size)
self.group_identifier = None
self.inode_number = None
self.mode = None
self.modification_time = None
self.path = None
self.size = None
self.user_identifier = None
class CPIOArchiveFile(data_format.BinaryDataFile):
"""CPIO archive file.
Attributes:
file_format (str): CPIO file format.
size (int): size of the CPIO file data.
"""
# Using a class constant significantly speeds up the time required to load
# the dtFabric definition file.
_FABRIC = data_format.BinaryDataFile.ReadDefinitionFile('cpio.yaml')
# TODO: move path into structure.
_CPIO_SIGNATURE_BINARY_BIG_ENDIAN = b'\x71\xc7'
_CPIO_SIGNATURE_BINARY_LITTLE_ENDIAN = b'\xc7\x71'
_CPIO_SIGNATURE_PORTABLE_ASCII = b'070707'
_CPIO_SIGNATURE_NEW_ASCII = b'070701'
_CPIO_SIGNATURE_NEW_ASCII_WITH_CHECKSUM = b'070702'
_CPIO_ATTRIBUTE_NAMES_ODC = (
'device_number', 'inode_number', 'mode', 'user_identifier',
'group_identifier', 'number_of_links', 'special_device_number',
'modification_time', 'path_size', 'file_size')
_CPIO_ATTRIBUTE_NAMES_CRC = (
'inode_number', 'mode', 'user_identifier', 'group_identifier',
'number_of_links', 'modification_time', 'path_size',
'file_size', 'device_major_number', 'device_minor_number',
'special_device_major_number', 'special_device_minor_number',
'checksum')
def __init__(self, debug=False, output_writer=None):
"""Initializes a CPIO archive file.
Args:
debug (Optional[bool]): True if debug information should be written.
output_writer (Optional[OutputWriter]): output writer.
"""
super(CPIOArchiveFile, self).__init__(
debug=debug, output_writer=output_writer)
self._file_entries = None
self.file_format = None
self.size = None
def _DebugPrintFileEntry(self, file_entry):
"""Prints file entry debug information.
Args:
file_entry (cpio_new_file_entry): file entry.
"""
if self.file_format in ('bin-big-endian', 'bin-little-endian'):
value_string = '0x{0:04x}'.format(file_entry.signature)
else:
value_string = '{0!s}'.format(file_entry.signature)
self._DebugPrintValue('Signature', value_string)
if self.file_format not in ('crc', 'newc'):
value_string = '{0:d}'.format(file_entry.device_number)
self._DebugPrintValue('Device number', value_string)
value_string = '{0:d}'.format(file_entry.inode_number)
self._DebugPrintValue('Inode number', value_string)
value_string = '{0:o}'.format(file_entry.mode)
self._DebugPrintValue('Mode', value_string)
value_string = '{0:d}'.format(file_entry.user_identifier)
self._DebugPrintValue('User identifier (UID)', value_string)
value_string = '{0:d}'.format(file_entry.group_identifier)
self._DebugPrintValue('Group identifier (GID)', value_string)
value_string = '{0:d}'.format(file_entry.number_of_links)
self._DebugPrintValue('Number of links', value_string)
if self.file_format not in ('crc', 'newc'):
value_string = '{0:d}'.format(file_entry.special_device_number)
self._DebugPrintValue('Special device number', value_string)
value_string = '{0:d}'.format(file_entry.modification_time)
self._DebugPrintValue('Modification time', value_string)
if self.file_format not in ('crc', 'newc'):
value_string = '{0:d}'.format(file_entry.path_size)
self._DebugPrintValue('Path size', value_string)
value_string = '{0:d}'.format(file_entry.file_size)
self._DebugPrintValue('File size', value_string)
if self.file_format in ('crc', 'newc'):
value_string = '{0:d}'.format(file_entry.device_major_number)
self._DebugPrintValue('Device major number', value_string)
value_string = '{0:d}'.format(file_entry.device_minor_number)
self._DebugPrintValue('Device minor number', value_string)
value_string = '{0:d}'.format(file_entry.special_device_major_number)
self._DebugPrintValue('Special device major number', value_string)
value_string = '{0:d}'.format(file_entry.special_device_minor_number)
self._DebugPrintValue('Special device minor number', value_string)
value_string = '{0:d}'.format(file_entry.path_size)
self._DebugPrintValue('Path size', value_string)
value_string = '0x{0:08x}'.format(file_entry.checksum)
self._DebugPrintValue('Checksum', value_string)
def _ReadFileEntry(self, file_object, file_offset):
"""Reads a file entry.
Args:
file_object (file): file-like object.
file_offset (int): offset of the data relative to the start of
the file-like object.
Returns:
CPIOArchiveFileEntry: a file entry.
Raises:
ParseError: if the file entry cannot be read.
"""
if self.file_format == 'bin-big-endian':
data_type_map = self._GetDataTypeMap('cpio_binary_big_endian_file_entry')
elif self.file_format == 'bin-little-endian':
data_type_map = self._GetDataTypeMap(
'cpio_binary_little_endian_file_entry')
elif self.file_format == 'odc':
data_type_map = self._GetDataTypeMap('cpio_portable_ascii_file_entry')
elif self.file_format in ('crc', 'newc'):
data_type_map = self._GetDataTypeMap('cpio_new_ascii_file_entry')
file_entry, file_entry_data_size = self._ReadStructureFromFileObject(
file_object, file_offset, data_type_map, 'file entry')
file_offset += file_entry_data_size
if self.file_format in ('bin-big-endian', 'bin-little-endian'):
file_entry.modification_time = (
(file_entry.modification_time.upper << 16) |
file_entry.modification_time.lower)
file_entry.file_size = (
(file_entry.file_size.upper << 16) | file_entry.file_size.lower)
if self.file_format == 'odc':
for attribute_name in self._CPIO_ATTRIBUTE_NAMES_ODC:
value = getattr(file_entry, attribute_name, None)
try:
value = int(value, 8)
except ValueError:
raise errors.ParseError(
'Unable to convert attribute: {0:s} into an integer'.format(
attribute_name))
        setattr(file_entry, attribute_name, value)
elif self.file_format in ('crc', 'newc'):
for attribute_name in self._CPIO_ATTRIBUTE_NAMES_CRC:
value = getattr(file_entry, attribute_name, None)
try:
value = int(value, 16)
except ValueError:
raise errors.ParseError(
'Unable to convert attribute: {0:s} into an integer'.format(
attribute_name))
        setattr(file_entry, attribute_name, value)
if self._debug:
self._DebugPrintFileEntry(file_entry)
path_data = file_object.read(file_entry.path_size)
if self._debug:
self._DebugPrintData('Path data', path_data)
file_offset += file_entry.path_size
# TODO: should this be ASCII?
path = path_data.decode('ascii')
path, _, _ = path.partition('\x00')
if self._debug:
self._DebugPrintValue('Path', path)
if self.file_format in ('bin-big-endian', 'bin-little-endian'):
padding_size = file_offset % 2
if padding_size > 0:
padding_size = 2 - padding_size
elif self.file_format == 'odc':
padding_size = 0
elif self.file_format in ('crc', 'newc'):
padding_size = file_offset % 4
if padding_size > 0:
padding_size = 4 - padding_size
if self._debug:
padding_data = file_object.read(padding_size)
self._DebugPrintData('Path alignment padding', padding_data)
file_offset += padding_size
archive_file_entry = CPIOArchiveFileEntry(file_object)
archive_file_entry.data_offset = file_offset
archive_file_entry.data_size = file_entry.file_size
archive_file_entry.group_identifier = file_entry.group_identifier
archive_file_entry.inode_number = file_entry.inode_number
archive_file_entry.modification_time = file_entry.modification_time
archive_file_entry.path = path
archive_file_entry.mode = file_entry.mode
archive_file_entry.size = (
file_entry_data_size + file_entry.path_size + padding_size +
file_entry.file_size)
archive_file_entry.user_identifier = file_entry.user_identifier
file_offset += file_entry.file_size
if self.file_format in ('bin-big-endian', 'bin-little-endian'):
padding_size = file_offset % 2
if padding_size > 0:
padding_size = 2 - padding_size
elif self.file_format == 'odc':
padding_size = 0
elif self.file_format in ('crc', 'newc'):
padding_size = file_offset % 4
if padding_size > 0:
padding_size = 4 - padding_size
if padding_size > 0:
if self._debug:
file_object.seek(file_offset, os.SEEK_SET)
padding_data = file_object.read(padding_size)
self._DebugPrintData('File data alignment padding', padding_data)
archive_file_entry.size += padding_size
if self._debug:
self._DebugPrintText('\n')
return archive_file_entry
def _ReadFileEntries(self, file_object):
"""Reads the file entries from the cpio archive.
Args:
file_object (file): file-like object.
"""
self._file_entries = {}
file_offset = 0
while file_offset < self._file_size or self._file_size == 0:
file_entry = self._ReadFileEntry(file_object, file_offset)
file_offset += file_entry.size
if file_entry.path == 'TRAILER!!!':
break
if file_entry.path in self._file_entries:
# TODO: alert on file entries with duplicate paths?
continue
self._file_entries[file_entry.path] = file_entry
self.size = file_offset
def Close(self):
"""Closes the CPIO archive file."""
super(CPIOArchiveFile, self).Close()
self._file_entries = None
def FileEntryExistsByPath(self, path):
"""Determines if file entry for a specific path exists.
Args:
path (str): path of the file entry.
Returns:
bool: True if the file entry exists.
"""
if not self._file_entries:
return False
return path in self._file_entries
def GetFileEntries(self, path_prefix=''):
"""Retrieves the file entries.
Args:
path_prefix (Optional[str]): path prefix.
Yields:
CPIOArchiveFileEntry: CPIO archive file entry.
"""
if self._file_entries:
for path, file_entry in self._file_entries.items():
if path.startswith(path_prefix):
yield file_entry
def GetFileEntryByPath(self, path):
"""Retrieves a file entry for a specific path.
Args:
path (str): path of the file entry.
Returns:
CPIOArchiveFileEntry: CPIO archive file entry or None.
"""
if not self._file_entries:
      return None
return self._file_entries.get(path, None)
def ReadFileObject(self, file_object):
"""Reads binary data from a file-like object.
Args:
file_object (file): file-like object.
Raises:
ParseError: if the format signature is not supported.
"""
file_object.seek(0, os.SEEK_SET)
signature_data = file_object.read(6)
self.file_format = None
if len(signature_data) > 2:
if signature_data[:2] == self._CPIO_SIGNATURE_BINARY_BIG_ENDIAN:
self.file_format = 'bin-big-endian'
elif signature_data[:2] == self._CPIO_SIGNATURE_BINARY_LITTLE_ENDIAN:
self.file_format = 'bin-little-endian'
elif signature_data == self._CPIO_SIGNATURE_PORTABLE_ASCII:
self.file_format = 'odc'
elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII:
self.file_format = 'newc'
elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII_WITH_CHECKSUM:
self.file_format = 'crc'
if self.file_format is None:
raise errors.ParseError('Unsupported CPIO format.')
self._ReadFileEntries(file_object)
# TODO: print trailing data
```
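A minimal usage sketch of the archive reader above; the archive path is hypothetical and `Open()`/`Close()` are assumed to come from the `data_format.BinaryDataFile` base class:
```python
from dtformats import cpio

archive = cpio.CPIOArchiveFile(debug=False)
archive.Open('initrd.cpio')  # hypothetical path; Open() assumed from BinaryDataFile

print('Detected format:', archive.file_format)
for file_entry in archive.GetFileEntries():
    # Attributes populated in _ReadFileEntry() above.
    print(file_entry.path, file_entry.data_size, file_entry.modification_time)

archive.Close()
```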
#### File: dtformats/dtformats/rp_log.py
```python
from dtformats import data_format
class RestorePointLogFile(data_format.BinaryDataFile):
"""Windows Restore Point rp.log file."""
# Using a class constant significantly speeds up the time required to load
# the dtFabric definition file.
_FABRIC = data_format.BinaryDataFile.ReadDefinitionFile('rp_log.yaml')
# TODO: implement an item based lookup.
_EVENT_TYPES = {
0x00000064: 'BEGIN_SYSTEM_CHANGE',
0x00000065: 'END_SYSTEM_CHANGE',
0x00000066: 'BEGIN_NESTED_SYSTEM_CHANGE',
0x00000067: 'END_NESTED_SYSTEM_CHANGE',
}
# TODO: implement an item based lookup.
_RESTORE_POINT_TYPES = {
0x00000000: 'APPLICATION_INSTALL',
0x00000001: 'APPLICATION_UNINSTALL',
0x0000000a: 'DEVICE_DRIVER_INSTALL',
0x0000000c: 'MODIFY_SETTINGS',
0x0000000d: 'CANCELLED_OPERATION',
}
def __init__(self, debug=False, output_writer=None):
"""Initializes a Windows Restore Point rp.log file.
Args:
debug (Optional[bool]): True if debug information should be written.
output_writer (Optional[OutputWriter]): output writer.
"""
super(RestorePointLogFile, self).__init__(
debug=debug, output_writer=output_writer)
def _DebugPrintFileFooter(self, file_footer):
"""Prints file footer debug information.
Args:
file_footer (rp_log_file_footer): file footer.
"""
self._DebugPrintFiletimeValue('Creation time', file_footer.creation_time)
self._DebugPrintText('\n')
def _DebugPrintFileHeader(self, file_header):
"""Prints file header debug information.
Args:
file_header (rp_log_file_header): file header.
"""
event_type_string = self._EVENT_TYPES.get(
file_header.event_type, 'UNKNOWN')
value_string = '0x{0:08x} ({1:s})'.format(
file_header.event_type, event_type_string)
self._DebugPrintValue('Event type', value_string)
restore_point_type_string = self._RESTORE_POINT_TYPES.get(
file_header.restore_point_type, 'UNKNOWN')
value_string = '0x{0:08x} ({1:s})'.format(
file_header.restore_point_type, restore_point_type_string)
self._DebugPrintValue('Restore point type', value_string)
value_string = '0x{0:08x}'.format(file_header.sequence_number)
self._DebugPrintValue('Sequence number', value_string)
self._DebugPrintValue('Description', file_header.description)
self._DebugPrintText('\n')
def _ReadFileFooter(self, file_object):
"""Reads the file footer.
Args:
file_object (file): file-like object.
Raises:
ParseError: if the file footer cannot be read.
"""
file_offset = self._file_size - 8
data_type_map = self._GetDataTypeMap('rp_log_file_footer')
file_footer, _ = self._ReadStructureFromFileObject(
file_object, file_offset, data_type_map, 'file footer')
if self._debug:
self._DebugPrintFileFooter(file_footer)
def _ReadFileHeader(self, file_object):
"""Reads the file header.
Args:
file_object (file): file-like object.
Raises:
ParseError: if the file header cannot be read.
"""
data_type_map = self._GetDataTypeMap('rp_log_file_header')
file_header, _ = self._ReadStructureFromFileObject(
file_object, 0, data_type_map, 'file header')
if self._debug:
self._DebugPrintFileHeader(file_header)
def ReadFileObject(self, file_object):
"""Reads a Windows Restore Point rp.log file-like object.
Args:
file_object (file): file-like object.
Raises:
ParseError: if the file cannot be read.
"""
self._ReadFileHeader(file_object)
data_size = (self._file_size - 8) - file_object.tell()
data = file_object.read(data_size)
    if self._debug:
      self._DebugPrintData('Unknown1', data)
self._ReadFileFooter(file_object)
```
#### File: dtformats/dtformats/usn_journal.py
```python
import os
from dtformats import data_format
class USNRecords(data_format.BinaryDataFile):
"""USN change journal records."""
# Using a class constant significantly speeds up the time required to load
# the dtFabric definition file.
_FABRIC = data_format.BinaryDataFile.ReadDefinitionFile('usn_journal.yaml')
_DEBUG_INFO_RECORD_V2 = [
('size', 'Size', '_FormatIntegerAsDecimal'),
('major_version', 'Major version', '_FormatIntegerAsDecimal'),
      ('minor_version', 'Minor version', '_FormatIntegerAsDecimal'),
('file_reference', 'File reference', '_FormatIntegerAsHexadecimal8'),
('parent_file_reference', 'Parent file reference',
'_FormatIntegerAsHexadecimal8'),
('timestamp', 'Timestamp', '_FormatIntegerAsFiletime'),
('update_reason_flags', 'Update reason flags',
'_FormatIntegerAsHexadecimal8'),
('update_source_flags', 'Update source flags',
'_FormatIntegerAsHexadecimal8'),
('security_descriptor_entry', 'Security descriptor entry',
'_FormatIntegerAsDecimal'),
('file_attribute_flags', 'File attribute flags',
'_FormatIntegerAsHexadecimal8'),
('name_size', 'Name size', '_FormatIntegerAsDecimal'),
('name_offset', 'Name offset', '_FormatIntegerAsDecimal'),
('name', 'Name', '_FormatString')]
_EMPTY_USN_RECORD_HEADER = bytes([0] * 60)
def _ReadRecordV2(self, file_object):
"""Reads a version 2 USN record.
Args:
file_object (file): file-like object.
Returns:
tuple[usn_record_v2, int]: USN record and number of bytes read.
Raises:
ParseError: if the record cannot be read.
"""
file_offset = file_object.tell()
data_type_map = self._GetDataTypeMap('usn_record_v2')
usn_record, data_size = self._ReadStructureFromFileObject(
file_object, file_offset, data_type_map, 'USN record (version 2)')
if self._debug:
self._DebugPrintStructureObject(usn_record, self._DEBUG_INFO_RECORD_V2)
return usn_record, data_size
def ReadFileObject(self, file_object):
"""Reads a file-like object containing USN change journal records.
Args:
file_object (file): file-like object.
Raises:
ParseError: if the file cannot be read.
"""
self._file_object = file_object
def ReadRecords(self):
"""Reads USN change journal records.
Yields:
usn_record_v2: USN record.
Raises:
ParseError: if a record cannot be read.
"""
self._file_object.seek(0, os.SEEK_SET)
file_offset = 0
while file_offset < self._file_size:
block_size = 4096
if block_size > self._file_size:
block_size = self._file_size
while block_size > 60:
usn_record_header = self._file_object.read(60)
if usn_record_header == self._EMPTY_USN_RECORD_HEADER:
break
self._file_object.seek(-60, os.SEEK_CUR)
usn_record, data_size = self._ReadRecordV2(self._file_object)
yield usn_record
file_offset += data_size
block_size -= data_size
file_offset += block_size
```
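A short, hedged sketch of iterating records with the class above; the journal path is hypothetical and `Open()` is assumed to come from the `BinaryDataFile` base class:
```python
from dtformats import usn_journal

usn_file = usn_journal.USNRecords(debug=False)
usn_file.Open('UsnJrnl_J.raw')  # hypothetical path to an extracted $UsnJrnl:$J stream

for usn_record in usn_file.ReadRecords():
    # Fields follow the usn_record_v2 structure listed in _DEBUG_INFO_RECORD_V2.
    print(usn_record.timestamp, usn_record.name)

usn_file.Close()
```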
#### File: dtformats/scripts/jump_list.py
```python
import argparse
import logging
import sys
import pyolecf
from dtformats import jump_list
from dtformats import output_writers
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
argument_parser = argparse.ArgumentParser(description=(
'Extracts information from Windows Jump List files.'))
argument_parser.add_argument(
'-d', '--debug', dest='debug', action='store_true', default=False,
help='enable debug output.')
argument_parser.add_argument(
'source', nargs='?', action='store', metavar='PATH',
default=None, help='path of the Windows Jump List file.')
options = argument_parser.parse_args()
if not options.source:
print('Source file missing.')
print('')
argument_parser.print_help()
print('')
return False
logging.basicConfig(
level=logging.INFO, format='[%(levelname)s] %(message)s')
output_writer = output_writers.StdoutWriter()
try:
output_writer.Open()
except IOError as exception:
print('Unable to open output writer with error: {0!s}'.format(exception))
print('')
return False
if pyolecf.check_file_signature(options.source):
jump_list_file = jump_list.AutomaticDestinationsFile(
debug=options.debug, output_writer=output_writer)
else:
jump_list_file = jump_list.CustomDestinationsFile(
debug=options.debug, output_writer=output_writer)
jump_list_file.Open(options.source)
print('Windows Jump List information:')
print('Number of entries:\t\t{0:d}'.format(len(jump_list_file.entries)))
print('Number of recovered entries:\t{0:d}'.format(
len(jump_list_file.recovered_entries)))
print('')
for lnk_file_entry in jump_list_file.entries:
print('LNK file entry: {0:s}'.format(lnk_file_entry.identifier))
for shell_item in lnk_file_entry.GetShellItems():
print('Shell item: 0x{0:02x}'.format(shell_item.class_type))
print('')
jump_list_file.Close()
output_writer.Close()
return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
```
#### File: dtformats/scripts/recycle_bin.py
```python
import argparse
import logging
import sys
from dfdatetime import filetime as dfdatetime_filetime
from dtformats import output_writers
from dtformats import recycle_bin
def Main():
"""The main program function.
Returns:
bool: True if successful or False if not.
"""
argument_parser = argparse.ArgumentParser(description=(
'Extracts information from Windows Recycle.Bin metadata ($I) files.'))
argument_parser.add_argument(
'-d', '--debug', dest='debug', action='store_true', default=False,
help='enable debug output.')
argument_parser.add_argument(
'source', nargs='?', action='store', metavar='PATH',
default=None, help='path of the Recycle.Bin metadata ($I) file.')
options = argument_parser.parse_args()
if not options.source:
print('Source file missing.')
print('')
argument_parser.print_help()
print('')
return False
logging.basicConfig(
level=logging.INFO, format='[%(levelname)s] %(message)s')
output_writer = output_writers.StdoutWriter()
try:
output_writer.Open()
except IOError as exception:
print('Unable to open output writer with error: {0!s}'.format(exception))
print('')
return False
metadata_file = recycle_bin.RecycleBinMetadataFile(
debug=options.debug, output_writer=output_writer)
metadata_file.Open(options.source)
print('Recycle.Bin metadata ($I) file information:')
print('\tFormat version\t\t: {0:d}'.format(metadata_file.format_version))
if metadata_file.deletion_time == 0:
date_time_string = 'Not set'
elif metadata_file.deletion_time == 0x7fffffffffffffff:
date_time_string = 'Never'
else:
date_time = dfdatetime_filetime.Filetime(
timestamp=metadata_file.deletion_time)
date_time_string = date_time.CopyToDateTimeString()
if date_time_string:
date_time_string = '{0:s} UTC'.format(date_time_string)
else:
      date_time_string = '0x{0:08x}'.format(metadata_file.deletion_time)
print('\tDeletion time\t\t: {0:s}'.format(date_time_string))
print('\tOriginal filename\t: {0:s}'.format(metadata_file.original_filename))
print('\tOriginal file size\t: {0:d}'.format(
metadata_file.original_file_size))
print('')
metadata_file.Close()
output_writer.Close()
return True
if __name__ == '__main__':
if not Main():
sys.exit(1)
else:
sys.exit(0)
```
#### File: dtformats/tests/prefetch.py
```python
import unittest
from dtformats import prefetch
from tests import test_lib
class PrefetchTest(test_lib.BaseTestCase):
"""Prefetch function tests."""
def testCalculatePrefetchHashXP(self):
"""Tests the CalculatePrefetchHashXP function."""
# Path from Windows XP CMD.EXE-087B4001.pf
path = '\\DEVICE\\HARDDISKVOLUME1\\WINDOWS\\SYSTEM32\\CMD.EXE'
hash_value = prefetch.CalculatePrefetchHashXP(path)
self.assertEqual(hash_value, 0x087b4001)
def testCalculatePrefetchHashVista(self):
"""Tests the CalculatePrefetchHashVista function."""
# Path from Windows Vista NETCFG.EXE-F61A0ADB.pf
path = '\\DEVICE\\HARDDISKVOLUME2\\WINDOWS\\SYSTEM32\\NETCFG.EXE'
hash_value = prefetch.CalculatePrefetchHashVista(path)
self.assertEqual(hash_value, 0xf61a0adb)
# Path from Windows 10 NOTEPAD.EXE-D8414F97.pf
path = '\\VOLUME{01d08edc0cbccaad-3e0d2d25}\\WINDOWS\\SYSTEM32\\NOTEPAD.EXE'
path = '\\DEVICE\\HARDDISKVOLUME2\\WINDOWS\\SYSTEM32\\NOTEPAD.EXE'
hash_value = prefetch.CalculatePrefetchHashVista(path)
self.assertEqual(hash_value, 0xd8414f97)
def testCalculatePrefetchHash2008(self):
"""Tests the CalculatePrefetchHash2008 function."""
# Path from Windows 7 NETCFG.EXE-F61A0ADB.pf
path = '\\DEVICE\\HARDDISKVOLUME2\\WINDOWS\\SYSTEM32\\NETCFG.EXE'
hash_value = prefetch.CalculatePrefetchHash2008(path)
self.assertEqual(hash_value, 0xf61a0adb)
# Path from Windows 8.1 NGEN.EXE-AE594A6B.pf
path = (
'\\DEVICE\\HARDDISKVOLUME2\\WINDOWS\\MICROSOFT.NET\\FRAMEWORK64'
'\\V4.0.30319\\NGEN.EXE')
hash_value = prefetch.CalculatePrefetchHash2008(path)
self.assertEqual(hash_value, 0xae594a6b)
if __name__ == '__main__':
unittest.main()
```
#### File: dtformats/tests/rp_log.py
```python
import unittest
from dtformats import rp_log
from tests import test_lib
class RestorePointLogFileTest(test_lib.BaseTestCase):
"""Windows Restore Point rp.log file tests."""
# pylint: disable=protected-access
def testDebugPrintFileFooter(self):
"""Tests the _DebugPrintFileFooter function."""
output_writer = test_lib.TestOutputWriter()
test_file = rp_log.RestorePointLogFile(output_writer=output_writer)
data_type_map = test_file._GetDataTypeMap('rp_log_file_footer')
file_footer = data_type_map.CreateStructureValues(
creation_time=1)
test_file._DebugPrintFileFooter(file_footer)
def testDebugPrintFileHeader(self):
"""Tests the _DebugPrintFileHeader function."""
output_writer = test_lib.TestOutputWriter()
test_file = rp_log.RestorePointLogFile(output_writer=output_writer)
data_type_map = test_file._GetDataTypeMap('rp_log_file_header')
file_header = data_type_map.CreateStructureValues(
description='Description'.encode('utf-16-le'),
event_type=1,
restore_point_type=2,
sequence_number=3)
test_file._DebugPrintFileHeader(file_header)
def testReadFileFooter(self):
"""Tests the _ReadFileFooter function."""
output_writer = test_lib.TestOutputWriter()
test_file = rp_log.RestorePointLogFile(output_writer=output_writer)
test_file_path = self._GetTestFilePath(['rp.log'])
self._SkipIfPathNotExists(test_file_path)
with open(test_file_path, 'rb') as file_object:
test_file._file_size = 536
test_file._ReadFileFooter(file_object)
def testReadFileHeader(self):
"""Tests the _ReadFileHeader function."""
output_writer = test_lib.TestOutputWriter()
test_file = rp_log.RestorePointLogFile(output_writer=output_writer)
test_file_path = self._GetTestFilePath(['rp.log'])
self._SkipIfPathNotExists(test_file_path)
with open(test_file_path, 'rb') as file_object:
test_file._ReadFileHeader(file_object)
def testReadFileObject(self):
"""Tests the ReadFileObject function."""
output_writer = test_lib.TestOutputWriter()
test_file = rp_log.RestorePointLogFile(
debug=True, output_writer=output_writer)
test_file_path = self._GetTestFilePath(['rp.log'])
self._SkipIfPathNotExists(test_file_path)
test_file.Open(test_file_path)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jleaniz/turbinia",
"score": 2
} |
#### File: workers/analysis/loki.py
```python
import csv
import os
from turbinia import TurbiniaException
from turbinia.evidence import EvidenceState as state
from turbinia.evidence import ReportText
from turbinia.lib import text_formatter as fmt
from turbinia.workers import Priority
from turbinia.workers import TurbiniaTask
class LokiAnalysisTask(TurbiniaTask):
"""Task to use Loki to analyse files."""
REQUIRED_STATES = [
state.ATTACHED, state.MOUNTED, state.CONTAINER_MOUNTED, state.DECOMPRESSED
]
def run(self, evidence, result):
"""Run the Loki worker.
Args:
evidence (Evidence object): The evidence to process
result (TurbiniaTaskResult): The object to place task results into.
Returns:
TurbiniaTaskResult object.
"""
# Where to store the resulting output file.
output_file_name = 'loki_analysis.txt'
output_file_path = os.path.join(self.output_dir, output_file_name)
# What type of evidence we should output.
output_evidence = ReportText(source_path=output_file_path)
try:
(report, priority, summary) = self.runLoki(result, evidence)
except TurbiniaException as e:
result.close(
self, success=False, status='Unable to run Loki: {0:s}'.format(
str(e)))
return result
output_evidence.text_data = report
result.report_priority = priority
result.report_data = report
# Write the report to the output file.
with open(output_file_path, 'wb') as fh:
fh.write(output_evidence.text_data.encode('utf-8'))
# Add the resulting evidence to the result object.
result.add_evidence(output_evidence, evidence.config)
result.close(self, success=True, status=summary)
return result
def runLoki(self, result, evidence):
log_file = os.path.join(self.output_dir, 'loki.log')
stdout_file = os.path.join(self.output_dir, 'loki_stdout.log')
stderr_file = os.path.join(self.output_dir, 'loki_stderr.log')
cmd = [
'sudo', 'python', '/opt/loki/loki.py', '-w', '0', '--csv', '--intense',
'--noprocscan', '--dontwait', '--noindicator', '--nolevcheck',
'--nolisten', '-l', log_file, '-p', evidence.local_path
]
(ret, result) = self.execute(
cmd, result, log_files=[log_file], stdout_file=stdout_file,
stderr_file=stderr_file, cwd='/opt/loki/')
if ret != 0:
raise TurbiniaException('Return code: {0:d}'.format(ret))
report = []
summary = 'No Loki threats found'
priority = Priority.LOW
report_lines = []
with open(stdout_file, 'r') as loki_report_csv:
lokireader = csv.DictReader(
loki_report_csv, fieldnames=['Time', 'Hostname', 'Level', 'Log'])
for row in lokireader:
if row['Level'] == 'ALERT':
report_lines.append(row['Log'])
if report_lines:
priority = Priority.HIGH
summary = 'Loki analysis found {0:d} alert(s)'.format(len(report_lines))
report.insert(0, fmt.heading4(fmt.bold(summary)))
      line = '{0:n} alert(s) found:'.format(len(report_lines))
report.append(fmt.bullet(fmt.bold(line)))
for line in report_lines:
report.append(fmt.bullet(line, level=2))
report = '\n'.join(report)
return (report, priority, summary)
```
#### File: workers/analysis/postgresql_acct_test.py
```python
import os
import unittest
from turbinia import config
from turbinia.workers.analysis import postgresql_acct
from turbinia.workers.workers_test import TestTurbiniaTaskBase
class PostgresAcctAnalysisTaskTest(TestTurbiniaTaskBase):
"""Tests for PostgresAcctAnalysisTask Task."""
TEST_DATA_DIR = None
EXPECTED_CREDENTIALS = {'<KEY>': 'postgres'}
POSTGRES_REPORT = """#### **PostgreSQL analysis found 1 weak password(s)**
* **1 weak password(s) found:**
* User 'postgres' with password 'password'"""
def setUp(self):
super(PostgresAcctAnalysisTaskTest, self).setUp()
self.setResults(mock_run=False)
filedir = os.path.dirname(os.path.realpath(__file__))
self.TEST_DATA_DIR = os.path.join(filedir, '..', '..', '..', 'test_data')
self.evidence.local_path = self.TEST_DATA_DIR
def test_extract_data_dir(self):
"""Tests the _extract_data_dir method."""
config.LoadConfig()
task = postgresql_acct.PostgresAccountAnalysisTask()
# pylint: disable=protected-access
data_dirs = task._extract_data_dir(self.TEST_DATA_DIR, self.result)
self.assertEqual(len(data_dirs), 1)
self.assertEqual(data_dirs, ['test_data'])
def test_extract_creds(self):
"""Tests the _extract_creds method."""
config.LoadConfig()
task = postgresql_acct.PostgresAccountAnalysisTask()
# pylint: disable=protected-access
hashes = task._extract_creds(['/database'], self.evidence)
self.assertDictEqual(hashes, self.EXPECTED_CREDENTIALS)
def test_analyse_postgres_creds(self):
"""Tests the _analyse_postegres_creds method."""
config.LoadConfig()
task = postgresql_acct.PostgresAccountAnalysisTask()
(report, priority, summary) = task._analyse_postgres_creds(
self.EXPECTED_CREDENTIALS)
self.assertEqual(report, self.POSTGRES_REPORT)
self.assertEqual(priority, 10)
self.assertEqual(summary, 'PostgreSQL analysis found 1 weak password(s)')
``` |
{
"source": "jleapeMIT/danceable",
"score": 3
} |
#### File: danceable/CNN/imageFilesTools.py
```python
from PIL import Image
import numpy as np
#Returns numpy image at size imageSize*imageSize
def getProcessedData(img,imageSize):
img = img.resize((imageSize,imageSize), resample=Image.ANTIALIAS)
imgData = np.asarray(img, dtype=np.uint8).reshape(imageSize,imageSize,1)
imgData = imgData/255.
return imgData
#Returns numpy image at size imageSize*imageSize
def getImageData(filename,imageSize):
img = Image.open(filename)
imgData = getProcessedData(img, imageSize)
return imgData
```
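A small sketch of how the helper above might be called; the file name is hypothetical and is assumed to be a single-channel (grayscale) image so the `(imageSize, imageSize, 1)` reshape succeeds:
```python
from imageFilesTools import getImageData

IMAGE_SIZE = 128
img_data = getImageData('spectrogram_slice.png', IMAGE_SIZE)  # hypothetical file

# The helper returns a float array in [0, 1] shaped (IMAGE_SIZE, IMAGE_SIZE, 1).
print(img_data.shape, img_data.min(), img_data.max())
```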
#### File: danceable/CNN/model.py
```python
import numpy as np
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
def createModel(nbClasses,imageSize):
# print("[+] Creating model...")
convnet = input_data(shape=[None, imageSize, imageSize, 1], name='input')
convnet = conv_2d(convnet, 64, 2, activation='elu', weights_init="Xavier")
convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 128, 2, activation='elu', weights_init="Xavier")
convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 256, 2, activation='elu', weights_init="Xavier")
convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 512, 2, activation='elu', weights_init="Xavier")
convnet = max_pool_2d(convnet, 2)
convnet = fully_connected(convnet, 1024, activation='elu')
convnet = dropout(convnet, 0.5)
convnet = fully_connected(convnet, nbClasses, activation='softmax')
convnet = regression(convnet, optimizer='rmsprop', loss='categorical_crossentropy')
model = tflearn.DNN(convnet)
# print(" Model created! ✅")
return model
``` |
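A hedged training sketch for the network factory above, using random placeholder data; the class count, image size and hyperparameters are illustrative, not values taken from this project:
```python
import numpy as np
from model import createModel

NB_CLASSES = 4    # hypothetical number of genres
IMAGE_SIZE = 128  # must match the slice size used when building the dataset

model = createModel(NB_CLASSES, IMAGE_SIZE)

# Random stand-ins for spectrogram slices and one-hot labels.
X = np.random.rand(32, IMAGE_SIZE, IMAGE_SIZE, 1)
Y = np.eye(NB_CLASSES)[np.random.randint(0, NB_CLASSES, 32)]

model.fit(X, Y, n_epoch=1, batch_size=8, shuffle=True)
predictions = model.predict(X[:1])
```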
{
"source": "jlebar/llvm-port-commits",
"score": 2
} |
#### File: jlebar/llvm-port-commits/filter.py
```python
from __future__ import print_function
import argparse
import os
import subprocess
import sys
import time
OLD_UPSTREAM='old-clang/master'
NEW_UPSTREAM='origin/master'
CACHE_DIR='/tmp/filter-cache'
CACHE_ENABLED=True
try:
import pyfscache
except ImportError:
CACHE_ENABLED=False
def cache_decorator(name):
def wrapper(fn):
if CACHE_ENABLED:
CACHE = pyfscache.FSCache('/tmp/filter-branch-cache/%s' % name)
return CACHE(fn)
return fn
return wrapper
def time_decorator(fn):
def closure(*args, **kw):
start = time.time()
ret = fn(*args, **kw)
end = time.time()
#print('%r(%r, %r)=%r took: %2.4fs' % (fn.__name__, args, kw, ret, end - start),
# file=sys.stderr)
return ret
return closure
@cache_decorator('should_edit')
@time_decorator
def should_edit(rev):
"""Determines whether rev is not in OLD_UPSTREAM and thus should be filtered."""
# If rev is in OLD_UPSTREAM, `git merge-base rev OLD_UPSTREAM` will be rev.
merge_base = subprocess.check_output(['git', 'merge-base', rev, OLD_UPSTREAM]).strip()
return merge_base != rev
@cache_decorator('map_to_new_upstream')
@time_decorator
def map_to_new_upstream(rev):
"""Finds a commit in NEW_UPSTREAM corresponding to this commit.
If we can't find a corresponding commit,
- if rev is in OLD_UPSTREAM, raises an error;
- otherwise, returns None.
Identifies commits according to the tuple (commit time, author email). This
is sufficient to uniquely identify a commit in our monorepo.
"""
date, email = subprocess.check_output(['git', 'show', '-s', '--format=%at %ae', rev]).strip().split(' ', 1)
new_upstream_revs = subprocess.check_output(
['git', 'log', '--author', email, '--since', date, '--until', str(int(date) + 1), '--pretty=%H', NEW_UPSTREAM]
).strip().split('\n')
if len(new_upstream_revs) > 1:
raise RuntimeError('Revision %s has more than one corresponding rev in new '
'upstream, %s. Maybe (author email, date) is not unique?'
% (rev, NEW_UPSTREAM))
if not new_upstream_revs or not new_upstream_revs[0]:
# Check if rev is in OLD_UPSTREAM. If so, it's an error that we couldn't
    # map it to NEW_UPSTREAM. Otherwise, we can return None.
in_old_upstream = subprocess.check_output(
['git', 'merge-base', rev, OLD_UPSTREAM]).strip() == rev
if in_old_upstream:
raise RuntimeError("Couldn't map_to_new_upstream revision %s from %s to %s."
% (rev, OLD_UPSTREAM, NEW_UPSTREAM))
return None
return new_upstream_revs[0]
def map_to_filtered(rev):
"""Gets hash of rev after filtering.
If rev hasn't been filtered (yet), returns None.
Equivalent to the `map` function exposed by git-filter-branch, except that
function returns rev if the revision hasn't yet been filtered, and that this
function raises an error if rev maps to multiple commits.
"""
#if not workdir:
# raise RuntimeError("workdir environment variable is empty?")
mapfile = '../map/%s' % rev
try:
with open(mapfile, 'r') as f:
lines = f.read().strip().split('\n')
if len(lines) != 1:
raise RuntimeError("mapfile %s doesn't contain a single line: %s" % (mapfile, str(lines)))
return lines[0]
except IOError:
return None
# See http://stackoverflow.com/questions/23841111/change-file-name-case-using-git-filter-branch
@time_decorator
def filter_index(rev):
if not should_edit(rev):
return
  # Try to find a parent rev that's in OLD_UPSTREAM. If so, map it to
  # NEW_UPSTREAM and use that as our parent below. Otherwise, pick the
# first parent; it's as good as any other.
parents = subprocess.check_output(
['git', 'rev-list', '--parents', '-n', '1', rev]).strip().split()[1:]
parent_rev = parents[0]
for tp in (map_to_new_upstream(p) for p in parents):
if tp is None:
continue
parent_rev = tp
break
# If our parent has already been through git-filter-branch, use the filtered
# parent.
filtered_parent_rev = map_to_filtered(parent_rev)
if filtered_parent_rev:
parent_rev = filtered_parent_rev
index_file = os.environ['GIT_INDEX_FILE']
new_index_file = index_file + '.new'
# TODO: Parameterize this
sh_cmd = r"""
cat <(git ls-tree -r %s | sed -e $'s:\t:\tclang/:') \
<(git ls-tree -r %s | grep -v $'\tclang/') | \
GIT_INDEX_FILE=%s git update-index --index-info
""" % (rev, parent_rev, new_index_file)
subprocess.check_call(['bash', '-c', sh_cmd])
os.rename(new_index_file, index_file)
@time_decorator
def filter_parent(rev):
if not should_edit(rev):
print(sys.stdin.read())
return
new_parents = []
# -p rev1 -p rev2 -p rev3
for p in sys.stdin.read().strip().split():
if p == '-p':
new_parents.append(p)
continue
tp = map_to_new_upstream(p)
if tp:
new_parents.append(tp)
else:
new_parents.append(p)
print(' '.join(new_parents))
if __name__ == '__main__':
rev = os.environ['GIT_COMMIT']
cmd = sys.argv[1] if len(sys.argv) > 1 else None
if cmd == 'index':
filter_index(rev)
elif cmd == 'parent':
filter_parent(rev)
else:
print('Usage: %s index|parent' % sys.argv[0])
sys.exit(1)
``` |
{
"source": "jleben/datavis",
"score": 3
} |
#### File: datavis/test/generate_test_data.py
```python
import math
import random
import argparse
def circle(out, centre, radius, resolution = 1000):
for i in range(0, resolution):
angle = i / resolution * 2 * math.pi
        y = math.sin(angle) * radius + centre[1]
        x = math.cos(angle) * radius + centre[0]
out.write("{} {}\n".format(x, y))
def circles():
radius_range = (5,10)
x_range = (-1000, 1000)
y_range = (-1000, 1000)
count = 1000
resolution = 1000
out = open('circles.txt', 'w')
for i in range(0, count):
x = random.uniform(x_range[0], x_range[1])
y = random.uniform(y_range[0], y_range[1])
r = random.uniform(radius_range[0], radius_range[1])
circle(out, (x,y), r, resolution)
def line(out, x1, y1, x2, y2, resolution = 1000):
step = 1/resolution
for i in range(0, resolution):
z = i * step
x = z * x1 + (1-z) * x2
y = z * y1 + (1-z) * y2
out.write("{} {}\n".format(x, y))
def lines():
x_range = (-1000, 1000)
y_range = (-1000, 1000)
max_len = 100
count = 1000
resolution = 1000
def rand(rng):
return random.uniform(rng[0], rng[1])
out = open('lines.txt', 'w')
for i in range(0, count):
x1 = rand(x_range)
y1 = rand(y_range)
x2 = x1 + rand((-max_len, +max_len))
y2 = y1 + rand((-max_len, +max_len))
line(out, x1, y1, x2, y2, resolution)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('mode', default='lines', nargs='?')
args = parser.parse_args()
if args.mode == 'lines':
lines()
elif args.mode == 'circles':
circles()
else:
print("ERROR: Unknown mode: " + args.mode)
main()
``` |
{
"source": "jlebensold/flrl-ddpg",
"score": 2
} |
#### File: flrl-ddpg/src/ddpg_round.py
```python
import random
import gym
from gym.envs.registration import register as gym_register
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from .replay_buffer import ReplayBuffer
from .ou_noise import OUNoise
from .networks import PolicyNetwork, ValueNetwork
from .normalized_actions import NormalizedActions
from .envs.gravity_pendulum import GravityPendulum
from .envs.force_mountain_car_continuous import PositionContinuous_MountainCar
envs = {
'MountainCarContinuous' : {
'env': PositionContinuous_MountainCar,
'state_dim': 2,
'max_steps': 300,
'value_lr': 1e-3,
'policy_lr': 1e-4,
'tau': 1.,
'batch_size':64,
},
'GravityPendulum' : {
'env': GravityPendulum,
'state_dim': 3,
'max_steps': 200,
'value_lr': 1e-3,
'policy_lr': 1e-4,
'tau': 1e-2,
'batch_size':128,
}
}
ACTION_DIM = 1
HIDDEN_DIM = 256
class DDPGRound():
@classmethod
def defaults(cls):
return dict(
seed=1,
device=torch.device("cpu"),
num_episodes=1,
max_frames=200,
algo='DDPG',
id="ID"
)
@classmethod
def build_policy_network(cls, env_name):
state_dim = envs[env_name]['state_dim']
return PolicyNetwork(state_dim, ACTION_DIM, HIDDEN_DIM)
def __init__(self, params):
params = dict(params)
self.env_param = params['env_param']
self.device = params['device']
self.max_frames = params['max_frames']
self.num_episodes = params['num_episodes']
self.id = params['id']
self.total_frames = 0
self.seed = params['seed']
self.env_name = params['env']
# normally taken from the env, but since we're adjusting
# environments after construction, it's easier to hard-code for now
self.max_steps = envs[self.env_name]['max_steps']
self.batch_size = envs[self.env_name]['batch_size']
state_dim = envs[self.env_name]['state_dim']
self.value_net = ValueNetwork(state_dim, ACTION_DIM, HIDDEN_DIM).to(self.device)
self.policy_net = PolicyNetwork(state_dim, ACTION_DIM, HIDDEN_DIM).to(self.device)
self.target_value_net = ValueNetwork(state_dim, ACTION_DIM, HIDDEN_DIM).to(self.device)
self.target_policy_net = PolicyNetwork(state_dim, ACTION_DIM, HIDDEN_DIM).to(self.device)
for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()):
target_param.data.copy_(param.data)
for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()):
target_param.data.copy_(param.data)
value_lr = envs[self.env_name]['value_lr']
policy_lr = envs[self.env_name]['policy_lr']
self.value_optimizer = optim.Adam(self.value_net.parameters(), lr=value_lr)
self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=policy_lr)
self.value_criterion = nn.MSELoss()
replay_buffer_size = 1000000
self.replay_buffer = ReplayBuffer(replay_buffer_size)
def env_factory(self):
return envs[self.env_name]['env'](self.env_param)
def setup(self):
env_name = f'{self.env_name}-{self.id}-v0'
gym_register(id=env_name,entry_point=self.env_factory, max_episode_steps=200,)
env = gym.make(env_name)
self.env = NormalizedActions(env)
self.set_seed(self.seed)
self.ou_noise = OUNoise(self.env.action_space)
def set_seed(self, seed):
torch.manual_seed(seed)
np.random.seed(seed)
self.env.seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def ddpg_update(self, batch_size,
gamma = 0.99,
min_value=-np.inf,
max_value=np.inf):
soft_tau = envs[self.env_name]['tau']
state, action, reward, next_state, done = self.replay_buffer.sample(batch_size)
state = torch.FloatTensor(state).to(self.device)
next_state = torch.FloatTensor(next_state).to(self.device)
action = torch.FloatTensor(action).to(self.device)
reward = torch.FloatTensor(reward).unsqueeze(1).to(self.device)
done = torch.FloatTensor(np.float32(done)).unsqueeze(1).to(self.device)
self.policy_loss = self.value_net(state, self.policy_net(state))
self.policy_loss = -self.policy_loss.mean()
next_action = self.target_policy_net(next_state)
target_value = self.target_value_net(next_state, next_action.detach())
expected_value = reward + (1.0 - done) * gamma * target_value
expected_value = torch.clamp(expected_value, min_value, max_value)
value = self.value_net(state, action)
self.value_loss = self.value_criterion(value, expected_value.detach())
self.policy_optimizer.zero_grad()
self.policy_loss.backward()
self.policy_optimizer.step()
self.value_optimizer.zero_grad()
self.value_loss.backward()
self.value_optimizer.step()
for target_param, param in zip(self.target_value_net.parameters(), self.value_net.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - soft_tau) + param.data * soft_tau
)
for target_param, param in zip(self.target_policy_net.parameters(), self.policy_net.parameters()):
target_param.data.copy_(
target_param.data * (1.0 - soft_tau) + param.data * soft_tau
)
def get_action(self, state):
state = torch.FloatTensor(state).unsqueeze(0).to(self.device)
action = self.policy_net.forward(state)
return action.detach().cpu().numpy()[0, 0]
def run(self):
self.setup()
rewards = []
frame_idx= 0
cur_episode = 0
for i_episode in range(self.num_episodes):
cur_episode += 1
state = self.env.reset()
self.ou_noise.reset()
episode_reward = 0
for step in range(self.max_steps):
action = self.get_action(state)
action = self.ou_noise.get_action(action, step)
next_state, reward, done, _ = self.env.step(action)
self.replay_buffer.push(state, action, reward, next_state, done)
if len(self.replay_buffer) > self.batch_size:
self.ddpg_update(self.batch_size)
state = next_state
episode_reward += reward
frame_idx += 1
self.total_frames += 1
if done:
print(f'[{self.id}] - Episode {i_episode}: {episode_reward}')
break
rewards.append(episode_reward)
return rewards, self.total_frames
```
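A minimal sketch of driving one round end to end; the import path assumes running from the repository root, `env_param` is forwarded to the environment constructor (for `GravityPendulum` it is assumed to set the gravity value), and the `id` string is only used to register a unique gym environment name:
```python
import torch
from src.ddpg_round import DDPGRound

params = DDPGRound.defaults()
params.update({
    'env': 'GravityPendulum',
    'env_param': 10.0,      # assumed gravity value
    'num_episodes': 2,
    'id': 'worker-0',
    'device': torch.device('cpu'),
})

round_runner = DDPGRound(params)
rewards, total_frames = round_runner.run()
print(rewards, total_frames)
```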
#### File: flrl-ddpg/src/replay_memory.py
```python
import random
class ReplayMemory(object):
""" Used by DQN (e.g. Distral) """
def __init__(self, capacity, policy_capacity=0):
self.capacity = capacity
self.buffer = []
self.position = 0
self.policy_capacity = policy_capacity
self.policy_buffer = []
self.policy_position = 0
def push(self, state, action, next_state, reward, time=None):
"""Saves a transition."""
if len(self.buffer) < self.capacity:
self.buffer.append(None)
if time is None:
self.buffer[self.position] = (state, action, next_state, reward)
else:
self.buffer[self.position] = (state, action, next_state, reward, time)
self.position = (self.position + 1) % self.capacity
if self.policy_capacity == 0:
return
if len(self.policy_buffer) < self.policy_capacity:
self.policy_buffer.append(None)
if time is None:
self.policy_buffer[self.policy_position] = (state, action, next_state, reward)
else:
self.policy_buffer[self.policy_position] = (state, action, next_state, reward, time)
self.policy_position = (self.policy_position + 1) % self.policy_capacity
def sample(self, batch_size):
return random.sample(self.buffer, batch_size)
def policy_sample(self, batch_size):
return random.sample(self.policy_buffer, batch_size)
def __len__(self):
return len(self.buffer)
``` |
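A toy walk-through of the buffer above; the transition values are placeholders and the import path assumes the repository root:
```python
from src.replay_memory import ReplayMemory

memory = ReplayMemory(capacity=1000, policy_capacity=100)
for step in range(5):
    memory.push(state=[0.0, step], action=0, next_state=[0.0, step + 1], reward=1.0)

batch = memory.sample(batch_size=2)
policy_batch = memory.policy_sample(batch_size=2)
print(len(memory), len(batch), len(policy_batch))
```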
{
"source": "jlebon/packit",
"score": 2
} |
#### File: packit/cli/watch_fedora_ci.py
```python
import logging
import click
from packit.api import PackitAPI
from packit.cli.utils import cover_packit_exception
from packit.config import get_context_settings, pass_config
logger = logging.getLogger(__name__)
@click.command("watch-fedora-ci", context_settings=get_context_settings())
@click.argument("message_id", nargs=-1, required=False)
@pass_config
@cover_packit_exception
def watcher(config, message_id):
"""
Watch for flags on PRs: try to process those which we know mapping for
:return: int, retcode
"""
api = PackitAPI(config)
if message_id:
for msg_id in message_id:
fedmsg_dict = api.fetch_fedmsg_dict(msg_id)
api.process_ci_result(fedmsg_dict)
return
else:
api.keep_fwding_ci_results()
```
#### File: packit/packit/jobs.py
```python
import logging
from typing import List, Optional, Tuple, Dict, Type
from ogr.abstract import GitProject
from ogr.services.github import GithubProject, GithubService
from packit.api import PackitAPI
from packit.config import JobConfig, JobTriggerType, JobType, PackageConfig, Config
from packit.config import get_packit_config_from_repo
from packit.local_project import LocalProject
from packit.utils import nested_get
logger = logging.getLogger(__name__)
JOB_NAME_HANDLER_MAPPING: Dict[JobType, Type["JobHandler"]] = {}
def add_to_mapping(kls: Type["JobHandler"]):
JOB_NAME_HANDLER_MAPPING[kls.name] = kls
return kls
class SteveJobs:
"""
Steve makes sure all the jobs are done with precision.
"""
def __init__(self, config: Config):
self.config = config
self._github_service = None
@property
def github_service(self):
if self._github_service is None:
self._github_service = GithubService(token=self.config.github_token)
return self._github_service
def get_package_config_from_github_release(
self, event: dict
) -> Optional[Tuple[JobTriggerType, PackageConfig, GitProject]]:
""" look into the provided event and see if it's one for a published github release """
action = nested_get(event, "action")
logger.debug(f"action = {action}")
release = nested_get(event, "release")
if action == "published" and release:
repo_namespace = nested_get(event, "repository", "owner", "login")
repo_name = nested_get(event, "repository", "name")
if not (repo_namespace and repo_name):
logger.warning(
"We could not figure out the full name of the repository."
)
return None
release_ref = nested_get(event, "release", "tag_name")
if not release_ref:
logger.warning("Release tag name is not set.")
return None
logger.info(
f"New release event {release_ref} for repo {repo_namespace}/{repo_name}."
)
gh_proj = GithubProject(
repo=repo_name, namespace=repo_namespace, service=self.github_service
)
package_config = get_packit_config_from_repo(gh_proj, release_ref)
return JobTriggerType.release, package_config, gh_proj
return None
def parse_event(
self, event: dict
) -> Optional[Tuple[JobTriggerType, PackageConfig, GitProject]]:
"""
When a new event arrives, we need to figure out if we are able to process it.
:param event: webhook payload or fedmsg
"""
if event:
# Once we'll start processing multiple events from different sources,
# we should probably break this method down and move it to handlers or JobTrigger
# github webhooks
            response = self.get_package_config_from_github_release(event)
            if response:
                return response
# TODO: pull requests
return None
def process_jobs(
self,
trigger: JobTriggerType,
package_config: PackageConfig,
event: dict,
project: GitProject,
):
for job in package_config.jobs:
if trigger == job.trigger:
handler_kls = JOB_NAME_HANDLER_MAPPING.get(job.job, None)
if not handler_kls:
logger.warning(f"There is no handler for job {job}")
continue
handler = handler_kls(self.config, package_config, event, project, job)
handler.run()
def process_message(self, event: dict):
""" this is the entrypoint """
response = self.parse_event(event)
if not response:
logger.debug("We don't process this event")
return
trigger, package_config, project = response
if not all([trigger, package_config, project]):
logger.debug("This project is not using packit.")
return
self.process_jobs(trigger, package_config, event, project)
class JobHandler:
""" generic interface to handle different type of inputs """
name: JobType
triggers: List[JobTriggerType]
def __init__(
self,
config: Config,
package_config: PackageConfig,
event: dict,
project: GitProject,
job: JobConfig,
):
self.config: Config = config
self.project: GitProject = project
self.package_config: PackageConfig = package_config
self.event: dict = event
self.job: JobConfig = job
def run(self):
raise NotImplementedError("This should have been implemented.")
@add_to_mapping
class GithubReleaseHandler(JobHandler):
name = JobType.propose_downstream
triggers = [JobTriggerType.release]
def run(self):
"""
Sync the upstream release to dist-git as a pull request.
"""
version = self.event["release"]["tag_name"]
https_url = self.event["repository"]["html_url"]
local_project = LocalProject(git_project=self.project)
self.package_config.upstream_project_url = https_url
api = PackitAPI(self.config, self.package_config, local_project)
api.sync_release(
dist_git_branch=self.job.metadata.get("dist-git-branch", "master"),
version=version,
)
```
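A hedged sketch of feeding a GitHub release webhook payload to `SteveJobs`; the payload below only carries the keys read by `get_package_config_from_github_release()`, the repository names are hypothetical, and `Config()` is assumed to provide a valid GitHub token since processing fetches the remote packit config:
```python
from packit.config import Config
from packit.jobs import SteveJobs

event = {
    "action": "published",
    "release": {"tag_name": "0.1.0"},
    "repository": {
        "name": "hello",                      # hypothetical repository
        "owner": {"login": "example-user"},
        "html_url": "https://github.com/example-user/hello",
    },
}

steve = SteveJobs(Config())
steve.process_message(event)
```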
#### File: packit/packit/sync.py
```python
import glob
import logging
import os
import shutil
from typing import List, NamedTuple, Union
from packit.exceptions import PackitException
logger = logging.getLogger(__name__)
class SyncFilesItem(NamedTuple):
src: Union[str, List[str]]
dest: str
def __repr__(self):
return f"SyncFilesItem(src={self.src}, dest={self.dest})"
def __eq__(self, other: object) -> bool:
if not isinstance(other, SyncFilesItem):
raise NotImplementedError()
return self.src == other.src and self.dest == other.dest
class RawSyncFilesItem(SyncFilesItem):
src: str
dest: str
def get_files_from_wildcard(
file_wildcard: str, destination: str
) -> List[RawSyncFilesItem]:
"""
Get list of SyncFilesItem that match the wildcard.
:param file_wildcard: - if ends with '/' we add all files of that directory
- if contains '*', we use glob.glob to get matches
:param destination: used to create RawSyncFilesItem instances
:return: list of matching RawSyncFilesItem instances
"""
if "*" not in file_wildcard:
if file_wildcard.endswith("/"):
file_wildcard = f"{file_wildcard}*"
else:
return [RawSyncFilesItem(src=file_wildcard, dest=destination)]
globed_files = glob.glob(file_wildcard)
return [RawSyncFilesItem(src=file, dest=destination) for file in globed_files]
def get_raw_files(file_to_sync: SyncFilesItem) -> List[RawSyncFilesItem]:
"""
Split the SyncFilesItem with src as a list or wildcard to multiple instances.
Destination is used from the original SyncFilesItem.
:param file_to_sync: SyncFilesItem to split
:return: [RawSyncFilesItem]
"""
source = file_to_sync.src
if not isinstance(source, list):
source = [source]
files_to_sync: List[RawSyncFilesItem] = []
for file in source:
files_to_sync += get_files_from_wildcard(
file_wildcard=file, destination=file_to_sync.dest
)
return files_to_sync
def sync_files(
files_to_sync: List[RawSyncFilesItem], src_working_dir: str, dest_working_dir: str
) -> None:
"""
Sync required files from upstream to downstream.
"""
logger.debug(f"Copy synced files {files_to_sync}")
for fi in files_to_sync:
        # Build the destination path inside the downstream working directory.
dest_dir = os.path.join(dest_working_dir, fi.dest)
logger.debug(f"Destination {dest_dir}")
# Sync all source file
src_file = os.path.join(src_working_dir, fi.src)
logger.debug(f"Source file {src_file}")
if os.path.exists(src_file):
logger.info(f"Syncing {src_file}")
shutil.copy2(src_file, dest_dir)
else:
raise PackitException(
f"File {src_file} is not present in the upstream repository. "
)
```
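A small sketch of expanding a `SyncFilesItem` into concrete copy operations with the helpers above; the patterns and working directories are hypothetical, and `sync_files()` raises `PackitException` if a source file is missing:
```python
from packit.sync import SyncFilesItem, get_raw_files, sync_files

item = SyncFilesItem(src=["*.spec", "packit.yaml"], dest=".")
raw_items = get_raw_files(item)  # wildcards are expanded with glob.glob()

sync_files(raw_items, src_working_dir="upstream/", dest_working_dir="dist-git/")
```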
#### File: tests/unit/test_sync.py
```python
import glob
import pytest
from flexmock import flexmock
from packit.config import SyncFilesConfig
from packit.sync import get_files_from_wildcard, get_raw_files, SyncFilesItem
@pytest.mark.parametrize(
"file,glob_files,result",
[
("file", "file", [SyncFilesItem(src="file", dest="dest")]),
(
"file/",
["file/a", "file/b"],
[
SyncFilesItem(src="file/a", dest="dest"),
SyncFilesItem(src="file/b", dest="dest"),
],
),
(
"*.md",
["a.md", "b.md"],
[
SyncFilesItem(src="a.md", dest="dest"),
SyncFilesItem(src="b.md", dest="dest"),
],
),
],
)
def test_get_files_from_wildcard(file, glob_files, result):
flexmock(glob, glob=lambda x: glob_files)
files = get_files_from_wildcard(file_wildcard=file, destination="dest")
assert files == result
@pytest.mark.parametrize(
"file,glob_files,result",
[
(
SyncFilesItem(src="file", dest="dest"),
None,
[SyncFilesItem(src="file", dest="dest")],
),
(
SyncFilesItem(src=["file"], dest="dest"),
None,
[SyncFilesItem(src="file", dest="dest")],
),
(
SyncFilesItem(src=["file1", "file2"], dest="dest"),
None,
[
SyncFilesItem(src="file1", dest="dest"),
SyncFilesItem(src="file2", dest="dest"),
],
),
(
SyncFilesItem(src="file/", dest="dest"),
["file/a", "file/b"],
[
SyncFilesItem(src="file/a", dest="dest"),
SyncFilesItem(src="file/b", dest="dest"),
],
),
(
SyncFilesItem(src=["*.md"], dest="dest"),
["a.md", "b.md"],
[
SyncFilesItem(src="a.md", dest="dest"),
SyncFilesItem(src="b.md", dest="dest"),
],
),
],
)
def test_get_raw_files(file, glob_files, result):
if glob_files is not None:
flexmock(glob, glob=lambda x: glob_files)
files = get_raw_files(file_to_sync=file)
assert files == result
@pytest.mark.parametrize(
"files,result",
[
([], []),
(
[SyncFilesItem(src="file", dest="dest")],
[SyncFilesItem(src="file", dest="dest")],
),
(
[
SyncFilesItem(src="file1", dest="dest"),
SyncFilesItem(src="file2", dest="dest"),
],
[
SyncFilesItem(src="file1", dest="dest"),
SyncFilesItem(src="file2", dest="dest"),
],
),
(
[SyncFilesItem(src=["file1", "file2"], dest="dest")],
[
SyncFilesItem(src="file1", dest="dest"),
SyncFilesItem(src="file2", dest="dest"),
],
),
],
)
def test_raw_files_to_sync(files, result):
files_to_sync = SyncFilesConfig(files_to_sync=files).raw_files_to_sync
assert files_to_sync == result
``` |
{
"source": "jlebunetel/agile",
"score": 2
} |
#### File: apps/core/admin.py
```python
from django.contrib import admin
from django.contrib.sites.models import Site
from django.utils.translation import ugettext, ugettext_lazy as _
from modeltranslation.admin import TranslationAdmin
from simple_history.admin import SimpleHistoryAdmin
from core.models import SiteCustomization
from core.models import BaseModel
class BaseModelMixin(object):
def get_fieldsets(self, request, obj=None):
fieldsets_base = (
(
_("generic fields"),
{
"classes": ("collapse",),
"fields": (
# "id",
"created_at",
"created_by",
"changed_at",
"changed_by",
),
},
),
(
_("common editable fields"),
{
"fields": (
"owner",
"metadata",
),
},
),
)
if self.fields:
return fieldsets_base + ((_("specific fields"), {"fields": self.fields}),)
return fieldsets_base
def get_readonly_fields(self, request, obj=None):
readonly_fields_base = (
"id",
"created_at",
"created_by",
"changed_at",
"changed_by",
)
if obj:
return self.readonly_fields + readonly_fields_base
else:
return self.readonly_fields + readonly_fields_base + ("owner",)
def get_list_display(self, request):
list_display_base = (
"created_at",
"created_by",
"changed_at",
"changed_by",
)
return self.list_display + list_display_base
def get_search_fields(self, request):
return self.search_fields + ("id",)
def save_model(self, request, obj, form, change):
if not change:
obj.owner = request.user
super().save_model(request, obj, form, change)
def save_related(self, request, form, formsets, change):
"""Méthode appellée lorsqu'un objet est créé via un formulaire inline"""
for formset in formsets:
if issubclass(formset.model, BaseModel):
instances = formset.save(commit=False)
for added_obj in formset.new_objects:
added_obj.owner = request.user
for deleted_obj in formset.deleted_objects:
pass
super(BaseModelMixin, self).save_related(request, form, formsets, change)
class ReadOnlyModelMixin(object):
def has_add_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class SiteAdmin(SimpleHistoryAdmin):
pass
admin.site.unregister(Site)
admin.site.register(Site, SiteAdmin)
class SiteCustomizationAdmin(TranslationAdmin, SimpleHistoryAdmin):
pass
admin.site.register(SiteCustomization, SiteCustomizationAdmin)
```
#### File: core/models/site.py
```python
from django.conf import settings
from django.contrib.sites.models import Site
from django.db import models
from django.utils.translation import ugettext, ugettext_lazy as _
class SiteCustomization(models.Model):
# history = HistoricalRecords() # done in translation.py
site = models.OneToOneField(
Site,
on_delete=models.CASCADE, # if Site is deleted, SiteCustomization will also be deleted!
primary_key=True,
verbose_name=_("site"),
)
is_open_for_signup = models.BooleanField(
default=True, verbose_name=_("is open for signup")
)
tagline = models.CharField( # [i18n]
blank=True,
max_length=settings.CORE_SITECUSTOMIZATION_TAGLINE_LENGHT,
verbose_name=_("tagline"),
help_text=_("A few words to describe this very website."),
default="A few words to describe this very website.",
)
description = models.TextField( # [i18n]
blank=True,
max_length=settings.CORE_SITECUSTOMIZATION_DESCRIPTION_LENGHT,
verbose_name=_("description"),
help_text=_("A short text to describe this very website."),
default=_("A short text to describe this very website."),
)
class Meta:
verbose_name = _("site customization")
verbose_name_plural = _("site customizations")
ordering = ["site"]
def __str__(self):
return self.site.name if self.site.name else str(_("unknown"))
def save(self, *args, **kwargs):
super(SiteCustomization, self).save(*args, **kwargs)
# Clear cached content
Site.objects.clear_cache()
@property
def github_repo_name(self):
return settings.GITHUB_REPO_NAME
@property
def github_repo_url(self):
return settings.GITHUB_REPO_URL
@property
def github_team_name(self):
return settings.GITHUB_TEAM_NAME
@property
def github_team_url(self):
return settings.GITHUB_TEAM_URL
@property
def github_contributors_url(self):
return settings.GITHUB_CONTRIB_URL
@property
def license_name(self):
return settings.LICENSE_NAME
@property
def license_url(self):
return settings.LICENSE_URL
@property
def fontawesome_site_icon(self):
return settings.FONTAWESOME_SITE_ICON
```
#### File: projects/models/base.py
```python
import markdown
import uuid
from django.conf import settings
from django.db import models
from django.urls import reverse_lazy
from django.utils.translation import ugettext, ugettext_lazy as _
from accounts.models import get_sentinel_user
class PriorityMixin(models.Model):
LOW = 1
MEDIUM = 2
HIGH = 3
PRIORITY_CHOICES = [
(HIGH, _("High")),
(MEDIUM, _("Medium")),
(LOW, _("Low")),
]
priority = models.SmallIntegerField(
choices=PRIORITY_CHOICES,
default=MEDIUM,
verbose_name=_("priority"),
help_text=_("Priority relating to the parent object."),
)
class Meta:
abstract = True
@property
def shortcut(self):
return "#?{}".format(self.id)
def __str__(self):
return "[{}] {}".format(self.shortcut, self.title)
def get_priority_color(self):
if self.priority == self.HIGH:
return "danger"
elif self.priority == self.MEDIUM:
return "warning"
else:
return "success"
def get_priority_icon(self):
if self.priority == self.HIGH:
return "fas fa-angle-double-up"
elif self.priority == self.MEDIUM:
return "fas fa-angle-up"
else:
return "fas fa-angle-down"
class ProgressMixin(models.Model):
class Meta:
abstract = True
def get_points_display(self):
total = self.total_story_points()
remaining = self.remaining_story_points()
return "{} / {}".format(total - remaining, total)
def progress(self):
"""Returns progress from 0 to 100 according to child issue states."""
total = self.total_story_points()
remaining = self.remaining_story_points()
if not total:
return 0
return int(100 * (total - remaining) / total)
class TrustMixin(models.Model):
LOW = 1
MEDIUM = 2
HIGH = 3
TRUST_CHOICES = [
(LOW, _("Low")),
(MEDIUM, _("Medium")),
(HIGH, _("High")),
]
class Meta:
abstract = True
def get_trust_display(self):
trust = self.trust
for t in self.TRUST_CHOICES:
if trust == t[0]:
return t[1]
return self.LOW
def get_trust_color(self):
total = self.total_story_points()
if not total:
return "warning"
if self.trust == self.HIGH:
return "light"
elif self.trust == self.MEDIUM:
return "warning"
else:
return "danger"
class BaseModel(models.Model):
id = models.AutoField(primary_key=True)
owner = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.SET(
get_sentinel_user
), # if the related user is deleted, sets the creator to the "deleted" user!
related_name="%(app_label)s_%(class)ss_as_owner",
related_query_name="%(app_label)s_%(class)s_as_owner",
verbose_name=_("owner"),
help_text=_("Owner of this very object."),
limit_choices_to={"is_active": True},
)
created_at = models.DateTimeField(
auto_now_add=True, verbose_name=_("creation date")
)
changed_at = models.DateTimeField(auto_now=True, verbose_name=_("update date"))
title = models.CharField(
max_length=255,
verbose_name=_("title"),
)
description = models.TextField(
blank=True,
verbose_name=_("description"),
help_text=_("Tip: You can use Markdown's syntax!"),
)
class Meta:
abstract = True
ordering = ["-changed_at"]
def __str__(self):
return self.title
def get_created_by(self):
return self.history.earliest().history_user
get_created_by.short_description = _("created by")
created_by = property(get_created_by)
def get_changed_by(self):
return self.history.latest().history_user
get_changed_by.short_description = _("changed by")
changed_by = property(get_changed_by)
def get_edit_url(self):
return reverse_lazy(
"admin:%s_%s_change" % (self._meta.app_label, self._meta.model_name),
args=(self.id,),
)
@property
def description_html(self):
return markdown.markdown(self.description)
```
#### File: projects/models/epic.py
```python
from django.db import models
from django.utils.translation import ugettext, ugettext_lazy as _
from simple_history.models import HistoricalRecords
from projects.models import (
BaseModel,
Initiative,
Feature,
Product,
PriorityMixin,
ProgressMixin,
TrustMixin,
)
class Epic(
PriorityMixin,
ProgressMixin,
TrustMixin,
BaseModel,
):
history = HistoricalRecords()
feature = models.ForeignKey(
Feature,
on_delete=models.CASCADE,
related_name="epics",
related_query_name="epic",
verbose_name=_("feature"),
)
class Meta(BaseModel.Meta):
verbose_name = _("epic")
verbose_name_plural = _("epics")
ordering = [
"feature__initiative__product",
"feature__initiative__order",
"-feature__priority",
"-priority",
"created_at",
]
@property
def color(self):
return "info"
@property
def icon(self):
return "fas fa-book"
@property
def shortcut(self):
return "E{}".format(self.id)
@property
def product(self):
return self.feature.product
def total_story_points(self):
points = self.issues.exclude(points=None).aggregate(models.Sum("points"))[
"points__sum"
]
return points if points else 0
def remaining_story_points(self):
from projects.models import Issue
points = (
self.issues.exclude(points=None)
.exclude(status=Issue.DONE)
.exclude(status=Issue.CANCELLED)
.aggregate(models.Sum("points"))["points__sum"]
)
return points if points else 0
@property
def trust(self):
from projects.models import Issue
if self.issues.filter(trust=Issue.LOW) or self.issues.filter(points=None):
return self.LOW
if self.issues.filter(trust=Issue.MEDIUM):
return self.MEDIUM
return self.HIGH
```
#### File: projects/templatetags/markdown.py
```python
import markdown
from django import template
register = template.Library()
@register.filter
def render(value):
return markdown.markdown(value)
``` |
{
"source": "jlebunetel/python-test-fun",
"score": 4
} |
#### File: tests/sandbox/test_hello_world.py
```python
import unittest
from sandbox.hello_world import additionner
class AdditionnerTestCase(unittest.TestCase):
def test_additionner_un_plus_un(self):
self.assertEqual(additionner(1, 1), 2)
# def test_additionner_un_plus_un_egal_trois(self):
# self.assertEqual(additionner(1, 1), 3)
def test_type_str_first_argument(self):
with self.assertRaises(TypeError):
additionner("bidule", 1)
def test_type_str_second_argument(self):
with self.assertRaises(TypeError):
additionner(1, "bidule")
def test_type_float(self):
with self.assertRaises(TypeError):
additionner(1.5, 2.8)
if __name__ == "__main__":
unittest.main()
``` |
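The `sandbox.hello_world` module under test is not part of this entry; a minimal implementation consistent with the assertions above (integer-only addition that rejects strings and floats) could look like the sketch below — it is an illustration, not the project's actual code:

```python
# Sketch of sandbox/hello_world.py consistent with the tests above;
# the real implementation may differ.
def additionner(a, b):
    """Return the sum of two integers; any other argument type raises TypeError."""
    # Note: bool is a subclass of int and is not covered by the tests, so it passes here.
    if not isinstance(a, int) or not isinstance(b, int):
        raise TypeError("additionner() only accepts integers")
    return a + b
```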
{
"source": "jlecavalier/godot-docs",
"score": 2
} |
#### File: godot-docs/extensions/gdscript.py
```python
import re
from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
default, words, combined, do_insertions
from pygments.util import get_bool_opt, shebang_matches
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Other, Error
from pygments import unistring as uni
__all__ = ['GDScriptLexer']
line_re = re.compile('.*?\n')
class GDScriptLexer(RegexLexer):
"""
    For `GDScript <https://www.godotengine.org>`_ source code.
"""
name = 'GDScript'
aliases = ['gdscript', 'gd']
filenames = ['*.gd']
mimetypes = ['text/x-gdscript', 'application/x-gdscript']
def innerstring_rules(ttype):
return [
# the old style '%s' % (...) string formatting
(r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[E-GXc-giorsux%]', String.Interpol),
# backslashes, quotes and formatting signs must be parsed one at a time
(r'[^\\\'"%\n]+', ttype),
(r'[\'"\\]', ttype),
# unhandled string formatting sign
(r'%', ttype),
# newlines are an error (use "nl" state)
]
tokens = {
'root': [
(r'\n', Text),
(r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
bygroups(Text, String.Affix, String.Doc)),
(r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
bygroups(Text, String.Affix, String.Doc)),
(r'[^\S\n]+', Text),
(r'#.*$', Comment.Single),
(r'[]{}:(),;[]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
(r'(in|and|or|not)\b', Operator.Word),
(r'!=|==|<<|>>|&&|\+=|-=|\*=|/=|%=|&=|\|=|\|\||[-~+/*%=<>&^.!|$]', Operator),
include('keywords'),
(r'(func)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
include('builtins'),
('([rR]|[uUbB][rR]|[rR][uUbB])(""")',
bygroups(String.Affix, String.Double), 'tdqs'),
("([rR]|[uUbB][rR]|[rR][uUbB])(''')",
bygroups(String.Affix, String.Single), 'tsqs'),
('([rR]|[uUbB][rR]|[rR][uUbB])(")',
bygroups(String.Affix, String.Double), 'dqs'),
("([rR]|[uUbB][rR]|[rR][uUbB])(')",
bygroups(String.Affix, String.Single), 'sqs'),
('([uUbB]?)(""")', bygroups(String.Affix, String.Double),
combined('stringescape', 'tdqs')),
("([uUbB]?)(''')", bygroups(String.Affix, String.Single),
combined('stringescape', 'tsqs')),
('([uUbB]?)(")', bygroups(String.Affix, String.Double),
combined('stringescape', 'dqs')),
("([uUbB]?)(')", bygroups(String.Affix, String.Single),
combined('stringescape', 'sqs')),
include('name'),
include('numbers'),
],
'keywords': [
(words((
'and', 'in', 'not', 'or', 'as', 'breakpoint', 'class', 'class_name',
'extends', 'is', 'func', 'setget', 'signal', 'tool', 'const',
'enum', 'export', 'onready', 'static', 'var', 'break', 'continue',
'if', 'elif', 'else', 'for', 'pass', 'return', 'match', 'while',
'remote', 'master', 'puppet', 'remotesync', 'mastersync',
'puppetsync'), suffix=r'\b'),
Keyword),
],
'builtins': [
(words((
'Color8', 'ColorN', 'abs', 'acos', 'asin', 'assert', 'atan', 'atan2',
'bytes2var', 'ceil', 'char', 'clamp', 'convert', 'cos', 'cosh',
'db2linear', 'decimals', 'dectime', 'deg2rad', 'dict2inst',
'ease', 'exp', 'floor', 'fmod', 'fposmod', 'funcref', 'hash',
'inst2dict', 'instance_from_id', 'is_inf', 'is_nan', 'lerp',
'linear2db', 'load', 'log', 'max', 'min', 'nearest_po2', 'pow',
'preload', 'print', 'print_stack', 'printerr', 'printraw',
'prints', 'printt', 'rad2deg', 'rand_range', 'rand_seed',
'randf', 'randi', 'randomize', 'range', 'round', 'seed', 'sign',
'sin', 'sinh', 'sqrt', 'stepify', 'str', 'str2var', 'tan',
'tan', 'tanh', 'type_exist', 'typeof', 'var2bytes', 'var2str',
'weakref', 'yield'),
prefix=r'(?<!\.)', suffix=r'\b'),
Name.Builtin),
(r'((?<!\.)(self|false|true)|(PI|TAU|NAN|INF)'
r')\b', Name.Builtin.Pseudo),
(words((
                'bool', 'int', 'float', 'String', 'NodePath',
                'Vector2', 'Rect2', 'Transform2D',
'Vector3', 'Rect3', 'Plane', 'Quat', 'Basis', 'Transform',
'Color', "RID", 'Object', 'NodePath', 'Dictionary',
'Array', 'PoolByteArray', 'PoolIntArray', 'PoolRealArray',
'PoolStringArray', 'PoolVector2Array', 'PoolVector3Array', 'PoolColorArray',
'null',
), prefix=r'(?<!\.)', suffix=r'\b'), Name.Builtin.Type),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+j?', Number.Integer)
],
'name': [
('[a-zA-Z_]\w*', Name),
],
'funcname': [
('[a-zA-Z_]\w*', Name.Function, '#pop'),
default('#pop'),
],
'classname': [
('[a-zA-Z_]\w*', Name.Class, '#pop')
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings-single': innerstring_rules(String.Single),
'strings-double': innerstring_rules(String.Double),
'dqs': [
(r'"', String.Double, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
include('strings-double')
],
'sqs': [
(r"'", String.Single, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings
include('strings-single')
],
'tdqs': [
(r'"""', String.Double, '#pop'),
include('strings-double'),
(r'\n', String.Double)
],
'tsqs': [
(r"'''", String.Single, '#pop'),
include('strings-single'),
(r'\n', String.Single)
],
}
def setup(sphinx):
sphinx.add_lexer('gdscript', GDScriptLexer())
``` |
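Outside of Sphinx, the lexer can be exercised directly with plain Pygments; a quick sketch, assuming the extension module above is importable as `gdscript`:

```python
# Sanity-check the lexer with Pygments alone (no Sphinx involved).
from pygments import highlight
from pygments.formatters import HtmlFormatter

from gdscript import GDScriptLexer  # assumes the extension module above is on the path

code = 'func _ready():\n\tprint("hello")\n'
print(highlight(code, GDScriptLexer(), HtmlFormatter()))
```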
{
"source": "jlech42/chump_django",
"score": 2
} |
#### File: chump_django/user/utilities.py
```python
from django.contrib.auth.models import User, Group
def get_user_id_from_messenger_id(messenger_id):
user = User.objects.get(username=messenger_id).id
return user
``` |
{
"source": "jlecker/django-srcset",
"score": 2
} |
#### File: django-srcset/responsive_images/models.py
```python
from django.db import models
class ImageModel(models.Model):
width = models.PositiveIntegerField()
height = models.PositiveIntegerField()
class Meta:
abstract = True
@property
def size(self):
return (self.width, self.height)
class OriginalImage(ImageModel):
image_file = models.ImageField(
upload_to='original_images', # required for Django 1.6, but not used
height_field='height',
width_field='width',
db_index=True
)
class ResizedImage(ImageModel):
original = models.ForeignKey(
OriginalImage,
db_index=True,
on_delete=models.CASCADE
)
image_file = models.ImageField(
upload_to='responsive_images',
height_field='height',
width_field='width',
)
crop = models.CharField(max_length=10)
class Meta:
unique_together = ('original', 'width', 'height', 'crop')
``` |
{
"source": "jleclanche/fastapi-cloudauth",
"score": 2
} |
#### File: docs/server/cognito.py
```python
import os
from pydantic import BaseModel
from fastapi import FastAPI, Depends
from fastapi_cloudauth.cognito import Cognito, CognitoCurrentUser, CognitoClaims
tags_metadata = [
{
"name": "Cognito",
"description": "Operations with access/ID token, provided by AWS Cognito.",
}
]
app = FastAPI(
title="FastAPI CloudAuth Project",
description="Simple integration between FastAPI and cloud authentication services (AWS Cognito, Auth0, Firebase Authentication).",
openapi_tags=tags_metadata,
)
auth = Cognito(
region=os.environ["COGNITO_REGION"], userPoolId=os.environ["COGNITO_USERPOOLID"]
)
@app.get("/", dependencies=[Depends(auth.scope("read:users"))], tags=["Cognito"])
def secure():
# access token is valid
return "Hello"
class AccessUser(BaseModel):
sub: str
@app.get("/access/", tags=["Cognito"])
def secure_access(current_user: AccessUser = Depends(auth.claim(AccessUser))):
# access token is valid and getting user info from access token
return f"Hello", {current_user.sub}
get_current_user = CognitoCurrentUser(
region=os.environ["COGNITO_REGION"], userPoolId=os.environ["COGNITO_USERPOOLID"]
)
@app.get("/user/", tags=["Cognito"])
def secure_user(current_user: CognitoClaims = Depends(get_current_user)):
# ID token is valid and getting user info from ID token
return f"Hello, {current_user.username}"
```
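A client-side sketch of calling the protected endpoints above; the host, port and token value are placeholders — a real access token has to be issued by the Cognito user pool:

```python
# Hypothetical client call against the app above; host, port and token are placeholders.
import requests

ACCESS_TOKEN = "<access token issued by AWS Cognito>"
response = requests.get(
    "http://localhost:8000/access/",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
)
print(response.status_code, response.json())
```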
#### File: fastapi-cloudauth/fastapi_cloudauth/firebase.py
```python
from calendar import timegm
from datetime import datetime
from typing import Any, Dict
from fastapi import HTTPException
from pydantic import BaseModel, Field
from starlette import status
from .base import UserInfoAuth
from .messages import NOT_VERIFIED
from .verification import JWKS, ExtraVerifier
class FirebaseClaims(BaseModel):
user_id: str = Field(alias="user_id")
email: str = Field(None, alias="email")
class FirebaseCurrentUser(UserInfoAuth):
"""
Verify ID token and get user info of Firebase
"""
user_info = FirebaseClaims
firebase_keys_url = "https://www.googleapis.com/robot/v1/metadata/x509/[email protected]"
def __init__(self, project_id: str, *args: Any, **kwargs: Any):
self._key_refresh_locked = False
jwks = JWKS.firebase(self.firebase_keys_url)
super().__init__(
jwks,
*args,
user_info=self.user_info,
audience=project_id,
issuer=f"https://securetoken.google.com/{project_id}",
extra=FirebaseExtraVerifier(project_id=project_id),
**kwargs,
)
async def refresh_keys(self) -> None:
if not self._key_refresh_locked:
# Ensure only one key refresh can happen at once.
# This prevents a dogpile of requests the second the keys expire
# from causing a bunch of refreshes (each one is an http request).
self._key_refresh_locked = True
# Re-query the keys from firebase.
# NOTE: The expires comes from an http header which is supposed to
# be set to a time long before the keys are no longer in use.
# This allows gradual roll-out of the keys and should prevent any
# request from failing.
# The only scenario which will result in failing requests is if
# there are zero requests for the entire duration of the roll-out
# (observed to be around 1 week), followed by a burst of multiple
# requests at once.
jwks = JWKS.firebase(self.firebase_keys_url)
# Reset the keys and the expiry date.
self._verifier._jwks_to_key = jwks.keys
self._keys_expire = jwks.expires
# Remove the lock.
self._key_refresh_locked = False
class FirebaseExtraVerifier(ExtraVerifier):
def __init__(self, project_id: str):
self._pjt_id = project_id
def __call__(self, claims: Dict[str, str], auto_error: bool = True) -> bool:
# auth_time must be past time
if claims.get("auth_time"):
auth_time = int(claims["auth_time"])
now = timegm(datetime.utcnow().utctimetuple())
if now < auth_time:
if auto_error:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail=NOT_VERIFIED
)
return False
return True
``` |
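Wiring `FirebaseCurrentUser` into an application mirrors the Cognito example earlier in this entry; a sketch with a placeholder project id:

```python
# Sketch only: mirrors the Cognito example above; the project id is a placeholder.
from fastapi import Depends, FastAPI
from fastapi_cloudauth.firebase import FirebaseClaims, FirebaseCurrentUser

app = FastAPI()
get_current_user = FirebaseCurrentUser(project_id="my-firebase-project")

@app.get("/user/")
def secure_user(current_user: FirebaseClaims = Depends(get_current_user)):
    # ID token is valid; claims were extracted from it
    return f"Hello, {current_user.user_id}"
```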
{
"source": "jleclanche/filetime",
"score": 3
} |
#### File: filetime/tests/test_main.py
```python
from datetime import datetime
from filetime import from_datetime, to_datetime, utc
def test_from_datetime():
assert from_datetime(datetime(2009, 7, 25, 23, 0)) == 128930364000000000
assert from_datetime(datetime(1970, 1, 1, 0, 0, tzinfo=utc)) == 116444736000000000
assert from_datetime(datetime(1970, 1, 1, 0, 0)) == 116444736000000000
assert from_datetime(datetime(2009, 7, 25, 23, 0, 0, 100)) == 128930364000001000
def test_to_datetime():
assert to_datetime(116444736000000000) == datetime(1970, 1, 1, 0, 0)
assert to_datetime(128930364000000000) == datetime(2009, 7, 25, 23, 0)
assert to_datetime(128930364000001000) == datetime(2009, 7, 25, 23, 0, 0, 100)
``` |
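The `filetime` module itself is not included in this entry, but the constants in the assertions pin the conversion down: a Windows FILETIME counts 100-nanosecond ticks since 1601-01-01, and the Unix epoch sits at tick 116444736000000000. A sketch consistent with the tests above (naive datetimes treated as UTC; the real module also exposes the `utc` tzinfo used in the first test):

```python
# Sketch of from_datetime()/to_datetime() consistent with the assertions above;
# the actual filetime module may differ in details such as timezone handling.
from calendar import timegm
from datetime import datetime, timedelta

EPOCH_AS_FILETIME = 116444736000000000   # 1970-01-01 expressed in FILETIME ticks
TICKS_PER_SECOND = 10_000_000            # one tick = 100 ns

def from_datetime(dt):
    seconds = timegm(dt.utctimetuple())  # naive datetimes are interpreted as UTC
    return EPOCH_AS_FILETIME + seconds * TICKS_PER_SECOND + dt.microsecond * 10

def to_datetime(ft):
    seconds, ticks = divmod(ft - EPOCH_AS_FILETIME, TICKS_PER_SECOND)
    return datetime(1970, 1, 1) + timedelta(seconds=seconds, microseconds=ticks // 10)
```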
{
"source": "jleclanche/pywow",
"score": 3
} |
#### File: game/glyphs/__init__.py
```python
from .. import *
from ..globalstrings import *
class Glyph(Model):
MAJOR = 0
MINOR = 1
PRIME = 2
SKILL_GLYPHS = 810
@classmethod
def getAllSpells(cls):
from ..skills import Skill
return Skill(cls.SKILL_GLYPHS).getSpells()
@classmethod
def getAllForClass(cls, chrClass):
spells = cls.getAllSpells()
ret = []
for spell in spells:
glyph = spell.getGlyphLearned()
if glyph and glyph.getSpell().getGlyphInfo() == chrClass:
ret.append(spell)
return ret
def getTypeText(self):
return {
self.MINOR: MINOR_GLYPH,
self.MAJOR: MAJOR_GLYPH,
self.PRIME: PRIME_GLYPH,
}.get(self.getType(), "")
class GlyphTooltip(Tooltip):
def tooltip(self):
self.append("name", self.obj.getName())
self.append("type", self.obj.getTypeText())
self.append("description", self.obj.getDescription(), YELLOW)
return self.flush()
Glyph.Tooltip = GlyphTooltip
class GlyphProxy(object):
"""
WDBC proxy for glyphs
"""
def __init__(self, cls):
from pywow import wdbc
self.__file = wdbc.get("GlyphProperties.dbc", build=-1)
def get(self, id):
from ..spells import Spell
row = self.__file[id]
self.spell = Spell(row._raw("spell"))
return row
def getDescription(self, row):
return self.spell.getDescription()
def getName(self, row):
return self.spell.name_enus
def getSpell(self, row):
return self.spell
def getSpellIcon(self, row):
return self.spell.getIcon()
def getType(self, row):
return row.type
Glyph.initProxy(GlyphProxy)
```
#### File: game/items/__init__.py
```python
from __future__ import division
from .. import *
from .. import durationstring
from ..globalstrings import *
TRIGGER_ONUSE = 0
TRIGGER_ONEQUIP = 1
TRIGGER_ONPROC = 2
TRIGGER_INVENTORY = 5
TRIGGER_LEARNING = 6
def price(value):
if not value:
return 0, 0, 0
g = divmod(value, 10000)[0]
s = divmod(value, 100)[0] % 100
c = value % 100
return g, s, c
class Item(Model):
def getQualityColor(self):
return {
0: GREY, # Poor
1: WHITE, # Common
2: GREEN, # Uncommon
3: BLUE, # Rare
4: PURPLE, # Epic
5: ORANGE, # Legendary
6: GOLD, # Artifact
7: GOLD, # Heirloom
}.get(self.quality, WHITE)
def getBindingText(self):
if self.isAccountBound():
return ITEM_BIND_TO_ACCOUNT
return {
1: ITEM_BIND_ON_PICKUP,
2: ITEM_BIND_ON_EQUIP,
3: ITEM_BIND_ON_USE,
4: ITEM_BIND_QUEST,
}.get(self.bind, "")
def getSlotText(self):
return {
#INVTYPE_WEAPONMAINHAND_PET = "Main Attack"
1: INVTYPE_HEAD,
2: INVTYPE_NECK,
3: INVTYPE_SHOULDER,
4: INVTYPE_BODY,
5: INVTYPE_CHEST,
6: INVTYPE_WAIST,
7: INVTYPE_LEGS,
8: INVTYPE_FEET,
9: INVTYPE_WRIST,
10: INVTYPE_HAND,
11: INVTYPE_FINGER,
12: INVTYPE_TRINKET,
13: INVTYPE_WEAPON,
14: INVTYPE_SHIELD,
15: INVTYPE_RANGED,
16: INVTYPE_CLOAK, # INVTYPE_BACK ?
17: INVTYPE_2HWEAPON,
18: INVTYPE_BAG,
19: INVTYPE_TABARD,
20: INVTYPE_ROBE,
21: INVTYPE_WEAPONMAINHAND,
22: INVTYPE_WEAPONOFFHAND,
23: INVTYPE_HOLDABLE,
24: INVTYPE_AMMO,
25: INVTYPE_THROWN,
26: INVTYPE_RANGEDRIGHT,
#27: INVTYPE_QUIVER,
28: INVTYPE_RELIC,
}.get(self.slot, "")
def getTriggerText(self, trigger):
"""
Return the trigger text for an item spell trigger
An empty string means no trigger text.
None means the trigger should be hidden.
"""
if trigger == TRIGGER_ONUSE:
return ITEM_SPELL_TRIGGER_ONUSE
if trigger == TRIGGER_ONEQUIP:
return ITEM_SPELL_TRIGGER_ONEQUIP
if trigger == TRIGGER_ONPROC:
return ITEM_SPELL_TRIGGER_ONPROC
if trigger == TRIGGER_INVENTORY:
return ""
if trigger == TRIGGER_LEARNING:
return ITEM_SPELL_TRIGGER_ONUSE
class ItemTooltip(Tooltip):
def tooltip(self):
hideNote = False # for recipes, mounts, etc
self.append("name", self.obj.name, self.obj.getQualityColor())
#if self.obj.quality and env.colorblind:
# self.append("quality", self.obj.getQuality())
glyph = self.obj.getGlyph()
if glyph:
self.append("glyph", glyph.getTypeText(), color=CYAN)
if self.obj.isHeroic():
self.append("heroic", ITEM_HEROIC, GREEN)
if self.obj.isChart():
self.append("chart", ITEM_SIGNABLE, GREEN)
self.append("requiredZone", self.obj.getRequiredZone())
self.append("requiredInstance", self.obj.getRequiredInstance())
if self.obj.isConjured():
self.append("conjured", ITEM_CONJURED)
self.append("binding", self.obj.getBindingText())
if self.obj.isUniqueEquipped():
self.append("unique", ITEM_UNIQUE_EQUIPPABLE)
elif self.obj.unique:
if self.obj.unique > 1:
self.append("unique", ITEM_UNIQUE_MULTIPLE % (self.obj.unique))
else:
self.append("unique", ITEM_UNIQUE)
elif self.obj.unique_category:
pass
if self.obj.startsQuest():
self.append("startsQuest", ITEM_STARTS_QUEST)
isLocked, lockType, lockSkillLevel = self.obj.getLockInfo()
if isLocked:
self.append("locked", LOCKED, RED)
self.append("lock", ITEM_MIN_SKILL % ("Lockpicking", lockSkillLevel), RED)
slot = self.obj.getSlotText()
bagSlots = self.obj.getBagSlots()
subClassId, subClassName = self.obj.getSubClassInfo()
if slot:
if bagSlots:
self.append("slot", CONTAINER_SLOTS % (bagSlots, slot))
elif self.showSubClass():
self.append("slot", slot)
self.append("subclass", subClassName, side=RIGHT)
else:
self.append("slot", slot)
elif self.showSubClass():
self.append("subclass", subClassName)
damageMin, damageMax, speed = self.obj.getDamageInfo()
if damageMax:
speed = speed / 1000
dps = (damageMin + damageMax) / (2 * speed)
self.append("damage", DAMAGE_TEMPLATE % (damageMin, damageMax))
self.append("speed", "%s %.2f" % (SPEED, speed), side=RIGHT)
self.append("dps", DPS_TEMPLATE % (dps))
armor, extraArmor = self.obj.getArmor()
if armor:
if extraArmor:
self.append("armor", ARMOR_TEMPLATE % (armor + extraArmor), color=GREEN)
else:
self.append("armor", ARMOR_TEMPLATE % (armor))
block = self.obj.getBlock()
if block:
self.append("block", SHIELD_BLOCK_TEMPLATE % (block))
for stat, amount in self.obj.getStats():
text = stat.getText(amount)
if not stat.isSpecial():
self.append("stat", stat.getText(amount))
# enchant
for socket in self.obj.getSockets():
self.append("socket", socket.getText(), color=GREY)
socketBonus = self.obj.getSocketBonus()
if socketBonus:
self.append("socketBonus", ITEM_SOCKET_BONUS % (socketBonus.getName()), color=GREY)
self.append("gemProperties", self.obj.getGemProperties())
# random ench
duration = self.obj.getDuration()
if duration:
duration = durationstring.duration(duration, durationstring.SHORT)
self.append("duration", ITEM_DURATION % (duration))
holiday = self.obj.getRequiredHoliday()
if holiday:
self.append("requiredHoliday", ITEM_REQ_SKILL % (self.obj.getRequiredHoliday()))
# race reqs
requiredClasses = self.obj.getRequiredClasses()
if requiredClasses:
self.append("requiredClasses", ITEM_CLASSES_ALLOWED % (", ".join(cls.getName() for cls in requiredClasses)))
minDurability, maxDurability = self.obj.getDurabilityInfo()
if minDurability:
self.append("durability", DURABILITY_TEMPLATE % (minDurability, maxDurability))
if self.obj.required_level > 1:
self.append("requiredLevel", ITEM_MIN_LEVEL % (self.obj.required_level))
if self.showItemLevel():
self.append("level", ITEM_LEVEL % (self.obj.level))
# (required arena rating)
requiredSkill, requiredSkillLevel = self.obj.getRequiredSkillInfo()
if requiredSkill and requiredSkillLevel:
self.append("requiredSkill", ITEM_MIN_SKILL % (requiredSkill, requiredSkillLevel))
self.formatAppend("requiredSpell", ITEM_REQ_SKILL, self.obj.getRequiredSpell())
requiredFaction, requiredReputation = self.obj.getRequiredFaction()
if requiredFaction: # and requiredReputation?
self.append("requiredFaction", ITEM_REQ_REPUTATION % (requiredFaction, requiredReputation))
for stat, amount in self.obj.getStats():
text = stat.getText(amount)
if stat.isSpecial() and not stat.isExtraArmor():
self.append("specialStat", "%s %s" % (ITEM_SPELL_TRIGGER_ONEQUIP, text), color=GREEN)
for spell, trigger, charges, cooldown, category, cooldownCategory, createdItem in self.obj.getSpells():
if spell:
triggerText = self.obj.getTriggerText(trigger)
if triggerText is None:
continue
glyph = spell.getGlyphLearned()
if trigger == TRIGGER_LEARNING:
if glyph:
text = "%s\n\n%s" % (ITEM_GLYPH_ONUSE, glyph.getDescription())
else:
hideNote = True
text = self.obj.note or "(null)"
else:
text = spell.getDescription()
if text:
if triggerText:
text = triggerText + " " + text
self.append("spells", text, GREEN)
if createdItem:
self.appendEmptyLine()
self.append("createdItem", ItemTooltip(createdItem))
self.appendEmptyLine()
reagents = spell.getReagents()
if reagents:
text = []
for item, amount in reagents:
if amount == 1:
text.append(item.name)
else:
text.append("%s (%d)" % (item.name, amount))
self.append("reagents", "Requires %s" % (", ".join(text))) # FIXME globalstring
# charges
itemSet = self.obj.getItemSet()
if itemSet:
from ..itemsets import ItemSetTooltip
self.appendEmptyLine()
self.append("itemSet", ItemSetTooltip(itemSet))
if not hideNote:
self.append("note", self.obj.note and '"%s"' % (self.obj.note), YELLOW)
# openable
if self.obj.isReadable():
self.append("page", ITEM_CAN_BE_READ, GREEN)
# disenchanting
if self.obj.sell_price:
g, s, c = price(self.obj.sell_price)
text = SELL_PRICE + ":"
if g: text += " %ig" % (g)
if s: text += " %is" % (s)
if c: text += " %ic" % (c)
self.append("sellPrice", text)
return self.flush()
def showItemLevel(self):
return self.obj.category.id in (2, 4, 5, 6)
def showSubClass(self):
category = self.obj.category.id
subcategory = self.obj.subcategory
slot = self.obj.slot
if category in (2, 4, 6):
if slot == 16: # cloak
return False
if category == 2 and subcategory == 14:
return False
if category == 4 and subcategory == 0:
return False
return True
if category == 15 and subcategory == 5:
return True
return False
Item.Tooltip = ItemTooltip
class ItemProxy(WDBCProxy):
"""
WDBC proxy for items
"""
def get(self, id):
from pywow import wdbc
itemSparse = wdbc.get("Item-sparse.db2", build=self.build, locale=self.locale)
item = wdbc.get("Item.db2", build=self.build, locale=self.locale)
ret = itemSparse[id]
item = item[id]
ret.category = item.category
ret.subcategory = item.subcategory
return ret
def isAccountBound(self, row):
return row.flags.account_bound
def isChart(self, row):
return row.flags.chart
def isConjured(self, row):
return row.flags.conjured
def isHeroic(self, row):
return row.flags.heroic
def isReadable(self, row):
return bool(row._raw("page"))
def isUniqueEquipped(self, row):
return row.flags.unique_equipped
def getArmor(self, row):
from . import levels
#return row.armor # old
return levels.getArmor(row.level, row.category.id, row.subcategory, row.quality, row.slot), self.getExtraArmor(row)
def getBagSlots(self, row):
return row.bag_slots
def getBlock(self, row):
#return row.block # old
return 0
def getDamageInfo(self, row):
from . import levels
damageMin, damageMax = levels.getDamage(row.level, row.category.id, row.subcategory, row.quality, row.slot, row.flags, row.speed)
return damageMin, damageMax, row.speed
def getDurabilityInfo(self, row):
# return min, max
return row.durability, row.durability
def getDuration(self, row):
return row.duration
def getExtraArmor(self, row):
ret = 0
for stat, amount in self.getStats(row):
if stat.isExtraArmor():
ret += amount
return ret
def getGemProperties(self, row):
if row.gem_properties and row.gem_properties.enchant:
return row.gem_properties.enchant.name_enus
return ""
def getGlyph(self, row):
for spellInfo in self.getSpells(row):
glyph = spellInfo[0].getGlyphLearned()
if glyph:
return glyph
def getItemSet(self, row):
id = row._raw("itemset")
if id:
from ..itemsets import ItemSet
return ItemSet(id)
def getLockInfo(self, row):
row = row.lock
if row:
for i in range(1, 9):
type = getattr(row, "type_%i" % (i))
if type:
level = getattr(row, "required_skill_level_%i" % (i))
return True, type, level
return False, 0, 0
def getName(self, row):
return row.name
def getRequiredClasses(self, row):
from ..classes import ChrClass
return ChrClass.getClassesFromMask(row.class_mask)
def getRequiredFaction(self, row):
requiredReputation = globals().get("FACTION_STANDING_LABEL%i" % (row.required_reputation + 1), "")
if row.required_faction:
return row.required_faction.name_enus, requiredReputation
return "", requiredReputation
def getRequiredHoliday(self, row):
if row.required_holiday:
return row.required_holiday.name.name_enus
return ""
def getRequiredInstance(self, row):
if row._raw("required_instance") and row.required_instance:
return row.required_instance.name_enus
return ""
def getRequiredSkillInfo(self, row):
requiredSkillLevel = row.required_skill_level
if row.required_skill:
return row.required_skill.name_enus, requiredSkillLevel
return "", requiredSkillLevel
def getRequiredSpell(self, row):
if row.required_spell:
return row.required_spell.name_enus
return ""
def getRequiredZone(self, row):
if row.required_zone:
return row.required_zone.name_enus
return ""
def getSpells(self, row):
from ..spells import Spell
spells = ("spell_%i", "spell_trigger_%i", "spell_charges_%i", "spell_cooldown_%i", "spell_category_%i", "spell_cooldown_category_%i")
ret = []
for i in range(1, 6):
r = []
for k in spells:
r.append(row._raw(k % (i)))
spell = r[0]
if spell:
r[0] = spell = Spell(spell)
r.append(spell.getCreatedItem())
ret.append(r)
return ret
def getSocketBonus(self, row):
id = row._raw("socket_bonus")
if id:
from ..enchants import Enchant
return Enchant(id)
def getSockets(self, row):
from .sockets import Socket
ret = []
for i in range(1, 4):
socket = getattr(row, "socket_%i" % (i))
if socket:
ret.append(Socket(socket))
return ret
def getStats(self, row):
from ..stats import Stat
ret = []
for i in range(1, 10):
stat = getattr(row, "stats_id_%i" % (i))
amount = getattr(row, "stats_amount_%i" % (i))
if amount:
ret.append((Stat(stat), amount))
return ret
def getSubClassInfo(self, row):
category = row.category.itemsubclass__category
subcategory = None
for x in category:
if x.subcategory == row.subcategory:
subcategory = x
if subcategory:
return subcategory.id, subcategory.name_enus
return None, None
def startsQuest(self, row):
return bool(row._raw("starts_quest"))
Item.initProxy(ItemProxy)
```
#### File: game/items/levels.py
```python
from math import floor
from pywow import wdbc
ITEM_CLASS_WEAPON = 2
ITEM_CLASS_ARMOR = 4
ITEM_QUALITY_POOR = 0
ITEM_QUALITY_COMMON = 1
ITEM_QUALITY_UNCOMMON = 2
ITEM_QUALITY_RARE = 3
ITEM_QUALITY_EPIC = 4
ITEM_QUALITY_LEGENDARY = 5
ITEM_QUALITY_ARTIFACT = 6
ITEM_QUALITY_HEIRLOOM = 7
ITEM_SUBCLASS_ARMOR_CLOTH = 1
ITEM_SUBCLASS_ARMOR_LEATHER = 2
ITEM_SUBCLASS_ARMOR_MAIL = 3
ITEM_SUBCLASS_ARMOR_PLATE = 4
ITEM_SUBCLASS_WEAPON_BOW = 2
ITEM_SUBCLASS_WEAPON_GUN = 3
ITEM_SUBCLASS_WEAPON_THROWN = 16
ITEM_SUBCLASS_WEAPON_CROSSBOW = 18
ITEM_SUBCLASS_WEAPON_WAND = 19
INVTYPE_CHEST = 5
INVTYPE_WEAPON = 13
INVTYPE_SHIELD = 14
INVTYPE_RANGED = 15
INVTYPE_2HWEAPON = 17
INVTYPE_ROBE = 20
INVTYPE_WEAPONMAINHAND = 21
INVTYPE_WEAPONOFFHAND = 22
INVTYPE_THROWN = 25
INVTYPE_RANGEDRIGHT = 26
qualities = ("poor", "common", "uncommon", "rare", "epic", "legendary", "artifact")
types = ("no_armor", "cloth", "leather", "mail", "plate")
def getDamageDBC(subcategory, slot, flags):
if slot in (INVTYPE_WEAPON, INVTYPE_WEAPONMAINHAND, INVTYPE_WEAPONOFFHAND):
if flags & 0x200:
return "ItemDamageOneHandCaster"
return "ItemDamageOneHand"
if slot == INVTYPE_2HWEAPON:
if flags & 0x200:
return "ItemDamageTwoHandCaster"
return "ItemDamageTwoHand"
if slot in (INVTYPE_RANGED, INVTYPE_THROWN, INVTYPE_RANGEDRIGHT):
if subcategory in (ITEM_SUBCLASS_WEAPON_BOW, ITEM_SUBCLASS_WEAPON_GUN, ITEM_SUBCLASS_WEAPON_CROSSBOW):
return "ItemDamageRanged"
if subcategory == ITEM_SUBCLASS_WEAPON_THROWN:
return "ItemDamageThrown"
if subcategory == ITEM_SUBCLASS_WEAPON_WAND:
return "ItemDamageWand"
def getDamage(level, category, subcategory, quality, slot, flags, speed):
if not (1 <= level <= 1000):
return 0, 0
if category != ITEM_CLASS_WEAPON:
dps = 0.0
if quality >= ITEM_QUALITY_HEIRLOOM:
return 0, 0
dbc = getDamageDBC(subcategory, slot, flags)
if not dbc:
dps = 0.0
else:
dbc = wdbc.get(dbc, build=-1)
dps = getattr(dbc[level], qualities[quality])
min = int(floor(dps * speed / 1000 * 0.7 + 0.5))
max = int(floor(dps * speed / 1000 * 1.3 + 0.5))
return min, max
def getArmor(level, category, subcategory, quality, slot):
if quality >= ITEM_QUALITY_HEIRLOOM:
return 0
if not (1 <= level <= 1000):
return 0
if slot == INVTYPE_SHIELD:
dbc = wdbc.get("ItemArmorShield", build=-1)
total = getattr(dbc[level], qualities[quality])
return int(floor(total + 0.5))
if slot == INVTYPE_ROBE:
slot = INVTYPE_CHEST
if category != ITEM_CLASS_ARMOR or not slot:
return 0
if subcategory not in (ITEM_SUBCLASS_ARMOR_CLOTH, ITEM_SUBCLASS_ARMOR_LEATHER, ITEM_SUBCLASS_ARMOR_MAIL, ITEM_SUBCLASS_ARMOR_PLATE):
return 0
ArmorLocation = wdbc.get("ArmorLocation", build=-1)
ItemArmorTotal = wdbc.get("ItemArmorTotal", build=-1)
ItemArmorQuality = wdbc.get("ItemArmorQuality", build=-1)
total = getattr(ItemArmorTotal[level], types[subcategory])
_quality = getattr(ItemArmorQuality[level], qualities[quality])
_slot = getattr(ArmorLocation[slot], types[subcategory])
return int(floor(total * _quality * _slot + 0.5))
```
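The weapon-damage spread computed in `getDamage()` above is simply a ±30% band around DPS times weapon speed; a tiny worked example with an invented DPS value (the real value comes from the `ItemDamage*` DBC row):

```python
# Worked example of the min/max band used in getDamage(); the DPS value is invented.
from math import floor

dps = 95.4      # hypothetical DPS read from the DBC row for this item level/quality
speed = 2600    # weapon speed in milliseconds, as stored on the item

low = int(floor(dps * speed / 1000 * 0.7 + 0.5))   # -30%, rounded to nearest int
high = int(floor(dps * speed / 1000 * 1.3 + 0.5))  # +30%, rounded to nearest int
print(low, high)  # 174 322
```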
#### File: game/skills/__init__.py
```python
from .. import *
from ..globalstrings import *
class Skill(Model):
@classmethod
def getTypeText(self):
return {
self.MINOR: MINOR_GLYPH,
self.MAJOR: MAJOR_GLYPH,
self.PRIME: PRIME_GLYPH,
}.get(self.obj.type, "")
class SkillProxy(object):
"""
WDBC proxy for skills
"""
def __init__(self, cls):
from pywow import wdbc
self.__file = wdbc.get("SkillLine.dbc", build=-1)
self.spells = wdbc.get("SkillLineAbility.dbc", build=-1)
def get(self, id):
return self.__file[id]
def getSpells(self, row):
from ..spells import Spell
lookups = row.skilllineability__skill
return [Spell(k._raw("spell")) for k in lookups]
Skill.initProxy(SkillProxy)
```
#### File: old/docs/hstest.py
```python
def test_deathrattle_ordering_sylv(board):
swdeath = board.player1.addToHand("Shadow Word: Death")
sylvanas = board.player1.addToField("Sylvanas Windrunner")
cultmaster = board.player2.addToField("Cult Master")
board.player1.beginTurn()
board.player1.playCard(swdeath, target=sylvanas)
board.commit()
# Check player2 did not draw any cards
assert len(board.player2.cards) == 0
def test_tracking_zero_cards(board):
tracking = board.player1.addToDeck("Tracking")
board.player1.beginTurn()
assert board.player1.hand == [tracking]
board.player1.playCard(tracking)
board.commit()
assert board.player1.fatigueCounter == 0
```
#### File: wdbc/structures/structure.py
```python
from enum import _EnumDict
from collections import OrderedDict
from .fields import Field
_magic_register = {}
_name_register = {}
class StructureNotFound(Exception):
pass
class StructureMeta(type):
"""
Metaclass for Structure
"""
@classmethod
def __prepare__(metacls, cls, bases):
return OrderedDict()
def __new__(cls, name, bases, clsdict):
c = type.__new__(cls, name, bases, clsdict)
c._orderedKeys = clsdict.keys()
return c
class Structure(metaclass=StructureMeta):
    def __init__(self):
        # Collect Field instances in declaration order (see StructureMeta)
        self.skeleton = []
        for key in self._orderedKeys:
            attr = getattr(self, key, None)
            if isinstance(attr, Field):
                self.skeleton.append(attr)
def __iter__(self):
for key in self._orderedKeys:
if not key.startswith("__"):
yield key
def items(self):
for key in self._orderedKeys:
if not key.startswith("__"):
yield key, getattr(self, key)
def values(self):
for key in self._orderedKeys:
if not key.startswith("__"):
yield getattr(self, key)
def register(name, magic=None):
    def dec(cls):
        if magic:
            _magic_register[magic] = cls
        _name_register[name] = cls
        return cls
    return dec
def get_structure(name=None, magic=None):
ret = None
if magic:
if magic != "WDBC":
from . import wdb
ret = _magic_register.get(magic)
if name and not ret:
ret = _name_register.get(name)
if not ret:
raise StructureNotFound
return ret
```
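How the registration machinery above is intended to be used — declare a `Structure` subclass, register it under a name (and optionally a file magic), then look it up by either key. The structure name, magic and field below are invented, and the field constructor signature is assumed from `fields.py`:

```python
# Hypothetical use of register()/get_structure(); names and magic are invented.
from .fields import IDField  # assumed to be importable from the sibling fields module

@register("ExampleCache", magic="XXXX")
class ExampleCache(Structure):
    _id = IDField()

struct_cls = get_structure(name="ExampleCache")  # lookup by structure name
assert struct_cls is ExampleCache
struct_cls = get_structure(magic="XXXX")         # or by the file's magic bytes
```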
#### File: wdbc/tests/__init__.py
```python
import sys
import unittest
sys.path.append("../../..")
from wow.wdbc import read_file
class WDBTestCase(unittest.TestCase):
def test_pagetextcache(self):
testfile = "/home/adys/src/git/wow/wdbc/tests/pagetextcache.wdb"
f = read_file(testfile)
self.assertEqual(f.header.magic, "XTPW")
self.assertEqual(len(f._offsets), 48)
self.assertEqual(f[4452].text, "Darkmoon Faire Bill of Sale\r\n\r\n6x Super-effective Gnoll Decoy*\r\n60g 20s 300c")
    def test_itemtextcache(self):
testfile = "/home/adys/src/git/wow/wdbc/tests/itemtextcache.wdb"
f = read_file(testfile)
self.assertEqual(f.header.magic, "XTIW")
self.assertEqual(len(f._offsets), 1)
print(f.keys())
self.assertEqual(f[4452].text, "Darkmoon Faire Bill of Sale\r\n\r\n6x Super-effective Gnoll Decoy*\r\n60g 20s 300c")
if __name__ == "__main__":
unittest.main()
```
#### File: common/templatetags/extratags.py
```python
import re
from datetime import timedelta
from django import template
from django.utils import html, safestring
from django.template.defaultfilters import stringfilter
from math import ceil
register = template.Library()
@register.simple_tag
def sitenav():
return """<ul id="sitenav">
<li><a href="/" rel="sigrie" class="">Database</a></li>
<li>» <a href="/items" rel="items">Items</a></li>
<li>» <a href="/items/9" rel="items_9" class="">Recipe</a></li>
<li>» <a href="/items/9/2" rel="items_9_2">Tailoring</a></li>
</ul>
"""
def esc(text, autoescape):
if autoescape:
return html.conditional_escape(text)
return text
@register.filter
def colorinline(value, autoescape=None):
pattern = r"\|c([0-9a-f]{8})(.+)\|r"
sre = re.search(pattern, value, re.IGNORECASE)
if not sre:
return value
color, text = sre.groups()
output = '<span style="color:#%s;">%s</span>' % (color[2:], esc(text, autoescape))
output = "".join([value[:sre.start()], output, value[sre.end():]])
return safestring.mark_safe(output)
colorinline.needs_autoescape = True
@register.filter
def genderinline(value, autoescape=None):
if not value.find("$"):
return value
pattern = r"\$(G|g)\s?([^:]+):([^;]+);"
sre = re.search(pattern, value)
if not sre:
return value
char, male, female = sre.groups()
output = '<%s/%s>' % (esc(male.strip(), autoescape), esc(female.strip(), autoescape))
output = "".join([esc(value[:sre.start()], autoescape), output, esc(value[sre.end():], autoescape)])
return safestring.mark_safe(output)
genderinline.needs_autoescape = True
DURATIONS_DEFAULT = {
"second": "second",
"seconds": "seconds",
"minute": "minute",
"minutes": "minutes",
"hour": "hour",
"hours": "hours",
"day": "day",
"days": "days",
}
DURATIONS_SHORT = {
"second": "sec",
"seconds": "sec",
"minute": "min",
"minutes": "min",
"hour": "hour",
"hours": "hrs",
"day": "day",
"days": "days",
}
DURATIONS_SHORTCAP = {
"second": "Sec",
"seconds": "Sec",
"minute": "Min",
"minutes": "Min",
"hour": "Hr",
"hours": "Hr",
"day": "Day",
"days": "Days",
}
@register.filter
def duration(value, locales=DURATIONS_DEFAULT):
if not isinstance(value, timedelta):
if value < 0: value = 0
value = timedelta(microseconds=value)
if value == timedelta(seconds=1):
return "1 %s" % (locales["second"])
elif value < timedelta(minutes=1):
return "%.3g %s" % (value.seconds+float(value.microseconds)/1000000, locales["seconds"])
elif value < timedelta(hours=1):
return "%.3g %s" % (value.seconds / 60, value.seconds >= 120 and locales["minutes"] or locales["minute"])
elif value < timedelta(days=1):
return "%d %s" % (ceil(value.seconds / 3600.0), value.seconds > 3600 and locales["hours"] or locales["hour"])
else:
return "%.3g %s" % (value.days, value.days > 1 and locales["days"] or locales["day"])
duration.is_safe = True
@register.filter
def duration_short(value):
return duration(value, DURATIONS_SHORT)
@register.filter
def duration_shortcap(value):
return duration(value, DURATIONS_SHORTCAP)
PRICE_TEMPLATE = '<span class="%(letter)s">%(amt)i<span class="price-hidden">%(letter)s</span></span>'
@register.filter
def price(value, autoescape=None):
value = int(value)
if not value:
g, s, c = 0, 0, 0
else:
g = divmod(value, 10000)[0]
s = divmod(value, 100)[0] % 100
c = value % 100
output = '<span class="price">%s %s %s</span>' % (
g and PRICE_TEMPLATE % {"amt": g, "letter": "g", "alt": "Gold"} or "",
s and PRICE_TEMPLATE % {"amt": s, "letter": "s", "alt": "Silver"} or "",
c and PRICE_TEMPLATE % {"amt": c, "letter": "c", "alt": "Copper"} or "",
)
return safestring.mark_safe(output)
price.needs_autoescape = True
@register.filter
def mapify(locations, autoescape=None):
locations = locations.filter(x__gt=0, y__gt=0).select_related()
if not locations.count():
return ""
html_base = """
<div id="map-container"></div>
<script type="text/javascript">
%s
maplib.renderMaps([%s])
</script>
"""
html_vars = """
var %s = {
name: %r,
file: %r,
nodes: %r
}
"""
ret = {}
for location in locations.all():
key = "<KEY> (location.zone_id, abs(hash(location.zone.map)))
if key not in ret:
map = str(location.zone.map)
if location.floor:
map += str(location.floor)
ret[key] = (str(location.zone.name), map, [])
ret[key][2].append([location.x, location.y])
vars_list = []
for k in ret:
vars_list.append(html_vars % (k, ret[k][0], ret[k][1], ret[k][2]))
vars_html = "\n".join(vars_list)
return html_base % (vars_html, ",".join(ret.keys()))
mapify.needs_autoescape = True
@register.filter
def supermark(value):
if isinstance(value, float):
return "%+f" % value
else:
return "%+i" % int(value)
supermark.is_safe = True
@register.filter
def url(value, text="", autoescape=None):
url = hasattr(value, "get_absolute_url") and value.get_absolute_url()
if url:
classes = (hasattr(value, "get_htclasses") and ' class="%s"' % (value.get_htclasses())) or ""
html = '<a href="%s"%s>%s</a>' % (url, classes, esc(str(text or value), autoescape=True))
return safestring.mark_safe(html)
text = text or value
try:
return esc(str(text), autoescape=True)
except UnicodeError:
return text.encode("ascii", "ignore")
url.needs_autoescape = True
@register.filter
def icon(value, arg=64, autoescape=None):
try:
arg = int(arg)
except ValueError: # Invalid literal for int()
return value # Fail silently
BASE_URL = "http://db.mmo-champion.com"
url = hasattr(value, "get_absolute_url") and value.get_absolute_url()
if not url:
return safestring.mark_safe(value)
else:
icon = value.icon or "temp"
value = esc(str(value), autoescape)
return safestring.mark_safe('<a href="%s" class="iconinline"><img src="http://static.mmo-champion.com/db/img/icons/%s.png" alt="%s" width="%i" height="%i"/></a>' % (url, icon, value, arg, arg))
icon.needs_autoescape = True
@register.filter
def iconize(value, arg="small", autoescape=None):
if arg == "large":
size = 40
else:
size = 16
_icon = icon(value, size)
_url = url(value)
return safestring.mark_safe('<div class="iconized-%s"><div class="icon">%s</div> <span>%s</span></div>' % (arg, _icon, _url))
iconize.needs_autoescape = True
@register.filter
def screenshot(value, autoescape=None):
if not value:
return ""
screenshot = value[0]
url = screenshot.get_absolute_url()
# Don't give it a size as its dynamic
return safestring.mark_safe('<a id="screenshot-thumbnail" href="%s.jpg"><img src="%s.thumbnail.jpg" alt="%s"/></a>' % (url, url, screenshot.caption))
screenshot.needs_autoescape = True
@register.filter
def tooltip(obj, paperdoll, autoescape=None):
return safestring.mark_safe(obj.tooltip(paperdoll))
tooltip.needs_autoescape = True
@register.filter
def str_repr(value):
value = str(value)
return repr(value)
@register.filter
def verbose_name(cls):
return cls()._meta.verbose_name
@register.filter
def verbose_name_plural(cls):
return cls()._meta.verbose_name_plural
@register.filter
@stringfilter
def truncate(value, arg):
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
    if len(value) > length:
        value = value[:length]
if not value.endswith("..."):
value += "..."
return value
@register.tag
def sigrielisting(parser, token):
try:
cls, iterable = token.split_contents()[1:]
iterable = parser.compile_filter(iterable)
except ValueError:
        raise template.TemplateSyntaxError("%s tag requires two arguments" % token.split_contents()[0])
return SigrieListing(cls, iterable)
class SigrieListing(template.Node):
def __init__(self, cls, iterable):
self.cls = cls
self.iterable = iterable
def render(self, context):
from sigrie.owdb import listings
cls = getattr(listings, self.cls)
iterable = self.iterable.resolve(context)
return cls(iterable).render()
```
#### File: sigrie/items/views.py
```python
from django.http import Http404
from django.shortcuts import render
from pywow.game.tooltips import HtmlRenderer
from pywow.game.items import Item
def listing(request):
return render(request, "common/listing.html")
def single(request, id):
id = int(id)
obj = Item(id)
tooltip = obj.tooltip(HtmlRenderer)
return render(request, "spells/single.html", {"obj": obj, "tooltip": tooltip})
```
#### File: pywow/wdbc/main.py
```python
from cStringIO import StringIO
from struct import pack, unpack, error as StructError
from .log import log
from .structures import fields
class DBFile(object):
"""
Base class for WDB and DBC files
"""
@classmethod
def open(cls, file, build, structure, environment):
if isinstance(file, basestring):
file = open(file, "rb")
instance = cls(file, build, environment)
instance._readHeader()
instance.setStructure(structure)
instance._rowDynamicFields = 0 # Dynamic fields index, used when parsing a row
instance._readAddresses()
return instance
def __init__(self, file=None, build=None, environment=None):
self._addresses = {}
self._values = {}
self.file = file
self.build = build
self.environment = environment
def __repr__(self):
return "%s(file=%r, build=%r)" % (self.__class__.__name__, self.file, self.build)
def __contains__(self, id):
return id in self._addresses
def __getitem__(self, item):
if isinstance(item, slice):
keys = sorted(self._addresses.keys())[item]
return [self[k] for k in keys]
if item not in self._values:
self._parse_row(item)
return self._values[item]
def __setitem__(self, item, value):
if not isinstance(item, int):
raise TypeError("DBFile indices must be integers, not %s" % (type(item)))
if isinstance(value, DBRow):
self._values[item] = value
self._addresses[item] = -1
else:
# FIXME technically we should allow DBRow, but this is untested and will need resetting parent
raise TypeError("Unsupported type for DBFile.__setitem__: %s" % (type(value)))
def __delitem__(self, item):
if item in self._values:
del self._values[item]
del self._addresses[item]
def __iter__(self):
return self._addresses.__iter__()
def __len__(self):
return len(self._addresses)
def _add_row(self, id, address, reclen):
if id in self._addresses: # Something's wrong here
log.warning("Multiple instances of row %r found in %s" % (id, self.file.name))
self._addresses[id] = (address, reclen)
def _parse_field(self, data, field, row=None):
"""
Parse a single field in stream.
"""
if field.dyn > self._rowDynamicFields:
return None # The column doesn't exist in this row, we set it to None
ret = None
try:
if isinstance(field, fields.StringField):
ret = self._parse_string(data)
elif isinstance(field, fields.DataField): # wowcache.wdb
length = getattr(row, field.master)
ret = data.read(length)
elif isinstance(field, fields.DynamicMaster):
ret, = unpack("<I", data.read(4))
self._rowDynamicFields = ret
else:
ret, = unpack("<%s" % (field.char), data.read(field.size))
except StructError:
log.warning("Field %s could not be parsed properly" % (field))
ret = None
return ret
def supportsSeeking(self):
return hasattr(self.file, "seek")
def append(self, row):
"""
Append a row at the end of the file.
If the row does not have an id, one is automatically assigned.
"""
i = len(self) + 1 # FIXME this wont work properly in incomplete files
if "_id" not in row:
row["_id"] = i
self[i] = row
def clear(self):
"""
Delete every row in the file
"""
for k in self.keys(): # Use key, otherwise we get RuntimeError: dictionary changed size during iteration
del self[k]
def keys(self):
return self._addresses.keys()
def items(self):
return [(k, self[k]) for k in self]
def parse_row(self, data, reclen=0):
"""
Assign data to a DBRow instance
"""
return DBRow(self, data=data, reclen=reclen)
def values(self):
"""
Return a list of the file's values
"""
return [self[id] for id in self]
def setRow(self, key, **values):
self.__setitem__(key, DBRow(self, columns=values))
def size(self):
if hasattr(self.file, "size"):
return self.file.size()
elif isinstance(self.file, file):
from os.path import getsize
return getsize(self.file.name)
raise NotImplementedError
def update(self, other):
"""
Update file from iterable other
"""
for k in other:
self[k] = other[k]
def write(self, filename=""):
"""
Write the file data on disk. If filename is not given, use currently opened file.
"""
_filename = filename or self.file.name
data = self.header.data() + self.data() + self.eof()
f = open(_filename, "wb") # Don't open before calling data() as uncached rows would be empty
f.write(data)
f.close()
log.info("Written %i bytes at %s" % (len(data), f.name))
if not filename: # Reopen self.file, we modified it
# XXX do we need to wipe self._values here?
self.file.close()
self.file = open(f.name, "rb")
class DBRow(list):
"""
A database row.
Names of the variables of that class should not be used in field names of structures
"""
initialized = False
def __init__(self, parent, data=None, columns=None, reclen=0):
self._parent = parent
self._values = {} # Columns values storage
self.structure = parent.structure
self.initialized = True # needed for __setattr__
if columns:
if type(columns) == list:
self.extend(columns)
elif type(columns) == dict:
self._default()
_cols = [k.name for k in self.structure]
for k in columns:
try:
self[_cols.index(k)] = columns[k]
except ValueError:
log.warning("Column %r not found" % (k))
elif data:
dynfields = 0
data = StringIO(data)
for field in self.structure:
_data = parent._parse_field(data, field, self)
self.append(_data)
if reclen:
real_reclen = reclen + self._parent.row_header_size
if data.tell() != real_reclen:
log.warning("Reclen not respected for row %r. Expected %i, read %i. (%+i)" % (self.id, real_reclen, data.tell(), real_reclen-data.tell()))
def __dir__(self):
result = self.__dict__.keys()
result.extend(self.structure.column_names)
return result
def __getattr__(self, attr):
if attr in self.structure:
return self._get_value(attr)
if attr in self.structure._abstractions: # Union abstractions etc
field, func = self.structure._abstractions[attr]
return func(field, self)
if "__" in attr:
return self._query(attr)
return super(DBRow, self).__getattribute__(attr)
def __int__(self):
return self.id
def __setattr__(self, attr, value):
# Do not preserve the value in DBRow! Use the save method to save.
if self.initialized and attr in self.structure:
self._set_value(attr, value)
return super(DBRow, self).__setattr__(attr, value)
def __setitem__(self, index, value):
if not isinstance(index, int):
raise TypeError("Expected int instance, got %s instead (%r)" % (type(index), index))
list.__setitem__(self, index, value)
col = self.structure[index]
self._values[col.name] = col.to_python(value, row=self)
def _get_reverse_relation(self, table, field):
"""
Return a list of rows matching the reverse relation
"""
if not hasattr(self._parent, "_reverse_relation_cache"):
self._parent._reverse_relation_cache = {}
cache = self._parent._reverse_relation_cache
tfield = table + "__" + field
if tfield not in cache:
cache[tfield] = {}
# First time lookup, let's build the cache
table = self._parent.environment.dbFile(table)
for row in table:
row = table[row]
id = row._raw(field)
if id not in cache[tfield]:
cache[tfield][id] = []
cache[tfield][id].append(row)
return cache[tfield].get(self.id, None)
def _matches(self, **kwargs):
for k, v in kwargs.items():
if not self._query(k, v):
return False
return True
def _query(self, rel, value=None):
"""
Parse a django-like multilevel relationship
"""
rels = rel.split("__")
if "" in rels: # empty string
raise ValueError("Invalid relation string")
first = rels[0]
if not hasattr(self, first):
if self._parent.environment.hasDbFile(first):
# Handle reverse relations, eg spell__item for item table
remainder = rel[len(first + "__"):]
return self._get_reverse_relation(first, remainder)
raise ValueError("Invalid relation string")
ret = self
rels = rels[::-1]
special = {
"contains": lambda x, y: x in y,
"exact": lambda x, y: x == y,
"icontains": lambda x, y: x.lower() in y.lower(),
"iexact": lambda x, y: x.lower() == y.lower(),
"gt": lambda x, y: x > y,
"gte": lambda x, y: x >= y,
"lt": lambda x, y: x < y,
"lte": lambda x, y: x <= y,
}
while rels:
if rels[-1] in special:
if len(rels) != 1:
# icontains always needs to be the last piece of the relation string
raise ValueError("Invalid relation string")
return special[rels[-1]](value, ret)
else:
ret = getattr(ret, rels.pop())
return ret
def _set_value(self, name, value):
index = self.structure.index(name)
col = self.structure[index]
self._values[name] = col.to_python(value, self)
self[index] = value
def _get_value(self, name):
if name not in self._values:
raw_value = self[self.structure.index(name)]
self._set_value(name, raw_value)
return self._values[name]
def _raw(self, name):
"""
Returns the raw value from field 'name'
"""
index = self.structure.index(name)
return self[index]
def _save(self):
for name in self._values:
index = self.structure.index(name)
col = self.structure[index]
self[index] = col.from_python(self._values[name])
def _field(self, name):
"""
Returns the field 'name'
"""
index = self.structure.index(name)
return self.structure[index]
def _default(self):
"""
Change all fields to their default values
"""
del self[:]
self._values = {}
for col in self.structure:
char = col.char
if col.dyn:
self.append(None)
elif char == "s":
self.append("")
elif char == "f":
self.append(0.0)
else:
self.append(0)
def dict(self):
"""
Return a dict of the row as colname: value
"""
return dict(zip(self.structure.column_names, self))
def update(self, other):
for k in other:
self[k] = other[k]
@property
def id(self):
"Temporary hack to transition between _id and id"
return self._id
```
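The `_query` method of DBRow above parses django-style relation strings and dispatches the trailing suffix through its `special` table. Below is a standalone sketch of that dispatch, not part of the library: the dict-based row and the item name are made up, and note that the comparison operators receive `(value, field_value)` in that order, mirroring the code above.
```python
# Standalone sketch of the lookup suffixes handled by DBRow._query above.
special = {
    "contains": lambda x, y: x in y,
    "icontains": lambda x, y: x.lower() in y.lower(),
    "exact": lambda x, y: x == y,
    "gt": lambda x, y: x > y,
}

def query(row, rel, value):
    rels = rel.split("__")
    op = rels[-1]
    ret = row
    for name in rels[:-1]:
        ret = ret[name]
    # Argument order matches DBRow._query: (value, field_value).
    return special[op](value, ret)

row = {"name": "Thunderfury", "quality": 5}
print(query(row, "name__icontains", "fury"))  # True
print(query(row, "quality__gt", 3))           # False: evaluates 3 > 5
```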
#### File: wdbc/structures/fields.py
```python
from structures.fields import *
##
# Core custom types for WDB/DBC files
#
class IDField(IntegerField):
"""
Integer field containing the row's ID
"""
def __init__(self, name="_id"):
IntegerField.__init__(self, name=name, primary_key=True)
class RecLenField(IntegerField):
"""
Integer field containing the length of the row from itself
"""
def __init__(self, name="_reclen"):
IntegerField.__init__(self, name=name)
class LocalizedField(Field):
"""
Localized StringField.
Structure handled at wdbc.structures.LocalizedStringField
"""
pass
##
# Dynamic types
#
class DynamicFieldsBase(list):
def get_fields(self):
return self
def delete_field(self, name):
"""
Delete a field, by name or by instance
"""
if isinstance(name, basestring):
for index, field in enumerate(self):
if field.name == name:
del self[index]
break
else:
for index, field in enumerate(self):
if isinstance(field, name.__class__):
del self[index]
break
class DynamicMaster(IntegerField):
"""
Master field for dynamic columns, determining how many will be present.
"""
pass
class DynamicFields(DynamicFieldsBase):
"""
A dynamic column master, followed by the full list of dynamic columns.
Used in itemcache.wdb
DynamicFields("name", [((Field, "x"), (Field, "y"), ...), 10])
"""
def __init__(self, name, columns):
self.name = name
self.master = DynamicMaster(name, group=self)
self.append(self.master)
cols, amt = columns
for i in xrange(amt):
self.append([v[0](name="%s_%s_%i" % (name, v[1], i+1), dynamic=i+1, group=self) for v in cols])
def get_fields(self):
yield self.master
for v in self[1:]:
for f in v:
yield f
class SubRow(object):
"""
Used in Unions as a fake DBRow
"""
def __init__(self, field, row, structure):
self.__field = field
self.__row = row
self._structure = structure(row._parent.build, row._parent)
def __dir__(self):
result = self.__dict__.keys()
result.extend(self._structure.column_names)
return result
def __getattr__(self, name):
if name in self._structure:
index = self._structure.index(name)
value = self._raw(name)
return self._structure[index].to_python(value, self.__row)
return super(SubRow, self).__getattribute__(name)
def _raw(self, name):
index = self._structure.index(name)
real_name = self.__field.column_names[index]
return getattr(self.__row, real_name)
class Union(DynamicFieldsBase):
"""
Imitates a C++ union.
Takes a name argument and field_1, ... field_n fields to
populate the default union.
Required get_structure(x, row) callable argument that
returns the structure corresponding to a specific row.
"""
def __init__(self, name, fields, get_structure):
DynamicFieldsBase.__init__(self, fields)
self.name = name
if not callable(get_structure):
raise StructureError("%s._get_structure must be a callable type" % (self.__class__.__name__))
self._get_structure = get_structure
self.column_names = [k.name for k in fields]
def __build_list(self, field, row):
"Builds a fake DBRow to allow deep attribute seeking"
return SubRow(field, row, self._get_structure(row))
def get_abstraction(self):
return self.name, self.__build_list
def get_structure(self, row):
return self._get_structure(row)
class MultiField(DynamicFieldsBase):
"""
Expands a list of fields to a specific amount
"""
def __init__(self, name, fields, amount):
super(MultiField, self).__init__(fields)
self.name = name # required by get_abstraction below
def __build_list(self):
pass
def get_abstraction(self):
return self.name, self.__build_list
##
# Relations
#
class UnresolvedObjectRef(int):
def __repr__(self):
return "<%s: %d>" % (self.__class__.__name__, int(self))
class RelationError(Exception):
pass
class UnresolvedTable(RelationError):
pass
class UnresolvedKey(RelationError):
pass
class ForeignKeyBase(IntegerField):
"""
Base class for ForeignKeys
"""
def from_python(self, value): # FIXME use isinstance(DBFile) instead
if isinstance(value, int) or isinstance(value, long):
return value
pk = value.structure.primary_keys[0] # TODO: what about multiple primary keys ?
index = value.structure.index(pk.name)
return value[index]
def to_python(self, value, row):
if isinstance(value, int):
self.raw_value = value
f = self.relationTable(value)
key = self.relationKey(value, row)
try:
value = f[key]
except KeyError:
# If the key is 0 and is not in the target table, we assume it's meant to be empty
if key == 0:
value = None
else:
raise UnresolvedKey("Key %r does not exist in %s" % (key, f.structure.name()))
return self.get_final_value(value, row)
return value
def relationTable(self, value):
"""
Return the forward relation "table" (file) in the Environment
"""
environment = self.parent.parent.environment
relation = self.relation(value)
try:
return environment.dbFile(relation)
except KeyError:
raise UnresolvedTable("Table %r does not exist in the current environment" % (relation))
def get_final_value(self, value, row):
return value
def relation(self, value):
raise NotImplementedError("Subclasses must implement this method")
def relationKey(self, value, row):
raise NotImplementedError("Subclasses must implement this method")
class ForeignKey(ForeignKeyBase):
"""
Integer link to another table's primary key.
Relation required.
"""
def __init__(self, name, relation):
IntegerField.__init__(self, name)
self._relation = relation
def relation(self, value):
return self._relation
def relationKey(self, value, row):
return value
class ForeignMask(BitMaskField):
"""
Integer field containing a bitmask relation to
multiple rows in another file.
"""
def __init__(self, name, relation, **kwargs):
super(ForeignMask, self).__init__(name=name, **kwargs)
self._relation = relation
self.flags = {}
def __init_flags(self):
env = self.parent.parent.environment
try:
f = env.dbFile(self._relation)
except KeyError:
raise UnresolvedTable("Relation %r does not exist in the current environment" % (self._relation), value)
for k in f:
self.flags[2 ** (k-1)] = f[k]
def from_python(self, value):
assert isinstance(value, BitFlags)
return int(value)
def to_python(self, value, row):
if isinstance(value, BitFlags):
return value
if not self.flags:
self.__init_flags()
return BitMask(value, self.flags)
class ForeignByte(ForeignKey):
"""
This is a HACK
"""
char = "b"
size = 1
class GenericForeignKey(ForeignKeyBase):
def __init__ (self, name="", get_relation=None, get_value=lambda x, value: value):
IntegerField.__init__(self, name)
if not callable(get_relation):
raise FieldError("%s._get_relation must be a callable type" % (self.__class__.__name__))
self._get_relation = get_relation
self._get_value = get_value
def relation(self, value):
return self._get_relation(self, value)
def relationKey(self, value, row):
return self._get_value(self, value)
class ForeignCell(ForeignKeyBase):
"""
Like a ForeignKey, but returns a specific cell
from the relation. Requires both a get_column
and a get_row method.
"""
def __init__(self, name, relation, get_column, get_row):
IntegerField.__init__(self, name)
self._relation = relation
self.get_column = get_column
self.get_row = get_row
def get_final_value(self, value, row):
column = self.get_column(row, self.raw_value)
if column:
return getattr(value, column)
return self.raw_value
def relationKey(self, value, row):
return self.get_row(row, self.raw_value)
def relation(self, value):
return self._relation
##
# Misc. types
#
class UnknownField(IntegerField):
pass
class ColorField(UnsignedIntegerField):
pass
class MoneyField(UnsignedIntegerField):
pass
class FilePathField(StringField):
pass
class GUIDField(BigIntegerField):
pass
class HashField(Field):
char = "16s"
size = 16
class DataField(Field):
char = "s"
def __init__(self, name, master):
Field.__init__(self, name=name)
self.master = master
```
#### File: wdbc/structures/generated.py
```python
from ..structures import Skeleton, Structure, UnknownField, IDField
class GeneratedStructure(Structure):
"""Dynamically generated DBC structure."""
def __init__(self, structure_string, *pargs, **kwargs):
columns = []
fields = [IDField()] + [UnknownField() for s in structure_string][1:]
self.fields = Skeleton(*fields)
Structure.__init__(self)
```
#### File: wdbc/structures/__init__.py
```python
from structures import Structure, Skeleton
from .fields import *
from .main import *
from .generated import GeneratedStructure
class StructureNotFound(Exception):
pass
class StructureLoader():
wowfiles = None
@classmethod
def setup(cls):
if cls.wowfiles is None:
cls.wowfiles = {}
for name in globals():
try:
if not issubclass(globals()[name], Structure):
continue
except TypeError:
continue
cls.wowfiles[name.lower()] = globals()[name]
@classmethod
def getstructure(cls, name, build=0, parent=None):
name = name.replace("-", "_")
if name in cls.wowfiles:
return cls.wowfiles[name](build, parent)
raise StructureNotFound("Structure not found for file %r" % (name))
StructureLoader.setup()
getstructure = StructureLoader.getstructure
class LocalizedStringField(Structure):
"""
Structure for the LocalizedField class
"""
fields = Skeleton(
StringField("enus"),
StringField("kokr"),
StringField("frfr"),
StringField("dede"),
StringField("zhcn"),
StringField("zhtw"),
StringField("eses"),
StringField("esmx"),
BitMaskField("locflags")
)
def changed_5595(self, fields):
fields.insert_fields((
StringField("ruru"),
StringField("unk1"),
StringField("unk2"),
StringField("unk3"),
StringField("unk4"),
StringField("unk5"),
StringField("unk6"),
StringField("unk7"),
), before="locflags")
def changed_11927(self, fields):
self.changed_5595(fields)
fields.delete_fields(
"kokr", "frfr", "dede",
"zhcn", "zhtw", "eses",
"esmx", "ruru", "unk1",
"unk2", "unk3", "unk4",
"unk5", "unk6", "unk7",
"locflags",
)
def changed_11993(self, fields):
self.changed_5595(fields)
def changed_12025(self, fields):
self.changed_11927(fields)
``` |
{
"source": "jlec/numdifftools",
"score": 2
} |
#### File: numdifftools/numdifftools/test_functions.py
```python
from __future__ import division
import numpy as np
function_names = ['cos', 'sin', 'tan',
'cosh', 'sinh', 'tanh',
'arcsinh',
'exp', 'expm1', 'exp2', 'square',
'sqrt',
'log', 'log1p', 'log10', 'log2',
'arccos', 'arcsin', 'arctan', ]
def dcos(x):
return -np.sin(x)
def ddcos(x):
return -np.cos(x)
def get_function(fun_name, n=1):
sinh, cosh, tanh = np.sinh, np.cosh, np.tanh
sin, cos, tan = np.sin, np.cos, np.tan
f_dic = dict(sinh=(sinh, cosh, sinh, cosh, sinh),
cosh=(cosh, sinh, cosh, sinh, cosh),
arccosh=(np.arccosh,
lambda x: 1./np.sqrt(x**2-1),
lambda x: -x/(x**2-1)**(1.5),
lambda x: -1./(x**2-1)**(1.5) +
3*x**2/(x**2-1)**(2.5),
),
arcsinh=(np.arcsinh,
lambda x: 1./np.sqrt(1+x**2),
lambda x: -x/(1+x**2)**(3./2),
lambda x: -1./(1+x**2)**(3./2) +
3*x**2/(1+x**2)**(5./2),
),
arctanh=(np.arctanh,
lambda x: 1./(1-x**2),
lambda x: 2*x/(1-x**2)**2,
lambda x: 2./(1-x**2)**2 +
8*x**2/(1-x**2)**3,
),
arccos=(np.arccos,
lambda x: -1./np.sqrt(1-x**2),
lambda x: -x/(1-x**2)**(3./2),
lambda x: -1./(1-x**2)**(3./2) -
3*x**2/(1-x**2)**(5./2),
),
arcsin=(np.arcsin,
lambda x: 1./np.sqrt(1-x**2),
lambda x: x/(1-x**2)**(3./2),
lambda x: 1./(1-x**2)**(3./2) +
3*x**2./(1-x**2)**(5./2),
),
square=(lambda x: x * x, # np.square,
lambda x: 2 * x,
lambda x: 2 * np.ones_like(x)) + (
lambda x: np.zeros_like(x),)*15,
exp=(np.exp,)*20,
expm1=(np.expm1,) + (np.exp,)*20,
exp2=(np.exp2,
lambda x: np.exp2(x)*np.log(2),
lambda x: np.exp2(x)*np.log(2)**2,
lambda x: np.exp2(x)*np.log(2)**3,
lambda x: np.exp2(x)*np.log(2)**4
),
arctan=(np.arctan,
lambda x: 1./(1+x**2),
lambda x: -2*x/(1+x**2)**2,
lambda x: 8.0*x**2/(1+x**2)**3 - 2./(1+x**2)**2,
lambda x: 24*x/(1+x**2)**3 - 48*x**3./(1+x**2)**4,
),
cos=(cos, dcos, ddcos, sin) * 6,
sin=(sin, np.cos, dcos, ddcos) * 6,
tan=(tan,
lambda x: 1./np.cos(x)**2,
lambda x: 2*np.tan(x)/np.cos(x)**2,
lambda x: (4*(tan(x)**2 + 1)*tan(x)**2 +
2*(tan(x)**2 + 1)**2),
lambda x: (8*(tan(x)**2 + 1)*tan(x)**3 +
16*(tan(x)**2 + 1)**2*tan(x))
),
tanh=(tanh,
lambda x: 1. / cosh(x) ** 2,
lambda x: -2 * sinh(x) / cosh(x) ** 3,
lambda x: 4*(tanh(x)/cosh(x))**2 - 2./cosh(x)**4,
lambda x: (8*(tanh(x)**2 - 1)*tanh(x)**3 +
16*(tanh(x)**2 - 1)**2*tanh(x))),
log1p=(np.log1p,
lambda x: 1. / (1+x),
lambda x: -1. / (1+x) ** 2,
lambda x: 2. / (1+x) ** 3,
lambda x: -6. / (1+x) ** 4),
log2=(np.log2,
lambda x: 1. / (x*np.log(2)),
lambda x: -1. / (x ** 2 * np.log(2)),
lambda x: 2. / (x ** 3 * np.log(2)),
lambda x: -6. / (x ** 4 * np.log(2))),
log10=(np.log10,
lambda x: 1. / (x * np.log(10)),
lambda x: -1. / (x ** 2 * np.log(10)),
lambda x: 2. / (x ** 3 * np.log(10)),
lambda x: -6. / (x ** 4 * np.log(10))),
log=(np.log,
lambda x: 1. / x,
lambda x: -1. / x ** 2,
lambda x: 2. / x ** 3,
lambda x: -6. / x ** 4),
sqrt=(np.sqrt,
lambda x: 0.5/np.sqrt(x),
lambda x: -0.25/x**(1.5),
lambda x: 1.5*0.25/x**(2.5),
lambda x: -2.5*1.5*0.25/x**(3.5)),
inv=(lambda x: 1. / x,
lambda x: -1. / x ** 2,
lambda x: 2. / x ** 3,
lambda x: -6. / x ** 4,
lambda x: 24. / x ** 5))
if fun_name == 'all':
return f_dic.keys()
funs = f_dic.get(fun_name)
fun0 = funs[0]
if n < len(funs):
return fun0, funs[n]
return fun0, None
if __name__ == '__main__':
pass
```
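get_function pairs each base function with its n-th analytic derivative (or None once the derivative table runs out). A quick numerical spot-check of one entry follows; it assumes the file above is importable as numdifftools.test_functions, matching the path in its header.
```python
import numpy as np
from numdifftools.test_functions import get_function  # assumed import path

f, df = get_function('sqrt', n=1)
x = 2.0
h = 1e-6
numeric = (f(x + h) - f(x - h)) / (2 * h)   # central-difference estimate
print(abs(numeric - df(x)) < 1e-6)          # True: analytic table agrees
```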
#### File: numdifftools/tests/test_nd_algopy.py
```python
from __future__ import division
import unittest
import numdifftools.nd_algopy as nd
import numpy as np
from numpy import pi, r_, sqrt, array
from numpy.testing import assert_array_almost_equal
from scipy import linalg, optimize, constants
_TINY = np.finfo(float).machar.tiny
# Hamiltonian
# H = sum_i(p_i^2/(2m) + 1/2 * m * w^2 * x_i^2) + sum_(i!=j)(a/|x_i - x_j|)
class classicalHamiltonian:
'''
Parameters
----------
N : scalar
number of ions in the chain
w : scalar
angular trap frequency
C : scalar
Coulomb constant times the electronic charge in SI units.
m : scalar
the mass of a single trapped ion in the chain
'''
def __init__(self):
self.N = 2
f = 1000000 # f is a scalar, it's the trap frequency
self.w = 2 * pi * f
self.C = (4 * pi * constants.epsilon_0) ** (-1) * constants.e ** 2
# C is a scalar, the Coulomb constant times the electronic charge in SI units
self.m = 39.96 * 1.66e-27
def potential(self, positionvector):
'''
positionvector is an 1-d array (vector) of length N that contains the
positions of the N ions
'''
x = positionvector
w = self.w
C = self.C
m = self.m
# First we consider the potential of the harmonic oscillator
Vx = 0.5 * m * (w ** 2) * sum(x ** 2)
# then we add the coulomb interaction:
for i, xi in enumerate(x):
for xj in x[i + 1:]:
Vx += C / (abs(xi - xj))
return Vx
def initialposition(self):
"""Defines initial position as an estimate for the minimize process."""
N = self.N
x_0 = r_[-(N - 1) / 2:(N - 1) / 2:N * 1j]
return x_0
def normal_modes(self, eigenvalues):
'''the computed eigenvalues of the matrix Vx are of the form
(normal_modes)^2 * m.'''
m = self.m
normal_modes = sqrt(eigenvalues / m)
return normal_modes
def _run_hamiltonian(verbose=True):
c = classicalHamiltonian()
if verbose:
print(c.potential(array([-0.5, 0.5])))
print(c.potential(array([-0.5, 0.0])))
print(c.potential(array([0.0, 0.0])))
xopt = optimize.fmin(c.potential, c.initialposition(), xtol=1e-10)
hessian = nd.Hessian(c.potential)
H = hessian(xopt)
true_H = np.array([[5.23748385e-12, -2.61873829e-12],
[-2.61873829e-12, 5.23748385e-12]])
error_estimate = np.NAN
if verbose:
print(xopt)
print('H', H)
print('H-true_H', np.abs(H - true_H))
# print('error_estimate', info.error_estimate)
eigenvalues = linalg.eigvals(H)
normal_modes = c.normal_modes(eigenvalues)
print('eigenvalues', eigenvalues)
print('normal_modes', normal_modes)
return H, error_estimate, true_H
class TestHessian(unittest.TestCase):
def test_run_hamiltonian(self):
H, _error_estimate, true_H = _run_hamiltonian(verbose=False)
self.assertTrue((np.abs(H - true_H) < 1e-18).all())
def test_hessian_cosIx_yI_at_I0_0I(self):
# cos(x-y), at (0,0)
def fun(xy):
return np.cos(xy[0] - xy[1])
htrue = [[-1., 1.], [1., -1.]]
methods = ['forward', ] # 'reverse']
for method in methods:
Hfun2 = nd.Hessian(fun, method=method)
h2 = Hfun2([0, 0])
# print(method, (h2-np.array(htrue)))
assert_array_almost_equal(h2, htrue)
class TestDerivative(unittest.TestCase):
# TODO: Derivative does not tackle non-finite values.
# def test_infinite_functions(self):
# def finf(x):
# return np.inf * np.ones_like(x)
# df = nd.Derivative(finf, method='forward')
# val = df(0)
# self.assert_(np.isnan(val))
def test_high_order_derivative_cos(self):
true_vals = (-1.0, 0.0, 1.0, 0.0) * 5
x = np.pi / 2 # np.linspace(0, np.pi/2, 15)
for method in ['forward', 'reverse']:
nmax = 15 if method in ['forward'] else 2
for n in range(1, nmax):
d3cos = nd.Derivative(np.cos, n=n, method=method)
y = d3cos(x)
assert_array_almost_equal(y, true_vals[n - 1])
def test_fun_with_additional_parameters(self):
'''Test for issue #9'''
def func(x, a, b=1):
return b * a * x * x * x
methods = ['reverse', 'forward']
dfuns = [nd.Jacobian, nd.Derivative, nd.Gradient, nd.Hessdiag,
nd.Hessian]
for dfun in dfuns:
for method in methods:
df = dfun(func, method=method)
val = df(0.0, 1.0, b=2)
assert_array_almost_equal(val, 0)
def test_derivative_cube(self):
'''Test for Issue 7'''
def cube(x):
return x * x * x
shape = (3, 2)
x = np.ones(shape) * 2
for method in ['forward', 'reverse']:
dcube = nd.Derivative(cube, method=method)
dx = dcube(x)
assert_array_almost_equal(list(dx.shape), list(shape),
decimal=13,
err_msg='Shape mismatch')
txt = 'First differing element %d\n value = %g,\n true value = %g'
for i, (val, tval) in enumerate(zip(dx.ravel(),
(3 * x**2).ravel())):
assert_array_almost_equal(val, tval, decimal=8,
err_msg=txt % (i, val, tval))
def test_derivative_exp(self):
# derivative of exp(x), at x == 0
for method in ['forward', 'reverse']:
dexp = nd.Derivative(np.exp, method=method)
assert_array_almost_equal(dexp(0), np.exp(0), decimal=8)
def test_derivative_sin(self):
# Evaluate the indicated (default = first)
# derivative at multiple points
for method in ['forward', 'reverse']:
dsin = nd.Derivative(np.sin, method=method)
x = np.linspace(0, 2. * np.pi, 13)
y = dsin(x)
np.testing.assert_almost_equal(y, np.cos(x), decimal=8)
def test_derivative_on_sinh(self):
for method in ['forward', ]: # 'reverse']: # TODO: reverse fails
dsinh = nd.Derivative(np.sinh, method=method)
self.assertAlmostEqual(dsinh(0.0), np.cosh(0.0))
def test_derivative_on_log(self):
x = np.r_[0.01, 0.1]
for method in ['forward', 'reverse']:
dlog = nd.Derivative(np.log, method=method)
assert_array_almost_equal(dlog(x), 1.0 / x)
class TestJacobian(unittest.TestCase):
def test_on_scalar_function(self):
def f2(x):
return x[0] * x[1] * x[2] + np.exp(x[0]) * x[1]
for method in ['forward', 'reverse']:
Jfun3 = nd.Jacobian(f2, method=method)
x = Jfun3([3., 5., 7.])
assert_array_almost_equal(x, [[135.42768462, 41.08553692, 15.]])
def test_on_vector_valued_function(self):
xdata = np.reshape(np.arange(0, 1, 0.1), (-1, 1))
ydata = 1 + 2 * np.exp(0.75 * xdata)
def fun(c):
return (c[0] + c[1] * np.exp(c[2] * xdata) - ydata) ** 2
for method in ['reverse']: # TODO: 'forward' fails
Jfun = nd.Jacobian(fun, method=method)
J = Jfun([1, 2, 0.75]) # should be numerically zero
assert_array_almost_equal(J, np.zeros(J.shape))
class TestGradient(unittest.TestCase):
def test_on_scalar_function(self):
def fun(x):
return np.sum(x ** 2)
dtrue = [2., 4., 6.]
for method in ['forward', 'reverse']: #
dfun = nd.Gradient(fun, method=method)
d = dfun([1, 2, 3])
assert_array_almost_equal(d, dtrue)
class TestHessdiag(unittest.TestCase):
def test_forward(self):
def fun(x):
return x[0] + x[1] ** 2 + x[2] ** 3
htrue = np.array([0., 2., 18.])
Hfun = nd.Hessdiag(fun)
hd = Hfun([1, 2, 3])
_error = hd - htrue
assert_array_almost_equal(hd, htrue)
def test_reverse(self):
def fun(x):
return x[0] + x[1] ** 2 + x[2] ** 3
htrue = np.array([0., 2., 18.])
Hfun = nd.Hessdiag(fun, method='reverse')
hd = Hfun([1, 2, 3])
_error = hd - htrue
assert_array_almost_equal(hd, htrue)
if __name__ == '__main__':
# _run_hamiltonian()
unittest.main()
```
#### File: numdifftools/tests/test_numdifftools_docstrings.py
```python
import doctest
import unittest
from unittest import TextTestRunner
import numdifftools
def suite():
return doctest.DocTestSuite(numdifftools.core,
optionflags=doctest.NORMALIZE_WHITESPACE)
def load_tests(loader=None, tests=None, ignore=None):
if tests is None:
return suite()
else:
tests.addTests(suite())
return tests
if __name__ == '__main__':
runner = TextTestRunner()
unittest.main(testRunner=runner)
# unittest.main(defaultTest='suite')
``` |
{
"source": "JLee151/TweetMapper",
"score": 3
} |
#### File: JLee151/TweetMapper/TweetSearcher.py
```python
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import json
import re
import csv
# ADD YOUR OWN KEYS HERE
consumer_key = ''
consumer_secret = ''
access_token = ''
access_secret = ''
file_name = "tweets.csv"
csvfile = open(file_name, "w")
csvwriter = csv.writer(csvfile)
row = [ "Title", "Content", "Lat", "Long" ]
csvwriter.writerow(row) # write the CSV header once up front
# These lists were only added for printing test tweets during development.
tweets = []
screen_names = []
content = []
lon = []
lat = []
count = 0
max_tweets = 25 # How many tweets you want here
# CALIFORNIA BASED TWEETS WITH THIS BOUNDING BOX
California_North = 42.0095169
California_South = 32.5342626
California_West = -124.415165
California_East = -114.13139260000003
class StdOutListener(StreamListener):
def on_data(self, data):
global count
if(count < max_tweets):
json_data = json.loads(data) # load up tweet's JSON data
tweets.append(json_data)
if((json_data.get("geo") is not None) and (json_data.get("text") is not None)): # check if the tweet has geo enabled and text
longitude = json_data["geo"]["coordinates"][0] # store latitude and longitude
latitude = json_data["geo"]["coordinates"][1]
if((longitude > California_South) and (longitude < California_North) and (latitude < California_East) and (latitude > California_West)): # check if tweet is in California
print "---------- " + str(count + 1) + " ----------" # print some relevant information
print str(longitude) + ", " + str(latitude)
print json_data["text"]
Title = json_data["user"]["screen_name"]
# regex for hashtags rather than just words in general -> could use, but don't have to
# Content = re.findall(r"#(\w+)", tweets[count]["text"].encode('ascii', 'ignore'))
Content = tweets[count]["text"].encode('ascii', 'ignore')
symbols = "~`!@#$%^&*()_+{}[]|\:'<>,;./?" # remove symbols just to make it easier to read
Content = Content.replace("\n", " ")
for i in range(0, len(symbols)):
Content = Content.replace(symbols[i],"")
Lat = json_data["geo"]["coordinates"][0] # IDK why I did this a second time
Long = json_data["geo"]["coordinates"][1]
row = [ Title, Content, Lat, Long ]
csvwriter.writerow(row) # write to CSV file
count += 1
return True
else:
csvfile.close()
return False
def on_error(self, status):
print status
if __name__ == '__main__':
l = StdOutListener()
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
stream = Stream(auth, l)
word = raw_input("Enter a word/hashtag you want to search for: ")
stream.filter(track=[str(word)])
``` |
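The stream listener above keeps only tweets whose coordinates fall inside the California bounding box; Twitter's `geo` field is `[latitude, longitude]`, so the first coordinate is checked against the north/south limits and the second against east/west. A small standalone version of that test, with the constants copied from above and two arbitrarily chosen test points:
```python
# Standalone sketch of the bounding-box filter used in on_data above.
CALIFORNIA_NORTH = 42.0095169
CALIFORNIA_SOUTH = 32.5342626
CALIFORNIA_WEST = -124.415165
CALIFORNIA_EAST = -114.13139260000003

def in_california(lat, lon):
    return (CALIFORNIA_SOUTH < lat < CALIFORNIA_NORTH and
            CALIFORNIA_WEST < lon < CALIFORNIA_EAST)

print(in_california(37.77, -122.42))  # True  (roughly San Francisco)
print(in_california(40.71, -74.01))   # False (roughly New York)
```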
{
"source": "jlee1-made/aws-fuzzy-finder",
"score": 2
} |
#### File: aws-fuzzy-finder/aws_fuzzy_finder/aws_utils.py
```python
import boto3
from botocore.exceptions import (
NoRegionError,
PartialCredentialsError,
NoCredentialsError,
ClientError
)
from .settings import (
SEPARATOR,
NO_REGION_ERROR,
NO_CREDENTIALS_ERROR,
WRONG_CREDENTIALS_ERROR
)
def gather_instance_data(reservations):
instances = []
for reservation in reservations:
for instance in reservation['Instances']:
if instance['State']['Name'] != 'running':
continue
# skipping not named instances
if 'Tags' not in instance:
continue
instance_data = {
'public_ip': instance.get('PublicIpAddress', ''),
'private_ip': instance['PrivateIpAddress'],
'public_dns': instance['PublicDnsName'],
'tags': instance['Tags']
}
instances.append(instance_data)
return instances
def get_tag_value(tag_name, tags):
for tag in tags:
if tag['Key'] == tag_name:
return tag['Value'].replace('"', '')
def get_aws_instances():
try:
return boto3.client('ec2').describe_instances()
except NoRegionError:
print(NO_REGION_ERROR)
exit(1)
except (PartialCredentialsError, NoCredentialsError):
print(NO_CREDENTIALS_ERROR)
exit(1)
except ClientError:
print(WRONG_CREDENTIALS_ERROR)
exit(1)
def prepare_searchable_instances(reservations, use_private_ip, use_public_dns_over_ip):
instance_data = gather_instance_data(reservations)
searchable_instances = []
for instance in instance_data:
name = get_tag_value('Name', instance['tags'])
if use_public_dns_over_ip:
ip = instance['public_dns']
elif use_private_ip:
ip = instance['private_ip']
else:
ip = instance['public_ip'] or instance['private_ip']
searchable_instances.append("{}{}{}".format(
name,
SEPARATOR,
ip
))
return searchable_instances
``` |
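gather_instance_data expects the nested shape returned by EC2 describe_instances: a list of reservations, each holding an `Instances` list. The following sketch feeds it a single fabricated reservation to show the extraction; it assumes the package is importable as aws_fuzzy_finder.aws_utils, matching the path above, and all values are made up.
```python
from aws_fuzzy_finder.aws_utils import gather_instance_data  # assumed import path

fake_reservations = [{
    "Instances": [{
        "State": {"Name": "running"},
        "Tags": [{"Key": "Name", "Value": "web-1"}],
        "PublicIpAddress": "198.51.100.7",
        "PrivateIpAddress": "10.0.0.7",
        "PublicDnsName": "ec2-198-51-100-7.compute.amazonaws.com",
    }]
}]

print(gather_instance_data(fake_reservations))
# [{'public_ip': '198.51.100.7', 'private_ip': '10.0.0.7',
#   'public_dns': 'ec2-198-51-100-7.compute.amazonaws.com',
#   'tags': [{'Key': 'Name', 'Value': 'web-1'}]}]
```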
{
"source": "jlee1-made/fixt",
"score": 4
} |
#### File: src/fixt/_fixt.py
```python
import inspect
class Factory(object):
"""Factory for test fixtures.
Fixtures (arbitrary Python objects) are made by calling 'makers'. A maker
is a callable that takes one argument, a Factory instance. A fixture is
instantiated like this:
>>> factory.window
The Factory will call the maker function to make the fixture, and cache it,
so that the next reference to .window will be the same object. Here is a
maker function for a window (note make_window is NOT a maker function
because it does not take a factory as its argument):
def window(factory):
return make_window(double_glazed=True)
Makers may reference other makers by instantiating fixtures using attribute
lookup. Here is a maker for a house, which depends on the window:
def house(factory):
return make_house(factory.window)
In this way an implicit dependency graph is declared. Then, in test code,
to make fixtures that have dependencies, it is not necessary to explicitly
construct the dependencies:
>>> factory.house
It is still possible to have control of dependencies by use of the set
method:
>>> factory.set('window', MyWindow())
>>> factory.house # uses MyWindow() from above
You should only call .set for a named fixture if it has not yet been
instantiated (if you do this an exception is raised).
Where possible the makers return "the one and only" item of that type (one
sale order, one product, etc.). This is useful because it makes it easy to
deal with relationships between the fixtures, and easy to refer to fixtures
in tests -- no need to specify which one.
When you do need more than one instance of a fixture, use the partial_copy
method.
Note: self.test is for use only as a temporary means of migrating old tests
to use this class. It is better for tests to depend on fixtures, and for
fixtures to not depend on tests.
A test is usually formed of setup code that creates fixtures, followed by
code that performs some action that may trigger a side-effect, which is
then checked using test assertion methods. So that that action only calls
as much code as is needed to trigger the side-effect, it is better to call
setup code on a separate line so that it is clear it did not trigger the
side effect. Sometimes this setup code may itself be used just for the
fixture instantiation side-effect:
def test_pushing_button_launches_missiles(self, factory):
factory.button # shouldn't launch missiles
launches = self.subscribe_to_launch_missile()
factory.button.push()
self.assert_len(launches, 1)
Some makers may customize a fixture made by another maker:
def seal(factory):
return Seal()
def broken_seal(factory):
seal = factory.seal
seal.break()
return seal
Then:
>>> assert factory.seal is factory.broken_seal
and iff factory.broken_seal is never referenced, the seal is intact. If
necessary, another maker can be defined that is unambiguously unbroken:
def intact_seal(factory):
return Seal()
Fixtures can themselves be Factory instances. This forms a tree (it would
be possible to form some other graph: not encouraged!). There is special
support for this:
1. Factory instances know their 'root' factory. This is the root of the
tree. Maker functions are called with the root factory as their argument.
2. .partial_copy() supports dotted names:
factory.partial_copy(['cr', 'products', 'sales.so'])
In addition to making new objects, sometimes you want to get hold of
objects that are already made. Usually it suffices to just refer to the
fixture again:
factory.spam # first time we referenced this -- make the object
factory.spam # same object we already made
This is very convenient in tests. However, sometimes it's not good if it's
unclear whether a reference to a fixture might actually make the object
rather than just finding it. For example, if an assertion wants to verify
that an object got created, it's no good to write:
assert factory.spam is not None
because, whether the system under test created the "spam" object or not,
we're never going to get None. Also, when there is a database involved, to
see the latest state, it may sometimes be necessary to fetch from the
database again (e.g. if there are multiple transactions involved).
fixt provides a feature to deal with this: "finder" functions are exactly
like maker functions, but are not cached by the Factory. So they are
evaluated every time they are referenced through the factory:
def found_spam(factory):
return factory.database.find_spam(factory.spam_id)
found_spam.is_finder = True
factory.add_maker("found_spam", found_spam)
factory.found_spam # calls found_spam
factory.found_spam # calls found_spam again
(typically this is done using MakerSetHelper or adapt_class rather than
directly using add_maker as above; TODO: add a decorator for use with
adapt_class to mark finder methods as such).
"""
_Missing = object()
def __init__(self, add_cleanup, test=None, root=None):
self.__dict__['_makers'] = {}
self.add_cleanup = add_cleanup
self.test = test
# Note that the tree of Factory instances is defined by self._makers.
# This class has little knowledge of that tree except for navigation
# down the tree via attribute lookup. In particular because child
# factories are just fixtures, there is no way for this class to
# navigate from child to parent to find the 'root' factory. For that
# reason self._root, a reference to the 'root' factory, is stored
# separately. It is there only so that the very top-level factory can
# be found in order to pass it to maker functions.
if root is None:
root = self
self._root = root
self._made = {}
def make_child_factory(self, makers):
return make_factory(self.add_cleanup, self.test, self._root, makers)
def add_maker(self, name, maker):
"""Add a maker.
Args:
name (string): attribute name by which the made fixture will be
available on the Factory instance
maker (callable): one-argument function that instantiates a new
fixture
"""
if name in self._makers:
raise ValueError(
'Too late to add object %s because it is already added: '
'%s' % (name, self._makers[name]))
self._makers[name] = maker
def __getattr__(self, name):
try:
maker = self._makers[name]
except KeyError:
# Raise AttributeError
getattr(type(self), name)
obj = self._made.get(name, self._Missing)
if getattr(maker, 'is_finder', False):
obj = maker(self._root)
elif obj is self._Missing:
obj = maker(self._root)
self._made[name] = obj
return obj
def force_set(self, name, value):
"""Set a fixture, regardless of whether or not it is already made.
Only use this for value objects. Otherwise it causes surprising
dependencies:
door = factory.door
factory.house
factory.set('door', my_door)
assert factory.house.door is factory.door # Fails
"""
if name not in self._makers:
raise ValueError(
'Cannot set object %s because there is no maker defined '
'for it: call factory.add_maker or fix maker name' % (name, ))
self._made[name] = value
def __setattr__(self, name, value):
if name in self.__dict__['_makers']:
raise AttributeError(
'Cannot set object %s because there a maker defined '
'for it: call factory.set(name, value) instead' % (name, ))
super(Factory, self).__setattr__(name, value)
def set(self, name, value):
if name not in self._makers:
raise ValueError(
'Cannot set object %s because there is no maker defined '
'for it: call factory.add_maker or fix maker name' % (name, ))
elif name in self._made:
raise ValueError(
'Too late to set object %s because it is already made: %s' %
(name, self._made[name]))
self._made[name] = value
def _partial_copy(self, to_copy, new_root):
new = make_factory(self.add_cleanup, self.test, new_root,
self._makers.items())
if new_root is None:
new_root = new
children_to_copy = {}
leaf_names = set()
for name in to_copy:
names = name.split('.', 1)
if len(names) == 1:
# Leaf name of requested copy. This is not necessarily a leaf
# of the tree of fixture factories: the fixture may be a
# Factory instance.
leaf_names.add(name)
fixture = getattr(self, name)
new.set(name, fixture)
else:
# Assemble names to recurse on
first, rest = names
if first in leaf_names:
# If say 'widgets' AND 'widgets.spam' is specified, ignore
# 'widgets.spam'.
continue
children_to_copy.setdefault(first, []).append(rest)
# Recurse
for name, to_copy in children_to_copy.items():
child = getattr(self, name)
if not isinstance(child, type(self)):
raise ValueError(name)
new_child = child._partial_copy(to_copy, new_root)
new.set(name, new_child)
return new
def partial_copy(self, to_copy):
"""Return a new Factory that has named fixtures to_copy already set.
This allows for constructing non-identical copies of fixtures that
still are related in some way: say when you need two windows in the
same house.
"""
# None here means make the new factory a root factory
new_root = None if self is self._root else self._root
return self._partial_copy(to_copy, new_root)
class MakerSetHelper(object):
def __init__(self):
self.makers = []
def add_maker(self, func, name=None):
if name is None:
name = func.__name__
self.makers.append((name, func))
def add_finder(self, func, name=None):
func.is_finder = True
self.add_maker(func, name)
def add_constant(self, name, const):
self.add_maker(lambda factory: const, name)
def adapt_class(class_):
"""Return function to make makers, given makers defined by a class.
Each method defines a maker function.
The returned function is suitable to pass to with_factory -- i.e. this
serves the same purpose as MakerSetHelper.
"""
return lambda: inspect.getmembers(class_(), inspect.ismethod)
def is_finder(func):
"""Mark a method as a finder method.
For use with adapt_class.
Use of is_finder is experimental!
"""
func.is_finder = True
return func
def make_factory(add_cleanup, test, root, makers):
factory = Factory(add_cleanup, test, root)
for name, maker in makers:
factory.add_maker(name, maker)
return factory
def is_test_method(obj):
return inspect.isfunction(obj) and obj.__name__.startswith('test')
def compose_make_makers(*funcs):
def composed():
makers = []
for make_makers in funcs:
makers.extend(make_makers())
return makers
return composed
def make_factory_maker(makers):
def maker(factory):
return factory.make_child_factory(makers)
return maker
def _with_factory(make_makers):
"""Return a decorator for test methods or classes.
Args:
make_makers (callable): Return an iterable over (name, maker) pairs,
where maker (callable): Return a fixture (arbitrary object) given
Factory as single argument
"""
def wrap(test_func):
def wrapper(self, *args, **kwargs):
factory = make_factory(
self.addCleanup, test=self, root=None, makers=make_makers())
return test_func(self, factory, *args, **kwargs)
return wrapper
def deco(test_func_or_class):
if inspect.isclass(test_func_or_class):
class_ = test_func_or_class
for name, method in inspect.getmembers(class_, is_test_method):
wrapped_method = wrap(method)
setattr(class_, name, wrapped_method)
return class_
else:
method = test_func_or_class
return wrap(method)
return deco
def with_factory(*make_makers_funcs):
"""Return a decorator for test methods or classes.
The decorated test method or methods should take an extra factory argument:
class Test(TransactionCase):
@with_factory(fixtures.standard)
def test_something(self, factory):
...
@with_factory(fixtures.standard)
class Test(TransactionCase):
def test_something(self, factory)
...
def test_something_else(self, factory)
...
The arguments are callables that return an iterable over (name, maker)
pairs, where:
maker (callable): Return a fixture (arbitrary object) given
Factory as single argument
"""
make_all_makers = compose_make_makers(*make_makers_funcs)
return _with_factory(make_all_makers)
```
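A compact end-to-end illustration of the maker/factory pattern described in the Factory docstring, built directly on make_factory. This is only a sketch: it assumes the module is importable as fixt._fixt (the path in this file's header), uses a no-op add_cleanup, and the window/house makers are made-up stand-ins.
```python
from fixt._fixt import make_factory  # assumed import path

def window(factory):
    return {"double_glazed": True}

def house(factory):
    # Depends on the window fixture; the factory caches it.
    return {"window": factory.window}

factory = make_factory(add_cleanup=lambda fn: None, test=None, root=None,
                       makers=[("window", window), ("house", house)])

assert factory.house["window"] is factory.window  # same cached object
```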
#### File: src/fixt/_tempdir.py
```python
import os
import shutil
import testfixtures
def force_permissions(dir_path):
for root, dirs, files in os.walk(dir_path):
for d in dirs:
os.chmod(os.path.join(root, d), 0o777)
for f in files:
os.chmod(os.path.join(root, f), 0o777)
def rmtree_forcing_permissions(dir_path):
force_permissions(dir_path)
shutil.rmtree(dir_path)
class TempDirMaker(object):
def __init__(self, add_cleanup):
self._add_cleanup = add_cleanup
def make_temp_dir(self, rmtree):
d = testfixtures.TempDirectory()
if rmtree is None:
remove = d.cleanup
else:
def remove():
rmtree(d.path)
d.instances.remove(d) # silence warning
def remove_directory():
# TempDirectory.cleanup() fails if file permissions don't allow
# removing
try:
remove()
except PermissionError:
print("Failed to clean up {}".format(d.path))
raise
self._add_cleanup(remove_directory)
return d.path
``` |
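TempDirMaker hands its removal callback to whatever add_cleanup it is given, which is normally a TestCase.addCleanup. A minimal sketch, assuming the module is importable as fixt._tempdir and testfixtures is installed, that collects the cleanups in a plain list and runs them by hand:
```python
from fixt._tempdir import TempDirMaker, rmtree_forcing_permissions  # assumed import path

cleanups = []
maker = TempDirMaker(add_cleanup=cleanups.append)
path = maker.make_temp_dir(rmtree=rmtree_forcing_permissions)
print(path)          # a fresh temporary directory

for fn in reversed(cleanups):
    fn()             # removes the directory, forcing permissions first
```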
{
"source": "jlee24/live_mapillary",
"score": 3
} |
#### File: live_mapillary/models/baseline_nearestneighbor.py
```python
import argparse
import numpy as np
import pandas as pd
import random
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
parser = argparse.ArgumentParser()
parser.add_argument('--baseline', default='random', choices=['random', 'avg'], help='Baseline to run. Classification: Random or Average of Neighbors. Regression always uses neighbor averaging, so this flag is ignored there.')
parser.add_argument('--label', default='pov_label', help='Which index to use')
parser.add_argument('--num_neighbors', type=int, default=1000, help='Number of neighboring clusters to average')
args = parser.parse_args()
df = pd.read_csv('data.csv')
# Read in cluster lists
train_clusters = [line.rstrip('\n') for line in open("train_clusters_ia.txt")]
val_clusters = [line.rstrip('\n') for line in open("val_clusters_ia.txt")]
def prepare_data():
# Split into train vs val
train_df = df.loc[ df['unique_cluster'].isin(train_clusters) ]
# emulating training on districts by dividing into north and south (25 deg)
# train_df = df.loc[df['lat'] >= 23]
val_df = df.loc[ df['unique_cluster'].isin(val_clusters) ]
print(train_df.shape)
def get_x_y(df):
lat = df['lat']
lon = df['lon']
x = [list(a) for a in zip(lat, lon)]
y = df[args.label].to_numpy()
return x, y
train_x, train_y = get_x_y(train_df)
val_x, val_y = get_x_y(val_df)
return train_x, train_y, val_x, val_y
if 'label' in args.label: # Classification task
if args.baseline == 'random':
labels = []
for cluster in val_clusters:
cluster_imgs = df.loc[df['unique_cluster'] == cluster]
if cluster_imgs.shape[0] > 0:
label = cluster_imgs[args.label].values[0]
labels.append(label)
preds = np.array([random.randint(0, 1) for i in range(len(labels))])
correct = np.sum(preds == labels)
print(float(correct) / len(val_clusters))
else:
train_x, train_y, val_x, val_y = prepare_data()
nbrs = KNeighborsClassifier(n_neighbors=args.num_neighbors)
nbrs.fit(train_x, train_y)
print(nbrs.score(val_x, val_y))
else: # Regression task
train_x, train_y, val_x, val_y = prepare_data()
nbrs = KNeighborsRegressor(n_neighbors=args.num_neighbors)
nbrs.fit(train_x, train_y)
print(nbrs.score(val_x, val_y))
```
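The "avg" baseline above is plain k-nearest-neighbours over (lat, lon) pairs. The toy example below illustrates the idea with scikit-learn using made-up coordinates and labels rather than the project's CSV; only the mechanics carry over.
```python
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

# Hypothetical (lat, lon) training points with binary labels.
train_x = np.array([[23.5, 78.1], [24.0, 77.9], [26.2, 80.3], [26.8, 80.0]])
train_y = np.array([0, 0, 1, 1])

clf = KNeighborsClassifier(n_neighbors=3)
clf.fit(train_x, train_y)
print(clf.predict([[26.5, 80.1]]))  # [1] -- dominated by the nearby cluster
```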
#### File: live_mapillary/models/clusterwise_classifier.py
```python
import argparse
import logging
import json
import numpy as np
import pandas as pd
from scipy.stats.stats import pearsonr
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor
from sklearn.linear_model import RidgeClassifier, Ridge, Lasso, LinearRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.svm import LinearSVC, LinearSVR
parser = argparse.ArgumentParser()
parser.add_argument('--save_dir', default='features/', help='Base name to save model and log file')
parser.add_argument('--log_file', default='results.log', help='File to save results from classifiers')
parser.add_argument('--train_saved_feats', default=None, help='If features were already saved, specify file')
parser.add_argument('--train_saved_labels', default=None, help='If labels were already saved, specify file')
parser.add_argument('--val_saved_feats', default=None, help='If features were already saved, specify file')
parser.add_argument('--val_saved_labels', default=None, help='If labels were already saved, specify file')
parser.add_argument('--label', default='pov_label', help='Which index to use')
args = parser.parse_args()
logging.basicConfig(filename=args.log_file, level=logging.DEBUG)
def get_cluster_features(df, clusters, counts, train=True):
cluster_features = []
cluster_labels = np.array([])
cluster_idxs = df['unique_cluster'].to_numpy()
for i, cluster_id in enumerate(clusters):
# Get the features for this cluster
imgs_in_cluster = np.argwhere(cluster_idxs == cluster_id).flatten()
cluster_size = len(imgs_in_cluster)
if cluster_size == 0:
continue
feats = counts[imgs_in_cluster, :]
feats = np.sum(feats, axis=0) # sum up the object counts from each image
# Add a feature for size of cluster
feats = np.append(feats, [cluster_size])
# Append this cluster's features to the rest
cluster_features.append(feats)
# Append this cluster's label
target = df[args.label].iloc[imgs_in_cluster[0]]
cluster_labels = np.append(cluster_labels, target)
cluster_features = np.array(cluster_features)
print(cluster_features.shape)
print(cluster_labels.shape)
# Save them
prefix = "train" if train else "val"
saved_feats = args.save_dir + prefix + args.label + '_feats.npy'
saved_labels = args.save_dir + prefix + args.label + '_labels.npy'
np.save(saved_feats, cluster_features)
np.save(saved_labels, cluster_labels)
return saved_feats, saved_labels
def process_objects(objects):
NUM_FEATS = 66 # num_stuff == 28, num_thing == 37, 255 is void class
try:
objects = json.loads(objects)
objects = [NUM_FEATS - 1 if x == 255 else x for x in objects]
# turns instances into histogram of counts
counts = np.histogram(objects, bins=np.arange(0, NUM_FEATS))[0]
exit
except:
counts = np.zeros(NUM_FEATS)
return counts
def get_class_results(clf, X_train, y_train, X_val, y_val):
clf.fit(X_train, y_train)
pred = clf.predict(X_val)
score = metrics.accuracy_score(y_val, pred)
print("accuracy: %0.4f" % score)
print(metrics.classification_report(y_val, pred, target_names=['low', 'high']))
print(metrics.confusion_matrix(y_val, pred))
logging.info("accuracy: %0.4f" % score)
logging.info(metrics.classification_report(y_val, pred, target_names=['low', 'high']))
logging.info(metrics.confusion_matrix(y_val, pred))
def get_reg_results(clf, X_train, y_train, X_val, y_val):
clf.fit(X_train, y_train)
pred = clf.predict(X_val)
mse = metrics.mean_squared_error(y_val, pred)
r2 = metrics.r2_score(y_val, pred)
pear = pearsonr(y_val.ravel(), pred)[0]
print('Mean squared error: %.4f' % mse)
print('R2 (coefficient of determination): %.4f' % r2)
print('PearsonR: %.4f' % pear)
logging.info('Mean squared error: %.4f' % mse)
logging.info('R2 (coefficient of determination): %.4f' % r2)
logging.info('PearsonR: %.4f' % pear)
def run_classifiers(X_train, y_train, X_val, y_val):
for clf, name in (
(KNeighborsClassifier(n_neighbors=3), "kNN"),
(RandomForestClassifier(n_estimators=100, max_depth=None), "Random forest 100"),
(RandomForestClassifier(n_estimators=300, max_depth=None), "Random forest 300"),
(GradientBoostingClassifier(learning_rate=1e-1, n_estimators=100, max_depth=7), "GBDT 7"),
(GradientBoostingClassifier(learning_rate=1e-1, n_estimators=100, max_depth=10), "GBDT 10"),
):
print('=' * 80)
print(name)
logging.info('=' * 80)
logging.info(name)
get_class_results(clf, X_train, y_train, X_val, y_val)
def run_regressors(X_train, y_train, X_val, y_val):
for clf, name in (
(KNeighborsRegressor(n_neighbors=3), "kNN"),
(RandomForestRegressor(n_estimators=100, max_depth=None), "Random forest 100"),
(RandomForestRegressor(n_estimators=300, max_depth=None), "Random forest 300"),
(GradientBoostingRegressor(learning_rate=1e-1, n_estimators=300, max_depth=5), "GBDT 5"),
(GradientBoostingRegressor(learning_rate=1e-1, n_estimators=300, max_depth=7), "GBDT 7"),
):
print('=' * 80)
print(name)
logging.info('=' * 80)
logging.info(name)
get_reg_results(clf, X_train, y_train, X_val, y_val)
def main():
df = pd.read_csv('data.csv')
objects = df['features'].to_numpy() # object instances
counts = np.array(list(map(process_objects, objects))) # converted to counts
if not args.train_saved_feats:
train_f = open('train_clusters_ia.txt', 'r')
val_f = open('val_clusters_ia.txt', 'r')
train_clusters = [x[:-1] for x in train_f.readlines()]
val_clusters = [x[:-1] for x in val_f.readlines()]
train_f.close()
val_f.close()
args.train_saved_feats, args.train_saved_labels = get_cluster_features(df, train_clusters, counts)
args.val_saved_feats, args.val_saved_labels = get_cluster_features(df, val_clusters, counts, train=False)
X_train, y_train = np.load(args.train_saved_feats), np.load(args.train_saved_labels)
X_val, y_val = np.load(args.val_saved_feats), np.load(args.val_saved_labels)
if 'label' in args.label:
run_classifiers(X_train, y_train, X_val, y_val)
else:
run_regressors(X_train, y_train, X_val, y_val)
if __name__ == "__main__":
main()
```
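process_objects turns a JSON list of instance class ids into a fixed-length count vector, remapping the void id 255 before binning. A small worked example of that remap and of np.histogram with the same bin edges, using made-up class ids:
```python
import numpy as np

NUM_FEATS = 66
objects = [3, 3, 17, 255]                                   # raw class ids, 255 = void
objects = [NUM_FEATS - 1 if x == 255 else x for x in objects]
counts = np.histogram(objects, bins=np.arange(0, NUM_FEATS))[0]

print(counts.shape)                        # (65,) -- one bin per class id
print(counts[3], counts[17], counts[64])   # 2 1 1 (the void id lands in the last bin)
```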
#### File: live_mapillary/models/eval_imagewise_classifier.py
```python
import argparse
import logging
import os
import h5py
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
from collections import Counter
from skimage import io, transform
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support
from timeit import default_timer as timer
from torch.utils.data import Dataset, DataLoader
from torchvision import models
from torchvision import transforms, utils
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument('--log_file', type=str, default='results_pov_label_img.log', help='Log file')
parser.add_argument('--model_weights', type=str, default='models/pov_classify', help='Where pretrained model is located')
parser.add_argument('--resnet_ver', type=str, default='resnet34', help='Which ResNet architecture was used')
parser.add_argument('--label', type=str, default='pov_label', help='Label')
parser.add_argument('--batch_size', type=int, default=64)
args = parser.parse_args()
logging.basicConfig(filename=args.log_file,level=logging.DEBUG)
class ClusterImgDataset(Dataset):
def __init__(self, df, device):
self.img_paths = df['img_path_224x224'].to_numpy()
self.device = device
def __len__(self):
return self.img_paths.shape[0]
def __getitem__(self, idx):
image = io.imread(self.img_paths[idx])
image_tensor = torch.from_numpy(image)
image_tensor = image_tensor.permute(2,0,1)
return image_tensor
def create_model():
pretrained_weights = torch.load(args.model_weights)
if args.resnet_ver == 'resnet18':
model = models.resnet18(pretrained=False)
elif args.resnet_ver == 'resnet34':
model = models.resnet34(pretrained=False)
model.fc = nn.Linear(512, 2)
model.load_state_dict(pretrained_weights)
return model
def get_majority_vote(cluster_dataset, model, device):
model.eval()
pred = np.array([])
generator = DataLoader(cluster_dataset, batch_size=args.batch_size, num_workers=1)
for batch in generator:
batch = batch.to(device, dtype=torch.float32)
output = model(batch)
predicted = output.argmax(dim=1, keepdim=True)
pred = np.append(pred, predicted.cpu().numpy())
del batch
del generator
votes = Counter(pred)
majority = votes.most_common(1)[0][0]
del pred
return majority
def main():
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
df = pd.read_csv('data.csv')
val_f = open('val_clusters_ia.txt', 'r')
val_clusters = [x[:-1] for x in val_f.readlines()]
val_f.close()
print("Creating model...")
model = create_model().to(device)
model.eval()
val_c = []
y_true = np.array([])
y_pred = np.array([])
for cluster in tqdm(val_clusters):
cluster_df = df.loc[df['unique_cluster'] == cluster]
if cluster_df.shape[0] == 0:
continue
val_c.append(cluster)
target = cluster_df[args.label].values[0]
y_true = np.append(y_true, target)
dataset = ClusterImgDataset(cluster_df, device)
y_pred = np.append(y_pred, get_majority_vote(dataset, model, device))
del cluster_df, dataset
logging.debug(y_true.shape)
logging.debug(y_pred.shape)
print(confusion_matrix(y_true, y_pred))
logging.debug(confusion_matrix(y_true, y_pred))
prec, recall, fscore, support = precision_recall_fscore_support(y_true, y_pred, average='micro')
print("Precision: {}\nRecall: {}\nF-Score: {}\nSupport: {}".format(prec, recall, fscore, support))
logging.debug("Precision: {}\nRecall: {}\nF-Score: {}\nSupport: {}".format(prec, recall, fscore, support))
# Save predictions
df = pd.DataFrame({'unique_cluster': np.array(val_c),
args.label: y_true,
args.label + 'pred': y_pred})
df.to_csv(args.label + '_preds.csv', index=False)
if __name__ == "__main__":
main()
``` |
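get_majority_vote collapses the per-image predictions of a cluster into one label via Counter.most_common. The same mechanics in isolation, with a made-up prediction array:
```python
from collections import Counter
import numpy as np

pred = np.array([1., 0., 1., 1., 0.])   # per-image class predictions
votes = Counter(pred)
majority = votes.most_common(1)[0][0]
print(majority)                          # 1.0 -- the cluster-level label
```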
{
"source": "jlee4219/raytracer",
"score": 3
} |
#### File: raytracer/src/hittable_list.py
```python
import numpy as np
from src.hittable import Hittable, HitRecord
from src.ray import Ray
from src.util import vec_where
class HittableList(Hittable):
def __init__(self, hittable_list = []):
self.hittable_list = hittable_list
def add(self, object):
self.hittable_list.append(object)
def clear(self):
self.hittable_list = []
def hit(self, r: Ray, t_min, t_max) -> HitRecord:
record = None
closest_so_far = t_max
for hittable in self.hittable_list:
new_record = hittable.hit(r, t_min, closest_so_far)
if record:
record.t = np.where((new_record.t < closest_so_far), new_record.t, record.t)
record.normal = vec_where((new_record.t < closest_so_far), new_record.normal, record.normal)
record.front_face = np.where((new_record.t < closest_so_far), new_record.front_face, record.front_face)
record.p = vec_where((new_record.t < closest_so_far), new_record.p, record.p)
else:
record = new_record
closest_so_far = record.t
# TODO (Jefferson) moved this from sphere, might need to move back?
record.set_face_normal(r, record.normal)
return record
```
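hit() keeps, per ray, whichever candidate intersection is nearer by blending the previous record with the new one through np.where. The following sketch shows that elementwise selection with plain arrays standing in for the HitRecord fields; the t values are arbitrary.
```python
import numpy as np

# t values for the same batch of rays against two objects; np.inf = miss.
t_first  = np.array([2.0, np.inf, 5.0])
t_second = np.array([3.0, 4.0,    1.0])

closest = t_first.copy()
closest = np.where(t_second < closest, t_second, closest)
print(closest)   # [2. 4. 1.] -- per-ray nearest hit, as hit() tracks
```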
#### File: raytracer/src/vec3.py
```python
import numpy as np
# TODO(Jefferson): after switching to use numpy for general calculation, data model
# here feels less optimal and could likely be improved.
class Vec3:
def __init__(self, data = np.array([0., 0., 0.])):
self.data = np.array(data)
def x(self):
return self.data[0]
def y(self):
return self.data[1]
def z(self):
return self.data[2]
def r(self):
return self.data[0]
def g(self):
return self.data[1]
def b(self):
return self.data[2]
def __add__(self, other):
return Vec3(data = (self.x() + other.x(), self.y() + other.y(), self.z() + other.z()))
def __sub__(self, other):
return Vec3(data = (self.x() - other.x(), self.y() - other.y(), self.z() - other.z()))
def __truediv__(self, scalar):
return Vec3(data = (self.x() / scalar, self.y() / scalar, self.z() / scalar))
def __mul__(self, scalar):
return Vec3(data = (self.x() * scalar, self.y() * scalar, self.z() * scalar))
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
def __str__(self):
return 'Vec3: ' + str(self.data)
def dot(self, other):
return self.x() * other.x() + self.y() * other.y() + self.z() * other.z()
def cross(self, other):
'''cross product of two vectors'''
return Vec3(data=(
self[1] * other[2] - self[2] * other[1],
-(self[0] * other[2] - self[2] * other[0]),
self[0] * other[1] - self[1] * other[0]
))
def squared_length(self):
return self.dot(self)
def length(self):
return np.sqrt(self.squared_length())
def normalized(self):
return self / self.length()
``` |
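A quick sanity check of the vector helpers above, including the corrected cross product: the unit x and y vectors should cross to the unit z vector. This assumes the module is importable as src.vec3 (the package layout used by the other files in this repo) and is run from the project root.
```python
from src.vec3 import Vec3  # assumed import path

x_hat = Vec3((1.0, 0.0, 0.0))
y_hat = Vec3((0.0, 1.0, 0.0))

print(x_hat.cross(y_hat).data)          # [0. 0. 1.]
print(x_hat.dot(y_hat))                 # 0.0
print(Vec3((3.0, 4.0, 0.0)).length())   # 5.0
```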
{
"source": "jlee58/financa_web",
"score": 2
} |
#### File: web/api/views.py
```python
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from finance.models import StockHistory,StockList,UserFinanceRecord
from django.core import serializers
from django.http import HttpResponse
from django.http import JsonResponse
from yahoo_finance import Share
from django.utils import timezone
@login_required
def setStock(request):
if request.method == 'POST':
uid = request.user.id
type = request.POST.get('type')
symbolid = request.POST.get('symbolid')
num = request.POST.get('num')
price = request.POST.get('price')
date = request.POST.get('date')
mtime = timezone.now()
if not symbolid:
return JsonResponse({"success": 'false', 'error': '股票代碼未填寫', 'errid': 's01'})
if not num:
return JsonResponse({"success": 'false', 'error': '數量未填寫', 'errid': 's02'})
if not price:
return JsonResponse({"success": 'false', 'error': '價格未填寫', 'errid': 's03'})
if not date:
return JsonResponse({"success": 'false', 'error': '時間未填寫', 'errid': 's04'})
hand = float(price) * float(num) * 0.001425
tax = 0
fin = 0
fout = 0
total = 0
if int(type) == 1:
fout = float(price) * float(num) + hand
if int(type) == 2:
tax = float(price) * float(num) * 0.003
fin = float(price) * float(num) - hand - tax
if int(type) == 3:
hand = 0
fin = float(price) * float(num)
UserFinanceRecord.objects.create(uid=uid,type=type,symbolid=symbolid,num=num,price=price,hand=hand,tax=tax,fout=fout,fin=fin,total=total,date=date,mtime=mtime)
return JsonResponse({"result": "Add successfully! "+symbolid})
else:
return JsonResponse({"success": 'false', 'error': 'wrong method'})
@login_required
def delStock(request):
if request.method == 'POST':
id = request.POST['id']
uid = request.user.id
d = UserFinanceRecord.objects.filter(uid=uid,id=id)
d.delete()
return JsonResponse({"result": 'delete '+id})
else:
return JsonResponse({"success": 'false', 'error': 'wrong method'})
@login_required
def getStock(request):
if request.method == 'GET':
uid = request.user.id
data = serializers.serialize('json', UserFinanceRecord.objects.filter(uid=uid).order_by('-mtime'))
return HttpResponse(data, content_type="application/json")
else:
return JsonResponse({"success": 'false', 'error': 'wrong method'})
@login_required
def getStockHistory(request):
if request.method == 'GET':
sid = request.GET['sid']
data = serializers.serialize('json', StockHistory.objects.filter(symbolid=sid).order_by('-date')[:400])
return HttpResponse(data, content_type="application/json")
else:
return JsonResponse({"success": 'false', 'error': 'wrong method'})
@login_required
def getStockInfoNow(request):
if request.method == 'GET':
sid = request.GET['sid']
try:
s = Share(sid+'.TW')
p = float(s.get_price())
c = float(s.get_change())
v = float(s.get_volume())/1000
pc = float(s.get_prev_close())
        except Exception:
return JsonResponse({"success": 'false', 'error': 'wrong sid'})
return JsonResponse({'price': p, 'change': c, 'volume': v, 'prev_close': pc})
else:
return JsonResponse({"success": 'false', 'error': 'wrong method'})
@login_required
def getStockInfo(request):
if request.method == 'GET':
sid = request.GET['sid']
try:
data = serializers.serialize('json', StockList.objects.filter(symbolid=sid))
        except Exception:
return JsonResponse({"success": 'false', 'error': 'wrong sid'})
return HttpResponse(data, content_type="application/json")
else:
return JsonResponse({"success": 'false', 'error': 'wrong method'})
``` |
{
"source": "jlee7x2/SParts",
"score": 2
} |
#### File: tp_artifact_1.0/sawtooth_artifact/artifact_batch.py
```python
import hashlib
import base64
from base64 import b64encode
import time
import requests
import yaml
import json
# import sawtooth_signing.secp256k1_signer as signing
#
from sawtooth_signing import create_context
from sawtooth_signing import CryptoFactory
from sawtooth_signing import ParseError
from sawtooth_signing.secp256k1 import Secp256k1PrivateKey
#
from datetime import datetime
from sawtooth_sdk.protobuf.transaction_pb2 import TransactionHeader
from sawtooth_sdk.protobuf.transaction_pb2 import Transaction
from sawtooth_sdk.protobuf.batch_pb2 import BatchList
from sawtooth_sdk.protobuf.batch_pb2 import BatchHeader
from sawtooth_sdk.protobuf.batch_pb2 import Batch
from sawtooth_artifact.exceptions import ArtifactException
def _sha512(data):
return hashlib.sha512(data).hexdigest()
class ArtifactBatch:
def __init__(self, base_url):
self._base_url = base_url
def create(self,private_key,public_key,artifact_id,alias,artifact_name,artifact_type,artifact_checksum,label,openchain,timestamp):
return self.artifact_transaction(private_key,public_key,artifact_id,alias,artifact_name,artifact_type,artifact_checksum,label,openchain,timestamp,"create","","","","","","","","")
def list_artifact(self):
artifact_prefix = self._get_prefix()
result = self._send_request(
"state?address={}".format(artifact_prefix)
)
try:
encoded_entries = yaml.safe_load(result)["data"]
return [
base64.b64decode(entry["data"]) for entry in encoded_entries
]
except BaseException:
return None
def add_uri(self,private_key,public_key,artifact_id,version,checksum,content_type,size,uri_type,location):
return self.artifact_transaction(private_key,public_key,artifact_id,"","","","","","","","AddURI","",version,checksum,content_type,size,uri_type,location,"")
def add_artifact(self,private_key,public_key,artifact_id,sub_artifact_id,path):
return self.artifact_transaction(private_key,public_key,artifact_id,"","","","","","","","AddArtifact",sub_artifact_id,"","","","","","",path)
def retrieve_artifact(self, artifact_id):
address = self._get_address(artifact_id)
result = self._send_request("state/{}".format(address), artifact_id=artifact_id)
try:
return base64.b64decode(yaml.safe_load(result)["data"])
except BaseException:
return None
def _get_prefix(self):
return _sha512('artifact'.encode('utf-8'))[0:6]
def _get_address(self, artifact_id):
artifact_prefix = self._get_prefix()
address = _sha512(artifact_id.encode('utf-8'))[0:64]
return artifact_prefix + address
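        # The resulting address is 70 hex characters: a 6-character namespace
        # prefix (first 6 of sha512("artifact")) followed by the first 64
        # characters of sha512(artifact_id).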
def _send_request(
self, suffix, data=None,
content_type=None, artifact_id=None):
if self._base_url.startswith("http://"):
url = "{}/{}".format(self._base_url, suffix)
else:
url = "http://{}/{}".format(self._base_url, suffix)
headers = {}
if content_type is not None:
headers['Content-Type'] = content_type
try:
if data is not None:
result = requests.post(url, headers=headers, data=data)
else:
result = requests.get(url, headers=headers)
if result.status_code == 404:
raise ArtifactException("No such artifact as {}".format(artifact_id))
elif not result.ok:
raise ArtifactException("Error {} {}".format(
result.status_code, result.reason))
except BaseException as err:
raise ArtifactException(err)
return result.text
def artifact_transaction(self,private_key,public_key,artifact_id,alias="",artifact_name="",artifact_type="",artifact_checksum="",label="",openchain="",timestamp="",action="",sub_artifact_id="",version="",checksum="",content_type="",size="",uri_type="",location="",path=""):
self._public_key = public_key
self._private_key = private_key
payload = ",".join([artifact_id,str(alias),str(artifact_name),str(artifact_type),str(artifact_checksum),str(label),str(openchain),str(timestamp), action,str(sub_artifact_id),str(version),str(checksum),str(content_type),size,str(uri_type),str(location),str(path)]).encode()
address = self._get_address(artifact_id)
header = TransactionHeader(
signer_public_key=self._public_key,
family_name="artifact",
family_version="1.0",
inputs=[address],
outputs=[address],
dependencies=[],
# payload_encoding="csv-utf8",
payload_sha512=_sha512(payload),
batcher_public_key=self._public_key,
nonce=time.time().hex().encode()
).SerializeToString()
# signature = signing.sign(header, self._private_key)
signature = CryptoFactory(create_context('secp256k1')) \
.new_signer(Secp256k1PrivateKey.from_hex(self._private_key)).sign(header)
transaction = Transaction(
header=header,
payload=payload,
header_signature=signature
)
batch_list = self._create_batch_list([transaction])
return self._send_request(
"batches", batch_list.SerializeToString(),
'application/octet-stream'
)
def _create_batch_list(self, transactions):
transaction_signatures = [t.header_signature for t in transactions]
header = BatchHeader(
signer_public_key=self._public_key,
transaction_ids=transaction_signatures
).SerializeToString()
# signature = signing.sign(header, self._private_key)
signature = CryptoFactory(create_context('secp256k1')) \
.new_signer(Secp256k1PrivateKey.from_hex(self._private_key)).sign(header)
batch = Batch(
header=header,
transactions=transactions,
header_signature=signature
)
return BatchList(batches=[batch])
```
#### File: sawtooth_part/processor/handler.py
```python
import hashlib
import logging
import json
# from sawtooth_sdk.processor.state import StateEntry
from sawtooth_sdk.processor.exceptions import InvalidTransaction
from sawtooth_sdk.processor.exceptions import InternalError
# from sawtooth_sdk.protobuf.transaction_pb2 import TransactionHeader
from sawtooth_sdk.processor.handler import TransactionHandler
LOGGER = logging.getLogger(__name__)
class PartTransactionHandler:
def __init__(self, namespace_prefix):
self._namespace_prefix = namespace_prefix
@property
def family_name(self):
return 'pt'
@property
def family_versions(self):
return ['1.0']
@property
def encodings(self):
return ['csv-utf8']
@property
def namespaces(self):
return [self._namespace_prefix]
def apply(self, transaction, context):
# header = TransactionHeader()
# header.ParseFromString(transaction.header)
try:
# The payload is csv utf-8 encoded string
pt_id,pt_name,checksum,version,alias,licensing,label,description,action,artifact_id,category_id,supplier_id = transaction.payload.decode().split(",")
except ValueError:
raise InvalidTransaction("Invalid payload serialization")
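        # Example payload (illustrative values only), 12 comma-separated fields:
        #   pt_id,pt_name,checksum,version,alias,licensing,label,description,
        #   action,artifact_id,category_id,supplier_id
        #   e.g. "p-001,widget,abc123,1.0,wdg,MIT,demo,an example part,create,,,"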
validate_transaction( pt_id,action)
data_address = make_part_address(self._namespace_prefix,pt_id)
# Retrieve the data from state storage
# state_entries = state_store.get([data_address])
state_entries = context.get_state([data_address])
if len(state_entries) != 0:
try:
stored_pt_id, stored_pt_str = \
state_entries[0].data.decode().split(",",1)
stored_pt = json.loads(stored_pt_str)
except ValueError:
raise InternalError("Failed to deserialize data.")
else:
stored_pt_id = stored_pt = None
if action == "create" and stored_pt_id is not None:
raise InvalidTransaction("Invalid part already exists.")
elif action == "AddArtifact" or action == "AddSupplier" or action == "AddCategory":
if stored_pt_id is None:
raise InvalidTransaction(
"Invalid the operation requires an existing part."
)
if action == "create":
pt = create_part(pt_id,pt_name,checksum,version,alias,licensing,label,description)
stored_pt_id = pt_id
stored_pt = pt
_display("Created a part.")
if action == "AddArtifact":
if artifact_id not in stored_pt_str:
pt = add_artifact(artifact_id,stored_pt)
stored_pt = pt
if action == "AddSupplier":
if supplier_id not in stored_pt_str:
pt = add_supplier(supplier_id,stored_pt)
stored_pt = pt
if action == "AddCategory":
if category_id not in stored_pt_str:
pt = add_category(category_id,stored_pt)
stored_pt = pt
# 6. Put data back in state storage
stored_pt_str = json.dumps(stored_pt)
data=",".join([stored_pt_id,stored_pt_str]).encode()
addresses = context.set_state({data_address:data})
# addresses = state_store.set([
# StateEntry(
# address=data_address,
# data=",".join([stored_pt_id, stored_pt_str]).encode()
# )
# ])
return addresses
def add_artifact(uuid,parent_pt):
pt_list = parent_pt['artifacts']
pt_dic = {'artifact_id': uuid}
pt_list.append(pt_dic)
parent_pt['artifacts'] = pt_list
return parent_pt
def add_supplier(uuid,parent_pt):
pt_list = parent_pt['suppliers']
pt_dic = {'supplier_id': uuid}
pt_list.append(pt_dic)
parent_pt['suppliers'] = pt_list
return parent_pt
def add_category(uuid,parent_pt):
pt_list = parent_pt['categories']
pt_dic = {'category_id': uuid}
pt_list.append(pt_dic)
parent_pt['categories'] = pt_list
return parent_pt
def create_part(pt_id,pt_name,checksum,version,alias,licensing,label,description):
ptD = {'pt_id': pt_id,'pt_name': pt_name,'checksum': checksum,'version': version,'alias':alias,'licensing':licensing,'label':label,'description':description,'artifacts':[],'suppliers':[],'categories':[]}
return ptD
def validate_transaction( pt_id,action):
if not pt_id:
raise InvalidTransaction('Part ID is required')
if not action:
raise InvalidTransaction('Action is required')
if action not in ("AddArtifact", "create","AddCategory","AddSupplier","list-part","retrieve"):
raise InvalidTransaction('Invalid action: {}'.format(action))
def make_part_address(namespace_prefix, part_id):
return namespace_prefix + \
hashlib.sha512(part_id.encode('utf-8')).hexdigest()[:64]
def _display(msg):
n = msg.count("\n")
if n > 0:
msg = msg.split("\n")
length = max(len(line) for line in msg)
else:
length = len(msg)
msg = [msg]
LOGGER.debug("+" + (length + 2) * "-" + "+")
for line in msg:
LOGGER.debug("+ " + line.center(length) + " +")
LOGGER.debug("+" + (length + 2) * "-" + "+")
``` |
{
"source": "jlee-ds/meshcnn",
"score": 2
} |
#### File: jlee-ds/meshcnn/test.py
```python
from options.test_options import TestOptions
from data import DataLoader
from models import create_model
from util.writer import Writer
import numpy as np
import sklearn.metrics
def run_test(epoch=-1):
print('Running Test')
opt = TestOptions().parse()
opt.serial_batches = True # no shuffle
dataset = DataLoader(opt)
model = create_model(opt)
writer = Writer(opt)
# test
writer.reset_counter()
heappop_error_test = 0
pred_classes = []
label_classes = []
for i, data in enumerate(dataset):
model.set_input(data)
if opt.dataset_mode == 'classification' :
try :
ncorrect, nexamples, pred_class, label_class = model.test()
pred_classes.append(pred_class.cpu().numpy())
label_classes.append(label_class.cpu().numpy())
#print(sklearn.metrics.classification_report(np.concatenate(label_classes, axis=None), np.concatenate(pred_classes, axis=None)))
writer.update_counter(ncorrect, nexamples)
except IndexError:
heappop_error_test += 1
print('(%d) IndexError occured, passed to next data' % (heappop_error_test))
pass
else :
ncorrect, nexamples, pred_class, label_class = model.test()
writer.update_counter(ncorrect, nexamples)
writer.print_acc(epoch, writer.acc)
if opt.dataset_mode == 'classification' :
print(sklearn.metrics.classification_report(np.concatenate(label_classes, axis=None), np.concatenate(pred_classes, axis=None)))
return writer.acc
if __name__ == '__main__':
run_test()
``` |
{
"source": "jleen/alchemy",
"score": 2
} |
#### File: jleen/alchemy/transmute.py
```python
import getopt
import os
import re
import sys
import xml
debug = False
templates = None
ITALIC = r"/([^/]+)/"
escapes = [
[ r'^=> (?P<file>.*)$', None, 'include' ],
[ r'^=(?P<cmd>[A-Za-z]+) (?P<arg>.*)$', None, 'directive' ],
[ r'^==(?P<cmd>[A-Za-z]+) \< (?P<file>.*)$', None, 'include_macro' ],
[ r'^==(?P<cmd>[A-Za-z]+)(?: (?P<arg>[^<]*))?==$', None, 'macro' ],
[ r'^==(?P<cmd>[A-Za-z]+)(?: (?P<arg>[^=<]*))?$', "^==$", 'macro' ],
[ r'^===(?P<cmd>[A-Za-z]+)(?: (?P<arg>[^=<]*))?$', "^===$", 'macro' ],
[ r'^== (?P<arg>.*) ==$', None, 'section' ]
]
class IteratorStack:
iter_stack = [[].__iter__()]
def __iter__(self):
return self
def next(self):
if debug: print 'next', self.iter_stack[-1]
        return self.iter_stack[-1].next()
def push(self, iterator):
if debug: print 'push', iterator
self.iter_stack += [iterator]
def pop(self):
        if debug: print 'pop', self.iter_stack[:-1]
        self.iter_stack = self.iter_stack[:-1]
runtime_dir = os.path.split(sys.argv[0])[0]
def runtime_file(name):
return os.path.join(runtime_dir, name)
class Formatter:
fields = {}
def __init__(self, transformer, line_mode):
self.transformer = transformer
self.line_mode = line_mode
def begin(self):
if debug: print "Beginning", self.debug_name
self.fields = {}
self.fields["body"] = ''
self.fields["title"] = ''
self.fields["date"] = ''
def end(self):
if debug: print "Ending", self.debug_name
def include_macro(self, cmd, file):
if debug: print 'Including', file, 'for macro', cmd
include_fh = open(file + '.prose', 'r')
include_lines = include_fh.readlines()
include_fh.close()
self.macro(cmd, None, include_lines)
def section(self, arg):
self.macro("section", arg)
def macro(self, cmd, arg, lines = []):
line_macro = templates[cmd]['line_macro']
subfmt = self.__class__(self.transformer, line_macro)
if debug: subfmt.debug_name = "inner"
subfmt.begin()
if arg: arg = self.transformer.transform(arg)
subfmt.fields["arg"] = arg
subfmt.fill(lines)
subfmt.end()
sub_fields = subfmt.get_fields()
self.fields["body"] += fill_template(templates[cmd]['text'], sub_fields)
def include(self, file):
if debug: print 'Including ', file
include_fh = open(file + '.prose', 'r')
include_lines = include_fh.readlines()
include_fh.close()
self.fill(include_lines)
def directive(self, cmd, arg):
if debug: print "Directive", cmd, arg
self.fields[cmd] = self.transformer.transform(arg)
def get_fields(self):
return self.fields
def line(self, str):
if self.line_mode:
if debug: print 'Processing line with macro', self.line_mode
self.macro(self.line_mode, str)
else:
if debug: print 'Processing paragraph with transformer'
self.fields["body"] += self.transformer.transform_line(str)
def fill(self, lines):
cur_line = ''
fresh_line = True
line_iter = lines.__iter__()
while True:
line = ''
try: line = line_iter.next()
except StopIteration: break
line = line.strip()
if debug: print "Filling", self.debug_name, "from", line
if line.startswith('='):
for pat, end_pat, cmd in escapes:
cmd_impl = getattr(self, cmd)
found = pat.match(line)
if found:
if end_pat:
inner_lines = []
line = line_iter.next().strip()
while not end_pat.match(line):
inner_lines += [line]
line = line_iter.next().strip()
cmd_impl(lines = inner_lines, *found.groups())
else:
cmd_impl(*found.groups())
elif self.line_mode:
self.line(line)
elif line != '':
if not fresh_line:
cur_line += ' '
else:
fresh_line = False
cur_line += line
else:
if not fresh_line:
self.line(cur_line)
fresh_line = True
cur_line = ''
if not fresh_line: self.line(cur_line)
def get_transformer(which_one):
if which_one == "plain": return Plain()
if which_one == "html": return Html()
if which_one == "tex": return TeX()
class Transformer:
def transform(self, str):
for xf in self.transforms:
str = re.sub(xf[0], xf[1], str)
return str
def transform_line(self, str):
return self.transform(str) + '\n\n'
class Dumb(Transformer):
transforms = []
class Plain(Transformer):
transforms = [
[ r' +', ' ' ],
[ r'`', "'" ]
]
class Html(Transformer):
def transform_line(self, str):
return '<p>' + self.transform(str) + '</p>\n'
transforms = [
[ r' +', ' ' ],
[ r'<', '<' ],
[ r'>', '>' ],
[ r'&', '&' ],
[ r'``', '“' ],
[ r"''", '”' ],
[ r'`', '‘' ],
[ r"'", '’' ],
[ r'--', '&endash;' ],
[ r'\.\.\.', '…' ],
[ ITALIC, '<i>\\1</i>' ]
]
class TeX(Transformer):
transforms = [
[r' +', ' '],
[r'\\', '\\\\\\\\'],
[r'&', '\\&'],
[r'\$', '\$'],
[r'_', '\_'],
[r'\^\^', '\\^'],
[r'\^:', '\\"'],
[r'\^AE', '\\AE{}'],
[r'\s*--\s*', '---' ],
[r'//', '\\\\happyslash{}'],
[r'==footnote ([^=]+)==', '\\\\footnote{\\1}'],
[ITALIC, '{\\\\it \\1}']
]
def fill_template(template, fields):
field_alternator = '|'.join([re.escape(key) for key in fields.keys()])
filler = re.compile('\$(' + field_alternator + ')')
return filler.sub(lambda match: fields[match.group(1)], template)
def main():
global debug
global templates
lines = sys.stdin.readlines()
template = None
if lines[0].startswith('====template '):
template = lines[0][13:].strip() + '.tmpl'
opts, args = getopt.getopt(
sys.argv[1:],
        'v', ['verbose'])
transformer_type = "plain"
for o, a in opts:
if o in ('-v', '--verbose'):
debug = True
if len(args) > 0:
template = args[0]
if template and not os.path.exists(template):
template = runtime_file(template)
if not template or not os.path.exists(template):
template = runtime_file("plain.tmpl")
for cmd in escapes:
cmd[0] = re.compile(cmd[0])
if cmd[1]: cmd[1] = re.compile(cmd[1])
template_fh = open(template, 'r')
template_lines = template_fh.readlines()
template_fh.close()
templates = { 'main': '' }
current_template = 'main'
lines_re = re.compile(
'^(?P<name>[A-Za-z]+) lines (?P<line_macro>[A-Za-z]+)$')
for line in template_lines:
line = line.strip()
if line.startswith('===default '):
transformer_type = line[11:]
elif line.startswith('=='):
current_template = line[2:]
line_macro = None
found = lines_re.match(current_template)
if found:
current_template = found.group('name')
line_macro = found.group('line_macro')
if debug: print 'Defining macro', current_template
templates[current_template] = {}
templates[current_template]['text'] = ''
templates[current_template]['line_macro'] = line_macro
else:
templates[current_template]['text'] += line + '\n'
line_mode = templates['main']['line_macro']
formatter = Formatter(get_transformer(transformer_type), line_mode)
if debug: formatter.debug_name = "outer"
formatter.begin()
formatter.fill(lines)
formatter.end()
print fill_template(templates['main']['text'], formatter.get_fields())
main()
``` |
{
"source": "jleen/antikythera",
"score": 3
} |
#### File: jleen/antikythera/antikythera.py
```python
import computus
import hebrew
import cgi
import cgitb
import os
phase_names = [ 'New', 'Waxing Crescent', 'First Quarter', 'Waxing Gibbous',
'Full', 'Waning Gibbous', 'Last Quarter', 'Waning Crescent' ]
def get():
cgitb.enable()
form = cgi.FieldStorage()
print 'Content-type: text/html'
print
interleave(int(os.environ["PATH_INFO"][1:]))
def interleave(year):
start_date = computus.gregorian_to_jd((year, 3, 1))
end_date = computus.gregorian_to_jd((year, 5, 31))
print 'start ' + str(start_date) + '<br>'
print 'end ' + str(end_date) + '<br>'
julian = julian_calendar(year)
gregorian = gregorian_calendar(year)
hebrew_cal = hebrew_calendar(hebrew.ad_to_am_at_pesach(year))
compendium = easter_compendium(year)
i_julian = 0
i_hebrew = 0
i = 0
while julian[i_julian][0] != start_date: i_julian += 1
while gregorian[i][0] != start_date: i += 1
while hebrew_cal[i_hebrew][0] != start_date: i_hebrew += 1
offset = i_julian - i
hebrew_offset = i_hebrew - i
# Find Sunday.
while gregorian[i][3] != 0: i += 1
prev = str(year - 1)
next = str(year + 1)
print """
<div align="center">
<a href=\"""" + prev + """"><< prev</a>
<a href=\"""" + next + """">next >></a>
<table border="1" cellspacing="0">
<tr>
<th width="100">Sunday</th>
<th width="100">Monday</th>
<th width="100">Tuesday</th>
<th width="100">Wednesday</th>
<th width="100">Thursday</th>
<th width="100">Friday</th>
<th width="100">Saturday</th>
"""
done = False
while not done:
if gregorian[i][3] == 0:
print '</tr>'
print '<tr>'
print '<td height="100" valign="top">'
jd = gregorian[i][0]
print str(jd) + '<br>'
if jd == end_date:
done = True
print 'Done!<br>'
print 'G',
print_calendar_entry(gregorian[i])
print '<br>'
print 'J',
print_calendar_entry(julian[i + offset])
print '<br>'
print 'H',
print_hebrew_calendar_entry(
hebrew_cal[i + hebrew_offset], gregorian[i][3])
print '<br>'
annotations = consult_compendium(compendium, jd)
j_easter = False
j_pre_easter = False
for a in annotations:
if a == 'gregorian_equinox': print 'Equinox (W)'
elif a == 'gregorian_full_moon': print 'Full Moon (W)'
elif a == 'gregorian_easter': print 'Easter (W)'
elif a == 'julian_equinox': print 'Equinox (E)'
elif a == 'julian_full_moon': print 'Full Moon (E)'
elif a == 'julian_easter':
print 'Easter (E)'
j_easter = True
elif a == 'julian_uncorrected_easter': j_pre_easter = True
elif a == 'passover': print 'Passover'
elif a == 'passover_prep': print 'Full Moon (H)'
if j_pre_easter and not j_easter: print 'Not Easter (E)'
print '</td>'
i += 1
print '''
</tr>
</table>
</div>
'''
def consult_compendium(compendium, jd):
annotations = []
for key in compendium:
if compendium[key] == jd:
annotations += [ key ]
return annotations
def easter_compendium(year):
gregorian_data = computus.gregorian_easter(year)
julian_data = computus.julian_easter(year)
compendium = {}
for key in gregorian_data: compendium[key] = gregorian_data[key]
for key in julian_data: compendium[key] = julian_data[key]
return compendium
hebrew_month_names = [ 'Shevat', 'Adar', 'Adar I', 'Adar II', 'Nissan',
'Iyar', 'Sivan' ]
def hebrew_calendar(year):
is_leap = hebrew.is_leap(year)
jd = hebrew.shevat_jd(year)
adar_jd = hebrew.adar_jd(year)
adar_i_jd = hebrew.adar_i_jd(year)
nisan_jd = hebrew.nisan_jd(year)
iyar_jd = hebrew.iyar_jd(year)
sivan_jd = hebrew.sivan_jd(year)
tamuz_jd = hebrew.tamuz_jd(year)
calendar = []
day = 1
month = 0
while True:
calendar += [ (jd, month, day, dol_to_pom(day - 5)) ]
jd += 1
if is_leap and jd == adar_i_jd:
month = 2
day = 1
elif is_leap and jd == adar_jd:
month = 3
day = 1
elif (not is_leap) and jd == adar_jd:
month = 1
day = 1
elif jd == nisan_jd:
month = 4
day = 1
elif jd == iyar_jd:
month = 5
day = 1
elif jd == sivan_jd:
month = 6
day = 1
elif jd == tamuz_jd:
break
else:
day += 1
return calendar
def julian_calendar(year):
return calendar(
computus.julian_to_jd((year, 2, 1)),
computus.julian_year(year),
year % 4 == 0)
def gregorian_calendar(year):
return calendar(
computus.gregorian_to_jd((year, 2, 1)),
computus.gregorian_year(year),
is_gregorian_leap_year(year))
def is_gregorian_leap_year(year):
if year % 100 == 0: return year % 400 == 0
else: return year % 4 == 0
# HACK!
def dol_to_pom(day_of_lunation):
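    # Map a day-of-lunation to an index into phase_names (0-7); each phase
    # bucket here is ~3.5 days wide, so this is only a rough approximation.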
phase = int((day_of_lunation + 4) / 3.5)
if phase >= 8: phase = 0
return phase
def calendar(initial_jd, year_data, leap):
i_january_3 = computus.find_day(year_data, 1, 3)
i_february_1 = computus.find_day(year_data, 2, 1)
i_june_1 = computus.find_day(year_data, 6, 1)
i_january_new_moon = computus.find_new_moon_after(
i_january_3, year_data)
calendar = []
day_of_lunation = 32 - year_data[i_january_new_moon][1]
phase = dol_to_pom(day_of_lunation)
# HACK!
day_of_phase = int((day_of_lunation + 4) % 3.5) + 1
jd = initial_jd
for i in range(i_february_1, i_june_1):
(month, day, weekday, new_moon) = year_data[i]
if new_moon:
phase = 1
day_of_phase = 1
# Bissextile!
if leap:
if month == 1 or (month == 2 and day < 24): weekday -= 1
if month == 2 and day == 24:
calendar += [ (jd, 2, 24, (weekday - 1) % 7, phase) ]
jd += 1
calendar += [ (jd, month, day, weekday % 7, phase) ]
day_of_phase += 1
if day_of_phase == 5 - (phase % 2) and phase != 0:
day_of_phase = 1
phase += 1
phase = phase % 8
jd += 1
return calendar
def print_calendar_entry(entry):
(jd, month, day, weekday, phase) = entry
mon_name = computus.months[month]
print '%s %d' % (mon_name, day)
def print_hebrew_calendar_entry(entry, weekday):
(jd, month, day, phase) = entry
mon_name = hebrew_month_names[month]
print '%d %s' % (day, mon_name)
def print_calendar(calendar):
for entry in calendar: print_calendar_entry(entry)
``` |
{
"source": "j-lee-nielsen/pyxero",
"score": 3
} |
#### File: pyxero/xero/api.py
```python
from __future__ import unicode_literals
from .manager import Manager
class Xero(object):
"""An ORM-like interface to the Xero API"""
OBJECT_LIST = (
u"Attachments",
u"Accounts",
u"BankTransactions",
u"BankTransfers",
u"BrandingThemes",
u"Contacts",
u"CreditNotes",
u"Currencies",
u"Employees",
u"ExpenseClaims",
u"Invoices",
u"Items",
u"Journals",
u"ManualJournals",
u"Organisation",
u"Payments",
u"Receipts",
u"RepeatingInvoices",
u"Reports",
u"TaxRates",
u"TrackingCategories",
u"Users",
)
def __init__(self, credentials):
# Iterate through the list of objects we support, for
# each of them create an attribute on our self that is
# the lowercase name of the object and attach it to an
# instance of a Manager object to operate on it
for name in self.OBJECT_LIST:
setattr(self, name.lower(), Manager(name, credentials))
``` |
{
"source": "jleeothon/berrynet",
"score": 3
} |
#### File: berrynet/berrynet/naive.py
```python
__all__ = ["NaiveSolver"]
class NaiveSolver(object):
def __init__(self, network):
self.network = network
self._marginal_probabilities = dict()
def marginal_probability(self, **kwargs):
"""
Use like solver.marginal_probability(a="a")
"""
if len(kwargs) != 1:
raise Exception("Provide only one variable and one value")
variable = [k for k in kwargs.keys()][0]
value = [v for v in kwargs.values()][0]
        mp = self._marginal_probabilities.get((variable, value))
if mp is None:
node = self.network.nodes.get(variable)
if node is None:
raise Exception("%s is not a variable in the network", variable)
cps = node.distribution.get(value)
if cps is None:
raise Exception("%s is not a value of %s", value, variable)
p = 0
for cp in cps:
q = 1
for pvar, pval in cp.events.items():
r = self.marginal_probability(**{pvar.name:pval})
q *= r
p += cp.probability * q
            mp = p
            self._marginal_probabilities[(variable, value)] = mp
return mp
    def conditional_probability(self, **kwargs):
"""
Use in conjunction to ``ConditionedProbability.given``.
"""
if len(kwargs) != 1:
raise Exception("Use only one node to query")
        variable = [k for k in kwargs.keys()][0]
value = [v for v in kwargs.values()][0]
        return ConditionedProbability(variable, value)
class ConditionedProbability(NaiveSolver):
def __init__(self, variable, value):
self.variable = variable # a Node, not str
self.value = value
    def given(self, **kwargs):
pass
``` |
{
"source": "jleeothon/redpandas.com",
"score": 3
} |
#### File: redpandascom/redpanda/models.py
```python
from django.core.validators import MinValueValidator
from django.core.urlresolvers import reverse
from django.db import models
class RedPanda(models.Model):
name = models.CharField(max_length=50)
cuteness = models.IntegerField(validators=[MinValueValidator(3)])
def say_something_cute(self):
return "<3" * self.cuteness
def get_absolute_url(self):
return reverse('redpanda-detail', args=[self.name])
``` |
{
"source": "jleeothon/urlkeyword",
"score": 2
} |
#### File: urlkeyword/urlkeyword/validators.py
```python
from django.conf import settings
from django.core.exceptions import ValidationError
__all__ = ['validate_url_keyword']
_default_keywords = ('new', 'edit', 'delete')
_keywords = getattr(settings, 'URL_KEYWORDS', _default_keywords)
def validate_url_keyword(value):
"""
Validates that `value` is not one of the "keywords" to be used in
URL design for this project.
"""
if value in _keywords:
raise ValidationError("Identifier cannot be \"%s\"" % value)
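# Typical wiring (illustrative): attach to a slug-like model field so that
# reserved words cannot collide with CRUD URLs, e.g.
#   name = models.CharField(max_length=50, validators=[validate_url_keyword])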
``` |
{
"source": "jleeothon/urlmodel",
"score": 2
} |
#### File: urlmodel/tests/test_crudurlmodel.py
```python
from __future__ import unicode_literals
from django.test import TestCase
from .models import Region
from .models import Town
class TestTown(TestCase):
def setUp(self):
self.region1 = Region.objects.create(name="Tungurahua")
self.region2 = Region.objects.create(name="Chocolatey")
self.town1 = Town.objects.create(name="Cuenca", region=self.region1)
self.town2 = Town.objects.create(name="Baños de agua santa",
region=self.region1)
def test_list_view(self):
self.assertEquals(Region.list_url(), '/regions')
self.assertEquals(self.region1.list_url(), '/regions')
def test_create_view(self):
pass
def test_detail_view(self):
pass
``` |
{
"source": "JLefebvre55/ESC180-Labs",
"score": 4
} |
#### File: ESC180-Labs/exams/exam.py
```python
import math, numpy as np
################################################################################
# Problem 1 (25 pts)
#
# Up to 5 points will be awarded for making progress toward a correct
# solution.
#
# Assume you are given a list of filenames of text files. Assume
# that the text files only contain the punctuation
# [".", ",", "!", "?", "-"].
# The files may also contain the newline character "\n".
def get_words(content):
'''
Gets all whitespace- or punctuation-separated words from a string.
'''
# Replaces all punctuation and newlines with whitespace
for punc in [".", ",", "!", "?", "-", "\n"]:
content = content.replace(punc, " ")
# Returns the lowercase list of words
return content.lower().split()
def get_frequency(words):
'''
Gets a word-frequency object from a list of words.
'''
freq = {}
for word in words:
if word in freq:
freq[word] += 1
else:
freq[word] = 1
return freq
def get_most(freq):
most = {"word": "", "frequency": 0}
for entry in freq:
if(freq[entry] > most["frequency"]):
most = {"word": entry, "frequency": freq[entry]}
return most
def most_common_frequent_word(files):
frequency = {}
most = {}
for path in files:
frequency[path] = {}
with open(path, "r", encoding="utf-8") as file:
words = get_words(file.read())
frequency[path] = get_frequency(words)
most[path] = get_most(frequency[path])
vmost = 0
for file in most:
if(most[file]["frequency"] > vmost):
amost = most[file]["word"]
vmost = most[file]["frequency"]
pmost = file
# print(f"Most is '{amost}', with {vmost} entries in '{pmost}'.")
return amost
################################################################################
# Problem 2 (20 pts)
#
# This problem will be auto-graded.
#
# Recall that links in an html file are given in the format
# <a href = "http://engsci.utoronto.ca">EngSci homepage</a>
# Write a function that takes in the text of an html file, and returns a dictionary
# whose keys are the link texts (e.g. "EngSci homepage") and whose values are
# the corresponding URLs (e.g., "http://engsci.utoronto.ca"). You can assume
# that link texts do not repeat.
# Sample call:
# get_links('<a href = "http://engsci.utoronto.ca">EngSci homepage</a>')
# should return {"EngSci homepage": "http://engsci.utoronto.ca"}
def get_link_tags(html_text):
'''
Find all link tags in an HTML text
'''
tagstarts = []
tagends = []
for i in range(len(html_text)-1):
if html_text[i:i+2].lower() == "<a":
tagstarts.append(i)
        elif html_text[i:i+3].lower() == "</a":
            tagends.append(i+3)
tags = []
for tag in range(len(tagstarts)):
tags.append(html_text[tagstarts[tag]:tagends[tag]+1])
return tags
def parse_tag(tag):
'''
Parse a link tag into its elements
'''
tag = str(tag)
firstEnd = tag.index('>')
firstQuote = tag.index('"')
return {tag[firstEnd+1:tag.index('<', firstEnd+1)]: tag[firstQuote+1:tag.index('"', firstQuote+1)]}
def get_links(html_text):
'''
Assumes links are formatted exactly as: <a href = "link">title</a>
'''
links = {}
for tag in get_link_tags(str(html_text).replace("\n", "")):
links.update(parse_tag(tag))
return links
###############################################################################
# Problem 3 (10 pts)
#
# Without using for-loops or while-loops, write function for which
# the tight asymptotic bound on the runtime complexity is O((n^2)*log(n)).
# You may create helper functions, as long as they also do not use while-
# and for-loops.
# Justify your answer in a comment. The signature of the function must be
# k*(n^2)*log(n)
def f(n):
if(n < 1):
return
else:
f(n/2)
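# A hedged, loop-free sketch (not part of the original submission) of one way
# to reach Theta(n^2 * log(n)): a recursive counter that makes Theta(n^2)
# calls by binary-splitting the range [0, n*n), invoked once per halving of an
# auxiliary counter m (about log2(n) levels). Names are illustrative only.
def _count_by_splitting(lo, hi):
    # Counts hi - lo with ~2*(hi - lo) recursive calls and O(log(hi - lo)) depth.
    if hi - lo <= 1:
        return hi - lo
    mid = (lo + hi) // 2
    return _count_by_splitting(lo, mid) + _count_by_splitting(mid, hi)
def f_sketch(n, m=None):
    # Theta(n^2) work at each of Theta(log n) levels -> Theta(n^2 * log n).
    if m is None:
        m = max(int(n), 1)
    if m < 1:
        return 0
    return _count_by_splitting(0, int(n) * int(n)) + f_sketch(n, m // 2)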
##############################################################################
###############################################################################
# Problem 4 (15 pts)
#
# This problem will be auto-graded.
#
# It is possible to combine the numbers 1, 5, 6, 7 with arithemtic operations
# to get 21 as follows: 6/(1-5/7).
#
# Write a function that takes in a list of three numbers and a target number, and
# returns a string that contains an expression that uses all the numbers
# in the list once, and results in the target. Assume that the task is possible
# without using parentheses.
#
# For example, get_target_noparens([3, 1, 2], 7) can return "2*3+1" or "1+2*3"
# (either output would be fine).
def get_all_single_ops(nums):
ops = []
# nums = list(nums)
for i in range(len(nums)):
for j in range(i+1, len(nums)):
ops.extend({"exp":f"{nums[i]}+{nums[j]}", "params":[i,j]})
ops.extend({"exp":f"{nums[i]}-{nums[j]}", "params":[i,j]})
ops.extend({"exp":f"{nums[j]}-{nums[i]}", "params":[i,j]})
ops.extend({"exp":f"{nums[i]}*{nums[j]}", "params":[i,j]})
if(nums[j] != 0):
ops.extend({"exp":f"{nums[i]}/{nums[j]}", "params":[i,j]})
if(nums[i] != 0):
ops.extend({"exp":f"{nums[j]}/{nums[i]}", "params":[i,j]})
return nums
def get_op_combinations(nums, exps=[]):
if(len(exps) == 0):
exps.extend([str(n) for n in nums])
for exp in exps:
exp = str(num)
newnums = nums
newnums.remove(num)
return exp.extend(exps)
def get_target_noparens(nums, target):
    # Not implemented here; see the brute-force sketch below.
    # expression.extend(ops[0])
    # expression.extend(ops[1]) IF not (ops0 is +- and ops1 is */)
    pass
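# A hedged brute-force sketch (not the original author's approach) for the
# no-parentheses case: try every ordering of the numbers and every operator
# choice, and let eval() apply the usual precedence rules. The function name
# is illustrative; it returns None when no expression matches the target.
def _get_target_noparens_sketch(nums, target):
    from itertools import permutations, product
    for perm in permutations(nums):
        for chosen in product("+-*/", repeat=len(perm) - 1):
            expr = str(perm[0])
            for op, num in zip(chosen, perm[1:]):
                expr = expr + op + str(num)
            try:
                if abs(eval(expr) - target) < 1e-9:
                    return expr
            except ZeroDivisionError:
                continue
    return None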
################################################################################
# Problem 5 (15 pts)
#
# Up to 3 pts will be awarded for making progress toward a solution.
#
# Now, write the function get_target which returns a string that contains an
# expression that uses all the numbers in the list once, and results in the
# target. The expression can contain parentheses. Assume that the task is
# possible.
# For example, get_target([1, 5, 6, 7], 21) can return "6/(1-5/7)"
def get_target(nums, target):
pass
# See partial work in problem 4.
################################################################################
if __name__ == "__main__":
# most_common_frequent_word(["exams/lorem1.txt", "exams/lorem2.txt"])
# with open("exams/webpage.html", "r") as file:
# print(get_links(file.read()))
n = 100000
f(n)
print(f"{n} : {steps}")
```
#### File: ESC180-Labs/gamify/gamify_old.py
```python
cur_hedons = 0
cur_health = 0
last_hedons = 0
last_health = 0
cur_star = None
cur_star_activity = None
last_star_time = 0
bored_with_stars = False
last_activity = None
last_activity_duration = 0
cur_time = 0
last_finished = -1000
def initialize():
'''Initializes the global variables needed for the simulation. Incomplete'''
global cur_hedons, cur_health, last_hedons, last_health
global cur_time
global last_activity, last_activity_duration
global last_finished
global bored_with_stars
cur_hedons = 0
cur_health = 0
last_hedons = 0
last_health = 0
cur_star = None
cur_star_activity = None
last_star_time = 0
bored_with_stars = False
last_activity = None
last_activity_duration = 0
cur_time = 0
last_finished = -1000
def is_tired():
'''Have there been less than 2 hours since the end of the last exercise?'''
    return cur_time - last_finished < 120
def update_health(health):
global last_health, cur_health
last_health = health
cur_health += health
def update_hedons(hedons):
global last_hedons, cur_hedons
last_hedons = hedons
cur_hedons += hedons
def perform_activity(activity, minutes):
'''
Perform a given activity for a given number of minutes.
Assumes activity is a string, one of "running", "textbooks", or "resting".
'''
    if activity not in ["running", "textbooks", "resting"] or minutes < 0:
return
result_health = 0
result_hedons = 0
duration = minutes
#If we are continuing the same activity, remove last increment, combine last minutes
result_health += estimate_health_delta(activity, minutes)
result_hedons += estimate_hedons_delta(activity, minutes)
    global last_activity, last_activity_duration, cur_time
    update_health(result_health)
    update_hedons(result_hedons)
    last_activity = activity
    last_activity_duration = minutes
    cur_time += minutes
def estimate_hedons_delta(activity, minutes):
if activity == "running":
if is_tired():
return -2*minutes
else:
return 2*min(minutes, 10)-2*max(minutes-10, 0)
if activity == "textbooks":
if is_tired():
return -2*minutes
else:
return min(minutes, 20)-max(minutes-20, 0)
else:
return 0
def estimate_health_delta(activity, minutes):
result = 0
if activity == last_activity:
result -= last_health
minutes += last_activity_duration
if activity == "running":
result+=min(minutes,180)*3 + max(0, minutes-180)
if activity == "textbooks":
return 2*minutes
else:
return
def star_can_be_taken(activity):
return activity == cur_star_activity and not bored_with_stars and last_star_time == cur_time
def get_cur_hedons():
return cur_hedons
def get_cur_health():
return cur_health
def offer_star(activity):
return last_star_time == cur_time
def most_fun_activity_minute():
    return max(["running", "textbooks", "resting"], key=lambda a: estimate_hedons_delta(a, 1))
################################################################################
#These functions are not required, but we recommend that you use them anyway
#as helper functions
def get_effective_minutes_left_hedons(activity):
'''Return the number of minutes during which the user will get the full
amount of hedons for activity activity'''
pass
def get_effective_minutes_left_health(activity):
pass
###############################################################################
if __name__ == "__main__":
perform_activity("running", 120)
print(get_cur_health())
perform_activity("running", 30)
print(get_cur_health())
perform_activity("running", 50)
print(get_cur_health())
# initialize()
# perform_activity("running", 30)
# print(get_cur_hedons()) #-20 = 10 * 2 + 20 * (-2)
# print(get_cur_health()) #90 = 30 * 3
# print(most_fun_activity_minute()) #resting
# perform_activity("resting", 30)
# offer_star("running")
# print(most_fun_activity_minute()) #running
# perform_activity("textbooks", 30)
# print(get_cur_health()) #150 = 90 + 30*2
# print(get_cur_hedons()) #-80 = -20 + 30 * (-2)
# offer_star("running")
# perform_activity("running", 20)
# print(get_cur_health()) #210 = 150 + 20 * 3
# print(get_cur_hedons()) #-90 = -80 + 10 * (3-2) + 10 * (-2)
# perform_activity("running", 170)
# print(get_cur_health()) #700 = 210 + 160 * 3 + 10 * 1
# print(get_cur_hedons()) #-430 = -90 + 170 * (-2)
```
#### File: ESC180-Labs/gamify/gamify.py
```python
cur_hedons = 0
cur_health = 0
#Current stars assignment time (integer) and activity (string).
cur_star = None
cur_star_activity = None
#Stores if the user is bored with stars (boolean), and the times of the last 3 star offers (integers).
bored_with_stars = False
last_3_stars = [-1000, -1000, -1000]
#Which activity did the user last complete (string)?
#How long had the latest activity been going on, cumulatively (integer)?
last_activity = None
cumulative_activity_duration = 0
#Current time (integer)
cur_time = 0
#At what time did the last exercise (running/textbooks) end (integer)?
last_exercise_end = -1000
def initialize():
'''Initialize the global variables needed for the simulation.'''
global cur_hedons, cur_health, cur_star, cur_star_activity, bored_with_stars, last_3_stars
global last_activity, cumulative_activity_duration, cur_time, last_exercise_end
cur_hedons = 0
cur_health = 0
cur_star = None
cur_star_activity = None
bored_with_stars = False
last_3_stars = [-1000, -1000, -1000]
last_activity = None
cumulative_activity_duration = 0
cur_time = 0
last_exercise_end = -1000
def is_tired():
'''
Is the user tired?
The user is tired if they finished running or carrying textbooks less than 2 hours before the current activity started.
'''
return last_exercise_end + 120 > cur_time
def get_effective_minutes_left_hedons(activity):
'''Return the number of minutes during which the user will get the full
amount of hedons for activity 'activity'. Returns 0 by default, or if unlimited minutes left.'''
if activity == "resting":
return 0
elif activity == "running":
if not is_tired():
result = 10
else:
return 0
elif activity == "textbooks":
if not is_tired():
result = 20
else:
return 0
else:
return 0
if activity == last_activity:
result = max(result - cumulative_activity_duration, 0)
return result
def get_effective_minutes_left_health(activity):
'''Return the number of minutes during which the user will get the full
amount of health points for activity 'activity'. Returns -1 if activity is invalid, or unlimited minutes left.'''
if activity == "running":
result = 180
elif activity == "textbook":
return 0
else:
return 0
if activity == last_activity:
result = max(result-cumulative_activity_duration, 0)
return result
def estimate_hedons_delta(activity, duration):
'''Return the amount of hedons the user would get for performing activity
'activity' for duration minutes'''
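    # Worked example: 30 fresh minutes of running with no star available gives
    # 2*10 - 2*(30 - 10) = -20 hedons, matching the test block at the bottom.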
effective = get_effective_minutes_left_hedons(activity)
star_hedons = 0
if star_can_be_taken(activity):
star_hedons = 3 * min(duration, 10)
if activity == "running":
if is_tired():
return -2 * duration + star_hedons
else:
return 2 * min(effective, duration) - 2 * max(duration-effective, 0) + star_hedons
if activity == "textbooks":
if is_tired():
return -2 * duration + star_hedons
else:
return min(effective, duration) - max(duration-effective, 0) + star_hedons
return star_hedons
def estimate_health_delta(activity, duration):
'''Return the amount of health points the user would get for performing activity
'activity' for duration 'duration'.'''
effective = get_effective_minutes_left_health(activity)
if activity == "running":
return 3 * min(effective, duration) + max(duration-effective, 0)
if activity == "textbooks":
return 2 * duration
if activity == "resting":
return 0
def perform_activity(activity, duration):
'''
Perform a given activity 'activity' for a given duration 'duration'.
Calculates and applies hedons, health points, stars, etc.
'''
global bored_with_stars, cur_health, cur_hedons, cumulative_activity_duration, last_activity, cur_time, last_exercise_end
#Check activity and duration
if activity not in ["running", "textbooks", "resting"]:
return
#Check star boredom
if last_3_stars[2] + 180 > cur_time:
bored_with_stars = True
#Get and apply hedons and health
cur_hedons += estimate_hedons_delta(activity, duration)
cur_health += estimate_health_delta(activity, duration)
#Check match, reset consecutivity
if activity != last_activity:
cumulative_activity_duration = duration
last_activity = activity
#Add time, set last exercise end
cur_time += duration
if activity in ["running", "textbooks"]:
last_exercise_end = cur_time
def star_can_be_taken(activity):
'''
Is there a star that can be taken and used on a given activity 'activity'?
True iff no time passed between the star’s being offered and the activity, and the user is not bored with
stars, and the star was offered for this activity.
'''
return cur_star_activity == activity and not bored_with_stars and cur_star == cur_time
def get_cur_hedons():
'''
Get accumulated hedons so far.
'''
return cur_hedons
def get_cur_health():
'''
Get accumulated health points so far.
'''
return cur_health
def offer_star(activity):
'''
Offer the user a star for a given activity 'activity'. Shifts star tracking list.
'''
global cur_star, cur_star_activity, last_3_stars
#Check activity
if activity not in ["running", "textbooks", "resting"]:
return
#Set star activity and time
cur_star = cur_time
cur_star_activity = activity
#Shift stars, add latest
last_3_stars[1:3] = last_3_stars[0:2]
last_3_stars[0] = cur_star
def most_fun_activity_minute():
'''
Which activity would give the most hedons for one minute?
'''
best = "resting" #True neutral, always zero
hedons = 0
for a in ["running", "textbooks", "resting"]:
h = estimate_hedons_delta(a, 1)
if h > hedons:
best = a
hedons = h
return best
# Test code
if __name__ == '__main__':
initialize()
perform_activity("running", 30)
print(get_cur_hedons()) #-20 = 10 * 2 + 20 * (-2)
print(get_cur_health()) #90 = 30 * 3
print(most_fun_activity_minute()) #resting
perform_activity("resting", 30)
offer_star("running")
print(most_fun_activity_minute()) #running
perform_activity("textbooks", 30)
print(get_cur_health()) #150 = 90 + 30*2
print(get_cur_hedons()) #-80 = -20 + 30 * (-2)
offer_star("running")
perform_activity("running", 20)
print(get_cur_health()) #210 = 150 + 20 * 3
print(get_cur_hedons()) #-90 = -80 + 10 * (3-2) + 10 * (-2)
perform_activity("running", 170)
print(get_cur_health()) #700 = 210 + 160 * 3 + 10 * 1
print(get_cur_hedons()) #-430 = -90 + 170 * (-2)
```
#### File: ESC180-Labs/labs/lab4_old.py
```python
from math import pi
def count_evens(L):
'''
Problem 1
Count even integers in list L.
'''
evens = 0
for i in L:
if i % 2 == 0:
evens+=1
return evens
def list_to_str(L):
'''
Problem 2
Convert a list L to a string representation without str().
i.e. [1, 2, 3]
'''
#List start
s = "["
#Items 0 to len-1 (with separator)
for i in range(0,len(L)-1):
s += str(L[i])
s += ', '
#Final item (no separator)
s+= str(L[-1])
s+= "]"
return s
def lists_are_the_same(list1, list2):
'''
Problem 3
Compare two lists list1 and list2 without ==.
True iff lists contain equivalent elements in the same order.
'''
for i in range(0, len(list1)):
if(list1[i] is not list2[i]):
return False
return True
# steps = 0
def simplify_fraction(n, m):
'''
Problem 4
Simplify a fraction n/m. Returns a 2-tuple (n',m')
'''
#Divisor cannot be 1 (short circuit) or larger than the smaller of the denominator or numerator (inclusive)
for div in range(2, max(n,m)):
if(n % div == 0 and m % div == 0):
n = int(n/div)
m = int(m/div)
print("{}: {}/{}".format(div,n,m))
return (n,m)
def leibniz(n):
'''
Problem 5 a)
Compute an approximation for π using the Leibniz formula for 'n' steps.
'''
total = 0
for k in range(n):
total += 4*((-1)**k)/(2*k+1)
# print("{}: {}".format(k, 4*total))
return total
#Truncates float 'i' to 'n' digits
def truncate(i, n):
return int(i*10**n)/10**n
def accuracy(lpi):
'''
Problem 5 b)
Compute the number of digits of accuracy of 'lpi' as an approximation for π.
'''
n = 0
while(True):
npi = truncate(pi, n)
nlpi = truncate(lpi, n)
if(npi == nlpi):
n+=1
else:
return n
def toAccuracy(target):
'''
Problem 5 c)
Iterates the Leibniz summation for π until the approximation is accurate to 'target' digits.
'''
k=0
lpi = 4
acc = 0
while(acc < target):
k+=1
lpi += 4*((-1)**k)/(2*k+1)
acc = accuracy(lpi)
return k
def toAccuracyFaster(target):
'''
Same as 5 c), but faster.
'''
k=10**(target-1) #Leibniz steps
lpi = leibniz(k) #Let's get like 10^target-1 out of the way
acc = accuracy(lpi)
while(acc < target):
lpi += 4*((-1)**k)/(2*k+1)
k+=1
acc = accuracy(lpi)
return k
#Problem 6
# esteps = 0
def euclid(a,b):
# global esteps
if(a%b==0):
# result = (b,esteps)
# esteps = 0
# return result
return b
# esteps += 1
return euclid(b,a%b)
#Testing
lis = [1,2,3,4,5,6]
print("P1: There are {} even numbers in {}.".format(count_evens(lis), lis))
print("P2: {}.".format(list_to_str(lis)))
print("P3 (T1): {} == {}? {}".format(lis, lis, lists_are_the_same(lis, lis)))
lis2 = [2,2,3,9,5,6]
print("P3 (T2): {} == {}? {}".format(lis, lis2, lists_are_the_same(lis, lis2)))
fraction = (2322,654)
simp = simplify_fraction(*fraction) #3/13 * [3,3,3,5,17]
print("Problem 4 (1): {}/{} simplifies to {}/{}.".format(*fraction, *simp))
esimp = euclid(*fraction)
print("Problem 4 (2): \na) GCF of {} and {}: {}.".format(*fraction, *esimp))
print("b) {}/{} simplifies to {}/{}".format(*fraction, *[int(x/esimp[0]) for x in fraction]))
# lpi = leibniz(100000)
# Uncomment for continuous Leibniz step-accuracy generation
# n=1
# while True:
# print(toAccuracyFaster(n))
# n+=1
```
#### File: ESC180-Labs/synonyms/synonyms.py
```python
import math
def norm(vec):
'''Return the norm of a vector stored as a dictionary,
as described in the handout for Project 3.
'''
sum_of_squares = 0.0
for x in vec:
sum_of_squares += vec[x] * vec[x]
return math.sqrt(sum_of_squares)
def cosine_similarity(vec1, vec2):
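    # Cosine similarity of two sparse vectors stored as dicts:
    # dot(vec1, vec2) / (|vec1| * |vec2|), summing the dot product only over
    # the keys the two dictionaries share.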
t = sum([vec1[i]*vec2[i] for i in set(vec1).intersection(set(vec2))])
j = sum([vec1[i]**2 for i in vec1])
k = sum([vec2[i]**2 for i in vec2])
return t/math.sqrt(j*k)
def build_semantic_descriptors(sentences):
semdesc = {}
i = 0
for sentence in sentences:
#Only concerned with number of SENTENCES IN COMMON, does not include repetition per sentence
for word in set(sentence):
if not word in semdesc:
i+=1
# if i %10000 == 0:
# print(f"{i} unique words catalogged...")
semdesc[word] = {w:1 for w in set(sentence).difference({word})}
else:
for w in set(sentence).difference({word}):
if not w in semdesc[word]:
semdesc[word][w]=1
else:
semdesc[word][w]=semdesc[word][w]+1
return semdesc
def build_semantic_descriptors_from_files(filenames):
semdesc = {}
for filename in filenames:
with open(filename, "r", encoding="utf-8") as file:
s = file.read().lower()
#Remove unnecessary punctuation - including wierd utf-8 nonsense - while preserving contractions, replace sentence-ending punct. with "."
for punc in [",", "-", "--", ":", ";", "_", "*", "(", ")", '"', "\n", "“", " ‘", " '", "' ", "‘ ", "”", " ’", "’ ", "—"]:
s = s.replace(punc, " ")
for punc in ["?", "!", "....", "..."]:
s = s.replace(punc, ".")
#Split text into sentences via "."
sentences = s.split(".")
#Split sentences into words, removes those little in betweeners
sentences = [[word for word in sentence.split() if word != ""] for sentence in sentences]
#Build semdesc
new_semdesc = build_semantic_descriptors(sentences)
#Combine the new with the old
for word in new_semdesc:
if not word in semdesc:
semdesc[word] = new_semdesc[word]
else:
for entry in new_semdesc[word]:
if not entry in semdesc[word]:
semdesc[word][entry] = new_semdesc[word][entry]
else:
semdesc[word][entry] = semdesc[word][entry]+new_semdesc[word][entry]
file.close()
return semdesc
def most_similar_word(word, choices, semantic_descriptors, similarity_fn):
largest = choices[0]
largest_sim = -1
for choice in choices:
if (not word in semantic_descriptors) or (not choice in semantic_descriptors):
continue
val = similarity_fn(semantic_descriptors[word], semantic_descriptors[choice])
if val > largest_sim:
largest = choice
largest_sim = val
return largest
def run_similarity_test(filename, semantic_descriptors, similarity_fn):
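    # Each line of the test file is: query_word correct_answer choice1 choice2 ...
    # Returns the percentage of queries for which the most similar choice
    # matches the given answer.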
correct = 0
total = 0
with open(filename, "r", encoding="utf-8") as file:
for test in file.readlines():
q = test.lower().replace("\n", "").split()
total +=1
sim = most_similar_word(q[0], q[2:], semantic_descriptors, similarity_fn)
if sim == q[1]:
correct +=1
return 100.0*correct/total
if __name__ == "__main__":
# dict1 = {"i": 3, "am": 3, "a": 2, "sick": 1, "spiteful": 1, "an": 1, "unattractive": 1}
# dict2 = {"i": 1, "believe": 1, "my": 1, "is": 1, "diseased": 1}
# print(cosine_similarity(dict1, dict2))
# print(cosine_similarity({"a": 1, "b": 2, "c": 3}, {"b": 4, "c": 5, "d": 6}))
# sentences = [["i", "am", "a", "sick", "man"],
# ["i", "am", "a", "spiteful", "man"],
# ["i", "am", "an", "unattractive", "man"],
# ["i", "believe", "my", "liver", "is", "diseased"],
# ["however", "i", "know", "nothing", "at", "all", "about", "my",
# "disease", "and", "do", "not", "know", "for", "certain", "what", "ails", "me"]]
# print(build_semantic_descriptors(sentences)["man"])
semdesc = build_semantic_descriptors_from_files(["wp.txt", "sw.txt"])
res = run_similarity_test("test.txt", semdesc, cosine_similarity)
print(res, "of the guesses were correct")
``` |
{
"source": "jleffert/TinyTax",
"score": 2
} |
#### File: TinyTax/tinytax/transaction_group.py
```python
class TransactionGroup:
def __init__(self, id):
self.id = id
self.transactions = []
self.action = ''
```
#### File: tinytax/transactions/transaction_builder.py
```python
from transactions.payment_transaction import PaymentTransaction
from transactions.asa_transaction import AsaTransaction
from transactions.application_transaction import ApplicationTransaction
def transaction_builder(wallet, data):
match data['tx-type']:
case 'pay':
return PaymentTransaction(wallet, data)
case 'axfer':
return AsaTransaction(wallet, data)
case 'appl':
return ApplicationTransaction(wallet, data)
```
#### File: tinytax/transactions/transaction.py
```python
from datetime import datetime
from contracts import yieldlyDB, algofiDB
TRANSACTION_TYPES = {
'pay' : 'ALGO Transaction',
'axfer' : 'ASA Transaction',
'appl' : 'Application Transaction'
}
class Transaction:
def __init__(self, wallet, data):
self.wallet = wallet
self.id = data['id']
self.sender = data['sender']
self.date = str(datetime.fromtimestamp(data['round-time']))
self.platform = None
self.out_quantity = ''
self.out_asset_id = ''
self.in_quantity = ''
self.in_asset_id = ''
self.fee_id = 'ALGO'
if 'group' in data:
self.group_id = data['group']
else:
self.group_id = None
if data['sender'] == self.wallet.address:
self.fee = data['fee']
else:
# No fees when user is not sender
self.fee = 0
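    # Note: self.receiver is read by set_platform()/set_rewards() below but is
    # not assigned in this base class; the concrete transaction subclasses
    # presumably set it from their own payload data.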
def readable_transaction_type(self):
if self.transaction_type in TRANSACTION_TYPES:
return TRANSACTION_TYPES[self.transaction_type]
else:
return 'Unknown'
def set_platform(self):
if self.sender in yieldlyDB or self.receiver in yieldlyDB:
self.platform = 'Yieldly'
elif self.sender in algofiDB or self.receiver in algofiDB:
self.platform = 'Algofi'
else:
self.platform = None
# TODO: Detect Tinyman
def set_rewards(self, data):
if self.sender == self.wallet.address and data['sender-rewards'] > 0:
self.rewards = data['sender-rewards']
elif self.receiver == self.wallet.address and data['receiver-rewards'] > 0:
self.rewards = data['receiver-rewards']
else:
self.rewards = 0
``` |
{
"source": "jlefkoff/GridBoard",
"score": 3
} |
#### File: jlefkoff/GridBoard/splashApp.py
```python
from PIL import Image, ImageDraw, ImageFont
from get_buttons import read
from datetime import datetime
import time
class splashApp():
def __init__(self,rgbmatrix,total_rows,total_columns):
self.rgbmatrix = rgbmatrix
self.total_rows = total_rows
self.total_columns = total_columns
self.seconds_per_screen = 1
self.last_update_time = datetime.now()
self.home_image_index = 0
home1 = Image.open("converted-gifs/Top/Homescreen/Homescreen.gif01.gif").convert("RGB")
home2 = Image.open("converted-gifs/Top/Homescreen/Homescreen.gif02.gif").convert("RGB")
self.home_images = (home1, home2)
self.num_home_images = len(self.home_images)
###############################################
# run()
#
# This function cycles through all the animated gifs until a button is
# pressed.
###############################################
def run(self):
press_detected = False
while press_detected == False:
current_time = datetime.now()
deltaT = current_time - self.last_update_time
# check to see if it's time to switch the animated gif
if deltaT.total_seconds() > self.seconds_per_screen:
self.home_image_index = (self.home_image_index + 1) % self.num_home_images
self.rgbmatrix.SetImage(self.home_images[self.home_image_index],0,0)
self.last_update_time = current_time
# check for button presses
# right now, any button press will advance us. Eventually want to
# tweak this so that only the right areas of the screen advance.
my_button = read()
if (my_button != None):
print("button event")
if my_button[2] == 'P':
print("Press detected!")
press_detected = True
if (my_button[1] == 7):
if (my_button[0] == 3) or (my_button[0] == 4):
return "ee"
# allow for other processes to run
time.sleep(0.01)
# once we have a button press, exit. From the splash screen we always
# go to the game select screen.
return "select"
``` |
{
"source": "jleg13/Django-REST-API",
"score": 3
} |
#### File: gallery/tests/test_gallery_item_api.py
```python
import tempfile
import os
from PIL import Image
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework.test import APIClient
from rest_framework import status
from core.models import GalleryItem, Gallery
from gallery.serializers import GalleryItemSerializer
GALLERY_ITEM_URL = reverse('gallery:galleryitem-list')
def image_upload_url(gallery_item_id):
"""Return URL for image upload"""
return reverse('gallery:galleryitem-upload-image', args=[gallery_item_id])
class PublicGalleryItemApiTests(TestCase):
"""Test the public gallery item API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login is required for retrieving gallery item"""
res = self.client.get(GALLERY_ITEM_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateGalleryItemApiTests(TestCase):
"""Test the authorized user gallery item API"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'<EMAIL>',
'<PASSWORD>'
)
self.client.force_authenticate(self.user)
def test_retrieve_gallery_item_(self):
"""Test retrieving gallery item list"""
GalleryItem.objects.create(user=self.user,
name='Item 1',
blurb='Blurb 1')
GalleryItem.objects.create(user=self.user,
name='Item 2',
blurb='Blurb 2')
res = self.client.get(GALLERY_ITEM_URL)
gallery_item = GalleryItem.objects.all().order_by('-name')
serializer = GalleryItemSerializer(gallery_item, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_gallery_item_limited_to_user(self):
"""Test that gallery item returned are for authenticated user"""
user2 = get_user_model().objects.create_user(
'<EMAIL>',
'<PASSWORD>'
)
GalleryItem.objects.create(user=user2,
name='Item 3',
blurb='Blurb 3')
gallery_item = GalleryItem.objects.create(user=self.user,
name='Item 4',
blurb='Burb 4')
res = self.client.get(GALLERY_ITEM_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], gallery_item.name)
def test_create_gallery_item_success(self):
"""Test creating a new gallery item"""
payload = {'name': 'Item 5', 'blurb': 'Blurb 5'}
self.client.post(GALLERY_ITEM_URL, payload)
exists = GalleryItem.objects.filter(
user=self.user,
name=payload['name'],
blurb=payload['blurb']
).exists()
self.assertTrue(exists)
def test_create_gallery_item_invalid(self):
"""Test creating a new gallery item with invalid payload"""
payload = {'': ''}
res = self.client.post(GALLERY_ITEM_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
class Gallery_Item_Image_Upload_Tests(TestCase):
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'<EMAIL>',
'<PASSWORD>'
)
self.client.force_authenticate(self.user)
self.gallery_item = GalleryItem.objects.create(user=self.user)
def tearDown(self):
self.gallery_item.image.delete()
def test_upload_image_to_gallery_item(self):
"""Test uploading an image to gallery item"""
url = image_upload_url(self.gallery_item.id)
with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:
img = Image.new('RGB', (10, 10))
img.save(ntf, format='JPEG')
ntf.seek(0)
res = self.client.post(url, {'image': ntf}, format='multipart')
self.gallery_item.refresh_from_db()
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertIn('image', res.data)
self.assertTrue(os.path.exists(self.gallery_item.image.path))
def test_upload_image_bad_request(self):
"""Test uploading an invalid image"""
url = image_upload_url(self.gallery_item.id)
res = self.client.post(url, {'image': 'notimage'}, format='multipart')
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_gallery_items_assigned_to_galleries(self):
"""Test filtering gallery items by those assigned to Galleries"""
gallery_item1 = GalleryItem.objects.create(
user=self.user,
name='Item 1',
blurb='Blurb 1'
)
gallery_item2 = GalleryItem.objects.create(
user=self.user,
name='Item 2',
blurb='Blurb 2'
)
gallery = Gallery.objects.create(
title='Gallery 1',
description='Gallery 1 description',
user=self.user
)
gallery.gallery_items.add(gallery_item1)
res = self.client.get(GALLERY_ITEM_URL, {'assigned_only': 1})
serializer1 = GalleryItemSerializer(gallery_item1)
serializer2 = GalleryItemSerializer(gallery_item2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_gallery_items_assigned_unique(self):
"""Test filtering gallery items by assigned returns unique items"""
gallery_item = GalleryItem.objects.create(
user=self.user,
name='Item 1',
blurb='Blurb 1'
)
GalleryItem.objects.create(
user=self.user,
name='Item 2',
blurb='Blurb 2'
)
gallery1 = Gallery.objects.create(
title='Gallery 1',
description='Gallery 1 description',
user=self.user
)
gallery2 = Gallery.objects.create(
title='Gallery 2',
description='Gallery 2 description',
user=self.user
)
gallery1.gallery_items.add(gallery_item)
gallery2.gallery_items.add(gallery_item)
res = self.client.get(GALLERY_ITEM_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
``` |
{
"source": "jlegere45/Password-Safe-v.086",
"score": 3
} |
#### File: jlegere45/Password-Safe-v.086/Files.py
```python
from Crypto.Cipher import AES
import os
import base64
"""
This class contains the methods for encrypting and decrypting
the file containing the keys. Each value is separated with two ',' because occasionally the encryption output can itself contain a single ','. I will update the code to export to JSON files in the future.
"""
class KeyFile:
def __init__(self, path, masterKey):
self.path = path
self.masterKey = masterKey
def encryptFile(self):
#This function is never used but may be in later versions
f = open(self.path, 'r')
text = f.read()
contents = text.split(',,')
        print(contents)
f.close()
f = open(self.path, 'w')
f.write('%s,,' % self.iv)
for i in range (len(contents)):
encryptor = AES.new(self.masterKey, AES.MODE_CFB, self.iv)
f.write('%s,,' % encryptor.encrypt(contents[i]))
f.close()
def encryptKey(self, key):
#Encrypts the key for a login object
f = open(self.path, 'a')
try:
encryptor = AES.new(self.masterKey, AES.MODE_CFB, self.getIV())
f.write('%s,,' % encryptor.encrypt(key))
except:
#Generate iv for first time
iv = base64.b64encode(os.urandom(10))
encryptor = AES.new(self.masterKey, AES.MODE_CFB, iv)
f.write('%s,,' % iv)
f.write('%s,,' % encryptor.encrypt(key))
f.close()
def encryptIV(self, iv):
#Encrypts the iv for a login object
f = open(self.path, 'a')
encryptor = AES.new(self.masterKey, AES.MODE_CFB, self.getIV())
f.write('%s,,' % encryptor.encrypt(iv))
f.close()
def decryptFile(self):
        #This function is also never used but may be in later versions
iv = self.getIV()
f = open(self.path, 'r')
text = f.read()
contents = text.split(',,')
del contents[0]
f.close()
f = open(self.path, 'w')
        print(contents)
for i in range (len(contents)):
decryptor = AES.new(self.masterKey, AES.MODE_CFB, iv)
f.write('%s,,' % decryptor.decrypt(contents[i]))
f.close()
def decryptKey(self, index):
        #Decrypts the key and returns it based on the program-specified index
f = open(self.path, 'r')
text = f.read()
contents = text.split(',,')
del contents[0]
contents[index]
decryptor = AES.new(self.masterKey, AES.MODE_CFB, self.getIV())
key = decryptor.decrypt(contents[index])
f.close()
return key
def decryptIV(self, index):
#returns the decrypted iv
f = open(self.path, 'r')
text = f.read()
contents = text.split(',,')
del contents[0]
decryptor = AES.new(self.masterKey, AES.MODE_CFB, self.getIV())
iv = decryptor.decrypt(contents[index])
f.close()
return iv
def getIV(self):
#Returns the iv for this encryption
f = open(self.path, 'r')
iv = f.read(16)
return iv
```
#### File: jlegere45/Password-Safe-v.086/keyClass.py
```python
class keyIV:
def __init__(self, key, iv):
self.key = key
self.iv = iv
#Don't need to mutate this data
def get_key(self):
return self.key
def get_iv(self):
return self.iv
``` |
{
"source": "jlegind/GBIF-API-caller",
"score": 3
} |
#### File: jlegind/GBIF-API-caller/api_dormant_pubs.py
```python
import json
import requests
import csv
delivery = []
#List to be populated by the api_dormant function. Delivery will be returned
# upon the 'endOfRecords' response being True
def api_dormant(url, offset, limit=200):
'''Recursive function where the JSON web content is collected'''
#url: a url string with var placeholder, like "https://api.gbif.org/v1/organization/nonPublishing?limit=200&offset={}"
#offset is the incrementor for paging.
#Limit holds the size of the JSON response.
#Returns the function recursively until the condition ['endOfRecords'] is reached.
nurl = url.format(offset)
print('the new url : ', nurl)
resp = requests.get(nurl)
spons = json.loads(resp.text)
jresp = spons['results']
print(len(jresp))
delivery.append(jresp)
print('endrecs bool: ', spons['endOfRecords'])
if spons['endOfRecords']:
return delivery
print('no end yet, hombre')
noffset = spons['offset'] + limit
#The neat thing here is that by adding 'offset' and 'limit', it is impossible to jump ahead and miss records (results)
return api_dormant(url, noffset, limit)
r = api_dormant('https://api.gbif.org/v1/organization/nonPublishing?limit=200&offset={}', 0)
#Here the API url can be replaced with the desired API.
csv_cols = ['key', 'endorsementStatus', 'title', 'endorsingNodeKey']
#The JSON keys of interest aka. these are the fields I want to collect from the API response.
row = dict.fromkeys(csv_cols)
#Dict initialized
file_csv = 'dormant_23032021.csv'
#output file
try:
with open(file_csv, 'w', newline='', encoding='utf-8') as fcsv:
writer = csv.DictWriter(fcsv, delimiter="\t", fieldnames=csv_cols)
writer.writeheader()
print(type(r), len(r))
flat_list = [item for sublist in r for item in sublist]
#List of Multiple lists is flattened to one list with a dict for each row
print(type(r), len(flat_list))
for j in flat_list:
for elem in row:
row[elem] = j[elem]
writer.writerow(row)
except IOError:
print('io error')
``` |
{
"source": "jlehker/bestlists",
"score": 2
} |
#### File: todovoodoo/core/models.py
```python
import datetime
import json
import os
import uuid
from calendar import day_name
from decimal import Decimal
from autoslug import AutoSlugField
from dateutil.relativedelta import relativedelta
from dateutil.rrule import DAILY, MONTHLY, WEEKLY, YEARLY, rrule, weekday
from django.contrib.postgres import fields
from django.db import models
from django.forms import model_to_dict
from django.urls import reverse
from django.utils.timezone import localdate, now
from django_slugify_processor.text import slugify
from model_utils import Choices
from model_utils.models import TimeStampedModel
from phonenumber_field.modelfields import PhoneNumberField
from todovoodoo.users.models import User
class ListItem(TimeStampedModel):
todo_list = models.ForeignKey("TodoList", on_delete=models.CASCADE)
description = models.TextField(blank=True)
due_date = models.DateField()
always_show = models.BooleanField(default=False)
pub_id = models.UUIDField(unique=True, default=uuid.uuid4, editable=False)
def postpone(self, days: int = 0):
self.due_date = localdate(now()) + relativedelta(days=days)
self.save(update_fields=["due_date"])
def mark_complete(self):
self.due_date = self._next_due_date
self.always_show = False
self.save(update_fields=["due_date", "always_show"])
@property
def _next_due_date(self) -> datetime.date:
""" Calculates the next time item will appear in master list. """
due_date, *_ = rrule(
freq=self.todo_list.frequency,
interval=self.todo_list.interval,
count=1,
dtstart=self.due_date + relativedelta(days=1),
byweekday=(*[weekday(i) for i in self.todo_list.weekdays],),
)
return due_date.date()
class TodoList(TimeStampedModel):
FREQUENCY = Choices(
(DAILY, "daily", "Daily"),
(WEEKLY, "weekly", "Weekly"),
(MONTHLY, "monthly", "Monthly"),
(YEARLY, "yearly", "Yearly"),
)
INTERVAL = [(i, i) for i in range(600)]
WEEKDAYS = Choices(*[(num, name.lower(), name) for num, name in enumerate(day_name)])
frequency = models.PositiveSmallIntegerField(choices=FREQUENCY, default=FREQUENCY.weekly)
interval = models.PositiveSmallIntegerField(choices=INTERVAL, default=1)
weekdays = fields.ArrayField(
models.PositiveSmallIntegerField(default=WEEKDAYS.monday), size=8, default=list
)
owner = models.ForeignKey(User, on_delete=models.CASCADE)
name = models.TextField()
pub_id = models.UUIDField(unique=True, default=uuid.uuid4, editable=False)
class Meta:
unique_together = ("owner", "name")
def add_todo(self, description: str, due_date: datetime.date) -> ListItem:
return ListItem.objects.create(todo_list=self, description=description, due_date=due_date)
@property
def as_json(self) -> str:
return json.dumps(model_to_dict(self))
# --------
class Station(TimeStampedModel):
"""
Stations like "towel rack" or "dish washing station" that are defined by the administrator.
"""
STATION_TYPES = Choices(("standard", "Standard"), ("checkin", "Check-In/Check-Out"))
owner = models.ForeignKey(User, on_delete=models.CASCADE)
pub_id = models.UUIDField(
unique=True,
default=uuid.uuid4,
editable=False,
help_text="Publicly viewable station identifier.",
)
name = models.TextField(
blank=False,
default="New Station",
help_text="Name of the station. (e.g.'Towel Station', 'Bathroom')",
)
slug = AutoSlugField(always_update=True, populate_from="name", slugify=slugify, unique=True)
description = models.TextField(
blank=True,
help_text="Description of what to include in a report entry. (e.g. 'take a picture of the towels')'",
)
refund_value = models.DecimalField(max_digits=9, decimal_places=2, default=Decimal("0"))
station_type = models.TextField(
null=True, choices=STATION_TYPES, default=STATION_TYPES.standard
)
class Meta:
unique_together = ("owner", "name")
def get_absolute_url(self):
return reverse("stations-public-view", args=[self.slug])
class StationItem(TimeStampedModel):
"""
Individual item descriptions for a station.
"""
ITEM_TYPES = Choices(("text", "Text"), ("boolean", "Boolean"), ("number", "Number"))
station = models.ForeignKey("Station", on_delete=models.CASCADE)
pub_id = models.UUIDField(unique=True, default=uuid.uuid4, editable=False)
description = models.TextField(
blank=True,
help_text="Description of what to include in a report entry. (e.g. 'take a picture of the towels')'",
)
item_type = models.TextField(null=True, choices=ITEM_TYPES, default=ITEM_TYPES.text)
def get_file_path(instance, filename):
ext = filename.split(".")[-1]
today = localdate(now())
return f"photos/user_uploads/{today.year}/{today.month}/{today.day}/{uuid.uuid4()}.{ext}"
class ReportEntry(TimeStampedModel):
"""
A guest user report entry.
"""
REPORT_TYPES = Choices(("checkout", "Check-Out"), ("checkin", "Check-In"), ("other", "Other"))
station = models.ForeignKey("Station", null=True, on_delete=models.SET_NULL)
description = models.TextField(
blank=True, help_text="Description of the state of the current state of the station."
)
photo_upload = models.ImageField(
null=True, upload_to=get_file_path, help_text="Photo taken of the station."
)
phone_number = PhoneNumberField(blank=True, help_text="Reporter's phone number.")
report_type = models.TextField(null=True, choices=REPORT_TYPES, default=REPORT_TYPES.other)
def set_report_type(self):
if self.station and self.station.station_type == Station.STATION_TYPES.checkin:
last_entry = ReportEntry.objects.filter(
station=self.station, phone_number=self.phone_number
).last()
if last_entry and last_entry.report_type == self.REPORT_TYPES.checkin:
self.report_type = self.REPORT_TYPES.checkout
else:
self.report_type = self.REPORT_TYPES.checkin
class ReportEntryItem(TimeStampedModel):
instructions = models.TextField(blank=True)
report_entry = models.ForeignKey(
"ReportEntry", on_delete=models.CASCADE, related_name="%(class)s_items"
)
class Meta:
abstract = True
class ReportEntryText(ReportEntryItem):
data = models.TextField(null=False)
class ReportEntryBoolean(ReportEntryItem):
data = models.BooleanField()
class ReportEntryNumber(ReportEntryItem):
data = models.IntegerField()
```
#### File: core/tests/test_models.py
```python
import pytest
from datetime import date
from django.conf import settings
from todovoodoo.core.tests.factories import TodoListFactory
pytestmark = pytest.mark.django_db
def test_mark_complete(user: settings.AUTH_USER_MODEL):
today = date.today()
todo_list = TodoListFactory.create(owner=user)
todo = todo_list.add_todo("test todo", today)
todo.mark_complete()
assert todo.due_date > today
```
#### File: todovoodoo/users/models.py
```python
from django.contrib.auth.models import AbstractUser
from django.db.models import CASCADE, CharField, OneToOneField, TextField
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from fernet_fields import EncryptedCharField
from model_utils.models import TimeStampedModel
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = CharField(_("Name of User"), blank=True, max_length=255)
pushover_user_key = EncryptedCharField(
_("Pushover User Key"), null=True, default=None, blank=True, max_length=255
)
pushover_api_token = EncryptedCharField(
_("Pushover API Token"), null=True, default=None, blank=True, max_length=255
)
def get_absolute_url(self):
return reverse("users:detail", kwargs={"username": self.username})
``` |
{
"source": "jlehker/pitutor",
"score": 3
} |
#### File: pitutor/app/ble.py
```python
import asyncio
from bleak import BleakClient, discover
class Connection:
client: BleakClient = None
def __init__(
self,
loop: asyncio.AbstractEventLoop,
feed_characteristic: str,
):
self.loop = loop
self.read_characteristic = feed_characteristic
self.write_characteristic = feed_characteristic
self.connected = False
self.connected_device = None
def on_disconnect(self, client: BleakClient):
self.connected = False
        # Put code here to handle what happens on disconnect.
print(f"Disconnected from {self.connected_device.name}!")
async def cleanup(self):
if self.client:
await self.client.disconnect()
async def manager(self):
print("Starting connection manager.")
while True:
if self.client:
await self.connect()
else:
await self.select_device()
await asyncio.sleep(15.0, loop=self.loop)
async def connect(self):
if self.connected:
return
try:
await self.client.connect()
self.connected = await self.client.is_connected()
if self.connected:
print(f"Connected to {self.connected_device.name}")
self.client.set_disconnected_callback(self.on_disconnect)
while True:
if not self.connected:
break
await asyncio.sleep(3.0, loop=self.loop)
else:
print(f"Failed to connect to {self.connected_device.name}")
except Exception as e:
print(e)
async def select_device(self):
pettutor_device = None
print("Bluetooh LE hardware warming up...")
while pettutor_device is None:
await asyncio.sleep(2.0, loop=self.loop) # Wait for BLE to initialize.
devices = await discover()
print("\nSearching for PetTutor...")
for device in devices:
if device.name == "PTFeeder":
print(f"Found PetTutor: '{device}'\n")
pettutor_device = device
break
else:
print("Couldn't find PetTutor. Waiting...")
continue
break
print(f"Connecting to {pettutor_device.name}")
self.connected_device = pettutor_device
self.client = BleakClient(pettutor_device.address, loop=self.loop)
``` |
{
"source": "jlehnersd/league_of_pick_rates",
"score": 3
} |
#### File: league_of_pick_rates/src/model_functions.py
```python
import numpy as np
def adjusted_r2(X_test, y_test, y_pred):
"""
Calculates the adjusted R^2 from the residuals of the test set predictions
Parameters
----------
X_test : pandas data frame
Test data set
y_test : pandas series
Actual win rates from the test set
y_pred : numpy array
Predicted win rates for the test set
Returns
-------
    r2_adj : float
        Adjusted R^2 score computed from the test set predictions
"""
# Get number of observations and number of features in test set
n_obs = len(y_test)
n_feat = X_test.shape[1]
# Calculate sum of squares quantities
ss_residual = sum((y_test - y_pred)**2)
ss_total = sum((y_test - np.mean(y_test))**2)
# Calculate R^2 scores
r2 = 1.0 - (ss_residual / ss_total)
r2_adj = 1.0 - (1.0 - r2) * (n_obs - 1.0) / (n_obs - n_feat - 1.0)
return r2_adj
``` |
{
"source": "jlehnersd/metis_project2",
"score": 4
} |
#### File: src/utils/load_or_make.py
```python
import os, pickle
import functools
def load_or_make(creator):
"""
Loads data that is pickled at filepath if filepath exists;
otherwise, calls creator(*args, **kwargs) to create the data
and pickle it at filepath.
Returns the data in either case.
Inputs:
- filepath: path to where data is / should be stored
- creator: function to create data if it is not already pickled
- *args, **kwargs: arguments passed to creator()
Outputs:
- item: the data that is stored at filepath
Usage:
@load_or_make
def data_creator(args):
# code
# return data
my_data = data_creator(save_file_path, *args, **kwargs)
"""
@functools.wraps(creator)
def cached_creator(filepath, *args, **kwargs):
if os.path.isfile(filepath):
with open(filepath, 'rb') as pkl:
item = pickle.load(pkl)
else:
item = creator(*args, **kwargs)
with open(filepath, 'wb') as pkl:
pickle.dump(item, pkl)
return item
return cached_creator
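# Hedged usage sketch (not part of the original module): caches a toy list in a
# temporary pickle so the second call loads it from disk instead of recomputing.
# The function name and cache path below are made up for illustration.
if __name__ == "__main__":
    import tempfile

    @load_or_make
    def make_squares(n):
        return [i * i for i in range(n)]

    cache_path = os.path.join(tempfile.gettempdir(), "squares_demo.pkl")
    print(make_squares(cache_path, 5))  # computed, then pickled at cache_path
    print(make_squares(cache_path, 5))  # loaded straight from the pickle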
``` |
{
"source": "jlehrer1/comparative-organoids",
"score": 2
} |
#### File: models/lib/lightning_train.py
```python
import sys
import os
import pathlib
from typing import *
import torch
import numpy as np
import pandas as pd
import anndata as an
import warnings
from functools import cached_property
import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from sklearn.preprocessing import LabelEncoder
from .data import generate_dataloaders, compute_class_weights
import sys, os
from os.path import join, dirname, abspath
sys.path.append(join(dirname(abspath(__file__)), '..', '..'))
from helper import gene_intersection, download, upload
from data.downloaders.external_download import download_raw_expression_matrices
here = pathlib.Path(__file__).parent.absolute()
class UploadCallback(pl.callbacks.Callback):
"""Custom PyTorch callback for uploading model checkpoints to the braingeneers S3 bucket.
Parameters:
path: Local path to folder where model checkpoints are saved
desc: Description of checkpoint that is appended to checkpoint file name on save
upload_path: Subpath in braingeneersdev/jlehrer/ to upload model checkpoints to
"""
def __init__(
self,
path: str,
desc: str,
upload_path='model_checkpoints',
epochs: int=20,
) -> None:
super().__init__()
self.path = path
self.desc = desc
self.upload_path = upload_path
self.epochs = epochs
def on_train_epoch_end(self, trainer, pl_module):
epoch = trainer.current_epoch
        if epoch % self.epochs == 0:  # Save every self.epochs epochs
checkpoint = f'checkpoint-{epoch}-desc-{self.desc}.ckpt'
trainer.save_checkpoint(os.path.join(self.path, checkpoint))
print(f'Uploading checkpoint at epoch {epoch}')
upload(
os.path.join(self.path, checkpoint),
os.path.join('jlehrer', self.upload_path, checkpoint)
)
class DataModule(pl.LightningDataModule):
def __init__(
self,
class_label: str,
datafiles: List[str]=None,
labelfiles: List[str]=None,
urls: Dict[str, List[str]]=None,
sep: str=None,
unzip: bool=True,
datapath: str=None,
assume_numeric_label: bool=True,
batch_size=4,
num_workers=0,
device=('cuda:0' if torch.cuda.is_available() else None),
*args,
**kwargs,
):
"""
Creates the DataModule for PyTorch-Lightning training.
This either takes a dictionary of URLs with the format
urls = {dataset_name.extension:
[
datafileurl,
labelfileurl,
]
}
OR two lists containing the absolute paths to the datafiles and labelfiles, respectively.
:param class_label: Class label to train on. Must be in all label files
:type class_label: str
:param datafiles: List of absolute paths to datafiles, if not using URLS. defaults to None
:type datafiles: List[str], optional
:param labelfiles: List of absolute paths to labelfiles, if not using URLS. defaults to None
:type labelfiles: List[str], optional
:param urls: Dictionary of URLS to download, as specified in the above docstring, defaults to None
:type urls: Dict[str, List[str, str]], optional
        :param unzip: Boolean, whether to unzip the datafiles in the url, defaults to True
:type unzip: bool, optional
        :param sep: Separator to use in reading in both datafiles and labelfiles. WARNING: Must be homogeneous across all datafiles and labelfiles, defaults to None (inferred from the labelfile extension)
:type sep: str, optional
:param datapath: Path to local directory to download datafiles and labelfiles to, if using URL. defaults to None
:type datapath: str, optional
:param assume_numeric_label: If the class_label column in all labelfiles is numeric. Otherwise, we automatically apply sklearn.preprocessing.LabelEncoder to the intersection of all possible labels, defaults to True
:type assume_numeric_label: bool, optional
:raises ValueError: If both a dictionary of URL's is passed and labelfiles/datafiles are passed. We can only handle one, not a mix of both, since there isn't a way to determine easily if a string is an external url or not.
"""
super().__init__()
# Make sure we don't have datafiles/labelfiles AND urls at start
if urls is not None and datafiles is not None or urls is not None and labelfiles is not None:
raise ValueError("Either a dictionary of data to download, or paths to datafiles and labelfiles are supported, but not both.")
self.device = device
self.class_label = class_label
self.urls = urls
self.unzip = unzip
self.datapath = (
datapath if datapath is not None else join(here, '..', '..', '..', 'data', 'raw')
)
self.assume_numeric_label = assume_numeric_label
self.batch_size = batch_size
self.num_workers = num_workers
# If we have a list of urls, we can generate the list of paths of datafiles/labelfiles that will be downloaded after self.prepare_data()
if self.urls is not None:
self.datafiles = [join(self.datapath, f) for f in self.urls.keys()]
self.labelfiles = [join(self.datapath, f'labels_{f}') for f in self.urls.keys()]
else:
self.datafiles = datafiles
self.labelfiles = labelfiles
# Warn user in case tsv/csv ,/\t don't match, this can be annoying to diagnose
suffix = pathlib.Path(self.labelfiles[0]).suffix
        if (sep == '\t' and suffix == '.csv') or (sep == ',' and suffix == '.tsv'):
warnings.warn(f'Passed delimiter {sep = } doesn\'t match file extension, continuing...')
# Infer sep based on .csv/.tsv of labelfile (assumed to be homogeneous in case of delimited datafiles) if sep is not passed
if sep is None:
if suffix == '.tsv':
self.sep = '\t'
elif suffix == '.csv':
self.sep = ','
else:
warnings.warn(f'Separator not passed and not able to be inferred from {suffix=}. Falling back to ","')
self.sep = ','
else:
self.sep = sep
self.args = args
self.kwargs = kwargs
def prepare_data(self):
if self.urls is not None:
download_raw_expression_matrices(
self.urls,
unzip=self.unzip,
sep=self.sep,
datapath=self.datapath,
)
if not self.assume_numeric_label:
print('assume_numeric_label=False, using sklearn.preprocessing.LabelEncoder and encoding target variables.')
unique_targets = list(
set(np.concatenate([pd.read_csv(df, sep=self.sep).loc[:, self.class_label].unique() for df in self.labelfiles]))
)
le = LabelEncoder()
le = le.fit(unique_targets)
for idx, file in enumerate(self.labelfiles):
labels = pd.read_csv(file, sep=self.sep)
labels.loc[:, f'categorical_{self.class_label}'] = labels.loc[:, self.class_label]
labels.loc[:, self.class_label] = le.transform(
labels.loc[:, f'categorical_{self.class_label}']
)
labels.to_csv(file, index=False, sep=self.sep) # Don't need to re-index here
# self.labelfiles[idx] = file
def setup(self, stage: Optional[str] = None):
print('Creating train/val/test DataLoaders...')
trainloader, valloader, testloader = generate_dataloaders(
datafiles=self.datafiles,
labelfiles=self.labelfiles,
class_label=self.class_label,
sep=self.sep,
batch_size=self.batch_size,
num_workers=self.num_workers,
pin_memory=True, # For gpu training
*self.args,
**self.kwargs,
)
print('Done, continuing to training.')
self.trainloader = trainloader
self.valloader = valloader
self.testloader = testloader
print('Calculating weights')
self.weights = compute_class_weights(
labelfiles=self.labelfiles,
class_label=self.class_label,
sep=self.sep,
device=self.device,
)
def train_dataloader(self):
return self.trainloader
def val_dataloader(self):
return self.valloader
def test_dataloader(self):
return self.testloader
@cached_property
def num_labels(self):
val = []
for file in self.labelfiles:
val.append(pd.read_csv(file, sep=self.sep).loc[:, self.class_label].values.max())
return max(val) + 1
@cached_property
def num_features(self):
if self.urls is not None and not os.path.isfile(self.datafiles[0]):
            print('Trying to calculate num_features before data has been downloaded. Downloading and continuing...')
self.prepare_data()
if 'refgenes' in self.kwargs:
return len(self.kwargs['refgenes'])
elif hasattr(self, 'trainloader'):
return next(iter(self.trainloader))[0].shape[1]
elif pathlib.Path(self.datafiles[0]).suffix == '.h5ad':
return an.read_h5ad(self.datafiles[0]).X.shape[1]
else:
return pd.read_csv(self.datafiles[0], nrows=1, sep=self.sep).shape[1]
def generate_trainer(
datafiles: List[str],
labelfiles: List[str],
class_label: str,
batch_size: int,
num_workers: int,
optim_params: Dict[str, Any]={
'optimizer': torch.optim.Adam,
'lr': 0.02,
},
weighted_metrics: bool=None,
scheduler_params: Dict[str, float]=None,
wandb_name: str=None,
weights: torch.Tensor=None,
max_epochs=500,
*args,
**kwargs,
):
"""
Generates PyTorch Lightning trainer and datasets for model training.
:param datafiles: List of absolute paths to datafiles
:type datafiles: List[str]
:param labelfiles: List of absolute paths to labelfiles
:type labelfiles: List[str]
:param class_label: Class label to train on
:type class_label: str
:param weighted_metrics: To use weighted metrics in model training
:type weighted_metrics: bool
:param batch_size: Batch size in dataloader
:type batch_size: int
:param num_workers: Number of workers in dataloader
:type num_workers: int
    :param optim_params: Dictionary defining optimizer and any needed/optional arguments for optimizer initialization
:type optim_params: Dict[str, Any]
:param wandb_name: Name of run in Wandb.ai, defaults to ''
:type wandb_name: str, optional
:return: Trainer, model, datamodule
:rtype: Trainer, model, datamodule
"""
device = ('cuda:0' if torch.cuda.is_available() else 'cpu')
print(f'Device is {device}')
here = pathlib.Path(__file__).parent.absolute()
data_path = os.path.join(here, '..', '..', '..', 'data')
wandb_logger = WandbLogger(
project=f"tabnet-classifer-sweep",
name=wandb_name
)
uploadcallback = UploadCallback(
path=os.path.join(here, 'checkpoints'),
desc=wandb_name
)
early_stop_callback = EarlyStopping(
monitor=("weighted_val_accuracy" if weighted_metrics else "val_accuarcy"),
min_delta=0.00,
patience=3,
verbose=False,
mode="max"
)
module = DataModule(
datafiles=datafiles,
labelfiles=labelfiles,
class_label=class_label,
batch_size=batch_size,
num_workers=num_workers,
)
model = TabNetLightning(
input_dim=module.num_features,
output_dim=module.num_labels,
weighted_metrics=weighted_metrics,
optim_params=optim_params,
scheduler_params=scheduler_params,
weights=weights,
)
trainer = pl.Trainer(
gpus=(1 if torch.cuda.is_available() else 0),
auto_lr_find=False,
# gradient_clip_val=0.5,
logger=wandb_logger,
max_epochs=max_epochs,
# callbacks=[
# uploadcallback,
# ],
# val_check_interval=0.25, # Calculate validation every quarter epoch instead of full since dataset is large, and would like to test this
)
return trainer, model, module
```
#### File: models/lib/neural.py
```python
from typing import *
import torch
import numpy as np
import shutil
import json
import zipfile
import io
import pytorch_lightning as pl
from scipy.sparse import csc_matrix
from pathlib import Path
from pytorch_tabnet.utils import (
create_explain_matrix,
ComplexEncoder,
)
import torch.nn.functional as F
from torchmetrics.functional import accuracy, precision, recall
from pytorch_tabnet.tab_network import TabNet
import copy
import warnings
class TabNetLightning(pl.LightningModule):
def __init__(
self,
input_dim,
output_dim,
n_d=8,
n_a=8,
n_steps=3,
gamma=1.3,
cat_idxs=[],
cat_dims=[],
cat_emb_dim=1,
n_independent=2,
n_shared=2,
epsilon=1e-15,
virtual_batch_size=128,
momentum=0.02,
mask_type="sparsemax",
lambda_sparse = 1e-3,
optim_params: Dict[str, float]={
'optimizer': torch.optim.Adam,
'lr': 0.001,
'weight_decay': 0.01,
},
metrics: Dict[str, Callable]={
'accuracy': accuracy,
'precision': precision,
'recall': recall,
},
scheduler_params: Dict[str, float]=None,
weighted_metrics=False,
weights=None,
loss=None, # will default to cross_entropy
pretrained=None,
) -> None:
super().__init__()
# Stuff needed for training
self.input_dim = input_dim
self.output_dim = output_dim
self.lambda_sparse = lambda_sparse
self.optim_params = optim_params
self.scheduler_params = scheduler_params
self.metrics = metrics
self.weighted_metrics = weighted_metrics
self.weights = weights
self.loss = loss
if pretrained is not None:
self._from_pretrained(**pretrained.get_params())
# self.device = ('cuda:0' if torch.cuda.is_available() else 'cpu!')
print(f'Initializing network')
self.network = TabNet(
input_dim=input_dim,
output_dim=output_dim,
n_d=n_d,
n_a=n_a,
n_steps=n_steps,
gamma=gamma,
cat_idxs=cat_idxs,
cat_dims=cat_dims,
cat_emb_dim=cat_emb_dim,
n_independent=n_independent,
n_shared=n_shared,
epsilon=epsilon,
virtual_batch_size=virtual_batch_size,
momentum=momentum,
mask_type=mask_type,
)
print(f'Initializing explain matrix')
self.reducing_matrix = create_explain_matrix(
self.network.input_dim,
self.network.cat_emb_dim,
self.network.cat_idxs,
self.network.post_embed_dim,
)
def forward(self, x):
return self.network(x)
def _compute_loss(self, y, y_hat):
# If user doesn't specify, just set to cross_entropy
if self.loss is None:
self.loss = F.cross_entropy
return self.loss(y, y_hat, weight=self.weights)
def _step(self, batch):
x, y = batch
y_hat, M_loss = self.network(x)
loss = self._compute_loss(y_hat, y)
# Add the overall sparsity loss
loss = loss - self.lambda_sparse * M_loss
return y, y_hat, loss
def training_step(self, batch, batch_idx):
y, y_hat, loss = self._step(batch)
self.log("train_loss", loss, logger=True, on_epoch=True, on_step=True)
self._compute_metrics(y_hat, y, 'train')
return loss
def validation_step(self, batch, batch_idx):
y, y_hat, loss = self._step(batch)
self.log("val_loss", loss, logger=True, on_epoch=True, on_step=True)
self._compute_metrics(y_hat, y, 'val')
def test_step(self, batch, batch_idx):
y, y_hat, loss = self._step(batch)
self.log("test_loss", loss, logger=True, on_epoch=True, on_step=True)
self._compute_metrics(y_hat, y, 'test')
def configure_optimizers(self):
if 'optimizer' in self.optim_params:
optimizer = self.optim_params.pop('optimizer')
optimizer = optimizer(self.parameters(), **self.optim_params)
else:
optimizer = torch.optim.Adam(self.parameters(), lr=0.2, weight_decay=1e-5)
if self.scheduler_params is not None:
scheduler = self.scheduler_params.pop('scheduler')
scheduler = scheduler(optimizer, **self.scheduler_params)
if self.scheduler_params is None:
return optimizer
return {
'optimizer': optimizer,
'lr_scheduler': scheduler,
'monitor': 'train_loss',
}
def _compute_metrics(self,
y_hat: torch.Tensor,
y: torch.Tensor,
tag: str,
on_epoch=True,
on_step=False,
):
"""
Compute metrics for the given batch
:param y_hat: logits of model
:type y_hat: torch.Tensor
:param y: tensor of labels
:type y: torch.Tensor
:param tag: log name, to specify train/val/test batch calculation
:type tag: str
:param on_epoch: log on epoch, defaults to True
:type on_epoch: bool, optional
:param on_step: log on step, defaults to True
:type on_step: bool, optional
"""
for name, metric in self.metrics.items():
            if self.weighted_metrics:  # Weight each class by its support in the calculation
val = metric(y_hat, y, average='weighted', num_classes=self.output_dim)
self.log(
f"weighted_{tag}_{name}",
val,
on_epoch=on_epoch,
on_step=on_step,
logger=True,
)
else:
val = metric(y_hat, y, num_classes=self.output_dim)
self.log(
f"{tag}_{name}",
val,
on_epoch=on_epoch,
on_step=on_step,
logger=True,
)
def explain(self, loader, normalize=False):
self.network.eval()
res_explain = []
for batch_nb, data in enumerate(loader):
if isinstance(data, tuple): # if we are running this on already labeled pairs and not just for inference
data, _ = data
M_explain, masks = self.network.forward_masks(data)
for key, value in masks.items():
masks[key] = csc_matrix.dot(
value.cpu().detach().numpy(), self.reducing_matrix
)
original_feat_explain = csc_matrix.dot(M_explain.cpu().detach().numpy(),
self.reducing_matrix)
res_explain.append(original_feat_explain)
if batch_nb == 0:
res_masks = masks
else:
for key, value in masks.items():
res_masks[key] = np.vstack([res_masks[key], value])
res_explain = np.vstack(res_explain)
if normalize:
res_explain /= np.sum(res_explain, axis=1)[:, None]
return res_explain, res_masks
def _compute_feature_importances(self, dataloader):
M_explain, _ = self.explain(dataloader, normalize=False)
sum_explain = M_explain.sum(axis=0)
feature_importances_ = sum_explain / np.sum(sum_explain)
return feature_importances_
def save_model(self, path):
saved_params = {}
init_params = {}
for key, val in self.get_params().items():
if isinstance(val, type):
# Don't save torch specific params
continue
else:
init_params[key] = val
saved_params["init_params"] = init_params
class_attrs = {
"preds_mapper": self.preds_mapper
}
saved_params["class_attrs"] = class_attrs
# Create folder
Path(path).mkdir(parents=True, exist_ok=True)
# Save models params
with open(Path(path).joinpath("model_params.json"), "w", encoding="utf8") as f:
json.dump(saved_params, f, cls=ComplexEncoder)
# Save state_dict
torch.save(self.network.state_dict(), Path(path).joinpath("network.pt"))
shutil.make_archive(path, "zip", path)
shutil.rmtree(path)
print(f"Successfully saved model at {path}.zip")
return f"{path}.zip"
def load_model(self, filepath):
try:
with zipfile.ZipFile(filepath) as z:
with z.open("model_params.json") as f:
loaded_params = json.load(f)
loaded_params["init_params"]["device_name"] = self.device_name
with z.open("network.pt") as f:
try:
saved_state_dict = torch.load(f, map_location=self.device)
except io.UnsupportedOperation:
# In Python <3.7, the returned file object is not seekable (which at least
# some versions of PyTorch require) - so we'll try buffering it in to a
# BytesIO instead:
saved_state_dict = torch.load(
io.BytesIO(f.read()),
map_location=self.device,
)
except KeyError:
raise KeyError("Your zip file is missing at least one component")
self.__init__(**loaded_params["init_params"])
self._set_network()
self.network.load_state_dict(saved_state_dict)
self.network.eval()
self.load_class_attrs(loaded_params["class_attrs"])
def load_weights_from_unsupervised(self, unsupervised_model):
update_state_dict = copy.deepcopy(self.network.state_dict())
for param, weights in unsupervised_model.network.state_dict().items():
if param.startswith("encoder"):
# Convert encoder's layers name to match
new_param = "tabnet." + param
else:
new_param = param
if self.network.state_dict().get(new_param) is not None:
# update only common layers
update_state_dict[new_param] = weights
def _from_pretrained(self, **kwargs):
update_list = [
"cat_dims",
"cat_emb_dim",
"cat_idxs",
"input_dim",
"mask_type",
"n_a",
"n_d",
"n_independent",
"n_shared",
"n_steps",
]
for var_name, value in kwargs.items():
if var_name in update_list:
try:
exec(f"global previous_val; previous_val = self.{var_name}")
if previous_val != value: # noqa
wrn_msg = f"Pretraining: {var_name} changed from {previous_val} to {value}" # noqa
warnings.warn(wrn_msg)
exec(f"self.{var_name} = value")
except AttributeError:
exec(f"self.{var_name} = value")
```
#### File: src/models/run_model_search.py
```python
import random
import pathlib
import os
import argparse
import ast
from itertools import product
import numpy as np
from scipy.stats import loguniform
def run_search(
N: int,
class_label: str,
) -> None:
"""
    Runs a hyperparameter search by launching N GPU jobs, one per sampled configuration, on the PRP Nautilus cluster.
:param N: Number of models to train
:type N: int
:param class_label: Which target label to train for
:type class_label: str
"""
here = pathlib.Path(__file__).parent.absolute()
yaml_path = os.path.join(here, '..', '..', 'yaml', 'model.yaml')
param_dict = {
'weighted_metrics': [True],
'class_label': [class_label],
'max_epochs': [1000],
'lr': loguniform.rvs(0.001, 0.1, size=10),
'batch_size': [32],
'momentum': loguniform.rvs(0.001, 0.9, size=10),
'weight_decay': loguniform.rvs(0.001, 0.1, size=10),
}
# Generate cartesian product of dictionary
params = list(product(*param_dict.values()))
param_names = list(param_dict.keys())
for i, params in enumerate(random.sample(params, N)):
for n, p in zip(param_names, params):
os.environ[n.upper()] = str(p)
# These two are to put in job name
os.environ['NAME'] = class_label.lower()
os.environ['I'] = str(i)
os.system(f'envsubst < {yaml_path} | kubectl create -f -')
if __name__ == "__main__":
parser = argparse.ArgumentParser(usage='Hyperparameter tune with random search.')
parser.add_argument(
'--N',
help='Number of experiments to run',
required=False,
type=int,
default=100,
)
parser.add_argument(
'--class-label',
required=False,
default='Type',
type=str,
help='Class label to train classifier on',
)
args = parser.parse_args()
args = vars(args)
run_search(**args)
```
#### File: src/models/train_neural_network.py
```python
import sys
import argparse
import pathlib
import os
import ast
from typing import *
from lib.lightning_train import generate_trainer
import torch
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
import helper
def make_args() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
# parser.add_argument(
# '--width',
# required=False,
# default=1024,
# help='Width of deep layers in feedforward neural network',
# type=int,
# )
# parser.add_argument(
# '--layers',
# required=False,
# default=5,
# help='Number of deep layers in feedforward neural network',
# type=int,
# )
parser.add_argument(
'--max-epochs',
required=False,
default=1000,
help='Total number of allowable epochs the model is allowed to train for',
type=int,
)
parser.add_argument(
'--lr',
required=False,
default=3e-4,
help='Learning rate for model optimizer',
type=float,
)
parser.add_argument(
'--momentum',
required=False,
default=0,
help='Momentum for model optimizer',
type=float,
)
parser.add_argument(
'--weight-decay',
required=False,
default=0,
help='Weight decay for model optimizer',
type=float,
)
parser.add_argument(
'--class-label',
required=False,
default='Type',
type=str,
help='Class label to train classifier on',
)
parser.add_argument(
'--batch-size',
required=False,
default=4,
type=int,
help='Number of samples in minibatch'
)
parser.add_argument(
'--num-workers',
required=False,
default=32,
type=int,
help='Number of workers in DataLoaders'
)
parser.add_argument(
'--weighted-metrics',
type=ast.literal_eval, # To evaluate weighted_metrics=False as an actual bool
default=False,
required=False,
help='Whether to use class-weighted schemes in metric calculations'
)
return parser
if __name__ == "__main__":
parser = make_args()
here = pathlib.Path(__file__).parent.absolute()
data_path = os.path.join(here, '..', '..', 'data', 'interim')
label_path = os.path.join(here, '..', '..', 'data', 'processed', 'labels')
args = parser.parse_args()
params = vars(args)
print(params)
info = helper.INTERIM_DATA_AND_LABEL_FILES_LIST
datafiles = info.keys()
labelfiles = [info[file] for file in datafiles]
datafiles = [os.path.join(data_path, f) for f in datafiles]
labelfiles = [os.path.join(label_path, f) for f in labelfiles]
trainer, model, module = generate_trainer(
datafiles=datafiles,
labelfiles=labelfiles,
shuffle=True,
drop_last=True,
skip=3,
normalize=True,
optim_params={
'optimizer': torch.optim.SGD,
'lr': params.pop('lr'),
'momentum': params.pop('momentum'),
'weight_decay': params.pop('weight_decay'),
},
**params,
)
trainer.fit(model, datamodule=module)
``` |
{
"source": "jlehrer1/ConvNeXt-lightning",
"score": 2
} |
#### File: ConvNeXt-lightning/convnextpl/main.py
```python
from multiprocessing.sharedctypes import Value
from typing import List, Callable, Dict
from torchmetrics.functional import accuracy
from .data.imagedataset import ImageSet
from .models.convnext import ConvNeXt
from .models.convnext_isotropic import ConvNeXtIsotropic
def Convnext(
type=None,
in_chans: int=3,
num_classes: int=1000,
depths: list=[3, 3, 9, 3],
dims: list=[96, 192, 384, 768],
drop_path_rate: int=0.,
layer_scale_init_value: float=1e-6,
head_init_scale: float=1.,
lr: float=1e-4,
momentum: float=1e-4,
weight_decay: float=1e-2,
metrics: Dict[str, Callable] = {
'acc' : accuracy
},
):
if type == None:
model = ConvNeXt(
in_chans=in_chans,
num_classes=num_classes,
depths=depths,
dims=dims,
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
head_init_scale=head_init_scale,
lr=lr,
momentum=momentum,
weight_decay=weight_decay,
metrics=metrics
)
elif type == 'isotropic':
model = ConvNeXtIsotropic(
in_chans=in_chans,
num_classes=num_classes,
depths=depths,
dims=dims,
drop_path_rate=drop_path_rate,
layer_scale_init_value=layer_scale_init_value,
head_init_scale=head_init_scale,
lr=lr,
momentum=momentum,
weight_decay=weight_decay,
metrics=metrics
)
else:
raise ValueError(f"Invalid value in type {type}. Must be one of [None, 'isotropic']")
return model
``` |
{
"source": "jlehrer1/InstantEDA",
"score": 3
} |
#### File: InstantEDA/quickplotter/_quickplotter.py
```python
import pandas as pd
from . import _visualization as visualization
from . import _clean as clean
class QuickPlotter:
# Class initialization
def __init__(self, df: pd.DataFrame, categorical=True, categorical_subset=None):
self.df = df
self.df_clean = clean.clean(
df, categorical_all=categorical, categorical_subset=categorical_subset)
self.plotlist = {
'common': ['num_nan', 'percent_nan', 'correlation'],
'pairwise': ['pairwise'],
'distribution': ['distribution']
}
def _plot(self, plots: list):
df1 = visualization.num_nan_plot(self.df)
df2 = visualization.percent_nan_plot(self.df)
df3 = visualization.correlation_plot(self.df_clean)
all_plots = {
'num_nan': df1,
'percent_nan': df2,
'correlation': df3,
}
for plot in plots:
all_plots[plot].show()
def _validity_check(self, subset: list, diff: list, subset_columns: list, diff_columns: list):
"""
Checks each parameter in the QuickPlotter object's methods.
It is a private function which will not be of much use outside of the specified functions.
"""
# Checks for subset/diff
# ------------------------------------------
plotlist = self.plotlist
if subset is not None and diff is not None:
raise ValueError(
"subset and diff cannot both be used."
)
if subset is not None and not set(subset).issubset(plotlist):
raise ValueError(
"subset contains improper values. Check the plotlist attribute for appropriate ones."
)
if diff is not None and not set(diff).issubset(plotlist):
raise ValueError(
"diff contains improper values. Check the plotlist attribute for appropriate ones."
)
        # Checks for subset_columns/diff_columns
# ------------------------------------------
col_list = list(self.df.columns)
if subset_columns is not None and diff_columns is not None:
raise ValueError(
"subset_columns and subset_diff cannot both be used"
)
if subset_columns is not None and not set(subset_columns).issubset(col_list):
raise ValueError(
"subset_columns contains improper values. Check that it only contains valid column names"
)
if diff_columns is not None and not set(diff_columns).issubset(col_list):
raise ValueError(
"diff_columns contains improper values. Check that it only contains valid column names."
)
def _numeric_check(self, df: pd.DataFrame, cols: list) -> bool:
"""Checks to make sure each DataFrame column in the given column list is numeric"""
        # Error check, shouldn't be hit regularly because of _validity_check
if not set(cols).issubset(set(df.columns)):
raise ValueError(
"Given column list contains invalid data. Check that only actual column names are passed."
)
for col in cols:
if not clean._is_numeric(df[col]):
return False
return True
def common(self, subset: list = None, diff: list = None, subset_columns: list = None, diff_columns: list = None):
"""
Plots common EDA plots.
Parameters:
----------
subset: subset of common plots to show
diff: plot all common plots except those in diff, i.e. {all plots}\\{diff}
"""
self._validity_check(subset, diff, subset_columns, diff_columns)
if subset is None and diff is None:
# plot is called without subset/diff specified, just plot all
self._plot(self.plotlist['common'])
elif subset is not None:
self._plot(subset)
else:
            self._plot(list(set(self.plotlist['common']) - set(diff)))
def pairwise(self, subset_columns: list = None, diff_columns: list = None):
"""
Plots each feature X_i against X_j, i=1,...,length(X.columns)
Parameters
---------
subset: subset of features to plot
diff: plot all features except those in diff
"""
self._validity_check(None, None, subset_columns, diff_columns)
if subset_columns is None and diff_columns is None:
visualization.pairwise_plot(
self.df_clean, self.df_clean.columns).show()
elif subset_columns is not None:
visualization.pairwise_plot(self.df_clean, subset_columns).show()
else:
visualization.pairwise_plot(self.df_clean, list(
set(self.df_clean.columns) - set(diff_columns))).show()
def distribution(self, subset: list = None, diff: list = None, subset_columns: list = None, diff_columns: list = None):
""" Plots distributions of given DataFrame columns"""
self._validity_check(subset, diff, subset_columns, diff_columns)
# Need to add warning / error checking for plotting non-numerical values
if subset is None and diff is None:
# Check if all columns are numeric, else raise warning (will error in visualization function)
visualization.distribution_plot(
self.df_clean, self.df_clean.columns).show()
elif subset is not None:
visualization.distribution_plot(self.df_clean, subset).show()
else:
visualization.distribution_plot(self.df_clean, list(
set(self.df_clean.columns) - set(diff)))
```
#### File: InstantEDA/tests/test_quickplotter.py
```python
import pandas as pd
import numpy as np
import quickplotter
# Define dummy DataFrame for testing
d = {
'col1': [1, 2],
'col2': [3, 4],
'col3': [np.nan, 6],
'col4': [7,8],
'col5':[9, np.nan],
}
df = pd.DataFrame(data=d)
qp = quickplotter.QuickPlotter(df)
def test_class_is_creatable():
qp = quickplotter.QuickPlotter(df)
def test_class_common_plots_show():
qp.common()
def test_class_pairwise_plots_show():
qp.pairwise()
def test_class_distribution_plots_show():
qp.distribution()
def test_class_subset_passes():
# only for .common() now
qp.common(['num_nan', 'percent_nan'])
def test_class_diff_passes():
# only for .common() now
qp = quickplotter.QuickPlotter(df)
def test_class_subset_col_passes():
pass
def test_class_diff_col_passes():
pass
``` |
{
"source": "jlehrer1/sql-to-pandas",
"score": 3
} |
#### File: sql_to_pandas/select/select.py
```python
import pandas as pd
import re
import sqlparse
from ..where import where
from ..helpers import helpers
def _SELECT_COUNT(df: pd.DataFrame) -> pd.DataFrame:
return df.shape[0]
def _SELECT_AVG(df: pd.DataFrame) -> pd.DataFrame:
return df.mean()
def _SELECT_SUM(df: pd.DataFrame) -> pd.DataFrame:
return df.sum()
def _SELECT_MAX(df: pd.DataFrame) -> pd.DataFrame:
return df.max()
def _SELECT_MIN(df: pd.DataFrame) -> pd.DataFrame:
return df.min()
def _SELECT_STDEV(df: pd.DataFrame) -> pd.DataFrame:
return df.std()
def _parse_SELECT(df: pd.DataFrame, string: str):
"""Parses which columns to use from the DataFrame. Runs in place of SELECT <cols> FROM <df>"""
option_map = {
'count' : _SELECT_COUNT,
'avg' : _SELECT_AVG,
'sum' : _SELECT_SUM,
'max' : _SELECT_MAX,
'min' : _SELECT_MIN,
'stdev' : _SELECT_STDEV,
}
# Obtain the possible optional function -- will always be "SELECT <FUNCTION>(col) FROM df"
checkstring = string[1].replace('(', ' ').replace(')', ' ').split()
df = where._parse_WHERE(df, string)
# Get optional keyword and column it will work on if it exists
if checkstring[0] in option_map.keys():
if len(checkstring[1:]) > 1:
raise ValueError('Error: aggregate functions can only be applied to a single column.')
return option_map[checkstring[0]](df[[checkstring[1]]])
else:
cols = []
if string[1] == '*':
cols = df.columns.to_list()
else:
string = string[string.index('select') + 1: string.index('from')]
cols = helpers._clean_listlike(string)
df = df[cols]
return df
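# Hedged illustration (not part of the original module): the aggregate helpers
# above act on a single-column DataFrame, which is how _parse_SELECT dispatches
# them via option_map for a query such as "SELECT avg(price) FROM df". The
# DataFrame below is a made-up example:
#
#   df = pd.DataFrame({"price": [1.0, 2.0, 3.0]})
#   _SELECT_AVG(df[["price"]])    # Series with price == 2.0
#   _SELECT_COUNT(df[["price"]])  # 3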
``` |
{
"source": "jlehrer1/tinycomp",
"score": 3
} |
#### File: tinycomp/tinycomp/tinycomp.py
```python
import csv
import linecache
import numpy as np
class Dataset:
def __init__(self, filename: str, rows: list=None):
"""
        A Dataset can either be initialized with a list of rows (mutable by changing the rows attribute),
        or a new list of rows may be passed to each method that requires one, but not both (that would be ambiguous).
Parameters:
filename: Path to csv file
rows (optional): List of rows to initialize the dataset with
Returns:
None
"""
# Private attributes
self._filename = filename
self._total_data = self._numline(filename)
# Public attributes
self.rows = rows
# Public attributes (Pandas API-like)
self.index = rows
self.columns = self._get_columns()
self.shape = (self._total_data, len(self.columns))
# Python dunder methods
def __getitem__(self, idx):
if isinstance(idx, slice):
step = (1 if idx.step == None else idx.step)
return np.array([self._getline(i) for i in range(idx.start, idx.stop, step)]).astype(float)
elif isinstance(idx, (list, range)):
return np.array([self._getline(i) for i in idx]).astype(float)
elif isinstance(idx, int):
return np.array(self._getline(idx)).astype(float)
else:
raise TypeError(f"Index must be list or int, not {type(idx).__name__}")
def __len__(self):
return self._total_data
def __str__(self):
if self.rows is not None:
return str(self.__getitem__(self.rows))
else:
return 'Dataset()'
def __repr__(self):
return self.__str__()
def _getline(self, idx):
"""
Returns a line from a csv file as a list of strings (not type-checked)
Parameters:
idx: Row to return from file
Returns:
list: Row of file with each comma-separated value as a distinct value in the list
"""
line = linecache.getline(self._filename, idx + 2)
csv_data = csv.reader([line])
data = [x for x in csv_data][0]
return data
def _numline(self, filename):
"""
Gets the number of lines in a file, should only be used for getting the total number of rows on object initialization
Parameters:
filename: Path to the file to get the number of lines from
Returns:
n: Number of lines in the file
"""
n = 0
with open(filename, "r") as f:
n = len(f.readlines()) - 1
return n
def _row_get(self, rows: list):
"""
Returns rows from a file, either with a passed list or from the list of rows upon object initialization.
Also performs error checking to make sure either rows were set upon initialization or passed, but not both or neither.
Parameters:
rows: List of rows
Returns:
list: Array of row values from file
"""
if self.rows is None and rows is None:
raise ValueError(
f"""{self.__class__} object was not initialized with a list of rows.
Either reinitialize with a list or rows or pass a list of rows to this method."""
)
if self.rows is not None and rows is not None:
raise ValueError(
f"""{self.__class__} object was initialized with a list of rows. Therefore, a list of rows may not be
passed to this method. Either reinitialize without a defined list of rows or do not pass a list into this method. """
)
return rows if rows != None else self.rows
def _get_columns(self):
"""
Get all the columns of the csv
Parameters:
None
Returns:
list: List of column names as strings
"""
line = linecache.getline(self._filename, 1)
csv_data = csv.reader([line])
return [x for x in csv_data][0]
def sum(self, rows=None, axis=0):
"""Sums the given rows by the given axis"""
rows = self._row_get(rows)
return np.sum(self[rows], axis=axis)
def nlargest(self, rows=None, n=20, axis=0, ascending=False):
"""
Gets the n largest rows or columns (summed), depending on the axis
"""
rows = self._row_get(rows)
s = np.sum(self[rows], axis=axis)
if axis == 0:
data = [self.columns[idx] for idx in np.argsort(s)[-n: ]]
else:
data = np.argsort(s)[-n: ]
return data if ascending else data[::-1]
def nsmallest(self, rows=None, n=20, axis=0, ascending=False):
"""
Gets the n smallest rows or columns (summed), depending on the axis
"""
rows = self._row_get(rows)
s = np.sum(self[rows], axis=axis)
if axis == 0:
data = [self.columns[idx] for idx in np.argsort(s)[0: n]]
else:
data = np.argsort(s)[0: n]
return data[::-1] if ascending else data
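# Hedged self-contained sketch (not part of the original module): writes a tiny
# temporary CSV so the Dataset class can be exercised end to end. The file name
# and column names below are made up for illustration.
if __name__ == "__main__":
    import os
    import tempfile

    demo_path = os.path.join(tempfile.gettempdir(), "tinycomp_demo.csv")
    with open(demo_path, "w") as f:
        f.write("a,b,c\n1,2,3\n4,5,6\n7,8,9\n")

    ds = Dataset(demo_path, rows=[0, 2])
    print(ds.columns)      # ['a', 'b', 'c']
    print(ds[[0, 2]])      # rows 0 and 2 as a float array
    print(ds.sum(axis=0))  # column-wise sum over the initialized rows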
``` |
{
"source": "jlehtomaa/JAX_MPPI",
"score": 3
} |
#### File: JAX_MPPI/src/mppi.py
```python
import numpy as np
from src.rollout import make_vec_rollout_fn, lax_wrapper_step
class MPPI:
""" JAX implementation of the MPPI algorithm.
Williams et al. 2017,
Information Theoretic MPC for Model-Based Reinforcement Learning
https://ieeexplore.ieee.org/document/7989202
Some MPPI modifications based on Nagabandi et al. 2019,
Deep Dynamics Models for Learning Dexterous Manipulation.
https://github.com/google-research/pddm
Much inspired by the MPPI implementation by Shunichi09, see
https://github.com/Shunichi09/PythonLinearNonlinearControl
Assume terminal cost phi(x) = 0.
"""
def __init__(self, config):
self.env_cfg = config["environment"]
self.ctrl_cfg = config["controller"]
self.temperature = self.ctrl_cfg["temperature"] # \lambda
self.n_samples = self.ctrl_cfg["n_samples"] # K
self.n_timesteps = self.ctrl_cfg["n_timesteps"] # T
self.noise_sigma = self.ctrl_cfg["noise_sigma"] # \Sigma
self.act_dim = self.env_cfg["act_dim"]
self.act_max = self.env_cfg["max_torque"]
self.act_min = -self.env_cfg["max_torque"]
self.rollout_fn = self._build_rollout_fn(lax_wrapper_step, self.env_cfg)
self.reset()
def reset(self):
"""Reset the previous control trajectory to zero (assumes that
the action space is symmetric around zero)."""
self.plan = np.zeros((self.n_timesteps, self.act_dim))
def _build_rollout_fn(self, step_fn, env_params):
"""Construct the JAX rollout function.
Arguments:
---------
step_fn: a rollout function that takes as input the current state
of the system and a sequence of noisy control inputs
with shape (n_samples, n_timesteps, act_dim).
env_params: a dict of parameters consumed by the rollout fn.
The resulting rollout function should return a (states, rewards) tuple,
where states has the shape (n_samples, n_timesteps, obs_dim), and
rewards the shape (n_samples, n_timesteps, 1).
"""
return make_vec_rollout_fn(step_fn, env_params)
def _get_action_noise(self):
"""Get the additive noise applied to the nominal control trajectory."""
noise = np.random.normal(size=(
self.n_samples, self.n_timesteps, self.act_dim)) * self.noise_sigma
return noise
def get_action(self, obs):
""" Determine the next optimal action.
Uses https://github.com/Shunichi09/PythonLinearNonlinearControl ...
controllers/mppi.py, based on
Nagabandi et al. (2019). Deep Dynamics Models for Learning
Dexterous Manipulation. arXiv:1909.11652.
Arguments:
----------
obs (np.ndarray) : the current state of the system
Returns:
--------
act (np.ndarray): the next optimal control input
"""
noise = self._get_action_noise()
acts = self.plan + noise
acts = np.clip(acts, self.act_min, self.act_max)
_, rewards = self.rollout_fn(obs, acts) # (K, T, 1)
# Stage costs from the environment.
rewards = rewards.sum(axis=1).squeeze() # (K,)
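        # Softmax-style trajectory weighting: each sampled control sequence k is
        # weighted proportionally to exp(temperature * (R_k - max_k R_k)); the
        # max is subtracted only for numerical stability and has no effect
        # after normalization by `denom`.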
exp_rewards = np.exp(self.temperature * (rewards - np.max(rewards)))
denom = np.sum(exp_rewards) + 1e-10
weighted_inputs = exp_rewards[:, np.newaxis, np.newaxis] * acts
sol = np.sum(weighted_inputs, axis=0) / denom
# Return the first element as an immediate input, and roll the next
# actions forward by one position.
self.plan[:-1] = sol[1:]
self.plan[-1] = sol[-1]
return sol[0]
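# Illustrative usage sketch with a gym-style environment; the config keys are
# the ones read in __init__, everything else here is hypothetical.
#
#   ctrl = MPPI(config)
#   obs = env.reset()
#   for _ in range(200):
#       act = ctrl.get_action(obs)
#       obs, reward, done, info = env.step(act)
#       if done:
#           ctrl.reset()
#           obs = env.reset()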
``` |
{
"source": "jlei2821/iwildcam",
"score": 3
} |
#### File: jlei2821/iwildcam/iwildcam_image_readers.py
```python
import numpy as np
import cv2
import zipfile
def read_zipped_images(files, archive, target_size, scale=1., flip_image=False, rotate=0, print_update=10000):
with zipfile.ZipFile(archive, 'r') as zf:
imgs = np.empty((len(files),) + target_size + (3,))
for i, filename in enumerate(files):
data = zf.read(filename)
img = cv2.imdecode(np.frombuffer(data, np.uint8), 1)
if flip_image == True:
img = cv2.flip(img, 1)
if rotate > 0:
ctr = tuple(np.array(img.shape[1::-1]) / 2)
rot = cv2.getRotationMatrix2D(ctr, rotate, 1.0)
img = cv2.warpAffine(img, rot, img.shape[1::-1], flags=cv2.INTER_LINEAR)
img = cv2.resize(img, target_size)
imgs[i, :, :, :] = img * scale
if (i % print_update) == 0:
print(f"Loading image {i} of {len(files)}...")
return imgs
def read_images(files, directory, target_size, scale=1., flip_image=False, rotate=0, print_update=10000):
imgs = np.empty((len(files),) + target_size + (3,))
for i, filename in enumerate(files):
img = cv2.imread(str(directory)+"/"+filename, 1)
if flip_image == True:
img = cv2.flip(img, 1)
if rotate > 0:
ctr = tuple(np.array(img.shape[1::-1]) / 2)
rot = cv2.getRotationMatrix2D(ctr, rotate, 1.0)
img = cv2.warpAffine(img, rot, img.shape[1::-1], flags=cv2.INTER_LINEAR)
img = cv2.resize(img, target_size)
imgs[i, :, :, :] = img * scale
if (i % print_update) == 0:
print(f"Loading image {i} of {len(files)}...")
return imgs
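# Illustrative usage sketch; file names, archive path and sizes are hypothetical.
#
#   files = ["train/0001.jpg", "train/0002.jpg"]
#   batch = read_zipped_images(files, "train_images.zip", target_size=(224, 224),
#                              scale=1.0 / 255.0)
#   batch.shape  # -> (2, 224, 224, 3)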
``` |
{
"source": "jleidel/sst-elements",
"score": 3
} |
#### File: ember/test/loadFileParse.py
```python
import sys
class Buffer:
def __init__(self):
self.buffer = ''
self.offset = 0
def write( self, data ):
self.buffer += data
def readline( self ):
end = self.offset
while end < len(self.buffer) and self.buffer[end] != '\n':
end += 1
start = self.offset
self.offset = end + 1
return self.buffer[start:self.offset]
class ParseLoadFile:
def __init__( self, filename, fileVars ):
self.fp = open(filename, 'r')
self.buffer = Buffer()
self.preprocess( fileVars )
self.lastLine = self.getline()
self.stuff = []
while True:
key, value = self.getKeyValue();
if key == None:
break
if key == '[JOB_ID]':
self.stuff = self.stuff + [{ 'jobid': int(value) }]
self.stuff[-1]['motifs'] = []
self.stuff[-1]['params'] = {}
elif key == '[NID_LIST]':
value = ''.join(value.split())
if value[0].isdigit():
self.stuff[-1]['nid_list'] = value.strip()
else:
left,right = value.split('=')
if left == 'generateNidList':
self.stuff[-1]['nid_list'] = self.generateNidList( right )
else:
sys.exit('ERROR: invalid NID_LIST {0}'.format(value))
elif key == '[NUM_CORES]':
self.stuff[-1]['num_cores'] = value.strip()
elif key == '[PARAM]':
key,value = value.strip().split('=')
if key == 'ember:famAddrMapper.nidList' and value[0:len('generateNidList')] == 'generateNidList':
self.stuff[-1]['params'][key] = self.generateNidList( value )
else:
self.stuff[-1]['params'][key] = value
elif key == '[MOTIF]':
self.stuff[-1]['motifs'] = self.stuff[-1]['motifs'] + [value]
else:
print('Warning: unknown key {0}'.format(key))
self.fp.close()
def __iter__(self):
return self
def generateNidList( self, generator ):
name,args = generator.split('(',1)
try:
module = __import__( name, fromlist=[''] )
except:
sys.exit('Failed: could not import nidlist generator `{0}`'.format(name) )
return module.generate( args.split(')',1)[0] )
def next(self):
if len(self.stuff) == 0:
raise StopIteration
else :
jobid = self.stuff[0]['jobid']
nidlist = self.stuff[0]['nid_list']
numCores = 1
if 'num_cores' in self.stuff[0]:
numCores = self.stuff[0]['num_cores']
params = self.stuff[0]['params']
motifs = self.stuff[0]['motifs']
self.stuff.pop(0)
return jobid, nidlist, numCores, params, motifs
def substitute( self, line, variables ):
retval = ''
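        # Rewriting '}' as '{' and splitting on '{' turns "a{VAR}b" into
        # ['a', 'VAR', 'b'], so any token matching a defined variable name can
        # simply be replaced by its value in the loop below.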
line = line.replace('}','{')
line = line.split('{')
for x in line:
if x in variables:
retval += variables[x]
else:
retval += x
return retval
def preprocess( self, vars ):
while True:
line = self.fp.readline()
if len(line) > 0:
if line[0] != '#' and not line.isspace():
if line[0:len('[VAR]')] == '[VAR]':
tag, rem = line.split(' ',1);
var,value = rem.split('=');
vars[var] = value.rstrip()
else:
self.buffer.write( self.substitute(line,vars) )
else:
return
def getKeyValue( self ):
if self.lastLine == None:
return None,None
value = ''
if self.lastLine[0] != '[':
sys.exit('badly formed file ');
tag = None
rem = ''
try:
tag, rem = self.lastLine.split(' ',1);
rem = rem.replace('\n', ' ')
except ValueError:
tag = self.lastLine
tag = tag.replace('\n', ' ')
value = value + rem
while True:
self.lastLine = self.getline()
if self.lastLine == None:
break
if self.lastLine[0] == '[':
break
else:
value = value + self.lastLine.replace('\n',' ')
return tag, value
def getline(self):
while True:
line = self.buffer.readline()
if len(line) > 0:
if not line.isspace() and line[0] != '#':
return line
else:
return None
```
#### File: ember/test/paramUtils.py
```python
import sys
Truncate=40
def truncate(value):
if Truncate and len(value) > Truncate:
return value[0:Truncate] + '...'
else:
return value
def updateDict( name, params, key, value ):
if key in params:
if str(value) != str(params[key]):
print "override {0} {1}={2} with {3}".format( name, key, params[key], truncate(value) )
params[ key ] = value
else:
print "set {0} {1}={2}".format( name, key, truncate(value) )
params[ key ] = value
def updateParams( params, merlinParams, nicParams, emberParams ):
for key, value in params.items():
prefix, suffix = key.split(':',1)
if prefix == 'nic':
updateDict( 'nicParams', nicParams, suffix, value )
elif prefix == 'ember':
updateDict( 'emberParams', emberParams, suffix, value )
elif prefix == 'merlin':
updateDict( 'merlinParams', merlinParams, suffix, value )
else:
sys.exit('ERROR: unknown dictionary {0}'.format(prefix))
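# Illustrative usage sketch; the parameter keys and values are hypothetical,
# only the "nic:"/"ember:"/"merlin:" prefix routing comes from the code above.
#
#   nicParams, emberParams, merlinParams = {}, {}, {}
#   jobParams = {"nic:verboseLevel": 1, "ember:motifLog": "motif.log"}
#   updateParams(jobParams, merlinParams, nicParams, emberParams)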
```
#### File: memHierarchy/tests/testBackendCramSim.py
```python
import sst
from mhlib import componentlist
def read_arguments():
boolUseDefaultConfig = True
def setup_config_params():
l_params = {}
if g_boolUseDefaultConfig:
print "Config file not found... using default configuration"
l_params = {
"clockCycle": "1ns",
"stopAtCycle": "10us",
"numChannels":"""1""",
"numRanksPerChannel":"""2""",
"numBankGroupsPerRank":"""2""",
"numBanksPerBankGroup":"""2""",
"numRowsPerBank":"""32768""",
"numColsPerBank":"""2048""",
"numBytesPerTransaction":"""32""",
"relCommandWidth":"""1""",
"readWriteRatio":"""1""",
"boolUseReadA":"""0""",
"boolUseWriteA":"""0""",
"boolUseRefresh":"""0""",
"boolAllocateCmdResACT":"""0""",
"boolAllocateCmdResREAD":"""1""",
"boolAllocateCmdResREADA":"""1""",
"boolAllocateCmdResWRITE":"""1""",
"boolAllocateCmdResWRITEA":"""1""",
"boolAllocateCmdResPRE":"""0""",
"boolCmdQueueFindAnyIssuable":"""1""",
"boolPrintCmdTrace":"""0""",
"strAddressMapStr":"""_r_l_R_B_b_h_""",
"bankPolicy":"CLOSE",
"nRC":"""55""",
"nRRD":"""4""",
"nRRD_L":"""6""",
"nRRD_S":"""4""",
"nRCD":"""16""",
"nCCD":"""4""",
"nCCD_L":"""6""",
"nCCD_L_WR":"""1""",
"nCCD_S":"""4""",
"nAL":"""15""",
"nCL":"""16""",
"nCWL":"""12""",
"nWR":"""18""",
"nWTR":"""3""",
"nWTR_L":"""9""",
"nWTR_S":"""3""",
"nRTW":"""4""",
"nEWTR":"""6""",
"nERTW":"""6""",
"nEWTW":"""6""",
"nERTR":"""6""",
"nRAS":"""39""",
"nRTP":"""9""",
"nRP":"""16""",
"nRFC":"""420""",
"nREFI":"""9360""",
"nFAW":"""16""",
"nBL":"""4"""
}
else:
l_configFile = open(g_config_file, 'r')
for l_line in l_configFile:
l_tokens = l_line.split(' ')
#print l_tokens[0], ": ", l_tokens[1]
l_params[l_tokens[0]] = l_tokens[1]
return l_params
# Command line arguments
g_boolUseDefaultConfig = True
#g_config_file = ""
# Setup global parameters
#[g_boolUseDefaultConfig, g_config_file] = read_arguments()
g_params = setup_config_params()
# Define SST core options
sst.setProgramOption("timebase", "1ps")
#sst.setProgramOption("stopAtCycle", "11000us")
# Define the simulation components
comp_cpu0 = sst.Component("cpu0", "memHierarchy.trivialCPU")
comp_cpu0.addParams({
"commFreq" : "100",
"rngseed" : "101",
"do_write" : "1",
"num_loadstore" : "1000",
"memSize" : "0x100000",
})
iface0 = comp_cpu0.setSubComponent("memory", "memHierarchy.memInterface")
comp_c0_l1cache = sst.Component("c0.l1cache", "memHierarchy.Cache")
comp_c0_l1cache.addParams({
"access_latency_cycles" : "5",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MSI",
"associativity" : "4",
"cache_line_size" : "64",
"cache_size" : "4 KB",
"L1" : "1",
"debug" : "0"
})
comp_cpu1 = sst.Component("cpu1", "memHierarchy.trivialCPU")
comp_cpu1.addParams({
"commFreq" : "100",
"rngseed" : "301",
"do_write" : "1",
"num_loadstore" : "1000",
"memSize" : "0x100000",
})
iface1 = comp_cpu1.setSubComponent("memory", "memHierarchy.memInterface")
comp_c1_l1cache = sst.Component("c1.l1cache", "memHierarchy.Cache")
comp_c1_l1cache.addParams({
"access_latency_cycles" : "5",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MSI",
"associativity" : "4",
"cache_line_size" : "64",
"cache_size" : "4 KB",
"L1" : "1",
"debug" : "0"
})
comp_n0_bus = sst.Component("n0.bus", "memHierarchy.Bus")
comp_n0_bus.addParams({
"bus_frequency" : "2 Ghz"
})
comp_n0_l2cache = sst.Component("n0.l2cache", "memHierarchy.Cache")
comp_n0_l2cache.addParams({
"access_latency_cycles" : "20",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MSI",
"associativity" : "8",
"cache_line_size" : "64",
"cache_size" : "32 KB",
"debug" : "0"
})
comp_cpu2 = sst.Component("cpu2", "memHierarchy.trivialCPU")
iface2 = comp_cpu2.setSubComponent("memory", "memHierarchy.memInterface")
comp_cpu2.addParams({
"commFreq" : "100",
"rngseed" : "501",
"do_write" : "1",
"num_loadstore" : "1000",
"memSize" : "0x100000",
})
comp_c2_l1cache = sst.Component("c2.l1cache", "memHierarchy.Cache")
comp_c2_l1cache.addParams({
"access_latency_cycles" : "5",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MSI",
"associativity" : "4",
"cache_line_size" : "64",
"cache_size" : "4 KB",
"L1" : "1",
"debug" : "0"
})
comp_cpu3 = sst.Component("cpu3", "memHierarchy.trivialCPU")
iface3 = comp_cpu3.setSubComponent("memory", "memHierarchy.memInterface")
comp_cpu3.addParams({
"commFreq" : "100",
"rngseed" : "701",
"do_write" : "1",
"num_loadstore" : "1000",
"memSize" : "0x100000",
})
comp_c3_l1cache = sst.Component("c3.l1cache", "memHierarchy.Cache")
comp_c3_l1cache.addParams({
"access_latency_cycles" : "5",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MSI",
"associativity" : "4",
"cache_line_size" : "64",
"cache_size" : "4 KB",
"L1" : "1",
"debug" : "0"
})
comp_n1_bus = sst.Component("n1.bus", "memHierarchy.Bus")
comp_n1_bus.addParams({
"bus_frequency" : "2 Ghz"
})
comp_n1_l2cache = sst.Component("n1.l2cache", "memHierarchy.Cache")
comp_n1_l2cache.addParams({
"access_latency_cycles" : "20",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MSI",
"associativity" : "8",
"cache_line_size" : "64",
"cache_size" : "32 KB",
"debug" : "0"
})
comp_n2_bus = sst.Component("n2.bus", "memHierarchy.Bus")
comp_n2_bus.addParams({
"bus_frequency" : "2 Ghz"
})
l3cache = sst.Component("l3cache", "memHierarchy.Cache")
l3cache.addParams({
"access_latency_cycles" : "100",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MSI",
"associativity" : "16",
"cache_line_size" : "64",
"cache_size" : "64 KB",
"debug" : "0",
"network_bw" : "25GB/s",
})
comp_chiprtr = sst.Component("chiprtr", "merlin.hr_router")
comp_chiprtr.addParams({
"xbar_bw" : "1GB/s",
"link_bw" : "1GB/s",
"input_buf_size" : "1KB",
"num_ports" : "2",
"flit_size" : "72B",
"output_buf_size" : "1KB",
"id" : "0",
"topology" : "merlin.singlerouter"
})
comp_dirctrl = sst.Component("dirctrl", "memHierarchy.DirectoryController")
comp_dirctrl.addParams({
"coherence_protocol" : "MSI",
"debug" : "0",
"entry_cache_size" : "32768",
"network_bw" : "25GB/s",
"addr_range_end" : "0x1F000000",
"addr_range_start" : "0x0"
})
comp_memctrl = sst.Component("memory", "memHierarchy.MemController")
comp_memctrl.addParams({
"debug" : "0",
"clock" : "1GHz",
"request_width" : "64"
})
comp_memory = comp_memctrl.setSubComponent("backend", "memHierarchy.cramsim")
comp_memory.addParams({
"access_time" : "2 ns", # Phy latency
"mem_size" : "512MiB",
})
# txn gen <--> memHierarchy Bridge
comp_memhBridge = sst.Component("memh_bridge", "CramSim.c_MemhBridge")
comp_memhBridge.addParams(g_params);
comp_memhBridge.addParams({
"verbose" : "0",
"numTxnPerCycle" : g_params["numChannels"],
"strTxnTraceFile" : "arielTrace",
"boolPrintTxnTrace" : "1"
})
# controller
comp_controller0 = sst.Component("MemController0", "CramSim.c_Controller")
comp_controller0.addParams(g_params)
comp_controller0.addParams({
"verbose" : "0",
"TxnConverter" : "CramSim.c_TxnConverter",
"AddrHasher" : "CramSim.c_AddressHasher",
"CmdScheduler" : "CramSim.c_CmdScheduler" ,
"DeviceController" : "CramSim.c_DeviceController"
})
# bank receiver
comp_dimm0 = sst.Component("Dimm0", "CramSim.c_Dimm")
comp_dimm0.addParams(g_params)
# Enable statistics
sst.setStatisticLoadLevel(7)
sst.setStatisticOutput("sst.statOutputConsole")
for a in componentlist:
sst.enableAllStatisticsForComponentType(a)
# Define the simulation links
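# Topology wired up below: cpu0/cpu1 -> private L1s -> n0 bus -> n0 L2, and
# cpu2/cpu3 -> private L1s -> n1 bus -> n1 L2; both L2s meet on the n2 bus in
# front of the shared L3, which reaches the directory controller over a
# single-router network; the directory drives the memory controller, whose
# CramSim backend talks to the memhBridge -> CramSim controller -> DIMM chain.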
link_c0_l1cache = sst.Link("link_c0_l1cache")
link_c0_l1cache.connect( (iface0, "port", "1000ps"), (comp_c0_l1cache, "high_network_0", "1000ps") )
link_c0L1cache_bus = sst.Link("link_c0L1cache_bus")
link_c0L1cache_bus.connect( (comp_c0_l1cache, "low_network_0", "10000ps"), (comp_n0_bus, "high_network_0", "10000ps") )
link_c1_l1cache = sst.Link("link_c1_l1cache")
link_c1_l1cache.connect( (iface1, "port", "1000ps"), (comp_c1_l1cache, "high_network_0", "1000ps") )
link_c1L1cache_bus = sst.Link("link_c1L1cache_bus")
link_c1L1cache_bus.connect( (comp_c1_l1cache, "low_network_0", "10000ps"), (comp_n0_bus, "high_network_1", "10000ps") )
link_bus_n0L2cache = sst.Link("link_bus_n0L2cache")
link_bus_n0L2cache.connect( (comp_n0_bus, "low_network_0", "10000ps"), (comp_n0_l2cache, "high_network_0", "10000ps") )
link_n0L2cache_bus = sst.Link("link_n0L2cache_bus")
link_n0L2cache_bus.connect( (comp_n0_l2cache, "low_network_0", "10000ps"), (comp_n2_bus, "high_network_0", "10000ps") )
link_c2_l1cache = sst.Link("link_c2_l1cache")
link_c2_l1cache.connect( (iface2, "port", "1000ps"), (comp_c2_l1cache, "high_network_0", "1000ps") )
link_c2L1cache_bus = sst.Link("link_c2L1cache_bus")
link_c2L1cache_bus.connect( (comp_c2_l1cache, "low_network_0", "10000ps"), (comp_n1_bus, "high_network_0", "10000ps") )
link_c3_l1cache = sst.Link("link_c3_l1cache")
link_c3_l1cache.connect( (iface3, "port", "1000ps"), (comp_c3_l1cache, "high_network_0", "1000ps") )
link_c3L1cache_bus = sst.Link("link_c3L1cache_bus")
link_c3L1cache_bus.connect( (comp_c3_l1cache, "low_network_0", "10000ps"), (comp_n1_bus, "high_network_1", "10000ps") )
link_bus_n1L2cache = sst.Link("link_bus_n1L2cache")
link_bus_n1L2cache.connect( (comp_n1_bus, "low_network_0", "10000ps"), (comp_n1_l2cache, "high_network_0", "10000ps") )
link_n1L2cache_bus = sst.Link("link_n1L2cache_bus")
link_n1L2cache_bus.connect( (comp_n1_l2cache, "low_network_0", "10000ps"), (comp_n2_bus, "high_network_1", "10000ps") )
link_bus_l3cache = sst.Link("link_bus_l3cache")
link_bus_l3cache.connect( (comp_n2_bus, "low_network_0", "10000ps"), (l3cache, "high_network_0", "10000ps") )
link_cache_net_0 = sst.Link("link_cache_net_0")
link_cache_net_0.connect( (l3cache, "directory", "10000ps"), (comp_chiprtr, "port1", "2000ps") )
link_dir_net_0 = sst.Link("link_dir_net_0")
link_dir_net_0.connect( (comp_chiprtr, "port0", "2000ps"), (comp_dirctrl, "network", "2000ps") )
link_dir_mem_link = sst.Link("link_dir_mem_link")
link_dir_mem_link.connect( (comp_dirctrl, "memory", "10000ps"), (comp_memctrl, "direct_link", "10000ps") )
link_dir_cramsim_link = sst.Link("link_dir_cramsim_link")
link_dir_cramsim_link.connect( (comp_memory, "cramsim_link", "2ns"), (comp_memhBridge, "cpuLink", "2ns") )
# memhBridge(=TxnGen) <-> Memory Controller
memHLink = sst.Link("memHLink_1")
memHLink.connect( (comp_memhBridge, "memLink", g_params["clockCycle"]), (comp_controller0, "txngenLink", g_params["clockCycle"]) )
# Controller <-> Dimm
cmdLink = sst.Link("cmdLink_1")
cmdLink.connect( (comp_controller0, "memLink", g_params["clockCycle"]), (comp_dimm0, "ctrlLink", g_params["clockCycle"]) )
# End of generated output.
``` |
{
"source": "jleighfields/electric_generation_planning",
"score": 2
} |
#### File: electric_generation_planning/src/LP_ortools_func.py
```python
def run_lp(run_name, inputs):
"""Run a linear program that minimizes costs with the constraints:
1. load must be served
2. battery state of charge limits
3. RE and hydro fixed profiles
    This optimization does not consider an outside market; it only minimizes
costs to serve native load with native resources.
Keyword arguments:
peak_load -- peak load to scale load profile
min_obj -- objective to minimize, cost or co2
max_batt_mw -- max battery capacity to install
min_batt_mw -- min battery capacity to install
max_gas_mw -- max gas capacity to install
min_gas_mw -- min gas capacity to install
max_wind_mw -- max wind capacity to install
min_wind_mw -- min wind capacity to install
max_solar_mw -- max solar capacity to install
min_solar_mw -- min solar capacity to install
restrict_gas -- the maximum amount of gas generation as a percent of load
min_charge_level -- minimum charge level of batteries
init_ch_level -- initial charge level of batteries
batt_hours -- duration of hours for batteries
batt_eff -- efficiency of batteries
use_outside_energy -- use outside energy to meet load
outside_energy_cost -- cost of outside energy
gas_mw_cost -- gas fixed cost $/MW including carbon costs
gas_mwh_cost -- gas variable cost $/MWh including carbon costs
batt_mw_cost -- battery fixed cost $/MW including carbon costs
wind_mw_cost -- wind fixed costs $/MW including carbon costs
wind_mwh_cost -- wind variable costs $/MWh including carbon costs
solar_mw_cost -- solar fixed costs $/MW including carbon costs
solar_mwh_cost -- solar variable costs $/MWh including carbon costs
re_outage_start -- start date for RE outage stress test
re_outage_days -- number of days for RE outage
co2_cost -- cost of carbon emissions
gas_co2_ton_per_mwh -- emissions from energy generation
gas_co2_ton_per_mw -- emissions associated with construction, O&M, decommission
wind_co2_ton_per_mwh -- emissions from energy generation
wind_co2_ton_per_mw -- emissions associated with construction, O&M, decommission
solar_co2_ton_per_mwh -- emissions from energy generation
solar_co2_ton_per_mw -- emissions associated with construction, O&M, decommission
batt_co2_ton_per_mw -- emissions associated with construction, O&M, decommission
"""
import pandas as pd
import numpy as np
import seaborn as sns
import time
# import ortools
from ortools.linear_solver import pywraplp
import sys
########################################################
# set inputs for optimization
########################################################
peak_load = inputs['peak_load']
min_obj = inputs['min_obj']
max_batt_mw = inputs['max_batt_mw']
min_batt_mw = inputs['min_batt_mw']
max_gas_mw = inputs['max_gas_mw']
min_gas_mw = inputs['min_gas_mw']
max_wind_mw = inputs['max_wind_mw']
min_wind_mw = inputs['min_wind_mw']
max_solar_mw = inputs['max_solar_mw']
min_solar_mw = inputs['min_solar_mw']
restrict_gas = inputs['restrict_gas']
min_charge_level = inputs['min_charge_level']
init_ch_level = inputs['init_ch_level']
batt_hours = inputs['batt_hours']
batt_eff = inputs['batt_eff']
use_outside_energy = inputs['use_outside_energy']
outside_energy_cost = inputs['outside_energy_cost']
gas_mw_cost = inputs['gas_mw_cost']
gas_mwh_cost = inputs['gas_mwh_cost']
batt_mw_cost = inputs['batt_mw_cost']
wind_mw_cost = inputs['wind_mw_cost']
wind_mwh_cost = inputs['wind_mwh_cost']
solar_mw_cost = inputs['solar_mw_cost']
solar_mwh_cost = inputs['solar_mwh_cost']
re_outage_start = inputs['re_outage_start']
re_outage_days = inputs['re_outage_days']
co2_cost = inputs['co2_cost']
gas_co2_ton_per_mwh = inputs['gas_co2_ton_per_mwh']
gas_co2_ton_per_mw = inputs['gas_co2_ton_per_mw']
wind_co2_ton_per_mwh = inputs['wind_co2_ton_per_mwh']
wind_co2_ton_per_mw = inputs['wind_co2_ton_per_mw']
solar_co2_ton_per_mwh = inputs['solar_co2_ton_per_mwh']
solar_co2_ton_per_mw = inputs['solar_co2_ton_per_mw']
batt_co2_ton_per_mw = inputs['batt_co2_ton_per_mw']
# boolean for debug printing
debug_print = False
# restrict gas to a portion of total load, 0-1 or None
# e.g. 0.05 -> 5% limit on gas generation
# and 1.0 -> no limit on gas generation
# divide by 100 since input is in percentages
restrict_gas = restrict_gas / 100
# read profile data for load and re gen
df = pd.read_csv('src/profiles.csv', index_col='Hour')
df = df[df.index < 8760]
df['2030_load'] = df.load * peak_load
# get RE outage times index
outage_hours = pd.date_range(re_outage_start, periods=24 * re_outage_days, freq="H")
idx = np.isin(pd.date_range('2030-01-01', periods=8760, freq="H"), outage_hours, assume_unique=True)
# apply RE outages
df.loc[idx, ['solar', 'wind']] = 0
########################################################
# Build optimization model
# create decision variables and constraints
########################################################
# start timer
total_time_0 = time.time()
# Create the linear solver with the GLOP backend.
solver = pywraplp.Solver('simple_lp_program', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
# build capacity decision variables for the resources
batt = solver.NumVar(min_batt_mw, max_batt_mw, 'batt')
solar = solver.NumVar(min_solar_mw, max_solar_mw, 'solar')
wind = solver.NumVar(min_wind_mw, max_wind_mw, 'wind')
gas = solver.NumVar(min_gas_mw, max_gas_mw, 'gas')
print('Adding variables for build capacity')
print('Number of variables =', solver.NumVariables(), '\n')
# generation decision variables
print('Adding hourly variables and constraints')
t0 = time.time()
    # create arrays to hold hourly variables
batt_ch = [None] * len(df.index)
batt_disch = [None] * len(df.index)
wind_gen = [None] * len(df.index)
solar_gen = [None] * len(df.index)
gas_gen = [None] * len(df.index)
hydro_gen = [None] * len(df.index)
SOC = [None] * len(df.index)
if use_outside_energy:
outside_energy = [None] * len(df.index)
t_h = time.time() # for tracking time in hourly loop
for h in df.index:
if h % 100 == 0 and debug_print:
print("h: ", h, "\tET: ", round(time.time() - t_h, 2))
t_h = time.time()
# add hourly decision variables
batt_ch[h] = solver.NumVar(0, solver.infinity(), 'batt_ch[{}]'.format(h))
batt_disch[h] = solver.NumVar(0, solver.infinity(), 'batt_disch[{}]'.format(h))
wind_gen[h] = solver.NumVar(0, solver.infinity(), 'wind_gen[{}]'.format(h))
solar_gen[h] = solver.NumVar(0, solver.infinity(), 'solar_gen[{}]'.format(h))
gas_gen[h] = solver.NumVar(0, solver.infinity(), 'gas_gen[{}]'.format(h))
hydro_gen[h] = solver.NumVar(0, solver.infinity(), 'hydro_gen[{}]'.format(h))
SOC[h] = solver.NumVar(0, solver.infinity(), 'SOC[{}]'.format(h))
if use_outside_energy:
outside_energy[h] = solver.NumVar(0, solver.infinity(), 'outside_energy[{}]'.format(h))
# add hourly constraints
# set SOC[h] equal to previous hour SOC
# plus the change from charging or discharging
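        # (equality is enforced as a matching pair of <= and >= constraints,
        # i.e. SOC[h] == previous SOC + charge - discharge / efficiency)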
if h == 0:
            solver.Add(SOC[h] <= init_ch_level * batt_hours * batt + batt_ch[h] - batt_disch[h] / batt_eff)
            solver.Add(SOC[h] >= init_ch_level * batt_hours * batt + batt_ch[h] - batt_disch[h] / batt_eff)
else:
solver.Add(SOC[h] <= SOC[h - 1] + batt_ch[h] - batt_disch[h] / batt_eff)
solver.Add(SOC[h] >= SOC[h - 1] + batt_ch[h] - batt_disch[h] / batt_eff)
# fix hourly hydro profile
solver.Add(hydro_gen[h] <= df.loc[h, 'hydro'])
solver.Add(hydro_gen[h] >= df.loc[h, 'hydro'])
# SOC mwh constraints
# max mwh constraint
solver.Add(SOC[h] <= batt_hours * batt)
# min mwh constraint
solver.Add(SOC[h] >= min_charge_level * batt_hours * batt)
# fix hourly RE gen profiles
solver.Add(solar_gen[h] >= df.solar[h] * solar)
solver.Add(solar_gen[h] <= df.solar[h] * solar)
solver.Add(wind_gen[h] >= df.wind[h] * wind)
solver.Add(wind_gen[h] <= df.wind[h] * wind)
# hourly demand constraints
# must be able to serve load
if use_outside_energy:
solver.Add(hydro_gen[h] +
solar_gen[h] + wind_gen[h] +
gas_gen[h] +
batt_disch[h] - batt_ch[h] +
outside_energy[h]
>= df['2030_load'][h])
# only import 20% of demand
solver.Add(outside_energy[h] <= 0.2 * df['2030_load'][h])
else:
solver.Add(hydro_gen[h] +
solar_gen[h] + wind_gen[h] +
gas_gen[h] +
batt_disch[h] - batt_ch[h]
>= df['2030_load'][h])
# hourly generation constraints base on installed capacity
solver.Add(batt_ch[h] <= batt)
solver.Add(batt_disch[h] <= batt)
solver.Add(gas_gen[h] <= gas)
# total gas gen constraint
if restrict_gas != None:
solver.Add(solver.Sum(gas_gen) <= restrict_gas * sum(df['2030_load']))
if use_outside_energy:
# no more than 5% total imports
solver.Add(solver.Sum(outside_energy) <= float(0.05 * df['2030_load'].sum()))
t1 = time.time()
print('time to build model (seconds): {0:,.2f}\n'.format((t1 - t0), 1))
print('Number of variables: {0:,}'.format(solver.NumVariables()))
    print('Number of constraints: {0:,}'.format(solver.NumConstraints()))
print('\n', flush=True)
########################################################
# Build objective function
########################################################
objective = solver.Objective()
if min_obj == 'minimize cost':
# set the coefficients in the objective function for the capacity variables
objective.SetCoefficient(batt, batt_mw_cost)
objective.SetCoefficient(solar, solar_mw_cost)
objective.SetCoefficient(wind, wind_mw_cost)
objective.SetCoefficient(gas, gas_mw_cost)
# add energy costs
for h in df.index:
objective.SetCoefficient(gas_gen[h], gas_mwh_cost)
objective.SetCoefficient(wind_gen[h], wind_mwh_cost)
objective.SetCoefficient(solar_gen[h], solar_mwh_cost)
if use_outside_energy:
objective.SetCoefficient(outside_energy[h], outside_energy_cost)
else:
# set the co2 per mw coefficients
objective.SetCoefficient(batt, batt_co2_ton_per_mw)
objective.SetCoefficient(solar, solar_co2_ton_per_mw)
objective.SetCoefficient(wind, wind_co2_ton_per_mw)
objective.SetCoefficient(gas, gas_co2_ton_per_mw)
for h in df.index:
objective.SetCoefficient(gas_gen[h], gas_co2_ton_per_mwh)
objective.SetCoefficient(solar_gen[h], solar_co2_ton_per_mwh)
objective.SetCoefficient(wind_gen[h], wind_co2_ton_per_mwh)
# assume outside energy is worse than gas
if use_outside_energy:
objective.SetCoefficient(outside_energy[h], 2 * gas_co2_ton_per_mwh)
for h in df.index:
# disincentivize charging and discharging at the same time
# this removes hours that both charge and discharge
objective.SetCoefficient(batt_disch[h], 0.0000002)
# benefit to keeping the batteries charged
objective.SetCoefficient(SOC[h], -0.0000001)
# minimize the cost to serve the system
objective.SetMinimization()
########################################################
# solve the system
########################################################
print('Starting optimization...')
t0 = time.time()
status = solver.Solve()
t1 = time.time()
print('time to solve (minutes): {0:,.2f}\n'.format((t1 - t0) / 60, 1))
print('Solution is optimal: ', status == solver.OPTIMAL, '\n')
obj_val = objective.Value()
print('Solution:')
print('Objective value = {0:,.0f}\n'.format(obj_val))
print('Build variables:')
batt_mw = batt.solution_value()
solar_mw = solar.solution_value()
wind_mw = wind.solution_value()
gas_mw = gas.solution_value()
cap_mw = {'batt_mw': batt_mw,
'solar_mw': solar_mw,
'wind_mw': wind_mw,
'gas_mw': gas_mw}
# print results for build variables
for r in [batt, solar, wind, gas]:
print('{0} \t= {1:,.0f}'.format(r, r.solution_value()) + '\n')
print('\n')
########################################################
# get the solved values to return to user
########################################################
# create a new data frame to hold the final solution values
print('Gathering hourly data...\n')
final_df = df[['Date', '2030_load', 'solar', 'wind']].copy()
final_df['solar'] = final_df['solar'] * solar.solution_value()
final_df['wind'] = final_df['wind'] * wind.solution_value()
final_df['gas'] = 0
final_df['batt_charge'] = 0
final_df['batt_discharge'] = 0
final_df['SOC'] = 0
final_df['crsp'] = 0
final_df['lap'] = 0
if use_outside_energy: final_df['outside_energy'] = 0
# get the battery charge and discharge by hour
batt_ch_hourly = [None] * len(df.index)
batt_disch_hourly = [None] * len(df.index)
for h in df.index:
batt_ch_hourly[h] = batt_ch[h].solution_value()
batt_disch_hourly[h] = batt_disch[h].solution_value()
# get cumulative sums for calculating SOC
batt_ch_hourly = np.cumsum(batt_ch_hourly)
batt_disch_hourly = np.cumsum(batt_disch_hourly)
# get the hourly data
final_df['gas'] = [gas_gen[h].solution_value() for h in range(df.shape[0])]
final_df['batt_charge'] = [batt_ch[h].solution_value() for h in range(df.shape[0])]
final_df['batt_discharge'] = [batt_disch[h].solution_value() for h in range(df.shape[0])]
final_df['SOC'] = [SOC[h].solution_value() for h in range(df.shape[0])]
final_df['hydro'] = [hydro_gen[h].solution_value() for h in range(df.shape[0])]
if use_outside_energy:
final_df['outside_energy'] = [outside_energy[h].solution_value() for h in range(df.shape[0])]
# calc net load for a check on the results
if use_outside_energy:
final_df['net_load'] = round((final_df['hydro'] +
final_df['solar'] +
final_df['wind'] +
final_df['gas'] +
final_df['batt_discharge'] -
final_df['batt_charge'] +
final_df['outside_energy'] -
final_df['2030_load']), 2)
else:
final_df['net_load'] = round((final_df['hydro'] +
final_df['solar'] +
final_df['wind'] +
final_df['gas'] +
final_df['batt_discharge'] -
final_df['batt_charge'] -
final_df['2030_load']), 2)
final_df['load_and_charge'] = round((final_df['batt_charge'] +
final_df['2030_load']), 2)
# set the index to hours in 2030
final_df.set_index(
pd.date_range(start='2030-01-01 01:00:00', periods=final_df.shape[0], freq='h'),
inplace=True
)
# summarize the data
# print('Summary of hourly data:\n')
# print(final_df.describe().T)
# print('\n')
# this should be empty...
# print('Any negative net load? Should be empty...')
# print(final_df[(final_df.net_load < 0)].T)
# print('\n')
# this should be empty...
# print('Any hours with both charging and discharging? Should be empty...')
# print(final_df[(final_df.batt_discharge > 0) & (final_df.batt_charge > 0)].T)
# print('\n')
########################################################
# calculate metrics to return to the user
########################################################
metrics = {}
if use_outside_energy:
outside_energy_percent = 100 * final_df.outside_energy.sum() / final_df['2030_load'].sum()
print('Outside energy as a percentage of load: {0:,.3f}%\n'.format(outside_energy_percent))
        total_outside_energy = final_df.outside_energy.sum()
print('Total outside energy: {0:,.2f} MWh\n'.format(total_outside_energy))
metrics['outside_energy_percent'] = outside_energy_percent
metrics['total_outside_energy'] = total_outside_energy
else:
outside_energy_percent = 0
print('Outside energy as a percentage of load: {0:,.3f}%\n'.format(outside_energy_percent))
total_outside_energy = 0
print('Total outside energy: {0:,.2f} MWh\n'.format(total_outside_energy))
metrics['outside_energy_percent'] = outside_energy_percent
metrics['total_outside_energy'] = total_outside_energy
gas_percent = 100 * final_df.gas.sum() / final_df['2030_load'].sum()
print('Gas generation as a percentage of load: {0:,.2f}%\n'.format(gas_percent))
metrics['gas_percent'] = gas_percent
re_percent = 100 * ((final_df.solar.sum() + final_df.wind.sum()) / final_df['2030_load'].sum())
print('RE generation as a percentage of load: {0:,.2f}%\n'.format(re_percent))
metrics['re_percent'] = re_percent
excess_gen_percent = 100 * (final_df.net_load.sum() / final_df['2030_load'].sum())
print('Excess generation as a percentage of load: {0:,.2f}%\n'.format(excess_gen_percent))
metrics['excess_gen_percent'] = excess_gen_percent
batt_efficiency = 100 * final_df.batt_discharge.sum() / final_df.batt_charge.sum()
print('Batt discharge as a percentage of batt charge: {0:,.2f}%\n'.format(batt_efficiency))
metrics['batt_efficiency'] = batt_efficiency
# calculate total co2 generation
gas_gen = final_df.gas.sum()
wind_gen = final_df.wind.sum()
solar_gen = final_df.solar.sum()
total_co2 = (
gas_co2_ton_per_mw * cap_mw['gas_mw'] + gas_co2_ton_per_mwh * gas_gen +
wind_co2_ton_per_mw * cap_mw['wind_mw'] + wind_co2_ton_per_mwh * wind_gen +
solar_co2_ton_per_mw * cap_mw['solar_mw'] + solar_co2_ton_per_mwh * solar_gen +
batt_co2_ton_per_mw * cap_mw['batt_mw']
)
total_co2_cost = total_co2 * co2_cost
metrics['total_co2_thou_tons'] = total_co2 / 1000
metrics['total_co2_cost_mill'] = total_co2_cost / 1000000
total_cost = (
gas_mw_cost * cap_mw['gas_mw'] + gas_mwh_cost * gas_gen +
wind_mw_cost * cap_mw['wind_mw'] + wind_mwh_cost * wind_gen +
solar_mw_cost * cap_mw['solar_mw'] + solar_mwh_cost * solar_gen +
batt_mw_cost * cap_mw['batt_mw']
)
metrics['total_cost_mill'] = total_cost / 1000000
metrics['total_gen_cost_mill'] = metrics['total_cost_mill'] - metrics['total_co2_cost_mill']
total_time_1 = time.time()
print('total time to build, solve, and verify (minutes): {0:,.2f}\n'.format((total_time_1 - total_time_0) / 60))
print('\n', flush=True)
# return dictionary for displaying results
return {'run_name': run_name,
'inputs': inputs,
'obj_val': obj_val,
'cap_mw': cap_mw,
'metrics': metrics,
'final_df': final_df}
# for testing
if __name__ == '__main__':
print('\n')
import datetime
from joblib import dump, load
# set up inputs for optimization
# select year for profiles
profile_year = 'mix'
use_outside_energy = True
outside_energy_cost = 10000
# restrict gas to a portion of total load, 0-1 or None
# e.g. 0.05 -> 5% limit on gas generation
# and 1.0 -> no limit on gas generation
restrict_gas = 20
# battery parameters
min_charge_level = 0.1
init_ch_level = 0.5
batt_hours = 4
batt_eff = 0.85
# cost of CO2
# C02 values from CSU study
gas_co2_ton_per_mwh = (411 + 854) / 2000
# assumed 20 year life
gas_co2_ton_per_mw = (5981 + 1000 + 35566 + 8210 + 10165 + 1425) / (6 * 18) / 20
wind_co2_ton_per_mwh = 0.2 / 2000
# assumed 20 year life
wind_co2_ton_per_mw = (754 + 10 - 241) / 20
solar_co2_ton_per_mwh = 2.1 / 2000
# assumed 20 year life
solar_co2_ton_per_mw = (1202 + 250 - 46) / 20
# battery C02 given in lbs
# assumed 15 year life
batt_co2_ton_per_mw = (1940400 - 83481 + 4903) / 2000 / 15
# Carbon 2030 $/ton = $9.06
# co2_cost = 9.06
co2_cost = 160
# calculate costs in $/MWh and $/MW
# these costs include the cost of carbon
cc_gas_mwh = co2_cost * gas_co2_ton_per_mwh
cc_gas_mw = co2_cost * gas_co2_ton_per_mw
cc_wind_mwh = co2_cost * wind_co2_ton_per_mwh
cc_wind_mw = co2_cost * wind_co2_ton_per_mw
cc_solar_mwh = co2_cost * solar_co2_ton_per_mwh
cc_solar_mw = co2_cost * solar_co2_ton_per_mw
cc_batt_mw = co2_cost * batt_co2_ton_per_mw
# capacity cost in $/kw-mo
gas_cap_cost = 11.27
gas_fixed_cost = cc_gas_mw + gas_cap_cost * 12 * 1000 # converted to $/MW-yr
heat_rate = 8883 # btu/kwh
vom = 7.16 # $/mwh
gas_fuel_cost = 4.37 # $/mmbtu
gas_variable_cost = cc_gas_mwh + gas_fuel_cost * heat_rate / 1000 + vom
batt_cost = 8.25
batt_fixed_cost = cc_batt_mw + batt_cost * 12 * 1000 # converted to $/MW-yr
wind_kw_mo = 1.13
wind_fixed_cost = cc_wind_mw + wind_kw_mo * 12 * 1000 # converted to $/MW-yr
wind_variable_cost = cc_wind_mwh + 41.01 # $/mwh
solar_kw_mo = 1.13
solar_fixed_cost = cc_solar_mw + solar_kw_mo * 12 * 1000 # converted to $/MW-yr
solar_variable_cost = cc_solar_mwh + 33.51 # $/mwh
inputs = {}
inputs['peak_load'] = 1000
inputs['min_obj'] = 'minimize cost'
inputs['max_batt_mw'] = 3000
inputs['min_batt_mw'] = 0
inputs['max_gas_mw'] = 1000
inputs['min_gas_mw'] = 0
inputs['max_wind_mw'] = 2000
inputs['min_wind_mw'] = 0
inputs['max_solar_mw'] = 3000
inputs['min_solar_mw'] = 0
inputs['restrict_gas'] = 20
inputs['min_charge_level'] = min_charge_level
inputs['init_ch_level'] = init_ch_level
inputs['batt_hours'] = batt_hours
inputs['batt_eff'] = batt_eff
inputs['use_outside_energy'] = use_outside_energy
inputs['outside_energy_cost'] = outside_energy_cost
inputs['gas_mw_cost'] = gas_fixed_cost
inputs['gas_mwh_cost'] = gas_variable_cost
inputs['batt_mw_cost'] = batt_fixed_cost
inputs['wind_mw_cost'] = wind_fixed_cost
inputs['wind_mwh_cost'] = wind_variable_cost
inputs['solar_mw_cost'] = solar_fixed_cost
inputs['solar_mwh_cost'] = solar_variable_cost
inputs['re_outage_start'] = datetime.date(2030, 7, 3)
inputs['re_outage_days'] = 3
inputs['co2_cost'] = co2_cost
inputs['gas_co2_ton_per_mwh'] = gas_co2_ton_per_mwh
inputs['gas_co2_ton_per_mw'] = gas_co2_ton_per_mw
inputs['wind_co2_ton_per_mwh'] = wind_co2_ton_per_mwh
inputs['wind_co2_ton_per_mw'] = wind_co2_ton_per_mw
inputs['solar_co2_ton_per_mwh'] = solar_co2_ton_per_mwh
inputs['solar_co2_ton_per_mw'] = solar_co2_ton_per_mw
inputs['batt_co2_ton_per_mw'] = batt_co2_ton_per_mw
results = run_lp(run_name='test', inputs=inputs)
print(results)
print('saving results')
dump(results, 'results.joblib')
# create 2nd run for testing db.py
results2 = load('results.joblib')
results['run_name'] = 'test2'
dump(results, 'results2.joblib')
print('Finished')
``` |
{
"source": "jleightcap/ScrollingScore",
"score": 3
} |
#### File: jleightcap/ScrollingScore/main.py
```python
import os, sys
from PIL import Image
### TERMINAL ARGUMENTS ###
# -h := help
# -q := quiet
# -single := forced one page per slide
args = sys.argv[1:]
# -h, print README and quit
if "-h" in args:
with open('./README.md') as f:
print(f.read())
quit()
# -q, toggle print statements
loud = True
if "-q" in args:
loud = False
# -single, toggle forced single image per slide
double = True
if "-single" in args:
double = False
def verifyDirectory(dirname):
if not os.path.isdir(dirname):
try:
os.mkdir(dirname)
except OSError:
if loud: print("Could not create {} directory.".format(dirname))
quit()
verifyDirectory('./Sheets')
if not os.listdir('./Sheets'): # Empty sheets directory
if loud: print("No images to convert.")
verifyDirectory('./Slides')
### IMAGE MANIPULATION ###
# Is better suited for a double slide (two tall images side by side)?
def isTall(img):
# img = Image.open(img)
return img.size[0]/img.size[1] < (16/9)
# White dimensioned BG image
def bgImg(size):
return Image.new('RGB', size, (255,255,255))
def singleImage(img):
W, H = img.size
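    # Pad onto a white 16:9 canvas: extend the height if the image is wider
    # than 16:9, otherwise extend the width, then centre the original image.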
if W/H > (16/9):
size = W, int((9/16) * W)
else:
size = int((16/9) * H), H
# size = tuple(buff*x for x in size)
imgBG = bgImg(size)
imgBG.paste(img, (int((size[0] - W) / 2), int((size[1] - H) /2))) # Centered on BG
return imgBG
def twoImage(img1, img2):
# img1 = Image.open('./Sheets/{}'.format(img1))
# img2 = Image.open('./Sheets/{}'.format(img2))
W1, H1 = img1.size
W2, H2 = img2.size
imgBG = bgImg((W1 + W2, max(H1, H2)))
if H1 < H2:
imgBG.paste(img1, (0,int((H2-H1)/2)))
imgBG.paste(img2, (W1,0))
else: # H1 = H2 reduces to either case.
imgBG.paste(img1, (0,0))
imgBG.paste(img2, (W1,int((H1-H2)/2)))
return singleImage(imgBG)
def main():
imageFormats = ('.jpg', '.png') # If adding image formats, check compatibility with PIL.
pages = list(filter(lambda x: x.endswith(imageFormats), sorted(os.listdir('./Sheets'))))
pages = list(map(lambda x: Image.open('./Sheets/{}'.format(x)), pages))
os.chdir('./Slides')
filenum = 0
if double:
while pages:
if not pages[1:]:
singleImage(pages[0]).save('{}.png'.format(filenum))
if loud: print('e',pages[0])
break
elif isTall(pages[0]) and isTall(pages[1]):
twoImage(pages[0], pages[1]).save('{}.png'.format(filenum))
if loud: print('d',pages[0],pages[1])
pages = pages[2:]
else:
singleImage(pages[0]).save('{}.png'.format(filenum))
if loud: print('s',pages[0])
filenum += 1
else: # -single
for page in pages:
singleImage(page).save('{}.png'.format(filenum))
filenum += 1
if __name__ == "__main__":
main()
``` |
{
"source": "jleimhofer/transnet",
"score": 2
} |
#### File: transnet/app/CimWriter.py
```python
import io
import logging
import re
import uuid
from collections import OrderedDict
from string import maketrans
from xml.dom.minidom import parse
import ast
import ogr
import osr
from CIM14.ENTSOE.Equipment.Core import BaseVoltage, GeographicalRegion, SubGeographicalRegion, ConnectivityNode, \
Terminal
from CIM14.ENTSOE.Equipment.LoadModel import LoadResponseCharacteristic
from CIM14.ENTSOE.Equipment.Wires import PowerTransformer, SynchronousMachine, TransformerWinding
from CIM14.IEC61968.Common import Location, PositionPoint
from CIM14.IEC61970.Core import Substation
from CIM14.IEC61970.Generation.Production import GeneratingUnit
from CIM14.IEC61970.Wires import ACLineSegment, EnergyConsumer
from PyCIM import cimwrite
from shapely.ops import linemerge
from LoadEstimator import LoadEstimator
from CSVWriter import CSVWriter
class CimWriter:
circuits = None
centroid = None
population_by_station_dict = ()
voltage_levels = None
id = 0
winding_types = ['primary', 'secondary', 'tertiary']
root = logging.getLogger()
base_voltages_dict = dict()
region = SubGeographicalRegion(Region=GeographicalRegion(name='EU'))
# osm id -> cim uuid
uuid_by_osmid_dict = dict()
# cim uuid -> cim object
cimobject_by_uuid_dict = OrderedDict()
# cim uuid -> cim connectivity node object
connectivity_by_uuid_dict = dict()
def __init__(self, circuits, centroid, population_by_station_dict, voltage_levels, country_name, count_substations):
self.circuits = circuits
self.centroid = centroid
self.population_by_station_dict = population_by_station_dict
self.voltage_levels = voltage_levels
self.base_voltages_dict = dict()
self.uuid_by_osmid_dict = dict()
self.cimobject_by_uuid_dict = OrderedDict()
self.connectivity_by_uuid_dict = dict()
self.country_name = country_name
self.count_substations = count_substations
self.root = logging.getLogger()
def publish(self, file_name):
self.region.UUID = str(CimWriter.uuid())
self.cimobject_by_uuid_dict[self.region.UUID] = self.region
self.add_location(self.centroid.x, self.centroid.y, is_center=True)
total_line_length = 0
voltages = set()
cables = set()
wires = set()
types = set()
line_length = 0
for circuit in self.circuits:
station1 = circuit.members[0]
station2 = circuit.members[-1]
try:
for line_part in circuit.members[1:-1]:
tags_list = ast.literal_eval(str(line_part.tags))
line_tags = dict(zip(tags_list[::2], tags_list[1::2]))
line_tags_keys = line_tags.keys()
voltages.update([CSVWriter.try_parse_int(v) for v in line_part.voltage.split(';')])
if 'cables' in line_tags_keys:
cables.update([CSVWriter.try_parse_int(line_tags['cables'])])
if 'wires' in line_tags_keys:
wires.update(
CSVWriter.convert_wire_names_to_numbers(CSVWriter.sanitize_csv(line_tags['wires'])))
types.update([line_part.type])
line_length += line_part.length
except Exception as ex:
print('Error line_to_cim_param_extraction')
if 'station' in station1.type:
connectivity_node1 = self.substation_to_cim(station1, circuit.voltage)
elif 'plant' in station1.type or 'generator' in station1.type:
connectivity_node1 = self.generator_to_cim(station1, circuit.voltage)
else:
self.root.error('Invalid circuit! - Skip circuit')
circuit.print_circuit()
continue
if 'station' in station2.type:
connectivity_node2 = self.substation_to_cim(station2, circuit.voltage)
elif 'plant' in station2.type or 'generator' in station2.type:
connectivity_node2 = self.generator_to_cim(station2, circuit.voltage)
else:
self.root.error('Invalid circuit! - Skip circuit')
circuit.print_circuit()
continue
lines_wsg84 = []
line_length = 0
for line_wsg84 in circuit.members[1:-1]:
lines_wsg84.append(line_wsg84.geom)
line_length += line_wsg84.length
line_wsg84 = linemerge(lines_wsg84)
total_line_length += line_length
self.root.debug('Map line from (%lf,%lf) to (%lf,%lf) with length %s meters', station1.geom.centroid.y,
station1.geom.centroid.x, station2.geom.centroid.y, station2.geom.centroid.x,
str(line_length))
self.line_to_cim(connectivity_node1, connectivity_node2, line_length, circuit.name, circuit.voltage,
line_wsg84.centroid.y, line_wsg84.centroid.x, line_length, cables, voltages, wires)
# self.root.info('The inferred net\'s length is %s meters', str(total_line_length))
self.attach_loads()
cimwrite(self.cimobject_by_uuid_dict, file_name + '.xml', encoding='utf-8')
cimwrite(self.cimobject_by_uuid_dict, file_name + '.rdf', encoding='utf-8')
# pretty print cim file
xml = parse(file_name + '.xml')
pretty_xml_as_string = xml.toprettyxml(encoding='utf-8')
matches = re.findall('#x[0-9a-f]{4}', pretty_xml_as_string)
for match in matches:
pretty_xml_as_string = pretty_xml_as_string.replace(match, unichr(int(match[2:len(match)], 16)))
pretty_file = io.open(file_name + '_pretty.xml', 'w', encoding='utf8')
pretty_file.write(unicode(pretty_xml_as_string))
pretty_file.close()
def substation_to_cim(self, osm_substation, circuit_voltage):
transformer_winding = None
if osm_substation.id in self.uuid_by_osmid_dict:
self.root.debug('Substation with OSMID %s already covered', str(osm_substation.id))
cim_substation = self.cimobject_by_uuid_dict[self.uuid_by_osmid_dict[osm_substation.id]]
transformer = cim_substation.getEquipments()[0] # TODO check if there is actually one equipment
for winding in transformer.getTransformerWindings():
if int(circuit_voltage) == winding.ratedU:
self.root.debug('Transformer of Substation with OSMID %s already has winding for voltage %s',
str(osm_substation.id), circuit_voltage)
transformer_winding = winding
break
else:
self.root.debug('Create CIM Substation for OSMID %s', str(osm_substation.id))
cim_substation = Substation(name='SS_' + str(osm_substation.id), Region=self.region,
Location=self.add_location(osm_substation.lat, osm_substation.lon))
transformer = PowerTransformer(name='T_' + str(osm_substation.id) + '_' + CimWriter.escape_string(
osm_substation.voltage) + '_' + CimWriter.escape_string(osm_substation.name),
EquipmentContainer=cim_substation)
cim_substation.UUID = str(CimWriter.uuid())
transformer.UUID = str(CimWriter.uuid())
self.cimobject_by_uuid_dict[cim_substation.UUID] = cim_substation
self.cimobject_by_uuid_dict[transformer.UUID] = transformer
self.uuid_by_osmid_dict[osm_substation.id] = cim_substation.UUID
if transformer_winding is None:
transformer_winding = self.add_transformer_winding(osm_substation.id, int(circuit_voltage), transformer)
return self.connectivity_by_uuid_dict[transformer_winding.UUID]
def generator_to_cim(self, generator, circuit_voltage):
if generator.id in self.uuid_by_osmid_dict:
self.root.debug('Generator with OSMID %s already covered', str(generator.id))
generating_unit = self.cimobject_by_uuid_dict[self.uuid_by_osmid_dict[generator.id]]
else:
self.root.debug('Create CIM Generator for OSMID %s', str(generator.id))
generating_unit = GeneratingUnit(name='G_' + str(generator.id), maxOperatingP=generator.nominal_power,
minOperatingP=0,
nominalP=generator.nominal_power if generator.nominal_power else '',
Location=self.add_location(generator.lat, generator.lon))
synchronous_machine = SynchronousMachine(
name='G_' + str(generator.id) + '_' + CimWriter.escape_string(generator.name),
operatingMode='generator', qPercent=0, x=0.01,
r=0.01, ratedS='' if generator.nominal_power is None else generator.nominal_power, type='generator',
GeneratingUnit=generating_unit, BaseVoltage=self.base_voltage(int(circuit_voltage)))
generating_unit.UUID = str(CimWriter.uuid())
synchronous_machine.UUID = str(CimWriter.uuid())
self.cimobject_by_uuid_dict[generating_unit.UUID] = generating_unit
self.cimobject_by_uuid_dict[synchronous_machine.UUID] = synchronous_machine
self.uuid_by_osmid_dict[generator.id] = generating_unit.UUID
connectivity_node = ConnectivityNode(name='CN_' + str(generator.id) + '_' + circuit_voltage)
connectivity_node.UUID = str(CimWriter.uuid())
self.cimobject_by_uuid_dict[connectivity_node.UUID] = connectivity_node
terminal = Terminal(ConnectivityNode=connectivity_node, ConductingEquipment=synchronous_machine,
sequenceNumber=1)
terminal.UUID = str(CimWriter.uuid())
self.cimobject_by_uuid_dict[terminal.UUID] = terminal
self.connectivity_by_uuid_dict[generating_unit.UUID] = connectivity_node
return self.connectivity_by_uuid_dict[generating_unit.UUID]
def line_to_cim(self, connectivity_node1, connectivity_node2, length, name, circuit_voltage, lat, lon, line_length
, cables, voltages, wires):
r = 0.3257
x = 0.3153
# r0 = 0.5336
# x0 = 0.88025
r0 = 0
x0 = 0
coeffs_of_voltage = {
220000: dict(wires_typical=2.0, r=0.08, x=0.32, c=11.5, i=1.3),
380000: dict(wires_typical=4.0, r=0.025, x=0.25, c=13.7, i=2.6)
}
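        # Reference parameters per voltage level (typical conductors per bundle
        # plus specific resistance/reactance); they rescale the default r and x
        # by this line's observed wire bundle size and cable count.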
length_selected = round(line_length)
cables_selected = CSVWriter.convert_max_set_to_string(cables)
voltage_selected = CSVWriter.convert_max_set_to_string(voltages)
wires_selected = CSVWriter.convert_max_set_to_string(wires)
voltage_selected_round = 0
if 360000 <= int(voltage_selected) <= 400000:
voltage_selected_round = 380000
elif 180000 <= int(voltage_selected) <= 260000:
voltage_selected_round = 220000
try:
if length_selected and cables_selected and int(
voltage_selected_round) in coeffs_of_voltage and wires_selected:
coeffs = coeffs_of_voltage[int(voltage_selected_round)]
# Specific resistance of the transmission lines.
if coeffs['wires_typical']:
r = coeffs['r'] / (int(wires_selected) / coeffs['wires_typical']) / (
int(cables_selected) / 3.0)
# Specific reactance of the transmission lines.
x = coeffs['x'] / (int(wires_selected) / coeffs['wires_typical']) / (
int(cables_selected) / 3.0)
except Exception as ex:
print('Error line_to_cim')
line = ACLineSegment(
name=CimWriter.escape_string(name) + '_' + connectivity_node1.name + '_' + connectivity_node2.name, bch=0,
r=r, x=x, r0=r0, x0=x0, length=length, BaseVoltage=self.base_voltage(int(circuit_voltage)),
Location=self.add_location(lat, lon))
line.UUID = str(CimWriter.uuid())
self.cimobject_by_uuid_dict[line.UUID] = line
terminal1 = Terminal(ConnectivityNode=connectivity_node1, ConductingEquipment=line, sequenceNumber=1)
terminal1.UUID = str(CimWriter.uuid())
self.cimobject_by_uuid_dict[terminal1.UUID] = terminal1
terminal2 = Terminal(ConnectivityNode=connectivity_node2, ConductingEquipment=line, sequenceNumber=2)
terminal2.UUID = str(CimWriter.uuid())
self.cimobject_by_uuid_dict[terminal2.UUID] = terminal2
@staticmethod
def uuid():
return uuid.uuid1()
def increase_winding_type(self, winding):
index = 0
for winding_type in self.winding_types:
if winding_type == winding.windingType:
winding.windingType = self.winding_types[index + 1]
break
index += 1
def add_transformer_winding(self, osm_substation_id, winding_voltage, transformer):
new_transformer_winding = TransformerWinding(name='TW_' + str(osm_substation_id) + '_' + str(winding_voltage),
b=0, x=1.0, r=1.0, connectionType='Yn',
ratedU=winding_voltage, ratedS=5000000,
BaseVoltage=self.base_voltage(winding_voltage))
# init with primary
index = 0
for winding in transformer.getTransformerWindings():
# already a primary winding with at least as high voltage as the new one
if winding.ratedU >= winding_voltage:
index += 1
else:
self.increase_winding_type(winding)
new_transformer_winding.windingType = self.winding_types[index]
new_transformer_winding.setPowerTransformer(transformer)
new_transformer_winding.UUID = str(CimWriter.uuid())
self.cimobject_by_uuid_dict[new_transformer_winding.UUID] = new_transformer_winding
connectivity_node = ConnectivityNode(name='CN_' + str(osm_substation_id) + '_' + str(winding_voltage))
connectivity_node.UUID = str(CimWriter.uuid())
self.cimobject_by_uuid_dict[connectivity_node.UUID] = connectivity_node
terminal = Terminal(ConnectivityNode=connectivity_node, ConductingEquipment=new_transformer_winding,
sequenceNumber=1)
terminal.UUID = str(CimWriter.uuid())
self.cimobject_by_uuid_dict[terminal.UUID] = terminal
self.connectivity_by_uuid_dict[new_transformer_winding.UUID] = connectivity_node
return new_transformer_winding
def attach_loads(self):
for load in self.cimobject_by_uuid_dict.values():
if isinstance(load, PowerTransformer):
transformer = load
osm_substation_id = transformer.name.split('_')[1]
# self.root.info('Attach load to substation %s', osm_substation_id)
transformer_lower_voltage = CimWriter.determine_load_voltage(transformer)
self.attach_load(osm_substation_id, transformer_lower_voltage, transformer)
@staticmethod
def determine_load_voltage(transformer):
transformer_lower_voltage = transformer.getTransformerWindings()[0].ratedU
for winding in transformer.getTransformerWindings():
transformer_lower_voltage = winding.ratedU if winding.ratedU < transformer_lower_voltage \
else transformer_lower_voltage
return transformer_lower_voltage
def attach_load(self, osm_substation_id, winding_voltage, transformer):
transformer_winding = None
if len(transformer.getTransformerWindings()) >= 2:
for winding in transformer.getTransformerWindings():
if winding_voltage == winding.ratedU:
transformer_winding = winding
break
# add winding for lower voltage, if not already existing or
# add winding if sub-station is a switching station (only one voltage level)
if transformer_winding is None:
transformer_winding = self.add_transformer_winding(osm_substation_id, winding_voltage, transformer)
connectivity_node = self.connectivity_by_uuid_dict[transformer_winding.UUID]
estimated_load = LoadEstimator.estimate_load(self.population_by_station_dict[str(
osm_substation_id)]) if self.population_by_station_dict is not None else LoadEstimator.estimate_load_country(
self.country_name, self.count_substations)
load_response_characteristic = LoadResponseCharacteristic(exponentModel=False, pConstantPower=estimated_load)
load_response_characteristic.UUID = str(CimWriter.uuid())
energy_consumer = EnergyConsumer(name='L_' + osm_substation_id, LoadResponse=load_response_characteristic,
BaseVoltage=self.base_voltage(winding_voltage))
energy_consumer.UUID = str(CimWriter.uuid())
self.cimobject_by_uuid_dict[load_response_characteristic.UUID] = load_response_characteristic
self.cimobject_by_uuid_dict[energy_consumer.UUID] = energy_consumer
terminal = Terminal(ConnectivityNode=connectivity_node, ConductingEquipment=energy_consumer,
sequenceNumber=1)
terminal.UUID = str(CimWriter.uuid())
self.cimobject_by_uuid_dict[terminal.UUID] = terminal
@staticmethod
def escape_string(string):
if string is not None:
string = unicode(string.translate(maketrans('-]^$/. ', '_______')), 'utf-8')
hexstr = ''
for c in string:
if ord(c) > 127:
hexstr += "#x%04x" % ord(c)
else:
hexstr += c
return hexstr
return ''
def add_location(self, lat, lon, is_center=False):
pp = PositionPoint(yPosition=lat, xPosition=lon)
if is_center:
pp.zPosition = 1
pp.UUID = str(CimWriter.uuid())
self.cimobject_by_uuid_dict[pp.UUID] = pp
location = Location(PositionPoints=[pp])
location.UUID = str(CimWriter.uuid())
self.cimobject_by_uuid_dict[location.UUID] = location
return location
@staticmethod
def convert_mercator_to_wgs84(merc_lat, merc_lon):
# Spatial Reference System
input_epsg = 3857
output_epsg = 4326
# create a geometry from coordinates
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(merc_lon, merc_lat)
# create coordinate transformation
in_spatial_ref = osr.SpatialReference()
in_spatial_ref.ImportFromEPSG(input_epsg)
out_spatial_ref = osr.SpatialReference()
out_spatial_ref.ImportFromEPSG(output_epsg)
coord_transform = osr.CoordinateTransformation(in_spatial_ref, out_spatial_ref)
# transform point
point.Transform(coord_transform)
# return point in EPSG 4326
return point.GetY(), point.GetX()
def base_voltage(self, voltage):
if voltage in self.base_voltages_dict:
return self.base_voltages_dict[voltage]
base_voltage = BaseVoltage(nominalVoltage=voltage)
base_voltage.UUID = str(CimWriter.uuid())
self.cimobject_by_uuid_dict[base_voltage.UUID] = base_voltage
self.base_voltages_dict[voltage] = base_voltage
return base_voltage
```
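For reference, `convert_mercator_to_wgs84` above wraps GDAL's `osr` bindings. The sketch below shows the same EPSG:3857 to EPSG:4326 transformation in isolation; the coordinates are hypothetical, and on GDAL 3+ the output axis order for EPSG:4326 may be latitude/longitude unless an axis mapping strategy is set.
```python
from osgeo import ogr, osr

# A point given in Web-Mercator metres (EPSG:3857) -- hypothetical values.
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(1283810.0, 6119538.0)  # x (easting), y (northing)

in_spatial_ref = osr.SpatialReference()
in_spatial_ref.ImportFromEPSG(3857)
out_spatial_ref = osr.SpatialReference()
out_spatial_ref.ImportFromEPSG(4326)

# Reproject the point in place and read back longitude/latitude.
coord_transform = osr.CoordinateTransformation(in_spatial_ref, out_spatial_ref)
point.Transform(coord_transform)
print(point.GetX(), point.GetY())  # lon, lat on GDAL 2.x axis order
```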
#### File: transnet/app/CimWriterTest.py
```python
from datetime import datetime
from optparse import OptionParser
import psycopg2
from shapely import wkb
from CimWriter import CimWriter
from Circuit import Circuit
from Line import Line
from Station import Station
from Transnet import Transnet
# noinspection PyShadowingBuiltins,PyPep8,PyStringFormat
class CimWriterTest:
def __init__(self, database, user, host, port, password):
# Initializes the CimWriterTest class with the database connection parameters.
# These parameters are: database name, database user, database password, database host and port.
# Notice: The password will not be stored.
self.connection = {'database': database, 'user': user, 'host': host, 'port': port}
self.conn = psycopg2.connect(password=password, **self.connection)
self.cur = self.conn.cursor()
def get_connection_data(self):
# Obtain the database connection parameters.
return self.connection
def retrieve_relations(self):
circuits = []
sql = "SELECT parts FROM planet_osm_rels r1 WHERE ARRAY[27124619]::BIGINT[] <@ r1.parts AND hstore(r1.tags)->'voltage' ~ '110000|220000|380000' AND hstore(r1.tags)->'type'='route' AND hstore(r1.tags)->'route'='power'"
self.cur.execute(sql)
result = self.cur.fetchall()
for (parts,) in result:
relation = []
for part in parts:
sql = "SELECT hstore(tags)->'power' FROM planet_osm_ways WHERE id = " + str(part)
self.cur.execute(sql)
[(type,)] = self.cur.fetchall()
if 'station' in type:
sql = "SELECT id,create_polygon(id) AS geom, hstore(tags)->'power' AS type, hstore(tags)->'name' AS name, hstore(tags)->'ref' AS ref, hstore(tags)->'voltage' AS voltage, nodes, tags, ST_Y(ST_Transform(ST_Centroid(create_polygon(id)),4326)) AS lat, ST_X(ST_Transform(ST_Centroid(create_polygon(id)),4326)) AS lon FROM planet_osm_ways WHERE id = " + str(
part)
self.cur.execute(sql)
[(id, geom, type, name, ref, voltage, nodes, tags, lat, lon)] = self.cur.fetchall()
polygon = wkb.loads(geom, hex=True)
relation.append(Station(id, polygon, type, name, ref, voltage, nodes, tags, lat, lon, geom))
elif 'generator' in type or 'plant' in type:
sql = "SELECT id,create_polygon(id) AS geom, hstore(tags)->'power' AS type, hstore(tags)->'name' AS name, hstore(tags)->'ref' AS ref, hstore(tags)->'voltage' AS voltage, hstore(tags)->'plant:output:electricity' AS output1, hstore(tags)->'generator:output:electricity' AS output2, nodes, tags, ST_Y(ST_Transform(ST_Centroid(create_polygon(id)),4326)) AS lat, ST_X(ST_Transform(ST_Centroid(create_polygon(id)),4326)) AS lon FROM planet_osm_ways WHERE id = " + str(
part)
self.cur.execute(sql)
[(
id, geom, type, name, ref, voltage, output1, output2, nodes, tags, lat,
lon)] = self.cur.fetchall()
polygon = wkb.loads(geom, hex=True)
generator = Station(id, polygon, type, name, ref, voltage, nodes, tags, lat, lon, geom)
generator.nominal_power = Transnet.parse_power(
output1) if output1 is not None else Transnet.parse_power(output2)
relation.append(generator)
elif 'line' in type or 'cable' in type:
sql = "SELECT id, create_line(id) AS geom, hstore(tags)->'power' AS type, hstore(tags)->'name' AS name, hstore(tags)->'ref' AS ref, hstore(tags)->'voltage' AS voltage, hstore(tags)->'cables' AS cables, nodes, tags, ST_Y(ST_Transform(ST_Centroid(create_line(id)),4326)) AS lat, ST_X(ST_Transform(ST_Centroid(create_line(id)),4326)) AS lon FROM planet_osm_ways WHERE id = " + str(
part)
self.cur.execute(sql)
[(id, geom, type, name, ref, voltage, cables, nodes, tags, lat, lon)] = self.cur.fetchall()
line = wkb.loads(geom, hex=True)
relation.append(
Line(id, line, type, name, ref, voltage, cables, nodes, tags, lat, lon, None, None, None, geom))
else:
print('Unknown power tag ' + type)
sorted_relation = CimWriterTest.sort_relation(relation)
reference_line = CimWriterTest.get_reference_line(sorted_relation)
circuits.append(Circuit(sorted_relation, reference_line.voltage, reference_line.name, reference_line.ref))
for circuit in circuits:
circuit.print_circuit()
for circuit in circuits:
circuit.print_overpass()
print('CIM model generation started ...')
cim_writer = CimWriter(circuits, None, None, None)
cim_writer.publish('../results/cim')
return
@staticmethod
def get_reference_line(relation):
suspect1 = relation[1]
suspect2 = relation[len(relation) - 2]
if ',' in suspect1.voltage or ';' in suspect1.voltage:
return suspect2
return suspect1
@staticmethod
def sort_relation(unsorted_relation):
station1 = None
station2 = None
lines = []
for part in unsorted_relation:
if isinstance(part, Station):
if station1:
station2 = part
else:
station1 = part
else: # part is a line
lines.append(part)
sorted_circuit = []
sorted_circuit.extend(lines)
sorted_circuit.insert(0, station1)
sorted_circuit.append(station2)
return sorted_circuit
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-D", "--dbname", action="store", dest="dbname",
help="database name of the topology network")
parser.add_option("-H", "--dbhost", action="store", dest="dbhost",
help="database host address of the topology network")
parser.add_option("-P", "--dbport", action="store", dest="dbport",
help="database port of the topology network")
parser.add_option("-U", "--dbuser", action="store", dest="dbuser",
help="database user name of the topology network")
parser.add_option("-X", "--dbpwrd", action="store", dest="dbpwrd",
help="database user password of the topology network")
(options, args) = parser.parse_args()
# get connection data via command line or set to default values
dbname = options.dbname if options.dbname else 'power_de'
dbhost = options.dbhost if options.dbhost else '127.0.0.1'
dbport = options.dbport if options.dbport else '5432'
dbuser = options.dbuser if options.dbuser else 'postgres'
dbpwrd = options.dbpwrd if options.dbpwrd else '<PASSWORD>'
# Connect to DB
# noinspection PyBroadException
try:
CimWriterTest_instance = CimWriterTest(database=dbname, user=dbuser, port=dbport, host=dbhost, password=dbpwrd)
time = datetime.now()
CimWriterTest_instance.retrieve_relations()
print('Took ' + str(datetime.now() - time) + ' in total')
except Exception as e:
print "Could not connect to database. Please check the values of host,port,user,password, and database name."
parser.print_help()
exit()
```
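A minimal sketch of driving the test class directly from Python instead of through the command-line options above (assuming a local PostGIS database populated by osm2pgsql; the connection values are placeholders mirroring the defaults in the `__main__` block):
```python
from CimWriterTest import CimWriterTest

test = CimWriterTest(database='power_de', user='postgres', host='127.0.0.1',
                     port='5432', password='changeme')  # placeholder credentials
print(test.get_connection_data())  # {'database': 'power_de', 'user': 'postgres', ...}
test.retrieve_relations()          # writes the CIM model to ../results/cim
```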
#### File: transnet/app/Station.py
```python
from Way import Way
class Station(Way):
# all starting lines for which a circuit has already been extracted
covered_line_ids = None
# remember the connection to a station: map starting line to station
nominal_power = None # only used by generators
# for validation purposes
connected_stations = None
def __init__(self, _id, geom, _type, name, ref, voltage, nodes, tags, lat, lon, raw_geom):
Way.__init__(self, _id, geom, _type, name, ref, voltage, nodes, tags, lat, lon, raw_geom)
self.covered_line_ids = []
self.connected_stations = dict()
self.nominal_power = None
self.missing_voltage_estimate = None
self.missing_connection = False
def __str__(self):
return 'Station - ' + Way.__str__(self)
def add_connected_station(self, station_id, voltage):
if voltage not in self.connected_stations:
self.connected_stations[voltage] = set()
self.connected_stations[voltage].add(station_id)
def add_missing_data_estimation(self, voltage=None):
self.missing_voltage_estimate = voltage
def add_missing_connection(self):
self.missing_connection = True
def serialize(self):
station = {
'id': self.id,
'geom': str(self.geom),
'type': self.type,
'name': str(self.name),
'voltage': str(self.voltage),
'nodes': self.nodes,
'tags': str(self.tags),
'lat': str(self.lat),
'lon': str(self.lon),
'length': str(self.length()),
'raw_geom': str(self.raw_geom),
'nominal_power': str(self.nominal_power)
}
if self.missing_voltage_estimate:
station['estimated_voltage'] = self.missing_voltage_estimate
if self.missing_connection:
station['missing_connection'] = self.missing_connection
return station
```
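A small usage sketch for the Station helpers (hypothetical values; assumes the transnet app modules, including Way, are importable and that shapely is installed):
```python
from shapely.geometry import Polygon
from Station import Station

footprint = Polygon([(0.0, 0.0), (0.0, 1.0), (1.0, 1.0), (1.0, 0.0)])
station = Station(12345, footprint, 'substation', 'Example substation', None,
                  '380000;110000', None, None, 48.1, 11.5, footprint.wkt)

# Record that this substation is connected to station 67890 at the 380 kV level.
station.add_connected_station(67890, 380000)
# Flag missing OSM data so it shows up in serialize().
station.add_missing_data_estimation(voltage='380000;110000')
station.add_missing_connection()

print(station.connected_stations)  # {380000: set([67890])}
```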
#### File: transnet/app/Transnet.py
```python
import json
import logging
import sys
import urllib
from datetime import datetime
from optparse import OptionParser
from os import makedirs, remove
from os import walk
from os.path import dirname, getsize
from os.path import exists
from os.path import join
from subprocess import call
import psycopg2
import pyproj
from shapely import wkb, wkt
from shapely.geometry import MultiPoint, LinearRing
from CSVWriter import CSVWriter
from CimWriter import CimWriter
from Circuit import Circuit
from InferenceValidator import InferenceValidator
from Line import Line
from LoadEstimator import LoadEstimator
from Plotter import Plotter
from PolyParser import PolyParser
from Station import Station
root = logging.getLogger()
root.setLevel(logging.DEBUG)
class Transnet:
def __init__(self, _database, _user, _host, _port, _password, _ssid, _poly, _bpoly, _verbose, _validate,
_topology, _voltage_levels, _load_estimation, _destdir, _continent, _whole_planet, _find_missing_data,
_close_nodes, _overpass):
self.length_all = 0
self.all_lines = dict()
self.all_stations = dict()
self.all_power_planet = dict()
self.db_name = _database
self.ssid = _ssid
self.poly = _poly
self.bpoly = _bpoly
self.verbose = _verbose
self.validate = _validate
self.topology = _topology
self.voltage_levels = _voltage_levels
self.load_estimation = _load_estimation
self.destdir = _destdir
self.chose_continent = _continent
self.whole_planet = _whole_planet
self.find_missing_data = _find_missing_data
self.close_nodes = _close_nodes
self.overpass = _overpass
self.connection = {'database': _database, 'user': _user, 'host': _host, 'port': _port}
self.conn = psycopg2.connect(password=_password, **self.connection)
self.cur = self.conn.cursor()
self.covered_nodes = None
self.geod = pyproj.Geod(ellps='WGS84')
# noinspection PyMethodMayBeStatic
def prepare_poly_country(self, continent_name, country):
if not exists('../data/{0}/{1}/'.format(continent_name, country)):
makedirs('../data/{0}/{1}/'.format(continent_name, country))
root.info('Downloading poly for {0}'.format(country))
if continent_name == 'usa':
download_string = 'http://download.geofabrik.de/north-america/us/{0}.poly'.format(country)
root.info(download_string)
elif continent_name == 'germany':
download_string = 'http://download.geofabrik.de/europe/germany/{0}.poly'.format(country)
root.info(download_string)
else:
download_string = 'http://download.geofabrik.de/{0}/{1}.poly'.format(continent_name, country)
urllib.URLopener().retrieve(download_string, '../data/{0}/{1}/pfile.poly'.format(continent_name, country))
# noinspection PyMethodMayBeStatic
def prepare_poly_continent(self, continent_name):
if not exists('../data/planet/{0}/'.format(continent_name)):
makedirs('../data/planet/{0}/'.format(continent_name))
root.info('Downloading poly for {0}'.format(continent_name))
if continent_name == 'usa':
download_string = 'http://svn.openstreetmap.org/applications/utils/' \
'osm-extract/polygons/united_states_inc_ak_and_hi.poly'
elif continent_name == 'germany':
download_string = 'http://download.geofabrik.de/europe/germany.poly'
else:
download_string = 'http://download.geofabrik.de/{0}.poly'.format(continent_name)
urllib.URLopener().retrieve(download_string, '../data/planet/{0}/pfile.poly'.format(continent_name))
def reset_params(self):
self.covered_nodes = None
def create_relations(self, stations, lines, _ssid, voltage):
# root.info('\nStart inference for Substation %s', str(ssid))
relations = []
relations.extend(self.infer_relations(stations, lines, stations[_ssid]))
circuits = []
for relation in relations:
# at least two end points + one line
if self.num_subs_in_relation(relation) == 2 and len(relation) >= 3:
first_line = relation[1]
station1 = relation[0]
station2 = relation[-1]
station1.add_connected_station(station2.id, voltage)
station2.add_connected_station(station1.id, voltage)
circuit = Circuit(relation, voltage, first_line.name, first_line.ref)
circuits.append(circuit)
return circuits
# inferences circuits around a given station
# station - represents the station to infer circuits for
# stations - dict of all possibly connected stations
# lines - list of all lines that could connect stations
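# Example (hypothetical): starting from Station A, an inferred relation is an
# ordered list of Way objects with a station at each end, e.g.
#   [Station A, Line 1, Line 2, Station B]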
def infer_relations(self, stations, lines, station):
# find lines that cross the station's area - note that
# the end point of the line has to be within the substation for valid crossing
relations = []
for line in lines.values():
node_to_continue_id = None
# here it checks to find the intersecting lines and station, if no intersecting found then looks for line
# nodes with distance less than 50 meters
if self.node_intersect_with_any_station(line.end_point_dict[line.first_node()], [station]):
node_to_continue_id = line.last_node()
elif self.node_intersect_with_any_station(line.end_point_dict[line.last_node()], [station]):
node_to_continue_id = line.first_node()
if self.close_nodes and self.node_within_distance_any_station(line.end_point_dict[line.first_node()],
[station]):
node_to_continue_id = line.last_node()
elif self.close_nodes and self.node_within_distance_any_station(line.end_point_dict[line.last_node()],
[station]):
node_to_continue_id = line.first_node()
if node_to_continue_id:
self.covered_nodes = set(line.nodes)
self.covered_nodes.remove(node_to_continue_id)
if line.id in station.covered_line_ids:
root.debug('Relation with %s at %s already covered', str(line), str(station))
continue
root.debug('%s', str(station))
root.debug('%s', str(line))
station.covered_line_ids.append(line.id)
# init new circuit
# here we have the beginning of the relation which is one station with one line connected to it
relation = [station, line]
relations.extend(
self.infer_relation(stations, lines, relation, node_to_continue_id, line))
return relations
# recursive function that infers electricity circuits
# circuit - sorted member array
# line - line of circuit
# stations - all known stations
def infer_relation(self, stations, lines, relation, node_to_continue_id, from_line):
relation = list(relation) # make a copy
start_station = relation[0]
# here also check for intersection
station_id = self.node_intersect_with_any_station(
from_line.end_point_dict[node_to_continue_id], stations.values())
if not station_id and self.close_nodes:
station_id = self.node_within_distance_any_station(
from_line.end_point_dict[node_to_continue_id], stations.values())
if station_id and station_id == start_station.id: # if node to continue is at the starting station --> LOOP
root.debug('Encountered loop: %s', self.to_overpass_string(relation))
return []
elif station_id and station_id != start_station.id:
# if a node is within another station --> FOUND THE 2nd ENDPOINT
station = stations[station_id]
root.debug('%s', str(station))
if from_line.id in station.covered_line_ids:
root.debug('Relation with %s at %s already covered', str(from_line), str(station))
return []
station.covered_line_ids.append(from_line.id)
relation.append(station)
root.debug('Could obtain relation')
return [list(relation)]
# no endpoints encountered - handle line subsection
# at first find all lines that cover the node to continue
relations = []
for line in lines.values():
if from_line.end_point_dict[node_to_continue_id].intersects(line.geom):
if line.id == from_line.id:
continue
root.debug('%s', str(line))
if from_line.end_point_dict[node_to_continue_id].intersects(line.end_point_dict[line.first_node()]):
new_node_to_continue_id = line.last_node()
else:
new_node_to_continue_id = line.first_node()
if new_node_to_continue_id in self.covered_nodes:
relation.append(line)
root.debug('Encountered loop - stopping inference at line (%s): %s', str(line.id),
self.to_overpass_string(relation))
relation.remove(line)
self.covered_nodes.update(line.nodes)
continue
relation_copy = list(relation)
relation_copy.append(line)
self.covered_nodes.update(line.nodes)
self.covered_nodes.remove(new_node_to_continue_id)
relations.extend(self.infer_relation(stations, lines, relation_copy, new_node_to_continue_id, line))
# if not relations:
# root.debug('Could not obtain circuit')
return relations
# noinspection PyMethodMayBeStatic
def to_overpass_string(self, relation):
overpass = ''
for member in relation:
overpass += 'way(' + str(member.id) + ');(._;>;);out;'
return overpass
# noinspection PyMethodMayBeStatic
def circuit_to_overpass_string(self, circuit):
overpass = ''
for member in circuit.members:
overpass += 'way(' + str(member.id) + ');(._;>;);out;'
return overpass
# returns if node is in station
# noinspection PyMethodMayBeStatic
def node_intersect_with_any_station(self, node, stations):
for station in stations:
if node.intersects(station.geom):
return station.id
return None
# returns if node is within a certain distance of a station
def node_within_distance_any_station(self, node, stations):
for station in stations:
distance = self.get_node_station_distance(node, station)
if distance and distance < 50:
return station.id
return None
def get_node_station_distance(self, node, station):
pol_ext = LinearRing(station.geom.exterior.coords)
touch_node = pol_ext.interpolate(pol_ext.project(node))
angle1, angle2, distance = self.geod.inv(touch_node.coords.xy[0], touch_node.coords.xy[1],
node.coords.xy[0], node.coords.xy[1])
if distance and len(distance):
return distance[0]
return None
# noinspection PyMethodMayBeStatic
def num_subs_in_relation(self, relation):
num_stations = 0
for way in relation:
if isinstance(way, Station):
num_stations += 1
return num_stations
# noinspection PyMethodMayBeStatic
def get_close_components(self, components, center_component):
close_components = dict()
for component in components:
distance = center_component.geom.centroid.distance(component.geom.centroid)
if distance <= 300000:
close_components[component.id] = component
return close_components
@staticmethod
def parse_power(power_string):
if not power_string:
return None
power_string = power_string.replace(',', '.').replace('W', '')
try:
if 'k' in power_string:
tokens = power_string.split('k')
return float(tokens[0].strip()) * 1000
elif 'K' in power_string:
tokens = power_string.split('K')
return float(tokens[0].strip()) * 1000
elif 'm' in power_string:
tokens = power_string.split('m')
return float(tokens[0].strip()) * 1000000
elif 'M' in power_string:
tokens = power_string.split('M')
return float(tokens[0].strip()) * 1000000
elif 'g' in power_string:
tokens = power_string.split('g')
return float(tokens[0].strip()) * 1000000000
elif 'G' in power_string:
tokens = power_string.split('G')
return float(tokens[0].strip()) * 1000000000
else:
return float(power_string.strip())
except ValueError:
root.debug('Could not extract power from string %s', power_string)
return None
def create_relations_of_region(self, substations, generators, lines, voltage):
stations = substations.copy()
stations.update(generators)
circuits = []
for substation_id in substations.keys():
close_stations_dict = self.get_close_components(stations.values(), stations[substation_id])
close_lines_dict = self.get_close_components(lines.values(), stations[substation_id])
circuits.extend(self.create_relations(close_stations_dict, close_lines_dict, substation_id, voltage))
return circuits
# noinspection PyMethodMayBeStatic
def remove_duplicates(self, circuits):
root.info('Remove duplicates from %s circuits', str(len(circuits)))
covered_connections = []
filtered_circuits = []
total_line_length = 0
for circuit in circuits:
station1 = circuit.members[0]
station2 = circuit.members[-1]
for line in circuit.members[1:-1]:
total_line_length += line.length
if str(station1.id) + str(station2.id) + str(circuit.voltage) in covered_connections \
or str(station2.id) + str(station1.id) + str(circuit.voltage) in covered_connections:
continue
covered_connections.append(str(station1.id) + str(station2.id) + str(circuit.voltage))
filtered_circuits.append(circuit)
root.info('%s circuits remain', str(len(filtered_circuits)))
root.info('Line length with duplicates is %s meters', str(total_line_length))
return filtered_circuits
@staticmethod
def run_matlab_for_continent(matlab_command, continent_folder, root_log):
matlab_dir = join(dirname(__file__), '../matlab')
try:
log_dir = join(dirname(__file__), '../logs/planet/{0}'.format(continent_folder))
if not exists(log_dir):
makedirs(log_dir)
command = 'cd {0} && {1} -r "transform planet/{2};quit;"| tee ../logs/planet/{2}/transnet_matlab.log' \
.format(matlab_dir, matlab_command, continent_folder)
root_log.info('running MATLAB modeling for {0}'.format(continent_folder))
return_code = call(command, shell=True)
root_log.info('MATLAB return code {0}'.format(return_code))
except Exception as ex:
root_log.error(ex.message)
@staticmethod
def run_matlab_for_countries(matlab_command, continent_folder, root_log):
dirs = [x[0] for x in walk(join(dirname(__file__), '../../transnet-models/{0}/'.format(continent_folder)))]
matlab_dir = join(dirname(__file__), '../matlab')
for DIR in dirs[1:]:
try:
country = DIR.split('/')[-1]
log_dir = join(dirname(__file__), '../logs/{0}/{1}'.format(continent_folder, country))
if not exists(log_dir):
makedirs(log_dir)
command = 'cd {0} && {1} -r "transform {2}/{3};quit;"| tee ../logs/{2}/{3}/transnet_matlab.log' \
.format(matlab_dir, matlab_command, continent_folder, country)
root_log.info('running MATLAB modeling for {0}'.format(country))
return_code = call(command, shell=True)
root_log.info('MATLAB return code {0}'.format(return_code))
except Exception as ex:
root_log.error(ex.message)
# noinspection PyMethodMayBeStatic
def try_parse_int(self, string):
try:
return int(string)
except ValueError:
return 0
# noinspection PyMethodMayBeStatic
def convert_size_mega_byte(self, size):
return size / 1048576.0
def prepare_continent_json(self, continent_name):
with open('meta/{0}.json'.format(continent_name), 'r+') as continent_file:
continent_json = json.load(continent_file)
for country in continent_json:
self.prepare_poly_country(continent_name, country)
boundary = PolyParser.poly_to_polygon('../data/{0}/{1}/pfile.poly'.format(continent_name, country))
where_clause = "st_intersects(l.way, st_transform(st_geomfromtext('" + boundary.wkt + "',4269),3857))"
query = '''SELECT DISTINCT(voltage) AS voltage, count(*)
AS num FROM planet_osm_line l WHERE %s
GROUP BY voltage ORDER BY num DESC''' % where_clause
continent_json[country]['voltages'] = self.get_voltages_from_query(query=query)
continent_file.seek(0)
continent_file.write(json.dumps(continent_json, indent=4))
continent_file.truncate()
def prepare_planet_json(self, continent_name):
with open('meta/planet.json', 'r+') as continent_file:
continent_json = json.load(continent_file)
self.prepare_poly_continent(continent_name)
query = '''SELECT DISTINCT(voltage) AS voltage, count(*) AS num
FROM planet_osm_line l
GROUP BY voltage ORDER BY num DESC'''
continent_json[continent_name]['voltages'] = self.get_voltages_from_query(query=query)
continent_file.seek(0)
continent_file.write(json.dumps(continent_json, indent=4))
continent_file.truncate()
def get_voltages_from_query(self, query):
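# Returns the distinct voltages (100 kV and above, seen on more than 30 ways)
# joined with '|', e.g. '110000|220000|380000' -- the format later consumed
# via voltage_levels.split('|').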
voltages = set()
voltages_string = ''
first_round = True
self.cur.execute(query)
result = self.cur.fetchall()
for (voltage, num) in result:
if num > 30 and voltage:
raw_voltages = [self.try_parse_int(x) for x in str(voltage).strip().split(';')]
voltages = voltages.union(set(raw_voltages))
for voltage in sorted(voltages):
if voltage > 99999:
if first_round:
voltages_string += str(voltage)
first_round = False
else:
voltages_string += '|' + str(voltage)
return voltages_string
def export_to_json(self, all_circuits):
try:
with open('{0}/relations.json'.format(self.destdir), 'w') as outfile:
json.dump([c.serialize() for c in all_circuits], outfile, indent=4)
file_size = self.convert_size_mega_byte(getsize('{0}/relations.json'.format(self.destdir)))
if file_size >= 100:
command = 'split --bytes=50M {0}/relations.json {0}/_relations'.format(self.destdir)
return_code = call(command, shell=True)
root.info('Relation file split return {0}'.format(return_code))
remove('{0}/relations.json'.format(self.destdir))
except Exception as ex:
root.error(ex.message)
def inference_for_voltage(self, voltage_level, where_clause, length_found_lines, equipment_points, all_substations,
all_generators, boundary):
root.info('Infer net for voltage level %sV', voltage_level)
substations = dict()
generators = dict()
lines = dict()
# create lines dictionary
sql = '''SELECT l.osm_id AS id,
st_transform(create_line(l.osm_id), 4326) AS geom,
l.way AS srs_geom,
l.power AS type,
l.name,
l.ref,
l.voltage,
l.cables,
w.nodes,
w.tags,
st_transform(create_point(w.nodes[1]), 4326) AS first_node_geom,
st_transform(create_point(w.nodes[array_length(w.nodes, 1)]), 4326) AS last_node_geom,
ST_Y(ST_Transform(ST_Centroid(l.way),4326)) AS lat,
ST_X(ST_Transform(ST_Centroid(l.way),4326)) AS lon,
st_length(st_transform(l.way, 4326), TRUE) AS spheric_length
FROM planet_osm_line l, planet_osm_ways w
WHERE l.osm_id >= 0
AND l.power ~ 'line|cable|minor_line'
AND l.voltage ~ '%s'
AND l.osm_id = w.id AND %s''' % (voltage_level, where_clause)
self.cur.execute(sql)
result = self.cur.fetchall()
# noinspection PyShadowingBuiltins
for (id, geom, srs_geom, type, name, ref, voltage, cables, nodes, tags, first_node_geom, last_node_geom,
lat, lon, length) in result:
line = wkb.loads(geom, hex=True)
raw_geom = geom
srs_line = wkb.loads(srs_geom, hex=True)
length_found_lines += length
first_node = wkb.loads(first_node_geom, hex=True)
last_node = wkb.loads(last_node_geom, hex=True)
end_points_geom_dict = dict()
end_points_geom_dict[nodes[0]] = first_node
end_points_geom_dict[nodes[-1]] = last_node
lines[id] = Line(id, line, srs_line, type, name.replace(',', ';') if name else None,
ref.replace(',', ';') if ref is not None else None,
voltage.replace(',', ';').replace('/', ';') if voltage else None, cables,
nodes, tags, lat, lon,
end_points_geom_dict, length, raw_geom)
equipment_points.append((lat, lon))
root.info('Found %s lines', str(len(result)))
# create station dictionary by quering only ways
sql = '''SELECT DISTINCT(p.osm_id) AS id,
st_transform(p.way, 4326) AS geom,
p.power AS type,
p.name,
p.ref,
p.voltage,
p.tags,
ST_Y(ST_Transform(ST_Centroid(p.way),4326)) AS lat,
ST_X(ST_Transform(ST_Centroid(p.way),4326)) AS lon
FROM planet_osm_line l, planet_osm_polygon p
WHERE l.osm_id >= 0
AND p.osm_id >= 0
AND p.power ~ 'substation|station|sub_station'
AND (p.voltage ~ '%s' OR (p.voltage = '') IS NOT FALSE)
AND l.power ~ 'line|cable|minor_line'
AND l.voltage ~ '%s' AND %s''' % (self.voltage_levels, voltage_level, where_clause)
if self.close_nodes:
sql += ''' AND (st_intersects(l.way, p.way) OR st_distance(l.way, p.way) < 100)'''
else:
sql += ''' AND st_intersects(l.way, p.way)'''
self.cur.execute(sql)
result = self.cur.fetchall()
# noinspection PyShadowingBuiltins
for (id, geom, type, name, ref, voltage, tags, lat, lon) in result:
if id not in all_substations:
polygon = wkb.loads(geom, hex=True)
raw_geom = geom
substations[id] = Station(id, polygon, type, name, ref,
voltage.replace(',', ';').replace('/', ';') if voltage else None,
None, tags, lat, lon, raw_geom)
equipment_points.append((lat, lon))
else:
substations[id] = all_substations[id]
root.info('Found %s stations', str(len(equipment_points)))
# add power plants with area
sql = '''SELECT DISTINCT(p.osm_id) AS id,
st_transform(p.way, 4326) AS geom,
p.power AS type,
p.name,
p.ref,
p.voltage,
p.\"plant:output:electricity\" AS output1,
p.\"generator:output:electricity\" AS output2,
p.tags,
ST_Y(ST_Transform(ST_Centroid(p.way),4326)) AS lat,
ST_X(ST_Transform(ST_Centroid(p.way),4326)) AS lon
FROM planet_osm_line l, planet_osm_polygon p
WHERE l.osm_id >= 0
AND p.osm_id >= 0
AND p.power ~ 'plant|generator'
AND l.power ~ 'line|cable|minor_line'
AND l.voltage ~ '%s' AND %s''' % (voltage_level, where_clause)
if self.close_nodes:
sql += ''' AND (st_intersects(l.way, p.way) OR st_distance(l.way, p.way) < 100)'''
else:
sql += ''' AND st_intersects(l.way, p.way)'''
self.cur.execute(sql)
result = self.cur.fetchall()
# noinspection PyShadowingBuiltins
for (id, geom, type, name, ref, voltage, output1, output2, tags, lat, lon) in result:
if id not in all_generators:
polygon = wkb.loads(geom, hex=True)
raw_geom = geom
generators[id] = Station(id, polygon, type, name, ref,
voltage.replace(',', ';').replace('/', ';') if voltage else None,
None, tags, lat, lon, raw_geom)
generators[id].nominal_power = self.parse_power(
output1) if output1 is not None else self.parse_power(output2)
equipment_points.append((lat, lon))
else:
generators[id] = all_generators[id]
root.info('Found %s generators', str(len(generators)))
if boundary:
circuits = self.create_relations_of_region(substations, generators, lines, voltage_level)
else:
stations = substations.copy()
stations.update(generators)
circuits = self.create_relations(stations, lines, self.ssid, voltage_level)
return length_found_lines, equipment_points, generators, substations, circuits
def find_missing_data_for_country(self):
root.info('Finding missing data')
if not exists(self.destdir):
makedirs(self.destdir)
if self.poly:
boundary = PolyParser.poly_to_polygon(self.poly)
where_clause = "st_intersects(l.way, st_transform(st_geomfromtext('" + boundary.wkt + "',4269),3857))"
where_clause_station = "st_intersects(p.way, st_transform(st_geomfromtext('" + \
boundary.wkt + "',4269),3857))"
elif self.bpoly:
boundary = wkt.loads(self.bpoly)
where_clause = "st_intersects(l.way, st_transform(st_geomfromtext('" + boundary.wkt + "',4269),3857))"
where_clause_station = "st_intersects(p.way, st_transform(st_geomfromtext('" + \
boundary.wkt + "',4269),3857))"
else:
where_clause = "st_distance(l.way, (select way from planet_osm_polygon where osm_id = " + str(
self.ssid) + ")) <= 300000"
where_clause_station = "st_distance(p.way, (select way from planet_osm_polygon where osm_id = " + str(
self.ssid) + ")) <= 300000"
voltages_line = set()
voltages_cable = set()
voltages_minor_line = set()
line_voltage_query = '''SELECT DISTINCT(voltage) AS voltage, power as power_type, count(*) AS num
FROM planet_osm_line l WHERE %s
GROUP BY power, voltage''' % where_clause
self.cur.execute(line_voltage_query)
result_voltages = self.cur.fetchall()
for (voltage, power_type, num) in result_voltages:
if num > 30 and voltage:
raw_voltages = [self.try_parse_int(x) for x in str(voltage).strip().split(';')]
if power_type == 'line':
voltages_line = voltages_line.union(set(raw_voltages))
elif power_type == 'cable':
voltages_cable = voltages_cable.union(set(raw_voltages))
elif power_type == 'minor_line':
voltages_minor_line = voltages_minor_line.union(set(raw_voltages))
cables_line = set()
cables_cable = set()
cables_minor_line = set()
line_cables_query = '''SELECT DISTINCT(cables) AS cables, power as power_type, count(*) AS num
FROM planet_osm_line l WHERE %s
GROUP BY power, cables''' % where_clause
self.cur.execute(line_cables_query)
result_cables = self.cur.fetchall()
for (cables, power_type, num) in result_cables:
if num > 30 and cables:
raw_cables = [self.try_parse_int(x) for x in str(cables).strip().split(';')]
if power_type == 'line':
cables_line = cables_line.union(set(raw_cables))
elif power_type == 'cable':
cables_cable = cables_cable.union(set(raw_cables))
elif power_type == 'minor_line':
cables_minor_line = cables_minor_line.union(set(raw_cables))
voltages_line_str = ';'.join([str(x) for x in voltages_line])
cables_line_str = ';'.join([str(x) for x in cables_line])
voltages_cable_str = ';'.join([str(x) for x in voltages_cable])
cables_cable_str = ';'.join([str(x) for x in cables_cable])
voltages_minor_line_str = ';'.join([str(x) for x in voltages_minor_line])
cables_minor_line_str = ';'.join([str(x) for x in cables_minor_line])
lines = dict()
lines_sql = '''SELECT l.osm_id AS osm_id,
st_transform(create_line(l.osm_id), 4326) AS geom,
l.way AS srs_geom, l.power AS power_type,
l.name, l.ref, l.voltage, l.cables, w.nodes, w.tags,
st_transform(create_point(w.nodes[1]), 4326) AS first_node_geom,
st_transform(create_point(w.nodes[array_length(w.nodes, 1)]), 4326) AS last_node_geom,
ST_Y(ST_Transform(ST_Centroid(l.way),4326)) AS lat,
ST_X(ST_Transform(ST_Centroid(l.way),4326)) AS lon,
st_length(st_transform(l.way, 4326), TRUE) AS spheric_length
FROM planet_osm_line l, planet_osm_ways w
WHERE l.osm_id >= 0 AND l.power ~ 'line|cable|minor_line'
AND (l.voltage IS NULL OR l.cables IS NULL) AND l.osm_id = w.id AND %s''' % where_clause
self.cur.execute(lines_sql)
lines_result = self.cur.fetchall()
for (osm_id, geom, srs_geom, power_type, name, ref, voltage, cables, nodes, tags, first_node_geom,
last_node_geom, lat, lon, length) in lines_result:
line = wkb.loads(geom, hex=True)
raw_geom = geom
srs_line = wkb.loads(srs_geom, hex=True)
first_node = wkb.loads(first_node_geom, hex=True)
last_node = wkb.loads(last_node_geom, hex=True)
end_points_geom_dict = dict()
end_points_geom_dict[nodes[0]] = first_node
end_points_geom_dict[nodes[-1]] = last_node
temp_line = Line(osm_id, line, srs_line, power_type, name.replace(',', ';') if name else None,
ref.replace(',', ';') if ref is not None else None,
voltage.replace(',', ';').replace('/', ';') if voltage else None, cables,
nodes, tags, lat, lon,
end_points_geom_dict, length, raw_geom)
if power_type == 'line':
temp_line.add_missing_data_estimation(voltage=voltages_line_str, cables=cables_line_str)
elif power_type == 'cable':
temp_line.add_missing_data_estimation(voltage=voltages_cable_str, cables=cables_cable_str)
elif power_type == 'minor_line':
temp_line.add_missing_data_estimation(voltage=voltages_minor_line_str, cables=cables_minor_line_str)
if power_type in ['line', 'cable', 'minor_line']:
lines[osm_id] = temp_line
with open('{0}/lines_missing_data.json'.format(self.destdir), 'w') as outfile:
json.dump([l.serialize() for osm_id, l in lines.iteritems()], outfile, indent=4)
file_size = self.convert_size_mega_byte(getsize('{0}/lines_missing_data.json'.format(self.destdir)))
if file_size >= 100:
command = 'split --bytes=50M {0}/lines_missing_data.json {0}/_lines_missing_data'.format(self.destdir)
return_code = call(command, shell=True)
root.info('Lines Missing Data file split return {0}'.format(return_code))
remove('{0}/lines_missing_data.json'.format(self.destdir))
stations_missing_connections_sql = '''SELECT DISTINCT
p.osm_id AS osm_id,
st_transform(p.way, 4326) AS geom,
p.power AS power_type,
p.name,
p.ref,
p.voltage,
p.tags,
ST_Y(ST_Transform(ST_Centroid(p.way), 4326)) AS lat,
ST_X(ST_Transform(ST_Centroid(p.way), 4326)) AS lon
FROM planet_osm_polygon p
WHERE %s
EXCEPT
SELECT DISTINCT
p.osm_id AS osm_id,
st_transform(p.way, 4326) AS geom,
p.power AS power_type,
p.name,
p.ref,
p.voltage,
p.tags,
ST_Y(ST_Transform(ST_Centroid(p.way), 4326)) AS lat,
ST_X(ST_Transform(ST_Centroid(p.way), 4326)) AS lon
FROM planet_osm_line l, planet_osm_polygon p
WHERE %s
AND l.osm_id >= 0
AND p.osm_id >= 0
AND p.power ~ 'substation|station|sub_station|plant|generator'
AND l.power ~ 'line|cable|minor_line'
AND st_intersects(l.way, p.way);''' % \
(where_clause_station, where_clause)
stations_missing_voltage = '''SELECT DISTINCT
p.osm_id AS osm_id,
st_transform(p.way, 4326) AS geom,
p.power AS power_type,
p.name,
p.ref,
p.voltage,
p.tags,
ST_Y(ST_Transform(ST_Centroid(p.way), 4326)) AS lat,
ST_X(ST_Transform(ST_Centroid(p.way), 4326)) AS lon
FROM planet_osm_polygon p
WHERE %s
AND p.voltage IS NULL;''' % where_clause_station
stations_voltages = '''SELECT
p.voltage AS voltage,
p.power AS power_type,
count(*) AS num
FROM planet_osm_polygon p
WHERE %s
GROUP BY power, voltage;''' % where_clause_station
voltages_substations = set()
voltages_stations = set()
voltages_plant = set()
self.cur.execute(stations_voltages)
result_station_voltages = self.cur.fetchall()
for (voltage, power_type, num) in result_station_voltages:
if num > 30 and voltage:
raw_voltages = [self.try_parse_int(x) for x in str(voltage).strip().split(';')]
if power_type in ['substation', 'sub_station']:
voltages_substations = voltages_substations.union(set(raw_voltages))
elif power_type == 'station':
voltages_stations = voltages_stations.union(set(raw_voltages))
elif power_type in ['plant', 'generator']:
voltages_plant = voltages_plant.union(set(raw_voltages))
voltages_substations_str = ';'.join([str(x) for x in voltages_substations])
voltages_stations_str = ';'.join([str(x) for x in voltages_stations])
voltages_plant_str = ';'.join([str(x) for x in voltages_plant])
stations_missing_data = dict()
self.cur.execute(stations_missing_connections_sql)
result_stations_missing_connection = self.cur.fetchall()
for (osm_id, geom, power_type, name, ref, voltage, tags, lat, lon) in result_stations_missing_connection:
if osm_id not in stations_missing_data:
polygon = wkb.loads(geom, hex=True)
raw_geom = geom
temp_station = Station(osm_id, polygon, power_type, name, ref,
voltage.replace(',', ';').replace('/', ';') if voltage else None,
None, tags, lat, lon, raw_geom)
temp_station.add_missing_connection()
if power_type in ['substation', 'sub_station']:
temp_station.add_missing_data_estimation(voltage=voltages_substations_str)
elif power_type == 'station':
temp_station.add_missing_data_estimation(voltage=voltages_stations_str)
elif power_type in ['plant', 'generator']:
temp_station.add_missing_data_estimation(voltage=voltages_plant_str)
if power_type in ['substation', 'sub_station', 'station', 'plant', 'generator']:
stations_missing_data[osm_id] = temp_station
self.cur.execute(stations_missing_voltage)
result_stations_missing_voltage = self.cur.fetchall()
for (osm_id, geom, power_type, name, ref, voltage, tags, lat, lon) in result_stations_missing_voltage:
if osm_id not in stations_missing_data:
polygon = wkb.loads(geom, hex=True)
raw_geom = geom
temp_station = Station(osm_id, polygon, power_type, name, ref,
voltage.replace(',', ';').replace('/', ';') if voltage else None,
None, tags, lat, lon, raw_geom)
if power_type in ['substation', 'sub_station']:
temp_station.add_missing_data_estimation(voltage=voltages_substations_str)
elif power_type == 'station':
temp_station.add_missing_data_estimation(voltage=voltages_stations_str)
elif power_type in ['plant', 'generator']:
temp_station.add_missing_data_estimation(voltage=voltages_plant_str)
if power_type in ['substation', 'sub_station', 'station', 'plant', 'generator']:
stations_missing_data[osm_id] = temp_station
with open('{0}/stations_missing_data.json'.format(self.destdir), 'w') as outfile:
json.dump([s.serialize() for osm_id, s in stations_missing_data.iteritems()], outfile, indent=4)
file_size = self.convert_size_mega_byte(getsize('{0}/stations_missing_data.json'.format(self.destdir)))
if file_size >= 100:
command = 'split --bytes=50M {0}/stations_missing_data.json {0}/_stations_missing_data'.format(self.destdir)
return_code = call(command, shell=True)
root.info('Stations Missing Data file split return {0}'.format(return_code))
remove('{0}/stations_missing_data.json'.format(self.destdir))
def run(self):
if self.whole_planet and self.chose_continent:
with open('meta/planet.json') as continent_file:
continent_json = json.load(continent_file)
try:
self.voltage_levels = continent_json[self.chose_continent]['voltages']
self.poly = '../data/planet/{0}/pfile.poly'.format(self.chose_continent)
self.destdir = '../../transnet-models/planet/{0}/'.format(self.chose_continent)
if self.voltage_levels:
self.reset_params()
self.modeling(self.chose_continent)
if self.find_missing_data:
self.find_missing_data_for_country()
except Exception as ex:
root.error(ex.message)
elif self.chose_continent:
with open('meta/{0}.json'.format(self.chose_continent)) as continent_file:
continent_json = json.load(continent_file)
for country in continent_json:
try:
self.voltage_levels = continent_json[country]['voltages']
self.poly = '../data/{0}/{1}/pfile.poly'.format(self.chose_continent, country)
self.destdir = '../../transnet-models/{0}/{1}/'.format(self.chose_continent, country)
if self.voltage_levels:
self.reset_params()
self.modeling(country)
if self.find_missing_data:
self.find_missing_data_for_country()
except Exception as ex:
root.error(ex.message)
else:
self.modeling(self.db_name)
if self.find_missing_data:
self.find_missing_data_for_country()
def modeling(self, country_name):
# create dest dir
if not exists(self.destdir):
makedirs(self.destdir)
root.info('Infer for %s', country_name)
time = datetime.now()
# build location where clause for succeeding queries
boundary = None
if self.poly:
boundary = PolyParser.poly_to_polygon(self.poly)
where_clause = "st_intersects(l.way, st_transform(st_geomfromtext('" + boundary.wkt + "',4269),3857))"
elif self.bpoly:
boundary = wkt.loads(self.bpoly)
where_clause = "st_intersects(l.way, st_transform(st_geomfromtext('" + boundary.wkt + "',4269),3857))"
else:
where_clause = "st_distance(l.way, (select way from planet_osm_polygon where osm_id = " + str(
self.ssid) + ")) <= 300000"
# do inference for each voltage level
all_circuits = []
all_substations = dict()
all_generators = dict()
equipment_points = []
length_found_lines = 0
for voltage_level in self.voltage_levels.split('|'):
(length_found_lines, equipment_points, generators, substations, circuits) = self.inference_for_voltage(
voltage_level, where_clause, length_found_lines, equipment_points,
all_substations, all_generators, boundary)
all_generators.update(generators)
all_substations.update(substations)
all_circuits.extend(circuits)
root.info('Total length of all found lines is %s meters', str(length_found_lines))
equipments_multipoint = MultiPoint(equipment_points)
map_centroid = equipments_multipoint.centroid
logging.debug('Centroid lat:%lf, lon:%lf', map_centroid.x, map_centroid.y)
all_circuits = self.remove_duplicates(all_circuits)
root.info('Inference took %s', str(datetime.now() - time))
self.export_to_json(all_circuits)
partition_by_station_dict = None
population_by_station_dict = None
cities = None
if self.load_estimation:
root.info('Start partitioning into Voronoi-portions')
load_estimator = LoadEstimator(all_substations, boundary)
partition_by_station_dict, population_by_station_dict = load_estimator.partition()
cities = load_estimator.cities
if self.topology:
root.info('Plot inferred transmission system topology')
plotter = Plotter(self.voltage_levels)
plotter.plot_topology(all_circuits, equipments_multipoint, partition_by_station_dict, cities, self.destdir)
try:
root.info('CSV generation started ...')
csv_writer = CSVWriter(all_circuits, root)
csv_writer.publish(self.destdir + '/csv')
except Exception as ex:
root.error(ex.message)
try:
root.info('CIM model generation started ...')
cim_writer = CimWriter(all_circuits, map_centroid, population_by_station_dict, self.voltage_levels,
country_name, len(all_substations))
cim_writer.publish(self.destdir + '/cim')
except Exception as ex:
root.error(ex.message)
###########################################################
if self.overpass:
for circuit in all_circuits:
root.info(self.circuit_to_overpass_string(circuit))
for circuit in all_circuits:
for line in circuit.members[1:-1]:
if line.id not in self.all_lines:
self.length_all += line.length
self.all_lines[line.id] = line.id
root.info('All lines length without duplicates %d', round(self.length_all / 1000))
self.length_all = 0
for circuit in all_circuits:
for line in circuit.members[1:-1]:
self.length_all += line.length
root.info('All lines length with duplicates %d', round(self.length_all / 1000))
for circuit in all_circuits:
sts = [circuit.members[0], circuit.members[-1]]
for st in sts:
if st.id not in self.all_stations:
self.all_stations[st.id] = 1
else:
self.all_stations[st.id] += 1
root.info('All Stations count %d', len(self.all_stations))
for circuit in all_circuits:
for gen in [circuit.members[0], circuit.members[-1]]:
if gen.type in ['plant', 'generator']:
if gen.id not in self.all_power_planet:
self.all_power_planet[gen.id] = '%s_%s' % (gen.lat, gen.lon)
root.info('All power plants count %s', len(self.all_power_planet))
#####################################################
if self.validate:
validator = InferenceValidator(self.cur)
if boundary:
all_stations = all_substations.copy()
all_stations.update(all_generators)
validator.validate2(all_circuits, all_stations, boundary, self.voltage_levels)
else:
validator.validate(self.ssid, all_circuits, None, self.voltage_levels)
root.info('Took %s in total', str(datetime.now() - time))
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-D", "--dbname", action="store", dest="dbname",
help="database name of the topology network")
parser.add_option("-H", "--dbhost", action="store", dest="dbhost",
help="database host address of the topology network")
parser.add_option("-P", "--dbport", action="store", dest="dbport",
help="database port of the topology network")
parser.add_option("-U", "--dbuser", action="store", dest="dbuser",
help="database user name of the topology network")
parser.add_option("-X", "--dbpwrd", action="store", dest="dbpwrd",
help="database user password of the topology network")
parser.add_option("-s", "--ssid", action="store", dest="ssid",
help="substation id to start the inference from")
parser.add_option("-p", "--poly", action="store", dest="poly",
help="poly file that defines the region to perform the inference for")
parser.add_option("-b", "--bpoly", action="store", dest="bounding_polygon",
help="defines the region to perform the inference for within the specified polygon in WKT, e.g."
"'POLYGON((128.74 41.68, 142.69 41.68, 142.69 30.84, 128.74 30.84, 128.74 41.68))'")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
help="enable verbose logging")
parser.add_option("-e", "--evaluate", action="store_true", dest="evaluate",
help="enable inference-to-existing-relation evaluation")
parser.add_option("-t", "--topology", action="store_true", dest="topology",
help="enable plotting topology graph")
parser.add_option("-V", "--voltage", action="store", dest="voltage_levels",
help="voltage levels in format 'level 1|...|level n', e.g. '220000|380000'")
parser.add_option("-l", "--loadestimation", action="store_true", dest="load_estimation",
help="enable load estimation based on Voronoi partitions")
parser.add_option("-d", "--destdir", action="store", dest="destdir",
help="destination of the inference results; "
"results will be stored in directory transnet/models/<destdir>")
parser.add_option("-c", "--continent", action="store", dest="continent",
help="name of continent, options: 'africa', 'antarctica', 'asia', "
"'australia-oceania', 'central-america', 'europe', 'north-america', 'south-america' ")
parser.add_option("-m", "--matlab", action="store", dest="matlab",
help="run matlab for all countries in continent modeling")
parser.add_option("-j", "--preparejson", action="store_true", dest="prepare_json",
help="prepare json files of planet")
parser.add_option("-g", "--globe", action="store_true", dest="whole_planet",
help="run global commmands")
parser.add_option("-f", "--findmissing", action="store_true", dest="find_missing",
help="find missing data from OSM")
parser.add_option("-n", "--closenodes", action="store_true", dest="close_nodes",
help="Include nodes close to station")
parser.add_option("-o", "--overpass", action="store_true", dest="overpass",
help="Print overpass string")
(options, args) = parser.parse_args()
# get connection data via command line or set to default values
dbname = options.dbname
dbhost = options.dbhost if options.dbhost else '127.0.0.1'
dbport = options.dbport if options.dbport else '5432'
dbuser = options.dbuser
dbpwrd = options.dbpwrd
ssid = options.ssid if options.ssid else '23025610'
poly = options.poly
bpoly = options.bounding_polygon
verbose = options.verbose if options.verbose else False
validate = options.evaluate if options.evaluate else False
topology = options.topology if options.topology else False
voltage_levels = options.voltage_levels
load_estimation = options.load_estimation if options.load_estimation else False
destdir = '../models/countries/' + options.destdir if options.destdir else '../results'
continent = options.continent
matlab = options.matlab
# configure logging
ch = logging.StreamHandler(sys.stdout)
if verbose:
ch.setLevel(logging.DEBUG)
else:
ch.setLevel(logging.INFO)
root.addHandler(ch)
if matlab and continent:
if options.whole_planet:
Transnet.run_matlab_for_continent(matlab, continent, root)
else:
Transnet.run_matlab_for_countries(matlab, continent, root)
exit()
try:
logging.info("Running for %s " % destdir)
logging.info("Running for %s " % dbname)
transnet_instance = Transnet(_database=dbname, _host=dbhost, _port=dbport,
_user=dbuser, _password=dbpwrd, _ssid=ssid,
_poly=poly, _bpoly=bpoly, _verbose=verbose,
_validate=validate, _topology=topology, _voltage_levels=voltage_levels,
_load_estimation=load_estimation, _destdir=destdir, _continent=continent,
_whole_planet=options.whole_planet, _find_missing_data=options.find_missing,
_close_nodes=options.close_nodes, _overpass=options.overpass)
if options.prepare_json and continent:
transnet_instance.prepare_continent_json(continent)
if options.whole_planet:
transnet_instance.prepare_planet_json(continent)
else:
transnet_instance.run()
logging.info("#################################################")
except Exception as e:
root.error(e.message)
parser.print_help()
exit()
``` |
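For reference, a small sketch of the unit handling in `Transnet.parse_power` (hypothetical OSM tag values; assumes the transnet modules and their dependencies are importable, and no database connection is needed for this call):
```python
from Transnet import Transnet

assert Transnet.parse_power('600 kW') == 600000.0
assert Transnet.parse_power('20 MW') == 20000000.0
assert Transnet.parse_power('1,5 GW') == 1500000000.0   # ',' is treated as a decimal separator
assert Transnet.parse_power('unknown') is None          # unparsable values are logged and skipped
```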